DELETED 34to35.tcl Index: 34to35.tcl ================================================================== --- 34to35.tcl +++ /dev/null @@ -1,1006 +0,0 @@ -# -# Run this TCL script to generate HTML for the goals.html file. -# -set rcsid {$Id: 34to35.tcl,v 1.4 2007/10/01 13:54:11 drh Exp $} -source common.tcl -header {SQLite Changes From Version 3.4.2 To 3.5.0} - -proc CODE {text} { - puts "
"
-  puts $text
-  puts "
" -} -proc SYNTAX {text} { - puts "
"
-  set t2 [string map {& & < < > >} $text]
-  regsub -all "/(\[^\n/\]+)/" $t2 {\1} t3
-  puts "$t3"
-  puts "
" -} -proc IMAGE {name {caption {}}} { - puts "
" - if {$caption!=""} { - puts "
$caption" - } - puts "
" -} -proc PARAGRAPH {text} { - # regsub -all "/(\[a-zA-Z0-9\]+)/" $text {\1} t2 - #regsub -all "\\*(\[^\n*\]+)\\*" $text {\1} t3 - regsub -all {\[([^]\n]+)\]} $text {[resolve_link \1]} t3 - puts "

[subst -novar -noback $t3]

\n" -} -proc resolve_link {args} { - set a2 [split $args |] - set id [string trim [lindex $a2 0]] - if {[lindex $a2 1]==""} { - set display [string trim [lindex $a2 0]] - } else { - set display [string trim [lrange $a2 1 end]] - } - regsub -all {[^a-zA-Z0-9_]} $id {} id - return "$display" -} -set level(0) 0 -set level(1) 0 -proc HEADING {n name {tag {}}} { - if {$tag!=""} { - puts "" - } - global level - incr level($n) - for {set i [expr {$n+1}]} {$i<10} {incr i} { - set level($i) 0 - } - if {$n==0} { - set num {} - } elseif {$n==1} { - set num $level(1).0 - } else { - set num $level(1) - for {set i 2} {$i<=$n} {incr i} { - append num .$level($i) - } - } - incr n 1 - puts "$num $name" -} - -HEADING 0 {Moving From SQLite 3.4.2 to 3.5.0} - -PARAGRAPH { - SQLite version 3.5.0 introduces a new OS interface layer that - is incompatible with all prior versions of SQLite. In addition, - a few existing interfaces have been generalized to work across all - database connections within a process rather than just all - connections within a thread. The purpose of this article - is to describe the changes to 3.5.0 in detail so that users - of prior versions of SQLite can judge what, if any, effort will - be required to upgrade to newer versions. -} - -HEADING 1 {Overview Of Changes} - -PARAGRAPH { - A quick enumeration of the changes in SQLite version 3.5.0 - is provide here. Subsequent sections will describe these - changes in more detail. -} -PARAGRAPH { -
    -
  1. The OS interface layer has been completely reworked: -
      -
    1. The undocumented sqlite3_os_switch() interface has - been removed.
    2. -
    3. The SQLITE_ENABLE_REDEF_IO compile-time flag no longer functions. - I/O procedures are now always redefinable.
    4. -
    5. Three new objects are defined for specifying I/O procedures: - [sqlite3_vfs], [sqlite3_file], and [sqlite3_io_methods].
    6. -
    7. Three new interfaces are used to create alternative OS interfaces: - [sqlite3_vfs_register()], [sqlite3_vfs_unregister()], and - [sqlite3_vfs_find()].
    8. -
    9. A new interface has been added to provided additional control over - the creation of new database connections: [sqlite3_open_v2()]. - The legacy interfaces of [sqlite3_open()] and - [sqlite3_open16()] continue to be fully supported.
    10. -
  2. -
  3. The optional shared cache and memory management features that - were introduced in version 3.3.0 can now be used across multiple - threads within the same process. Formerly, these extensions only - applied to database connections operating within a single thread. -
      -
    1. The [sqlite3_enable_shared_cache()] interface now applies to all - threads within a process, not to just the one thread in which it - was run.
    2. -
    3. The [sqlite3_soft_heap_limit()] interface now applies to all threads - within a process, not to just the one thread in which it was run.
    4. -
    5. The [sqlite3_release_memory()] interface will now attempt to reduce - the memory usages across all database connections in all threads, not - just connections in the thread where the interface is called.
    6. -
    7. The [sqlite3_thread_cleanup()] interface has become a no-op.
    8. -
  4. -
  5. Restrictions on the use of the same database connection by multiple - threads have been dropped. It is now safe for - multiple threads to use the same database connection at the same - time.
  6. -
  7. There is now a compile-time option that allows an application to - define alternative malloc()/free() implementations without having - to modify any core SQLite code.
  8. -
  9. There is now a compile-time option that allows an application to - define alternative mutex implementations without having - to modify any core SQLite code.
  10. -
-} -PARAGRAPH { - Of these changes, only 1a and 2a through 2c are incompatibilities - in any formal sense. - But users who have previously made custom modifications to the - SQLite source (for example to add a custom OS layer for embedded - hardware) might find that these changes have a larger impact. - On the other hand, an important goal of these changes is to make - it much easier to customize SQLite for use on different operating - systems. -} - -HEADING 1 {The OS Interface Layer} - -PARAGRAPH { - If your system defines a custom OS interface for SQLite or if you - were using the undocumented sqlite3_os_switch() - interface, then you will need to make modifications in order to - upgrade to SQLite version 3.5.0. This may seem painful at first - glance. But as you look more closely, you will probably discover - that your changes are made smaller and easier to understand and manage - by the new SQLite interface. It is likely that your changes will - now also work seamlessly with the SQLite amalgamation. You will - no longer need to make any changes to the code SQLite source code. - All of your changes can be effected by application code and you can - link against a standard, unmodified version of the SQLite amalgamation. - Furthermore, the OS interface layer, which was formerly undocumented, - is now an officially support interface for SQLite. So you have - some assurance that this will be a one-time change and that your - new backend will continue to work in future versions of SQLite. -} - -HEADING 2 {The Virtual File System Object} - -PARAGRAPH { - The new OS interface for SQLite is built around an object named - [sqlite3_vfs]. The "vfs" standard for "Virtual File System". - The sqlite3_vfs object is basically a structure containing pointers - to functions that implement the primitive disk I/O operations that - SQLite needs to perform in order to read and write databases. - In this article, we will often refer a sqlite3_vfs objects as a "VFS". 
-} - -PARAGRAPH { - SQLite is able to use multiple VFSes at the same time. Each - individual database connection is associated with just one VFS. - But if you have multiple database connections, each connection - can be associated with a different VFS. -} - -PARAGRAPH { - There is always a default VFS. - The legacy interfaces [sqlite3_open()] and [sqlite3_open16()] always - use the default VFS. - The new interface for creating database connections, - [sqlite3_open_v2()], allows you to specify which VFS you want to - use by name. -} - -HEADING 3 {Registering New VFS Objects} - -PARAGRAPH { - Standard builds of SQLite for unix or windows come with a single - VFS named "unix" or "win32", as appropriate. This one VFS is also - the default. So if you are using the legacy open functions, everything - will continue to operate as it has before. The change is that an application - now has the flexibility of adding new VFS modules to implement a - customized OS layer. The [sqlite3_vfs_register()] API can be used - to tell SQLite about one or more application-defined VFS modules: -} - -CODE { -int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); -} - -PARAGRAPH { - Applications can call sqlite3_vfs_register at any time, though of course - a VFS needs to be registered before it can be used. The first argument - is a pointer to a customized VFS object that the application has prepared. - The second argument is true to make the new VFS the default VFS so that - it will be used by the legacy [sqlite3_open()] and [sqlite3_open16()] APIs. - If the new VFS is not the default, then you will probably have to use - the new [sqlite3_open_v2()] API to use it. Note, however, that if - a new VFS is the only VFS known to SQLite (if SQLite was compiled without - its usual default VFS or if the pre-compiled default VFS was removed - using [sqlite3_vfs_unregister()]) then the new VFS automatic becomes the - default VFS regardless of the makeDflt argument to [sqlite3_vfs_register()]. 
-} - -PARAGRAPH { - Standard builds include the default "unix" or "win32" VFSes. - But if you use the -DOS_OTHER=1 compile-time option, then SQLite is - built without a default VFS. In that case, the application must - register at least one VFS prior to calling [sqlite3_open()]. - This is the approach that embedded applications should use. - Rather than modifying the SQLite source to insert an alternative - OS layer as was done in prior releases of SQLite, instead compile - an unmodified SQLite source file (preferably the amalgamation) - with the -DOS_OTHER=1 option, then invoke [sqlite3_vfs_register()] - to define the interface to the underlying filesystem prior to - creating any database connections. -} - -HEADING 3 {Additional Control Over VFS Objects} - -PARAGRAPH { - The [sqlite3_vfs_unregister()] API is used to remove an existing - VFS from the system. -} - -CODE { -int sqlite3_vfs_unregister(sqlite3_vfs*); -} - -PARAGRAPH { - The [sqlite3_vfs_find()] API is used to locate a particular VFS - by name. Its prototype is as follows: -} - -CODE { -sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); -} - -PARAGRAPH { - The argument is the symbolic name for the desired VFS. If the - argument is a NULL pointer, then the default VFS is returned. - The function returns a pointer to the [sqlite3_vfs] object that - implements the VFS. Or it returns a NULL pointer if no object - could be found that matched the search criteria. -} - -HEADING 3 {Modifications Of Existing VFSes} - -PARAGRAPH { - Once a VFS has been registered, it should never be modified. If - a change in behavior is required, a new VFS should be registered. - The application could, perhaps, use [sqlite3_vfs_find()] to locate - the old VFS, make a copy of the old VFS into a new [sqlite3_vfs] - object, make the desired modifications to the new VFS, unregister - the old VFS, then register the new VFS in its place. 
Existing - database connections would continue to use the old VFS even after - it is unregistered, but new database connections would use the - new VFS. -} - -HEADING 3 {The VFS Object} - -PARAGRAPH { - A VFS object is an instance of the following structure: -} - -CODE { -typedef struct sqlite3_vfs sqlite3_vfs; -struct sqlite3_vfs { - int iVersion; /* Structure version number */ - int szOsFile; /* Size of subclassed sqlite3_file */ - int mxPathname; /* Maximum file pathname length */ - sqlite3_vfs *pNext; /* Next registered VFS */ - const char *zName; /* Name of this virtual file system */ - void *pAppData; /* Pointer to application-specific data */ - int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, - int flags, int *pOutFlags); - int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); - int (*xAccess)(sqlite3_vfs*, const char *zName, int flags); - int (*xGetTempName)(sqlite3_vfs*, char *zOut); - int (*xFullPathname)(sqlite3_vfs*, const char *zName, char *zOut); - void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); - void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); - void *(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol); - void (*xDlClose)(sqlite3_vfs*, void*); - int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); - int (*xSleep)(sqlite3_vfs*, int microseconds); - int (*xCurrentTime)(sqlite3_vfs*, double*); - /* New fields may be appended in figure versions. The iVersion - ** value will increment whenever this happens. */ -}; -} - -PARAGRAPH { - To create a new VFS, an application fills in an instance of this - structure with appropriate values and then calls [sqlite3_vfs_register()]. -} - -PARAGRAPH { - The iVersion field of [sqlite3_vfs] should be 1 for SQLite version 3.5.0. - This number may increase in future versions of SQLite if we have to - modify the VFS object in some way. We hope that this never happens, - but the provision is made in case it does. 
-} - -PARAGRAPH { - The szOsFile field is the size in bytes of the structure that defines - an open file: the [sqlite3_file] object. This object will be described - more fully below. The point here is that each VFS implementation can - define its own [sqlite3_file] object containing whatever information - the VFS implementation needs to store about an open file. SQLite needs - to know how big this object is, however, in order to preallocate enough - space to hold it. -} - -PARAGRAPH { - The mxPathname field is the maximum length of a file pathname that - this VFS can use. SQLite sometimes has to preallocate buffers of - this size, so it should be as small as reasonably possible. Some - filesystems permit huge pathnames, but in practice pathnames rarely - extend beyond 100 bytes or so. You do not have to put the longest - pathname that the underlying filesystem can handle here. You only - have to put the longest pathname that you want SQLite to be able to - handle. A few hundred is a good value in most cases. -} - -PARAGRAPH { - The pNext field is used internally by SQLite. Specifically, SQLite - uses this field to form a linked list of registered VFSes. -} - -PARAGRAPH { - The zName field is the symbolic name of the VFS. This is the name - that the [sqlite3_vfs_find()] compares against when it is looking for - a VFS. -} - -PARAGRAPH { - The pAppData pointer is unused by the SQLite core. The pointer is - available to store auxiliary information that a VFS information might - want to carry around. -} - -PARAGRAPH { - The remaining fields of the [sqlite3_vfs] object all store pointer - to functions that implement primitive operations. We call these - "methods". The first methods, xOpen, is used to open files on - the underlying storage media. The result is an [sqlite3_file] - object. There are additional methods, defined by the [sqlite3_file] - object itself that are used to read and write and close the file. - The additional methods are detailed below. 
The filename is in UTF-8. - SQLite will guarantee that the zFilename string passed to - xOpen() is a full pathname as generated by xFullPathname() and - that the string will be valid and unchanged until xClose() is - called. So the [sqlite3_file] can store a pointer to the - filename if it needs to remember the filename for some reason. - The flags argument to xOpen() is a copy of the flags argument - to sqlite3_open_v2(). If sqlite3_open() or sqlite3_open16() - is used, then flags is [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. - If xOpen() opens a file read-only then it sets *pOutFlags to - include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be - set. - SQLite will also add one of the following flags to the xOpen() - call, depending on the object being opened: - - The file I/O implementation can use the object type flags to - changes the way it deals with files. For example, an application - that does not care about crash recovery or rollback, might make - the open of a journal file a no-op. Writes to this journal are - also a no-op. Any attempt to read the journal returns [SQLITE_IOERR]. - Or the implementation might recognize the a database file will - be doing page-aligned sector reads and writes in a random order - and set up its I/O subsystem accordingly. - SQLite might also add one of the following flags to the xOpen - method: - - The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be - deleted when it is closed. This will always be set for TEMP - databases and journals and for subjournals. The - [SQLITE_OPEN_EXCLUSIVE] flag means the file should be opened - for exclusive access. This flag is set for all files except - for the main database file. - The [sqlite3_file] structure passed as the third argument to - xOpen is allocated by the caller. xOpen just fills it in. The - caller allocates a minimum of szOsFile bytes for the [sqlite3_file] - structure. 
-} -PARAGRAPH { - The difference between an [SQLITE_OPEN_TEMP_DB] database and an - [SQLITE_OPEN_TRANSIENT_DB] database is this: The [SQLITE_OPEN_TEMP_DB] - is used for explicitly declared and named TEMP tables (using the - CREATE TEMP TABLE syntax) or for named tables in a temporary database - that is created by opening a database with a filename that is an empty - string. An [SQLITE_OPEN_TRANSIENT_DB] holds a database table that - SQLite creates automatically in order to evaluate a subquery or - ORDER BY or GROUP BY clause. Both TEMP_DB and TRANSIENT_DB databases - are private and are deleted automatically. TEMP_DB databases last - for the duration of the database connection. TRANSIENT_DB databases - last only for the duration of a single SQL statement. -} - -PARAGRAPH { - The xDelete method is used to delete a file. The name of the file is - given in the second parameter. The filename will be in UTF-8. - The VFS must convert the filename into whatever character representation - the underlying operating system expects. If the syncDir parameter is - true, then the xDelete method should not return until the change - to the directory contents for the directory containing the - deleted file has been synced to disk in order to ensure that the - file does not "reappear" if a power failure occurs soon after. -} - -PARAGRAPH { - The xAccess method is used to check for access permissions on a file. - The filename will be UTF-8 encoded. The flags argument will be - [SQLITE_ACCESS_EXISTS] to check for the existence of the file, - [SQLITE_ACCESS_READWRITE] to check to see if the file is both readable - and writable, or [SQLITE_ACCESS_READ] to check to see if the file is - at least readable. The "file" named by the second parameter might - be a directory or folder name. -} - -PARAGRAPH { - The xGetTempName method computes the name of a temporary file that - SQLite can use. The name should be written into the buffer given - by the second parameter. 
SQLite will size that buffer to hold - at least mxPathname bytes. The generated filename should be in UTF-8. - To avoid security problems, the generated temporary filename should - contain enough randomness to prevent an attacker from guessing the - temporary filename in advance. -} - -PARAGRAPH { - The xFullPathname method is used to convert a relative pathname - into a full pathname. The resulting full pathname is written into - the buffer provided by the third parameter. SQLite will size the - output buffer to at least mxPathname bytes. Both the input and - output names should be in UTF-8. -} - -PARAGRAPH { - The xDlOpen, xDlError, xDlSym, and xDlClose methods are all used for - accessing shared libraries at run-time. These methods may be omitted - (and their pointers set to zero) if the library is compiled with - SQLITE_OMIT_LOAD_EXTENSION or if the [sqlite3_enable_load_extension()] - interface is never used to enable dynamic extension loading. The - xDlOpen method opens a shared library or DLL and returns a pointer to - a handle. NULL is returned if the open fails. If the open fails, - the xDlError method can be used to obtain a text error message. - The message is written into the zErrMsg buffer of the third parameter - which is at least nByte bytes in length. The xDlSym returns a pointer - to a symbol in the shared library. The name of the symbol is given - by the second parameter. UTF-8 encoding is assumed. If the symbol - is not found a NULL pointer is returned. The xDlClose routine closes - the shared library. -} - -PARAGRAPH { - The xRandomness method is used exactly once to initialize the - pseudo-random number generator (PRNG) inside of SQLite. Only - the xRandomness method on the default VFS is used. The xRandomness - methods on other VFSes are never accessed by SQLite. - The xRandomness routine requests that nByte bytes of randomness - be written into zOut. The routine returns the actual number of - bytes of randomness obtained. 
The quality of the randomness so obtained - will determine the quality of the randomness generated by built-in - SQLite functions such as random() and randomblob(). SQLite also - uses its PRNG to generate temporary file names.. On some platforms - (ex: windows) SQLite assumes that temporary file names are unique - without actually testing for collisions, so it is important to have - good-quality randomness even if the random() and randomblob() - functions are never used. -} - -PARAGRAPH { - The xSleep method is used to suspend the calling thread for at - least the number of microseconds given. This method is used to - implement the [sqlite3_sleep()] and [sqlite3_busy_timeout()] APIs. - In the case of [sqlite3_sleep()] the xSleep method of the default - VFS is always used. If the underlying system does not have a - microsecond resolution sleep capability, then the sleep time should - be rounded up. xSleep returns this rounded-up value. -} - -PARAGRAPH { - The xCurrentTime method finds the current time and date and writes - the result as double-precision floating point value into pointer - provided by the second parameter. The time and date is in - coordinated universal time (UTC) and is a fractional julian day number. -} - -HEADING 3 {The Open File Object} - -PARAGRAPH { - The result of opening a file is an instance of an [sqlite3_file] object. - The [sqlite3_file] object is an abstract base class defined as follows: -} - -CODE { -typedef struct sqlite3_file sqlite3_file; -struct sqlite3_file { - const struct sqlite3_io_methods *pMethods; -}; -} - -PARAGRAPH { - Each VFS implementation will subclass the [sqlite3_file] by adding - additional fields at the end to hold whatever information the VFS - needs to know about an open file. It does not matter what information - is stored as long as the total size of the structure does not exceed - the szOsFile value recorded in the [sqlite3_vfs] object. 
-} - -PARAGRAPH { - The [sqlite3_io_methods] object is a structure that contains pointers - to methods for reading, writing, and otherwise dealing with files. - This object is defined as follows: -} - -CODE { -typedef struct sqlite3_io_methods sqlite3_io_methods; -struct sqlite3_io_methods { - int iVersion; - int (*xClose)(sqlite3_file*); - int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); - int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); - int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); - int (*xSync)(sqlite3_file*, int flags); - int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); - int (*xLock)(sqlite3_file*, int); - int (*xUnlock)(sqlite3_file*, int); - int (*xCheckReservedLock)(sqlite3_file*); - int (*xFileControl)(sqlite3_file*, int op, void *pArg); - int (*xSectorSize)(sqlite3_file*); - int (*xDeviceCharacteristics)(sqlite3_file*); - /* Additional methods may be added in future releases */ -}; -} - -PARAGRAPH { - The iVersion field of [sqlite3_io_methods] is provided as insurance - against future enhancements. The iVersion value should always be - 1 for SQLite version 3.5. -} - -PARAGRAPH { - The xClose method closes the file. The space for the [sqlite3_file] - structure is deallocated by the caller. But if the [sqlite3_file] - contains pointers to other allocated memory or resources, those - allocations should be released by the xClose method. -} - -PARAGRAPH { - The xRead method reads iAmt bytes from the file beginning at a byte - offset to iOfst. The data read is stored in the pointer of the - second parameter. xRead returns the [SQLITE_OK] on success, - [SQLITE_IOERR_SHORT_READ] if it was not able to read the full number - of bytes because it reached end-of-file, or [SQLITE_IOERR_READ] for - any other error. -} - -PARAGRAPH { - The xWrite method writes iAmt bytes of data from the second parameter - into the file beginning at an offset of iOfst bytes. 
If the size of - the file is less than iOfst bytes prior to the write, then xWrite should - ensure that the file is extended with zeros up to iOfst bytes prior - to beginning its write. xWrite continues to extend the file as - necessary so that the size of the file is at least iAmt+iOfst bytes - at the conclusion of the xWrite call. The xWrite method returns - [SQLITE_OK] on success. If the write cannot complete because the - underlying storage medium is full, then [SQLITE_FULL] is returned. - [SQLITE_IOERR_WRITE] should be returned for any other error. -} - -PARAGRAPH { - The xTruncate method truncates a file to be nByte bytes in length. - If the file is already nByte bytes or less in length then this - method is a no-op. The xTruncate method returns [SQLITE_OK] on - success and [SQLITE_IOERR_TRUNCATE] if anything goes wrong. -} - -PARAGRAPH { - The xSync method is used to force previously written data out of - operating system cache and into non-volatile memory. The second - parameter is usually [SQLITE_SYNC_NORMAL]. If the second parameter - is [SQLITE_SYNC_FULL] then the xSync method should make sure that - data has also been flushed through the disk controller's cache. - The [SQLITE_SYNC_FULL] parameter is the equivalent of the F_FULLSYNC - ioctl() on Mac OS X. The xSync method returns - [SQLITE_OK] on success and [SQLITE_IOERR_FSYNC] if anything goes wrong. -} - -PARAGRAPH { - The xFileSize() method determines the current size of the file - in bytes and writes that value into *pSize. It returns [SQLITE_OK] - on success and [SQLITE_IOERR_FSTAT] if something goes wrong. -} - -PARAGRAPH { - The xLock and xUnlock methods are used to set and clear file locks. - SQLite supports five levels of file locks, in order: - - The underlying implementation can support some subset of these locking - levels as long as it meets the other requirements of this paragraph. - The locking level is specified as the second argument to both xLock - and xUnlock. 
The xLock method increases the locking level to the - specified locking level or higher. The xUnlock method decreases the - locking level to no lower than the level specified. - [SQLITE_LOCK_NONE] means that the file is unlocked. [SQLITE_LOCK_SHARED] - gives permission to read the file. Multiple database connections can - hold [SQLITE_LOCK_SHARED] at the same time. - [SQLITE_LOCK_RESERVED] is like [SQLITE_LOCK_SHARED] in that it is permission - to read the file. But only a single connection can hold a reserved lock - at any point in time. The [SQLITE_LOCK_PENDING] is also permission to - read the file. Other connections can continue to read the file as well, - but no other connection is allowed to escalate a lock from none to shared. - [SQLITE_LOCK_EXCLUSIVE] is permission to write on the file. Only a single - connection can hold an exclusive lock and no other connection can hold - any lock (other than "none") while one connection is holding an exclusive - lock. The xLock method returns [SQLITE_OK] on success, [SQLITE_BUSY] if it - is unable to obtain the lock, or [SQLITE_IOERR_RDLOCK] if something else - goes wrong. The xUnlock method returns [SQLITE_OK] on success and - [SQLITE_IOERR_UNLOCK] for problems. -} - -PARAGRAPH { - The xCheckReservedLock method checks to see if another connection or - another process is currently holding a reserved, pending, or exclusive - lock on the file. It returns true or false. -} - -PARAGRAPH { - The xFileControl() method is a generic interface that allows custom - VFS implementations to directly control an open file using the - (new and experimental) - [sqlite3_file_control()] interface. The second "op" argument - is an integer opcode. The third - argument is a generic pointer which is intended to be a pointer - to a structure that may contain arguments or space in which to - write return values. 
Potential uses for xFileControl() might be - functions to enable blocking locks with timeouts, to change the - locking strategy (for example to use dot-file locks), to inquire - about the status of a lock, or to break stale locks. The SQLite - core reserves opcodes less than 100 for its own use. - A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available. - Applications that define a custom xFileControl method should use opcodes - greater than 100 to avoid conflicts. -} - -PARAGRAPH { - The xSectorSize returns the "sector size" of the underlying - non-volatile media. A "sector" is defined as the smallest unit of - storage that can be written without disturbing adjacent storage. - On a disk drive the "sector size" has until recently been 512 bytes, - though there is a push to increase this value to 4KiB. SQLite needs - to know the sector size so that it can write a full sector at a - time, and thus avoid corrupting adjacent storage space if a power - loss occurs in the middle of a write. -} - -PARAGRAPH { - The xDeviceCharacteristics method returns an integer bit vector that - defines any special properties that the underlying storage medium might - have that SQLite can use to increase performance. The allowed return value - is the bit-wise OR of the following values: - - The [SQLITE_IOCAP_ATOMIC] bit means that all writes to this device are - atomic in the sense that either the entire write occurs or none of it - occurs. The other - [SQLITE_IOCAP_ATOMIC | SQLITE_IOCAP_ATOMICnnn] values indicate that - writes of aligned blocks of the indicated size are atomic. - [SQLITE_IOCAP_SAFE_APPEND] means that when extending a file with new - data, the new data is written first and then the file size is updated. - So if a power failure occurs, there is no chance that the file might have - been extended with randomness. The [SQLITE_IOCAP_SEQUENTIAL] bit means - that all writes occur in the order that they are issued and are not - reordered by the underlying file system. 
-} - -HEADING 3 {Checklist For Constructing A New VFS} - -PARAGRAPH { - The preceding paragraphs contain a lot of information. - To ease the task of constructing - a new VFS for SQLite we offer the following implementation checklist: -} - -PARAGRAPH { -
    -
  1. Define an appropriate subclass of the [sqlite3_file] object. -
  2. Implement the methods required by the [sqlite3_io_methods] object. -
  3. Create a static and - constant [sqlite3_io_methods] object containing pointers - to the methods from the previous step. -
  4. Implement the xOpen method that opens a file and populates an - [sqlite3_file] object, including setting pMethods to - point to the [sqlite3_io_methods] object from the previous step. -
  5. Implement the other methods required by [sqlite3_vfs]. -
  6. Define a static (but not constant) [sqlite3_vfs] structure that - contains pointers to the xOpen method and the other methods and - which contains the appropriate values for iVersion, szOsFile, - mxPathname, zName, and pAppData. -
  7. Implement a procedure that calls [sqlite3_vfs_register()] and - passes it a pointer to the [sqlite3_vfs] structure from the previous - step. This procedure is probably the only exported symbol in the - source file that implements your VFS. -
-} - -PARAGRAPH { - Within your application, call the procedure implemented in the last - step above as part of your initialization process before any - database connections are opened. -} - -HEADING 1 {The Memory Allocation Subsystem} - -PARAGRAPH { - Beginning with version 3.5, SQLite obtains all of the heap memory it - needs using the routines [sqlite3_malloc()], [sqlite3_free()], and - [sqlite3_realloc()]. These routines have existed in prior versions - of SQLite, but SQLite has previously bypassed these routines and used - its own memory allocator. This all changes in version 3.5.0. -} - -PARAGRAPH { - The SQLite source tree actually contains multiple versions of the - memory allocator. The default high-speed version found in the - "mem1.c" source file is used for most builds. But if the SQLITE_MEMDEBUG - flag is enabled, a separate memory allocator the "mem2.c" source file - is used instead. The mem2.c allocator implements lots of hooks to - do error checking and to simulate memory allocation failures for testing - purposes. Both of these allocators use the malloc()/free() implementation - in the standard C library. -} - -PARAGRAPH { - Applications are not required to use either of these standard memory - allocators. If SQLite is compiled with SQLITE_OMIT_MEMORY_ALLOCATION - then no implementation for the [sqlite3_malloc()], [sqlite3_realloc()], - and [sqlite3_free()] functions is provided. Instead, the application - that links against SQLite must provide its own implementation of these - functions. The application provided memory allocator is not required - to use the malloc()/free() implementation in the standard C library. - An embedded application might provide an alternative memory allocator - that uses memory for a fixed memory pool set aside for the exclusive - use of SQLite, for example. 
-} - -PARAGRAPH { - Applications that implement their own memory allocator must provide - implementation for the usual three allocation functions - [sqlite3_malloc()], [sqlite3_realloc()], and [sqlite3_free()]. - And they must also implement a fourth function: -} - -CODE { -int sqlite3_memory_alarm( - void(*xCallback)(void *pArg, sqlite3_int64 used, int N), - void *pArg, - sqlite3_int64 iThreshold -); -} - -PARAGRAPH { - The [sqlite3_memory_alarm] routine is used to register - a callback on memory allocation events. - This routine registers or clears a callbacks that fires when - the amount of memory allocated exceeds iThreshold. Only - a single callback can be registered at a time. Each call - to [sqlite3_memory_alarm()] overwrites the previous callback. - The callback is disabled by setting xCallback to a NULL - pointer. -} - -PARAGRAPH { - The parameters to the callback are the pArg value, the - amount of memory currently in use, and the size of the - allocation that provoked the callback. The callback will - presumably invoke [sqlite3_free()] to free up memory space. - The callback may invoke [sqlite3_malloc()] or [sqlite3_realloc()] - but if it does, no additional callbacks will be invoked by - the recursive calls. -} - -PARAGRAPH { - The [sqlite3_soft_heap_limit()] interface works by registering - a memory alarm at the soft heap limit and invoking - [sqlite3_release_memory()] in the alarm callback. Application - programs should not attempt to use the [sqlite3_memory_alarm()] - interface because doing so will interfere with the - [sqlite3_soft_heap_limit()] module. This interface is exposed - only so that applications can provide their own - alternative implementation when the SQLite core is - compiled with SQLITE_OMIT_MEMORY_ALLOCATION. 
-} - -PARAGRAPH { - The built-in memory allocators in SQLite also provide the following - additional interfaces: -} - -CODE { -sqlite3_int64 sqlite3_memory_used(void); -sqlite3_int64 sqlite3_memory_highwater(int resetFlag); -} - -PARAGRAPH { - These interfaces can be used by an application to monitor how - much memory SQLite is using. The [sqlite3_memory_used()] routine - returns the number of bytes of memory currently in use and the - [sqlite3_memory_highwater()] returns the maximum instantaneous - memory usage. Neither routine includes the overhead associated - with the memory allocator. These routines are provided for use - by the application. SQLite never invokes them itself. So if - the application is providing its own memory allocation subsystem, - it can omit these interfaces if desired. -} - -HEADING 1 {The Mutex Subsystem} - -PARAGRAPH { - SQLite has always been threadsafe in the sense that it is safe to - use different SQLite database connections in different threads at the - same time. The constraint was that the same database connection - could not be used in two separate threads at once. SQLite version 3.5.0 - relaxes this constraint. -} - -PARAGRAPH { - In order to allow multiple threads to use the same database connection - at the same time, SQLite must make extensive use of mutexes. And for - this reason a new mutex subsystem as been added. The mutex subsystem - as the following interface: -} - -CODE { -sqlite3_mutex *sqlite3_mutex_alloc(int); -void sqlite3_mutex_free(sqlite3_mutex*); -void sqlite3_mutex_enter(sqlite3_mutex*); -int sqlite3_mutex_try(sqlite3_mutex*); -void sqlite3_mutex_leave(sqlite3_mutex*); -} - -PARAGRAPH { - Though these routines exist for the use of the SQLite core, - application code is free to use these routines as well, if desired. - A mutex is an [sqlite3_mutex] object. The [sqlite3_mutex_alloc()] - routine allocates a new mutex object and returns a pointer to it. 
- The argument to [sqlite3_mutex_alloc()] should be - [SQLITE_MUTEX_FAST] or [SQLITE_MUTEX_RECURSIVE] for non-recursive - and recursive mutexes, respectively. If the underlying system does - not provide non-recursive mutexes, then a recursive mutex can be - substituted in that case. The argument to [sqlite3_mutex_alloc()] - can also be a constant designating one of several static mutexes: - - These static mutexes are reserved for use internally by SQLite - and should not be used by the application. The static mutexes - are all non-recursive. -} - -PARAGRAPH { - The [sqlite3_mutex_free()] routine should be used to deallocate - a non-static mutex. If a static mutex is passed to this routine - then the behavior is undefined. -} - -PARAGRAPH { - The [sqlite3_mutex_enter()] attempts to enter the mutex and blocks - if another threads is already there. [sqlite3_mutex_try()] attempts - to enter and returns [SQLITE_OK] on success or [SQLITE_BUSY] if another - thread is already there. [sqlite3_mutex_leave()] exits a mutex. - The mutex is held until the number of exits matches the number of - entrances. If [sqlite3_mutex_leave()] is called on a mutex that - the thread is not currently holding, then the behavior is undefined. - If any routine is called for a deallocated mutex, then the behavior - is undefined. -} - -PARAGRAPH { - The SQLite source code provides multiple implementations of these - APIs, suitable for varying environments. If SQLite is compiled with - the SQLITE_THREADSAFE=0 flag then a no-op mutex implementation that - is fast but does no real mutual exclusion is provided. That - implementation is suitable for use in single-threaded applications - or applications that only use SQLite in a single thread. Other - real mutex implementations are provided based on the underlying - operating system. -} - -PARAGRAPH { - Embedded applications may wish to provide their own mutex implementation. 
- If SQLite is compiled with the -DSQLITE_MUTEX_APPDEF=1 compile-time flag - then the SQLite core provides no mutex subsystem and a mutex subsystem - that matches the interface described above must be provided by the - application that links against SQLite. -} - -HEADING 1 {Other Interface Changes} - -PARAGRAPH { - Version 3.5.0 of SQLite changes the behavior of a few APIs in ways - that are technically incompatible. However, these APIs are seldom - used and even when they are used it is difficult to imagine a - scenario where the change might break something. The changes - actually makes these interface much more useful and powerful. -} - -PARAGRAPH { - Prior to version 3.5.0, the [sqlite3_enable_shared_cache()] API - would enable and disable the shared cache feature for all connections - within a single thread - the same thread from which the - sqlite3_enable_shared_cache() routine was called. Database connections - that used the shared cache were restricted to running in the same - thread in which they were opened. Beginning with version 3.5.0, - the sqlite3_enable_shared_cache() applies to all database connections - in all threads within the process. Now database connections running - in separate threads can share a cache. And database connections that - use shared cache can migrate from one thread to another. -} - -PARAGRAPH { - Prior to version 3.5.0 the [sqlite3_soft_heap_limit()] set an upper - bound on heap memory usage for all database connections within a - single thread. Each thread could have its own heap limit. Beginning - in version 3.5.0, there is a single heap limit for the entire process. - This seems more restrictive (one limit as opposed to many) but in - practice it is what most users want. -} - -PARAGRAPH { - Prior to version 3.5.0 the [sqlite3_release_memory()] function would - try to reclaim memory from all database connections in the same thread - as the sqlite3_release_memory() call. 
Beginning with version 3.5.0, - the sqlite3_release_memory() function will attempt to reclaim memory - from all database connections in all threads. -} - -HEADING 1 {Summary} - -PARAGRAPH { - The transition from SQLite version 3.4.2 to 3.5.0 is a major change. - Every source code file in the SQLite core had to be modified, some - extensively. And the change introduced some minor incompatibilities - in the C interface. But we feel that the benefits of the transition - from 3.4.2 to 3.5.0 far outweigh the pain of porting. The new - VFS layer is now well-defined and stable and should simplify future - customizations. The VFS layer, and the separable memory allocator - and mutex subsystems allow a standard SQLite source code amalgamation - to be used in an embedded project without change, greatly simplifying - configuration management. And the resulting system is much more - tolerant of highly threaded designs. -} DELETED arch.fig Index: arch.fig ================================================================== --- arch.fig +++ /dev/null @@ -1,64 +0,0 @@ -#FIG 3.2 -Portrait -Center -Inches -Letter -100.00 -Single --2 -1200 2 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 8550 3675 9075 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 7200 3675 7725 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 5775 3675 6300 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 3975 3675 4500 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 2625 3675 3150 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 1275 3675 1800 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 9900 3675 10425 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 10425 4875 10425 4875 11250 2550 11250 2550 10425 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 9075 4875 9075 4875 9900 2550 9900 2550 9075 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 7725 4875 
7725 4875 8550 2550 8550 2550 7725 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 6300 4875 6300 4875 7200 2550 7200 2550 6300 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 4500 4875 4500 4875 5775 2550 5775 2550 4500 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 3150 4875 3150 4875 3975 2550 3975 2550 3150 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 1800 4875 1800 4875 2625 2550 2625 2550 1800 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 450 4875 450 4875 1275 2550 1275 2550 450 -4 1 0 100 0 0 20 0.0000 4 195 1020 3675 750 Interface\001 -4 1 0 100 0 0 14 0.0000 4 195 2040 3675 1125 main.c table.c tclsqlite.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1920 3675 6675 Virtual Machine\001 -4 1 0 100 0 0 14 0.0000 4 150 570 3675 7050 vdbe.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1830 3675 4875 Code Generator\001 -4 1 0 100 0 0 14 0.0000 4 195 1860 3675 5175 build.c delete.c expr.c\001 -4 1 0 100 0 0 14 0.0000 4 195 2115 3675 5400 insert.c select.c update.c\001 -4 1 0 100 0 0 14 0.0000 4 150 705 3675 5625 where.c\001 -4 1 0 100 0 0 20 0.0000 4 195 735 3675 3450 Parser\001 -4 1 0 100 0 0 20 0.0000 4 195 1140 3675 2100 Tokenizer\001 -4 1 0 100 0 0 14 0.0000 4 150 870 3675 2475 tokenize.c\001 -4 1 0 100 0 0 20 0.0000 4 255 1350 3675 9375 Page Cache\001 -4 1 0 100 0 0 14 0.0000 4 150 630 3675 3825 parse.y\001 -4 1 0 100 0 0 14 0.0000 4 150 600 3675 8400 btree.c\001 -4 1 0 100 0 0 14 0.0000 4 150 645 3675 9750 pager.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1620 3675 8025 B-tree Driver\001 -4 1 0 100 0 0 14 0.0000 4 105 345 3675 11100 os.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1470 3675 10725 OS Interface\001 DELETED arch.gif Index: arch.gif ================================================================== --- arch.gif +++ /dev/null cannot compute difference between binary files DELETED arch.png Index: arch.png ================================================================== --- arch.png +++ /dev/null cannot compute difference between binary files DELETED arch.tcl Index: arch.tcl 
================================================================== --- arch.tcl +++ /dev/null @@ -1,221 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: arch.tcl,v 1.16 2004/10/10 17:24:54 drh Exp $} -source common.tcl -header {Architecture of SQLite} -puts { -

The Architecture Of SQLite

- -

Introduction

- - - - -
Block Diagram Of SQLite
-

This document describes the architecture of the SQLite library. -The information here is useful to those who want to understand or -modify the inner workings of SQLite. -

- -

-A block diagram showing the main components of SQLite -and how they interrelate is shown at the right. The text that -follows will provide a quick overview of each of these components. -

- - -

-This document describes SQLite version 3.0. Version 2.8 and -earlier are similar but the details differ. -

- -

Interface

- -

Much of the public interface to the SQLite library is implemented by -functions found in the main.c, legacy.c, and -vdbeapi.c source files -though some routines are -scattered about in other files where they can have access to data -structures with file scope. The -sqlite3_get_table() routine is implemented in table.c. -sqlite3_mprintf() is found in printf.c. -sqlite3_complete() is in tokenize.c. -The Tcl interface is implemented by tclsqlite.c. More -information on the C interface to SQLite is -available separately.

- -

To avoid name collisions with other software, all external -symbols in the SQLite library begin with the prefix sqlite3. -Those symbols that are intended for external use (in other words, -those symbols which form the API for SQLite) begin -with sqlite3_.

- -

Tokenizer

- -

When a string containing SQL statements is to be executed, the -interface passes that string to the tokenizer. The job of the tokenizer -is to break the original string up into tokens and pass those tokens -one by one to the parser. The tokenizer is hand-coded in C in -the file tokenize.c. - -

Note that in this design, the tokenizer calls the parser. People -who are familiar with YACC and BISON may be used to doing things the -other way around -- having the parser call the tokenizer. The author -of SQLite -has done it both ways and finds things generally work out nicer for -the tokenizer to call the parser. YACC has it backwards.

- -

Parser

- -

The parser is the piece that assigns meaning to tokens based on -their context. The parser for SQLite is generated using the -Lemon LALR(1) parser -generator. Lemon does the same job as YACC/BISON, but it uses -a different input syntax which is less error-prone. -Lemon also generates a parser which is reentrant and thread-safe. -And lemon defines the concept of a non-terminal destructor so -that it does not leak memory when syntax errors are encountered. -The source file that drives Lemon is found in parse.y.

- -

Because -lemon is a program not normally found on development machines, the -complete source code to lemon (just one C file) is included in the -SQLite distribution in the "tool" subdirectory. Documentation on -lemon is found in the "doc" subdirectory of the distribution. -

- -

Code Generator

- -

After the parser assembles tokens into complete SQL statements, -it calls the code generator to produce virtual machine code that -will do the work that the SQL statements request. There are many -files in the code generator: -attach.c, -auth.c, -build.c, -delete.c, -expr.c, -insert.c, -pragma.c, -select.c, -trigger.c, -update.c, -vacuum.c -and where.c. -In these files is where most of the serious magic happens. -expr.c handles code generation for expressions. -where.c handles code generation for WHERE clauses on -SELECT, UPDATE and DELETE statements. The files attach.c, -delete.c, insert.c, select.c, trigger.c -update.c, and vacuum.c handle the code generation -for SQL statements with the same names. (Each of these files calls routines -in expr.c and where.c as necessary.) All other -SQL statements are coded out of build.c. -The auth.c file implements the functionality of -sqlite3_set_authorizer().

- -

Virtual Machine

- -

The program generated by the code generator is executed by -the virtual machine. Additional information about the virtual -machine is available separately. -To summarize, the virtual machine implements an abstract computing -engine specifically designed to manipulate database files. The -machine has a stack which is used for intermediate storage. -Each instruction contains an opcode and -up to three additional operands.

- -

The virtual machine itself is entirely contained in a single -source file vdbe.c. The virtual machine also has -its own header files: vdbe.h that defines an interface -between the virtual machine and the rest of the SQLite library and -vdbeInt.h which defines structure private the virtual machine. -The vdbeaux.c file contains utilities used by the virtual -machine and interface modules used by the rest of the library to -construct VM programs. The vdbeapi.c file contains external -interfaces to the virtual machine such as the -sqlite3_bind_... family of functions. Individual values -(strings, integer, floating point numbers, and BLOBs) are stored -in an internal object named "Mem" which is implemented by -vdbemem.c.

- -

-SQLite implements SQL functions using callbacks to C-language routines. -Even the built-in SQL functions are implemented this way. Most of -the built-in SQL functions (ex: coalesce(), count(), -substr(), and so forth) can be found in func.c. -Date and time conversion functions are found in date.c. -

- -

B-Tree

- -

An SQLite database is maintained on disk using a B-tree implementation -found in the btree.c source file. A separate B-tree is used for -each table and index in the database. All B-trees are stored in the -same disk file. Details of the file format are recorded in a large -comment at the beginning of btree.c.

- -

The interface to the B-tree subsystem is defined by the header file -btree.h. -

- -

Page Cache

- -

The B-tree module requests information from the disk in fixed-size -chunks. The default chunk size is 1024 bytes but can vary between 512 -and 65536 bytes. -The page cache is responsible for reading, writing, and -caching these chunks. -The page cache also provides the rollback and atomic commit abstraction -and takes care of locking of the database file. The -B-tree driver requests particular pages from the page cache and notifies -the page cache when it wants to modify pages or commit or rollback -changes and the page cache handles all the messy details of making sure -the requests are handled quickly, safely, and efficiently.

- -

The code to implement the page cache is contained in the single C -source file pager.c. The interface to the page cache subsystem -is defined by the header file pager.h. -

- -

OS Interface

- -

-In order to provide portability between POSIX and Win32 operating systems, -SQLite uses an abstraction layer to interface with the operating system. -The interface to the OS abstraction layer is defined in -os.h. Each supported operating system has its own implementation: -os_unix.c for Unix, os_win.c for windows, and so forth. -Each of these operating-specific implements typically has its own -header file: os_unix.h, os_win.h, etc. -

- -

Utilities

- -

-Memory allocation and caseless string comparison routines are located -in util.c. -Symbol tables used by the parser are maintained by hash tables found -in hash.c. The utf.c source file contains Unicode -conversion subroutines. -SQLite has its own private implementation of printf() (with -some extensions) in printf.c and its own random number generator -in random.c. -

- -

Test Code

- -

-If you count regression test scripts, -more than half the total code base of SQLite is devoted to testing. -There are many assert() statements in the main code files. -In additional, the source files test1.c through test5.c -together with md5.c implement extensions used for testing -purposes only. The os_test.c backend interface is used to -simulate power failures to verify the crash-recovery mechanism in -the pager. -

- -} -footer $rcsid DELETED arch2.fig Index: arch2.fig ================================================================== --- arch2.fig +++ /dev/null @@ -1,123 +0,0 @@ -#FIG 3.2 -Landscape -Center -Inches -Letter -100.00 -Single --2 -1200 2 -0 32 #000000 -0 33 #868686 -0 34 #dfefd7 -0 35 #d7efef -0 36 #efdbef -0 37 #efdbd7 -0 38 #e7efcf -0 39 #9e9e9e -6 3225 3900 4650 6000 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 5475 4575 5475 4575 5925 3225 5925 3225 5475 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 5550 4650 5550 4650 6000 3300 6000 3300 5550 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 4650 4575 4650 4575 5100 3225 5100 3225 4650 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 4725 4650 4725 4650 5175 3300 5175 3300 4725 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 3900 4575 3900 4575 4350 3225 4350 3225 3900 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 3975 4650 3975 4650 4425 3300 4425 3300 3975 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 4350 3900 4650 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 5100 3900 5475 -4 1 0 50 0 2 12 0.0000 4 135 1050 3900 5775 OS Interface\001 -4 1 0 50 0 2 12 0.0000 4 135 615 3900 4200 B-Tree\001 -4 1 0 50 0 2 12 0.0000 4 180 495 3900 4950 Pager\001 --6 -6 5400 4725 6825 5250 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 4725 6750 4725 6750 5175 5400 5175 5400 4725 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 4800 6825 4800 6825 5250 5475 5250 5475 4800 -4 1 0 50 0 2 12 0.0000 4 135 630 6000 5025 Utilities\001 --6 -6 5400 5550 6825 6075 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 5550 6750 5550 6750 6000 5400 6000 5400 5550 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 5625 6825 5625 6825 6075 5475 6075 5475 5625 -4 1 0 50 0 2 12 0.0000 4 135 855 6000 5850 Test Code\001 --6 -6 5400 2775 6825 3750 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 2850 6825 2850 6825 3750 5475 3750 5475 2850 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 2775 6750 2775 
6750 3675 5400 3675 5400 2775 -4 1 0 50 0 2 12 0.0000 4 135 420 6075 3150 Code\001 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 3375 Generator\001 --6 -6 5400 1950 6825 2475 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1950 6750 1950 6750 2400 5400 2400 5400 1950 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 2025 6825 2025 6825 2475 5475 2475 5475 2025 -4 1 0 50 0 2 12 0.0000 4 135 570 6075 2250 Parser\001 --6 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1050 6750 1050 6750 1500 5400 1500 5400 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 1125 6825 1125 6825 1575 5475 1575 5475 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1050 4575 1050 4575 1500 3225 1500 3225 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1125 4650 1125 4650 1575 3300 1575 3300 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1800 4575 1800 4575 2250 3225 2250 3225 1800 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1875 4650 1875 4650 2325 3300 2325 3300 1875 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 2550 4575 2550 4575 3000 3225 3000 3225 2550 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 2625 4650 2625 4650 3075 3300 3075 3300 2625 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 1500 3900 1800 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 2250 3900 2550 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 3000 3900 3900 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 4575 1950 5400 1350 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 5400 2925 4650 2325 -2 2 0 1 0 34 55 0 20 0.000 0 0 -1 0 0 5 - 2850 750 4875 750 4875 3375 2850 3375 2850 750 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 1500 6075 1950 -2 3 0 1 0 35 55 0 20 0.000 0 0 -1 0 0 5 - 2850 3675 4875 3675 4875 6225 2850 6225 2850 3675 -2 2 0 1 0 37 55 0 20 0.000 0 0 -1 0 0 5 - 5175 750 7200 750 7200 4050 5175 4050 5175 750 -2 2 0 1 0 38 55 0 20 0.000 0 0 -1 0 0 5 - 5175 4425 
7200 4425 7200 6225 5175 6225 5175 4425 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 2475 6075 2775 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 1350 Tokenizer\001 -4 1 0 50 0 1 12 1.5708 4 180 1020 7125 2250 SQL Compiler\001 -4 1 0 50 0 1 12 1.5708 4 135 345 3075 2025 Core\001 -4 1 0 50 0 2 12 0.0000 4 135 1290 3900 2850 Virtual Machine\001 -4 1 0 50 0 2 12 0.0000 4 165 1185 3900 1995 SQL Command\001 -4 1 0 50 0 2 12 0.0000 4 135 855 3900 2183 Processor\001 -4 1 0 50 0 2 14 0.0000 4 150 870 3900 1350 Interface\001 -4 1 0 50 0 1 12 1.5708 4 135 885 7125 5400 Accessories\001 -4 1 0 50 0 1 12 1.5708 4 135 645 3075 4875 Backend\001 DELETED arch2.gif Index: arch2.gif ================================================================== --- arch2.gif +++ /dev/null cannot compute difference between binary files DELETED arch2b.fig Index: arch2b.fig ================================================================== --- arch2b.fig +++ /dev/null @@ -1,125 +0,0 @@ -#FIG 3.2 -Landscape -Center -Inches -Letter -100.00 -Single --2 -1200 2 -0 32 #000000 -0 33 #868686 -0 34 #dfefd7 -0 35 #d7efef -0 36 #efdbef -0 37 #efdbd7 -0 38 #e7efcf -0 39 #9e9e9e -6 3225 3900 4650 6000 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 5475 4575 5475 4575 5925 3225 5925 3225 5475 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 5550 4650 5550 4650 6000 3300 6000 3300 5550 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 4650 4575 4650 4575 5100 3225 5100 3225 4650 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 4725 4650 4725 4650 5175 3300 5175 3300 4725 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 3900 4575 3900 4575 4350 3225 4350 3225 3900 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 3975 4650 3975 4650 4425 3300 4425 3300 3975 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 4350 3900 4650 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 5100 3900 5475 -4 1 0 50 0 2 12 0.0000 4 135 1050 3900 5775 OS Interface\001 -4 1 0 50 0 2 
12 0.0000 4 135 615 3900 4200 B-Tree\001 -4 1 0 50 0 2 12 0.0000 4 180 495 3900 4950 Pager\001 --6 -6 5175 4275 7200 6150 -6 5400 4519 6825 5090 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 4519 6750 4519 6750 5009 5400 5009 5400 4519 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 4601 6825 4601 6825 5090 5475 5090 5475 4601 -4 1 0 50 0 2 12 0.0000 4 135 630 6000 4845 Utilities\001 --6 -6 5400 5416 6825 5987 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 5416 6750 5416 6750 5906 5400 5906 5400 5416 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 5498 6825 5498 6825 5987 5475 5987 5475 5498 -4 1 0 50 0 2 12 0.0000 4 135 855 6000 5742 Test Code\001 --6 -2 2 0 1 0 38 55 0 20 0.000 0 0 -1 0 0 5 - 5175 4275 7200 4275 7200 6150 5175 6150 5175 4275 -4 1 0 50 0 1 12 1.5708 4 135 885 7125 5253 Accessories\001 --6 -6 5400 2700 6825 3675 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 2775 6825 2775 6825 3675 5475 3675 5475 2775 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 2700 6750 2700 6750 3600 5400 3600 5400 2700 -4 1 0 50 0 2 12 0.0000 4 135 420 6075 3075 Code\001 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 3300 Generator\001 --6 -6 5400 1875 6825 2400 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1875 6750 1875 6750 2325 5400 2325 5400 1875 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 1950 6825 1950 6825 2400 5475 2400 5475 1950 -4 1 0 50 0 2 12 0.0000 4 135 570 6075 2175 Parser\001 --6 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1050 6750 1050 6750 1500 5400 1500 5400 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 1125 6825 1125 6825 1575 5475 1575 5475 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1050 4575 1050 4575 1500 3225 1500 3225 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1125 4650 1125 4650 1575 3300 1575 3300 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1800 4575 1800 4575 2250 3225 2250 3225 1800 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1875 4650 1875 4650 2325 3300 2325 3300 1875 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 
0 5 - 3225 2550 4575 2550 4575 3000 3225 3000 3225 2550 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 2625 4650 2625 4650 3075 3300 3075 3300 2625 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 1500 3900 1800 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 2250 3900 2550 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 3000 3900 3900 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 4575 1950 5400 1350 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 5400 2925 4650 2175 -2 2 0 1 0 34 55 0 20 0.000 0 0 -1 0 0 5 - 2850 750 4875 750 4875 3375 2850 3375 2850 750 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 1500 6075 1800 -2 3 0 1 0 35 55 0 20 0.000 0 0 -1 0 0 5 - 2850 3675 4875 3675 4875 6150 2850 6150 2850 3675 -2 2 0 1 0 37 55 0 20 0.000 0 0 -1 0 0 5 - 5175 750 7200 750 7200 3975 5175 3975 5175 750 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 2400 6075 2700 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 1350 Tokenizer\001 -4 1 0 50 0 1 12 1.5708 4 180 1020 7125 2250 SQL Compiler\001 -4 1 0 50 0 1 12 1.5708 4 135 345 3075 2025 Core\001 -4 1 0 50 0 2 12 0.0000 4 135 1290 3900 2850 Virtual Machine\001 -4 1 0 50 0 2 12 0.0000 4 165 1185 3900 1995 SQL Command\001 -4 1 0 50 0 2 12 0.0000 4 135 855 3900 2183 Processor\001 -4 1 0 50 0 2 14 0.0000 4 150 870 3900 1350 Interface\001 -4 1 0 50 0 1 12 1.5708 4 135 645 3075 4875 Backend\001 ADDED art/arch.fig Index: art/arch.fig ================================================================== --- /dev/null +++ art/arch.fig @@ -0,0 +1,64 @@ +#FIG 3.2 +Portrait +Center +Inches +Letter +100.00 +Single +-2 +1200 2 +2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 + 1 1 3.00 75.00 135.00 + 3675 8550 3675 9075 +2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 + 1 1 3.00 75.00 135.00 + 3675 7200 3675 7725 +2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 + 1 1 3.00 75.00 135.00 + 3675 5775 3675 6300 +2 1 0 3 0 
7 100 0 -1 0.000 0 0 -1 1 0 2 + 1 1 3.00 75.00 135.00 + 3675 3975 3675 4500 +2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 + 1 1 3.00 75.00 135.00 + 3675 2625 3675 3150 +2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 + 1 1 3.00 75.00 135.00 + 3675 1275 3675 1800 +2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 + 1 1 3.00 75.00 135.00 + 3675 9900 3675 10425 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 10425 4875 10425 4875 11250 2550 11250 2550 10425 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 9075 4875 9075 4875 9900 2550 9900 2550 9075 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 7725 4875 7725 4875 8550 2550 8550 2550 7725 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 6300 4875 6300 4875 7200 2550 7200 2550 6300 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 4500 4875 4500 4875 5775 2550 5775 2550 4500 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 3150 4875 3150 4875 3975 2550 3975 2550 3150 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 1800 4875 1800 4875 2625 2550 2625 2550 1800 +2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 + 2550 450 4875 450 4875 1275 2550 1275 2550 450 +4 1 0 100 0 0 20 0.0000 4 195 1020 3675 750 Interface\001 +4 1 0 100 0 0 14 0.0000 4 195 2040 3675 1125 main.c table.c tclsqlite.c\001 +4 1 0 100 0 0 20 0.0000 4 195 1920 3675 6675 Virtual Machine\001 +4 1 0 100 0 0 14 0.0000 4 150 570 3675 7050 vdbe.c\001 +4 1 0 100 0 0 20 0.0000 4 195 1830 3675 4875 Code Generator\001 +4 1 0 100 0 0 14 0.0000 4 195 1860 3675 5175 build.c delete.c expr.c\001 +4 1 0 100 0 0 14 0.0000 4 195 2115 3675 5400 insert.c select.c update.c\001 +4 1 0 100 0 0 14 0.0000 4 150 705 3675 5625 where.c\001 +4 1 0 100 0 0 20 0.0000 4 195 735 3675 3450 Parser\001 +4 1 0 100 0 0 20 0.0000 4 195 1140 3675 2100 Tokenizer\001 +4 1 0 100 0 0 14 0.0000 4 150 870 3675 2475 tokenize.c\001 +4 1 0 100 0 0 20 0.0000 4 255 1350 3675 9375 Page Cache\001 +4 1 0 100 0 0 14 0.0000 4 150 630 3675 3825 parse.y\001 +4 1 0 100 0 0 14 0.0000 4 150 600 3675 8400 btree.c\001 +4 1 0 100 0 0 14 0.0000 4 150 
645 3675 9750 pager.c\001 +4 1 0 100 0 0 20 0.0000 4 195 1620 3675 8025 B-tree Driver\001 +4 1 0 100 0 0 14 0.0000 4 105 345 3675 11100 os.c\001 +4 1 0 100 0 0 20 0.0000 4 195 1470 3675 10725 OS Interface\001 ADDED art/arch2.fig Index: art/arch2.fig ================================================================== --- /dev/null +++ art/arch2.fig @@ -0,0 +1,123 @@ +#FIG 3.2 +Landscape +Center +Inches +Letter +100.00 +Single +-2 +1200 2 +0 32 #000000 +0 33 #868686 +0 34 #dfefd7 +0 35 #d7efef +0 36 #efdbef +0 37 #efdbd7 +0 38 #e7efcf +0 39 #9e9e9e +6 3225 3900 4650 6000 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 5475 4575 5475 4575 5925 3225 5925 3225 5475 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 5550 4650 5550 4650 6000 3300 6000 3300 5550 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 4650 4575 4650 4575 5100 3225 5100 3225 4650 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 4725 4650 4725 4650 5175 3300 5175 3300 4725 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 3900 4575 3900 4575 4350 3225 4350 3225 3900 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 3975 4650 3975 4650 4425 3300 4425 3300 3975 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 4350 3900 4650 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 5100 3900 5475 +4 1 0 50 0 2 12 0.0000 4 135 1050 3900 5775 OS Interface\001 +4 1 0 50 0 2 12 0.0000 4 135 615 3900 4200 B-Tree\001 +4 1 0 50 0 2 12 0.0000 4 180 495 3900 4950 Pager\001 +-6 +6 5400 4725 6825 5250 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 4725 6750 4725 6750 5175 5400 5175 5400 4725 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 4800 6825 4800 6825 5250 5475 5250 5475 4800 +4 1 0 50 0 2 12 0.0000 4 135 630 6000 5025 Utilities\001 +-6 +6 5400 5550 6825 6075 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 5550 6750 5550 6750 6000 5400 6000 5400 5550 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 5625 6825 5625 6825 6075 5475 6075 5475 5625 +4 1 0 50 0 2 12 0.0000 4 135 855 6000 
5850 Test Code\001 +-6 +6 5400 2775 6825 3750 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 2850 6825 2850 6825 3750 5475 3750 5475 2850 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 2775 6750 2775 6750 3675 5400 3675 5400 2775 +4 1 0 50 0 2 12 0.0000 4 135 420 6075 3150 Code\001 +4 1 0 50 0 2 12 0.0000 4 135 855 6075 3375 Generator\001 +-6 +6 5400 1950 6825 2475 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 1950 6750 1950 6750 2400 5400 2400 5400 1950 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 2025 6825 2025 6825 2475 5475 2475 5475 2025 +4 1 0 50 0 2 12 0.0000 4 135 570 6075 2250 Parser\001 +-6 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 1050 6750 1050 6750 1500 5400 1500 5400 1050 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 1125 6825 1125 6825 1575 5475 1575 5475 1125 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 1050 4575 1050 4575 1500 3225 1500 3225 1050 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 1125 4650 1125 4650 1575 3300 1575 3300 1125 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 1800 4575 1800 4575 2250 3225 2250 3225 1800 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 1875 4650 1875 4650 2325 3300 2325 3300 1875 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 2550 4575 2550 4575 3000 3225 3000 3225 2550 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 2625 4650 2625 4650 3075 3300 3075 3300 2625 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 1500 3900 1800 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 2250 3900 2550 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 3000 3900 3900 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 4575 1950 5400 1350 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 5400 2925 4650 2325 +2 2 0 1 0 34 55 0 20 0.000 0 0 -1 0 0 5 + 2850 750 4875 750 4875 3375 2850 3375 2850 750 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 6075 1500 6075 1950 +2 3 0 1 0 35 55 0 20 0.000 0 0 -1 
0 0 5 + 2850 3675 4875 3675 4875 6225 2850 6225 2850 3675 +2 2 0 1 0 37 55 0 20 0.000 0 0 -1 0 0 5 + 5175 750 7200 750 7200 4050 5175 4050 5175 750 +2 2 0 1 0 38 55 0 20 0.000 0 0 -1 0 0 5 + 5175 4425 7200 4425 7200 6225 5175 6225 5175 4425 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 6075 2475 6075 2775 +4 1 0 50 0 2 12 0.0000 4 135 855 6075 1350 Tokenizer\001 +4 1 0 50 0 1 12 1.5708 4 180 1020 7125 2250 SQL Compiler\001 +4 1 0 50 0 1 12 1.5708 4 135 345 3075 2025 Core\001 +4 1 0 50 0 2 12 0.0000 4 135 1290 3900 2850 Virtual Machine\001 +4 1 0 50 0 2 12 0.0000 4 165 1185 3900 1995 SQL Command\001 +4 1 0 50 0 2 12 0.0000 4 135 855 3900 2183 Processor\001 +4 1 0 50 0 2 14 0.0000 4 150 870 3900 1350 Interface\001 +4 1 0 50 0 1 12 1.5708 4 135 885 7125 5400 Accessories\001 +4 1 0 50 0 1 12 1.5708 4 135 645 3075 4875 Backend\001 ADDED art/arch2b.fig Index: art/arch2b.fig ================================================================== --- /dev/null +++ art/arch2b.fig @@ -0,0 +1,125 @@ +#FIG 3.2 +Landscape +Center +Inches +Letter +100.00 +Single +-2 +1200 2 +0 32 #000000 +0 33 #868686 +0 34 #dfefd7 +0 35 #d7efef +0 36 #efdbef +0 37 #efdbd7 +0 38 #e7efcf +0 39 #9e9e9e +6 3225 3900 4650 6000 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 5475 4575 5475 4575 5925 3225 5925 3225 5475 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 5550 4650 5550 4650 6000 3300 6000 3300 5550 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 4650 4575 4650 4575 5100 3225 5100 3225 4650 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 4725 4650 4725 4650 5175 3300 5175 3300 4725 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 3900 4575 3900 4575 4350 3225 4350 3225 3900 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 3975 4650 3975 4650 4425 3300 4425 3300 3975 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 4350 3900 4650 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 5100 3900 5475 +4 1 0 50 0 2 12 0.0000 4 135 1050 3900 
5775 OS Interface\001 +4 1 0 50 0 2 12 0.0000 4 135 615 3900 4200 B-Tree\001 +4 1 0 50 0 2 12 0.0000 4 180 495 3900 4950 Pager\001 +-6 +6 5175 4275 7200 6150 +6 5400 4519 6825 5090 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 4519 6750 4519 6750 5009 5400 5009 5400 4519 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 4601 6825 4601 6825 5090 5475 5090 5475 4601 +4 1 0 50 0 2 12 0.0000 4 135 630 6000 4845 Utilities\001 +-6 +6 5400 5416 6825 5987 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 5416 6750 5416 6750 5906 5400 5906 5400 5416 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 5498 6825 5498 6825 5987 5475 5987 5475 5498 +4 1 0 50 0 2 12 0.0000 4 135 855 6000 5742 Test Code\001 +-6 +2 2 0 1 0 38 55 0 20 0.000 0 0 -1 0 0 5 + 5175 4275 7200 4275 7200 6150 5175 6150 5175 4275 +4 1 0 50 0 1 12 1.5708 4 135 885 7125 5253 Accessories\001 +-6 +6 5400 2700 6825 3675 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 2775 6825 2775 6825 3675 5475 3675 5475 2775 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 2700 6750 2700 6750 3600 5400 3600 5400 2700 +4 1 0 50 0 2 12 0.0000 4 135 420 6075 3075 Code\001 +4 1 0 50 0 2 12 0.0000 4 135 855 6075 3300 Generator\001 +-6 +6 5400 1875 6825 2400 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 1875 6750 1875 6750 2325 5400 2325 5400 1875 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 1950 6825 1950 6825 2400 5475 2400 5475 1950 +4 1 0 50 0 2 12 0.0000 4 135 570 6075 2175 Parser\001 +-6 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 5400 1050 6750 1050 6750 1500 5400 1500 5400 1050 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 5475 1125 6825 1125 6825 1575 5475 1575 5475 1125 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 1050 4575 1050 4575 1500 3225 1500 3225 1050 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 1125 4650 1125 4650 1575 3300 1575 3300 1125 +2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 1800 4575 1800 4575 2250 3225 2250 3225 1800 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 1875 4650 1875 4650 2325 3300 2325 3300 1875 
+2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 + 3225 2550 4575 2550 4575 3000 3225 3000 3225 2550 +2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 + 3300 2625 4650 2625 4650 3075 3300 3075 3300 2625 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 1500 3900 1800 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 2250 3900 2550 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 3900 3000 3900 3900 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 4575 1950 5400 1350 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 5400 2925 4650 2175 +2 2 0 1 0 34 55 0 20 0.000 0 0 -1 0 0 5 + 2850 750 4875 750 4875 3375 2850 3375 2850 750 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 6075 1500 6075 1800 +2 3 0 1 0 35 55 0 20 0.000 0 0 -1 0 0 5 + 2850 3675 4875 3675 4875 6150 2850 6150 2850 3675 +2 2 0 1 0 37 55 0 20 0.000 0 0 -1 0 0 5 + 5175 750 7200 750 7200 3975 5175 3975 5175 750 +2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 6075 2400 6075 2700 +4 1 0 50 0 2 12 0.0000 4 135 855 6075 1350 Tokenizer\001 +4 1 0 50 0 1 12 1.5708 4 180 1020 7125 2250 SQL Compiler\001 +4 1 0 50 0 1 12 1.5708 4 135 345 3075 2025 Core\001 +4 1 0 50 0 2 12 0.0000 4 135 1290 3900 2850 Virtual Machine\001 +4 1 0 50 0 2 12 0.0000 4 165 1185 3900 1995 SQL Command\001 +4 1 0 50 0 2 12 0.0000 4 135 855 3900 2183 Processor\001 +4 1 0 50 0 2 14 0.0000 4 150 870 3900 1350 Interface\001 +4 1 0 50 0 1 12 1.5708 4 135 645 3075 4875 Backend\001 DELETED audit.tcl Index: audit.tcl ================================================================== --- audit.tcl +++ /dev/null @@ -1,214 +0,0 @@ -# -# Run this Tcl script to generate the audit.html file. -# -set rcsid {$Id: audit.tcl,v 1.1 2002/07/13 16:52:35 drh Exp $} - -puts { - - SQLite Security Audit Procedure - - -

-SQLite Security Audit Procedure -

} -puts "

-(This page was last modified on [lrange $rcsid 3 4] UTC) -

" - -puts { -

-A security audit for SQLite consists of two components. First, there is -a check for common errors that often lead to security problems. Second, -an attempt is made to construct a proof that SQLite has certain desirable -security properties. -

- -

Part I: Things to check

- -

-Scan all source code and check for the following common errors: -

- -
    -
  1. -Verify that the destination buffer is large enough to hold its result -in every call to the following routines: -

      -
    • strcpy()
    • -
    • strncpy()
    • -
    • strcat()
    • -
    • memcpy()
    • -
    • memset()
    • -
    • memmove()
    • -
    • bcopy()
    • -
    • sprintf()
    • -
    • scanf()
    • -
    -

  2. -
  3. -Verify that pointers returned by subroutines are not NULL before using -the pointers. In particular, make sure the return values for the following -routines are checked before they are used: -

      -
    • malloc()
    • -
    • realloc()
    • -
    • sqliteMalloc()
    • -
    • sqliteRealloc()
    • -
    • sqliteStrDup()
    • -
    • sqliteStrNDup()
    • -
    • sqliteExpr()
    • -
    • sqliteExprFunction()
    • -
    • sqliteExprListAppend()
    • -
    • sqliteResultSetOfSelect()
    • -
    • sqliteIdListAppend()
    • -
    • sqliteSrcListAppend()
    • -
    • sqliteSelectNew()
    • -
    • sqliteTableNameToTable()
    • -
    • sqliteTableTokenToSrcList()
    • -
    • sqliteWhereBegin()
    • -
    • sqliteFindTable()
    • -
    • sqliteFindIndex()
    • -
    • sqliteTableNameFromToken()
    • -
    • sqliteGetVdbe()
    • -
    • sqlite_mprintf()
    • -
    • sqliteExprDup()
    • -
    • sqliteExprListDup()
    • -
    • sqliteSrcListDup()
    • -
    • sqliteIdListDup()
    • -
    • sqliteSelectDup()
    • -
    • sqliteFindFunction()
    • -
    • sqliteTriggerSelectStep()
    • -
    • sqliteTriggerInsertStep()
    • -
    • sqliteTriggerUpdateStep()
    • -
    • sqliteTriggerDeleteStep()
    • -
    -

  4. -
  5. -On all functions and procedures, verify that pointer parameters are not NULL -before dereferencing those parameters. -

  6. -
  7. -Check to make sure that temporary files are opened safely: that the process -will not overwrite an existing file when opening the temp file and that -another process is unable to substitute a file for the temp file being -opened. -

  8. -
- - - -

Part II: Things to prove

- -

-Prove that SQLite exhibits the characteristics outlined below: -

- -
    -
  1. -The following are preconditions:

    -

      -
    • Z is an arbitrary-length NUL-terminated string.
    • -
    • An existing SQLite database has been opened. The return value - from the call to sqlite_open() is stored in the variable - db.
    • -
    • The database contains at least one table of the form: -
      -CREATE TABLE t1(a CLOB);
      -
    • -
    • There are no user-defined functions other than the standard - build-in functions.
    • -

    -

    The following statement of C code is executed:

    -
    -sqlite_exec_printf(
    -   db,
    -   "INSERT INTO t1(a) VALUES('%q');", 
    -   0, 0, 0, Z
    -);
    -
    -

    Prove the following are true for all possible values of string Z:

    -
      -
    1. -The call to sqlite_exec_printf() will -return in a length of time that is a polynomial in strlen(Z). -It might return an error code but it will not crash. -

    2. -
    3. -At most one new row will be inserted into table t1. -

    4. -
    5. -No preexisting rows of t1 will be deleted or modified. -

    6. -
    7. -No tables other than t1 will be altered in any way. -

    8. -
    9. -No preexisting files on the host computers filesystem, other than -the database file itself, will be deleted or modified. -

    10. -
    11. -For some constants K1 and K2, -if at least K1*strlen(Z) + K2 bytes of contiguous memory are -available to malloc(), then the call to sqlite_exec_printf() -will not return SQLITE_NOMEM. -

    12. -
    -

  2. - - -
  3. -The following are preconditions: -

      -
    • Z is an arbitrary-length NUL-terminated string.
    • -
    • An existing SQLite database has been opened. The return value - from the call to sqlite_open() is stored in the variable - db.
    • -
    • There exists a callback function cb() that appends all - information passed in through its parameters into a single - data buffer called Y.
    • -
    • There are no user-defined functions other than the standard - build-in functions.
    • -

    -

    The following statement of C code is executed:

    -
    -sqlite_exec(db, Z, cb, 0, 0);
    -
    -

    Prove the following are true for all possible values of string Z:

    -
      -
    1. -The call to sqlite_exec() will -return in a length of time which is a polynomial in strlen(Z). -It might return an error code but it will not crash. -

    2. -
    3. -After sqlite_exec() returns, the buffer Y will not contain -any content from any preexisting file on the host computers file system, -except for the database file. -

    4. -
    5. -After the call to sqlite_exec() returns, the database file will -still be well-formed. It might not contain the same data, but it will -still be a properly constructed SQLite database file. -

    6. -
    7. -No preexisting files on the host computers filesystem, other than -the database file itself, will be deleted or modified. -

    8. -
    9. -For some constants K1 and K2, -if at least K1*strlen(Z) + K2 bytes of contiguous memory are -available to malloc(), then the call to sqlite_exec() -will not return SQLITE_NOMEM. -

    10. -
    -

  4. - -
-} -puts { -


-

-Back to the SQLite Home Page -

- -} DELETED autoinc.tcl Index: autoinc.tcl ================================================================== --- autoinc.tcl +++ /dev/null @@ -1,109 +0,0 @@ -# -# Run this Tcl script to generate the autoinc.html file. -# -set rcsid {$Id: } -source common.tcl - -if {[llength $argv]>0} { - set outputdir [lindex $argv 0] -} else { - set outputdir "" -} - -header {SQLite Autoincrement} -puts { -

SQLite Autoincrement

- -

-In SQLite, every row of every table has an integer ROWID. -The ROWID for each row is unique among all rows in the same table. -In SQLite version 2.8 the ROWID is a 32-bit signed integer. -Version 3.0 of SQLite expanded the ROWID to be a 64-bit signed integer. -

- -

-You can access the ROWID of an SQLite table using one the special column -names ROWID, _ROWID_, or OID. -Except if you declare an ordinary table column to use one of those special -names, then the use of that name will refer to the declared column not -to the internal ROWID. -

- -

-If a table contains a column of type INTEGER PRIMARY KEY, then that -column becomes an alias for the ROWID. You can then access the ROWID -using any of four different names, the original three names described above -or the name given to the INTEGER PRIMARY KEY column. All these names are -aliases for one another and work equally well in any context. -

- -

-When a new row is inserted into an SQLite table, the ROWID can either -be specified as part of the INSERT statement or it can be assigned -automatically by the database engine. To specify a ROWID manually, -just include it in the list of values to be inserted. For example: -

- -
-CREATE TABLE test1(a INT, b TEXT);
-INSERT INTO test1(rowid, a, b) VALUES(123, 5, 'hello');
-
- -

-If no ROWID is specified on the insert, an appropriate ROWID is created -automatically. The usual algorithm is to give the newly created row -a ROWID that is one larger than the largest ROWID in the table prior -to the insert. If the table is initially empty, then a ROWID of 1 is -used. If the largest ROWID is equal to the largest possible integer -(9223372036854775807 in SQLite version 3.0 and later) then the database -engine starts picking candidate ROWIDs at random until it finds one -that is not previously used. -

- -

-The normal ROWID selection algorithm described above -will generate monotonically increasing -unique ROWIDs as long as you never use the maximum ROWID value and you never -delete the entry in the table with the largest ROWID. -If you ever delete rows or if you ever create a row with the maximum possible -ROWID, then ROWIDs from previously deleted rows might be reused when creating -new rows and newly created ROWIDs might not be in strictly accending order. -

- - -

The AUTOINCREMENT Keyword

- -

-If a column has the type INTEGER PRIMARY KEY AUTOINCREMENT then a slightly -different ROWID selection algorithm is used. -The ROWID chosen for the new row is one larger than the largest ROWID -that has ever before existed in that same table. If the table has never -before contained any data, then a ROWID of 1 is used. If the table -has previously held a row with the largest possible ROWID, then new INSERTs -are not allowed and any attempt to insert a new row will fail with an -SQLITE_FULL error. -

- -

-SQLite keeps track of the largest ROWID that a table has ever held using -the special SQLITE_SEQUENCE table. The SQLITE_SEQUENCE table is created -and initialized automatically whenever a normal table that contains an -AUTOINCREMENT column is created. The content of the SQLITE_SEQUENCE table -can be modified using ordinary UPDATE, INSERT, and DELETE statements. -But making modifications to this table will likely perturb the AUTOINCREMENT -key generation algorithm. Make sure you know what you are doing before -you undertake such changes. -

- -

-The behavior implemented by the AUTOINCREMENT keyword is subtly different -from the default behavior. With AUTOINCREMENT, rows with automatically -selected ROWIDs are guaranteed to have ROWIDs that have never been used -before by the same table in the same database. And the automatically generated -ROWIDs are guaranteed to be monotonically increasing. These are important -properties in certain applications. But if your application does not -need these properties, you should probably stay with the default behavior -since the use of AUTOINCREMENT requires additional work to be done -as each row is inserted and thus causes INSERTs to run a little slower. -} -footer $rcsid DELETED c_interface.tcl Index: c_interface.tcl ================================================================== --- c_interface.tcl +++ /dev/null @@ -1,1116 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: c_interface.tcl,v 1.43 2004/11/19 11:59:24 danielk1977 Exp $} -source common.tcl -header {The C language interface to the SQLite library} -puts { -

The C language interface to the SQLite library

- -

The SQLite library is designed to be very easy to use from -a C or C++ program. This document gives an overview of the C/C++ -programming interface.

- -

1.0 The Core API

- -

The interface to the SQLite library consists of three core functions, -one opaque data structure, and some constants used as return values. -The core interface is as follows:

- -
-typedef struct sqlite sqlite;
-#define SQLITE_OK           0   /* Successful result */
-
-sqlite *sqlite_open(const char *dbname, int mode, char **errmsg);
-
-void sqlite_close(sqlite *db);
-
-int sqlite_exec(
-  sqlite *db,
-  char *sql,
-  int (*xCallback)(void*,int,char**,char**),
-  void *pArg,
-  char **errmsg
-);
-
- -

-The above is all you really need to know in order to use SQLite -in your C or C++ programs. There are other interface functions -available (and described below) but we will begin by describing -the core functions shown above. -

- - -

1.1 Opening a database

- -

Use the sqlite_open function to open an existing SQLite -database or to create a new SQLite database. The first argument -is the database name. The second argument is intended to signal -whether the database is going to be used for reading and writing -or just for reading. But in the current implementation, the -second argument to sqlite_open is ignored. -The third argument is a pointer to a string pointer. -If the third argument is not NULL and an error occurs -while trying to open the database, then an error message will be -written to memory obtained from malloc() and *errmsg will be made -to point to this error message. The calling function is responsible -for freeing the memory when it has finished with it.

- -

The name of an SQLite database is the name of a file that will -contain the database. If the file does not exist, SQLite attempts -to create and initialize it. If the file is read-only (due to -permission bits or because it is located on read-only media like -a CD-ROM) then SQLite opens the database for reading only. The -entire SQL database is stored in a single file on the disk. But -additional temporary files may be created during the execution of -an SQL command in order to store the database rollback journal or -temporary and intermediate results of a query.

- -

The return value of the sqlite_open function is a -pointer to an opaque sqlite structure. This pointer will -be the first argument to all subsequent SQLite function calls that -deal with the same database. NULL is returned if the open fails -for any reason.

- -
-

1.2 Closing the database

- -

To close an SQLite database, call the sqlite_close -function passing it the sqlite structure pointer that was obtained -from a prior call to sqlite_open. -If a transaction is active when the database is closed, the transaction -is rolled back.

- -
-

1.3 Executing SQL statements

- -

The sqlite_exec function is used to process SQL statements -and queries. This function requires 5 parameters as follows:

- -
    -
  1. A pointer to the sqlite structure obtained from a prior call - to sqlite_open.

  2. -
  3. A null-terminated string containing the text of one or more - SQL statements and/or queries to be processed.

  4. -
  5. A pointer to a callback function which is invoked once for each - row in the result of a query. This argument may be NULL, in which - case no callbacks will ever be invoked.

  6. -
  7. A pointer that is forwarded to become the first argument - to the callback function.

  8. -
  9. A pointer to an error string. Error messages are written to space - obtained from malloc() and the error string is made to point to - the malloced space. The calling function is responsible for freeing - this space when it has finished with it. - This argument may be NULL, in which case error messages are not - reported back to the calling function.

  10. -
- -

-The callback function is used to receive the results of a query. A -prototype for the callback function is as follows:

- -
-int Callback(void *pArg, int argc, char **argv, char **columnNames){
-  return 0;
-}
-
- -
-

The first argument to the callback is just a copy of the fourth argument -to sqlite_exec This parameter can be used to pass arbitrary -information through to the callback function from client code. -The second argument is the number of columns in the query result. -The third argument is an array of pointers to strings where each string -is a single column of the result for that record. Note that the -callback function reports a NULL value in the database as a NULL pointer, -which is very different from an empty string. If the i-th parameter -is an empty string, we will get:

-
-argv[i][0] == 0
-
-

But if the i-th parameter is NULL we will get:

-
-argv[i] == 0
-
- -

The names of the columns are contained in first argc -entries of the fourth argument. -If the SHOW_DATATYPES pragma -is on (it is off by default) then -the second argc entries in the 4th argument are the datatypes -for the corresponding columns. -

- -

If the -EMPTY_RESULT_CALLBACKS pragma is set to ON and the result of -a query is an empty set, then the callback is invoked once with the -third parameter (argv) set to 0. In other words -

-argv == 0
-
-The second parameter (argc) -and the fourth parameter (columnNames) are still valid -and can be used to determine the number and names of the result -columns if there had been a result. -The default behavior is not to invoke the callback at all if the -result set is empty.

- - -

The callback function should normally return 0. If the callback -function returns non-zero, the query is immediately aborted and -sqlite_exec will return SQLITE_ABORT.

- -

1.4 Error Codes

- -

-The sqlite_exec function normally returns SQLITE_OK. But -if something goes wrong it can return a different value to indicate -the type of error. Here is a complete list of the return codes: -

- -
-#define SQLITE_OK           0   /* Successful result */
-#define SQLITE_ERROR        1   /* SQL error or missing database */
-#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
-#define SQLITE_PERM         3   /* Access permission denied */
-#define SQLITE_ABORT        4   /* Callback routine requested an abort */
-#define SQLITE_BUSY         5   /* The database file is locked */
-#define SQLITE_LOCKED       6   /* A table in the database is locked */
-#define SQLITE_NOMEM        7   /* A malloc() failed */
-#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
-#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
-#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
-#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
-#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
-#define SQLITE_FULL        13   /* Insertion failed because database is full */
-#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
-#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
-#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
-#define SQLITE_SCHEMA      17   /* The database schema changed */
-#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
-#define SQLITE_CONSTRAINT  19   /* Abort due to contraint violation */
-#define SQLITE_MISMATCH    20   /* Data type mismatch */
-#define SQLITE_MISUSE      21   /* Library used incorrectly */
-#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
-#define SQLITE_AUTH        23   /* Authorization denied */
-#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
-#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
-
- -

-The meanings of these various return values are as follows: -

- -
-
-
SQLITE_OK
-

This value is returned if everything worked and there were no errors. -

-
SQLITE_INTERNAL
-

This value indicates that an internal consistency check within -the SQLite library failed. This can only happen if there is a bug in -the SQLite library. If you ever get an SQLITE_INTERNAL reply from -an sqlite_exec call, please report the problem on the SQLite -mailing list. -

-
SQLITE_ERROR
-

This return value indicates that there was an error in the SQL -that was passed into the sqlite_exec. -

-
SQLITE_PERM
-

This return value says that the access permissions on the database -file are such that the file cannot be opened. -

-
SQLITE_ABORT
-

This value is returned if the callback function returns non-zero. -

-
SQLITE_BUSY
-

This return code indicates that another program or thread has -the database locked. SQLite allows two or more threads to read the -database at the same time, but only one thread can have the database -open for writing at the same time. Locking in SQLite is on the -entire database.

-

-
SQLITE_LOCKED
-

This return code is similar to SQLITE_BUSY in that it indicates -that the database is locked. But the source of the lock is a recursive -call to sqlite_exec. This return can only occur if you attempt -to invoke sqlite_exec from within a callback routine of a query -from a prior invocation of sqlite_exec. Recursive calls to -sqlite_exec are allowed as long as they do -not attempt to write the same table. -

-
SQLITE_NOMEM
-

This value is returned if a call to malloc fails. -

-
SQLITE_READONLY
-

This return code indicates that an attempt was made to write to -a database file that is opened for reading only. -

-
SQLITE_INTERRUPT
-

This value is returned if a call to sqlite_interrupt -interrupts a database operation in progress. -

-
SQLITE_IOERR
-

This value is returned if the operating system informs SQLite -that it is unable to perform some disk I/O operation. This could mean -that there is no more space left on the disk. -

-
SQLITE_CORRUPT
-

This value is returned if SQLite detects that the database it is -working on has become corrupted. Corruption might occur due to a rogue -process writing to the database file or it might happen due to an -perviously undetected logic error in of SQLite. This value is also -returned if a disk I/O error occurs in such a way that SQLite is forced -to leave the database file in a corrupted state. The latter should only -happen due to a hardware or operating system malfunction. -

-
SQLITE_FULL
-

This value is returned if an insertion failed because there is -no space left on the disk, or the database is too big to hold any -more information. The latter case should only occur for databases -that are larger than 2GB in size. -

-
SQLITE_CANTOPEN
-

This value is returned if the database file could not be opened -for some reason. -

-
SQLITE_PROTOCOL
-

This value is returned if some other process is messing with -file locks and has violated the file locking protocol that SQLite uses -on its rollback journal files. -

-
SQLITE_SCHEMA
-

When the database first opened, SQLite reads the database schema -into memory and uses that schema to parse new SQL statements. If another -process changes the schema, the command currently being processed will -abort because the virtual machine code generated assumed the old -schema. This is the return code for such cases. Retrying the -command usually will clear the problem. -

-
SQLITE_TOOBIG
-

SQLite will not store more than about 1 megabyte of data in a single -row of a single table. If you attempt to store more than 1 megabyte -in a single row, this is the return code you get. -

-
SQLITE_CONSTRAINT
-

This constant is returned if the SQL statement would have violated -a database constraint. -

-
SQLITE_MISMATCH
-

This error occurs when there is an attempt to insert non-integer -data into a column labeled INTEGER PRIMARY KEY. For most columns, SQLite -ignores the data type and allows any kind of data to be stored. But -an INTEGER PRIMARY KEY column is only allowed to store integer data. -

-
SQLITE_MISUSE
-

This error might occur if one or more of the SQLite API routines -is used incorrectly. Examples of incorrect usage include calling -sqlite_exec after the database has been closed using -sqlite_close or -calling sqlite_exec with the same -database pointer simultaneously from two separate threads. -

-
SQLITE_NOLFS
-

This error means that you have attempts to create or access a file -database file that is larger that 2GB on a legacy Unix machine that -lacks large file support. -

-
SQLITE_AUTH
-

This error indicates that the authorizer callback -has disallowed the SQL you are attempting to execute. -

-
SQLITE_ROW
-

This is one of the return codes from the -sqlite_step routine which is part of the non-callback API. -It indicates that another row of result data is available. -

-
SQLITE_DONE
-

This is one of the return codes from the -sqlite_step routine which is part of the non-callback API. -It indicates that the SQL statement has been completely executed and -the sqlite_finalize routine is ready to be called. -

-
-
- -

2.0 Accessing Data Without Using A Callback Function

- -

-The sqlite_exec routine described above used to be the only -way to retrieve data from an SQLite database. But many programmers found -it inconvenient to use a callback function to obtain results. So beginning -with SQLite version 2.7.7, a second access interface is available that -does not use callbacks. -

- -

-The new interface uses three separate functions to replace the single -sqlite_exec function. -

- -
-typedef struct sqlite_vm sqlite_vm;
-
-int sqlite_compile(
-  sqlite *db,              /* The open database */
-  const char *zSql,        /* SQL statement to be compiled */
-  const char **pzTail,     /* OUT: uncompiled tail of zSql */
-  sqlite_vm **ppVm,        /* OUT: the virtual machine to execute zSql */
-  char **pzErrmsg          /* OUT: Error message. */
-);
-
-int sqlite_step(
-  sqlite_vm *pVm,          /* The virtual machine to execute */
-  int *pN,                 /* OUT: Number of columns in result */
-  const char ***pazValue,  /* OUT: Column data */
-  const char ***pazColName /* OUT: Column names and datatypes */
-);
-
-int sqlite_finalize(
-  sqlite_vm *pVm,          /* The virtual machine to be finalized */
-  char **pzErrMsg          /* OUT: Error message */
-);
-
- -

-The strategy is to compile a single SQL statement using -sqlite_compile then invoke sqlite_step multiple times, -once for each row of output, and finally call sqlite_finalize -to clean up after the SQL has finished execution. -

- -

2.1 Compiling An SQL Statement Into A Virtual Machine

- -

-The sqlite_compile "compiles" a single SQL statement (specified -by the second parameter) and generates a virtual machine that is able -to execute that statement. -As with most interface routines, the first parameter must be a pointer -to an sqlite structure that was obtained from a prior call to -sqlite_open. - 

-A pointer to the virtual machine is stored in a pointer which is passed -in as the 4th parameter. -Space to hold the virtual machine is dynamically allocated. To avoid -a memory leak, the calling function must invoke -sqlite_finalize on the virtual machine after it has finished -with it. -The 4th parameter may be set to NULL if an error is encountered during -compilation. -

- -

-If any errors are encountered during compilation, an error message is -written into memory obtained from malloc and the 5th parameter -is made to point to that memory. If the 5th parameter is NULL, then -no error message is generated. If the 5th parameter is not NULL, then -the calling function should dispose of the memory containing the error -message by calling sqlite_freemem. -

- -

-If the 2nd parameter actually contains two or more statements of SQL, -only the first statement is compiled. (This is different from the -behavior of sqlite_exec which executes all SQL statements -in its input string.) The 3rd parameter to sqlite_compile -is made to point to the first character beyond the end of the first -statement of SQL in the input. If the 2nd parameter contains only -a single SQL statement, then the 3rd parameter will be made to point -to the '\000' terminator at the end of the 2nd parameter. -

- -

-On success, sqlite_compile returns SQLITE_OK. -Otherwise an error code is returned. -

- -

2.2 Step-By-Step Execution Of An SQL Statement

- -

-After a virtual machine has been generated using sqlite_compile -it is executed by one or more calls to sqlite_step. Each -invocation of sqlite_step, except the last one, -returns a single row of the result. -The number of columns in the result is stored in the integer that -the 2nd parameter points to. -The pointer specified by the 3rd parameter is made to point -to an array of pointers to column values. -The pointer in the 4th parameter is made to point to an array -of pointers to column names and datatypes. -The 2nd through 4th parameters to sqlite_step convey the -same information as the 2nd through 4th parameters of the -callback routine when using -the sqlite_exec interface. Except, with sqlite_step -the column datatype information is always included in the -4th parameter regardless of whether or not the -SHOW_DATATYPES pragma -is on or off. -

- -

-Each invocation of sqlite_step returns an integer code that -indicates what happened during that step. This code may be -SQLITE_BUSY, SQLITE_ROW, SQLITE_DONE, SQLITE_ERROR, or -SQLITE_MISUSE. -

- -

-If the virtual machine is unable to open the database file because -it is locked by another thread or process, sqlite_step -will return SQLITE_BUSY. The calling function should do some other -activity, or sleep, for a short amount of time to give the lock a -chance to clear, then invoke sqlite_step again. This can -be repeated as many times as desired. -

- -

-Whenever another row of result data is available, -sqlite_step will return SQLITE_ROW. The row data is -stored in an array of pointers to strings and the 3rd parameter -is made to point to this array. -

- -

-When all processing is complete, sqlite_step will return -either SQLITE_DONE or SQLITE_ERROR. SQLITE_DONE indicates that the -statement completed successfully and SQLITE_ERROR indicates that there -was a run-time error. (The details of the error are obtained from -sqlite_finalize.) It is a misuse of the library to attempt -to call sqlite_step again after it has returned SQLITE_DONE -or SQLITE_ERROR. -

- -

-When sqlite_step returns SQLITE_DONE or SQLITE_ERROR, -the *pN and *pazColName values are set to the number of columns -in the result set and to the names of the columns, just as they -are for an SQLITE_ROW return. This allows the calling code to -find the number of result columns and the column names and datatypes -even if the result set is empty. The *pazValue parameter is always -set to NULL when the return code is SQLITE_DONE or SQLITE_ERROR. -If the SQL being executed is a statement that does not -return a result (such as an INSERT or an UPDATE) then *pN will -be set to zero and *pazColName will be set to NULL. -

- -

-If you abuse the library by trying to call sqlite_step -inappropriately it will attempt to return SQLITE_MISUSE. -This can happen if you call sqlite_step() on the same virtual machine -at the same -time from two or more threads or if you call sqlite_step() -again after it returned SQLITE_DONE or SQLITE_ERROR or if you -pass in an invalid virtual machine pointer to sqlite_step(). -You should not depend on the SQLITE_MISUSE return code to indicate -an error. It is possible that a misuse of the interface will go -undetected and result in a program crash. The SQLITE_MISUSE is -intended as a debugging aid only - to help you detect incorrect -usage prior to a mishap. The misuse detection logic is not guaranteed -to work in every case. -

- -

2.3 Deleting A Virtual Machine

- -

-Every virtual machine that sqlite_compile creates should -eventually be handed to sqlite_finalize. The sqlite_finalize() -procedure deallocates the memory and other resources that the virtual -machine uses. Failure to call sqlite_finalize() will result in -resource leaks in your program. -

- -

-The sqlite_finalize routine also returns the result code -that indicates success or failure of the SQL operation that the -virtual machine carried out. -The value returned by sqlite_finalize() will be the same as would -have been returned had the same SQL been executed by sqlite_exec. -The error message returned will also be the same. -

- -

-It is acceptable to call sqlite_finalize on a virtual machine -before sqlite_step has returned SQLITE_DONE. Doing so has -the effect of interrupting the operation in progress. Partially completed -changes will be rolled back and the database will be restored to its -original state (unless an alternative recovery algorithm is selected using -an ON CONFLICT clause in the SQL being executed.) The effect is the -same as if a callback function of sqlite_exec had returned -non-zero. -

- -

-It is also acceptable to call sqlite_finalize on a virtual machine -that has never been passed to sqlite_step even once. -

- -

3.0 The Extended API

- -

Only the three core routines described in section 1.0 are required to use -SQLite. But there are many other functions that provide -useful interfaces. These extended routines are as follows: -

- -
-int sqlite_last_insert_rowid(sqlite*);
-
-int sqlite_changes(sqlite*);
-
-int sqlite_get_table(
-  sqlite*,
-  char *sql,
-  char ***result,
-  int *nrow,
-  int *ncolumn,
-  char **errmsg
-);
-
-void sqlite_free_table(char**);
-
-void sqlite_interrupt(sqlite*);
-
-int sqlite_complete(const char *sql);
-
-void sqlite_busy_handler(sqlite*, int (*)(void*,const char*,int), void*);
-
-void sqlite_busy_timeout(sqlite*, int ms);
-
-const char sqlite_version[];
-
-const char sqlite_encoding[];
-
-int sqlite_exec_printf(
-  sqlite*,
-  char *sql,
-  int (*)(void*,int,char**,char**),
-  void*,
-  char **errmsg,
-  ...
-);
-
-int sqlite_exec_vprintf(
-  sqlite*,
-  char *sql,
-  int (*)(void*,int,char**,char**),
-  void*,
-  char **errmsg,
-  va_list
-);
-
-int sqlite_get_table_printf(
-  sqlite*,
-  char *sql,
-  char ***result,
-  int *nrow,
-  int *ncolumn,
-  char **errmsg,
-  ...
-);
-
-int sqlite_get_table_vprintf(
-  sqlite*,
-  char *sql,
-  char ***result,
-  int *nrow,
-  int *ncolumn,
-  char **errmsg,
-  va_list
-);
-
-char *sqlite_mprintf(const char *zFormat, ...);
-
-char *sqlite_vmprintf(const char *zFormat, va_list);
-
-void sqlite_freemem(char*);
-
-void sqlite_progress_handler(sqlite*, int, int (*)(void*), void*);
-
-
- -

All of the above definitions are included in the "sqlite.h" -header file that comes in the source tree.

- -

3.1 The ROWID of the most recent insert

- -

Every row of an SQLite table has a unique integer key. If the -table has a column labeled INTEGER PRIMARY KEY, then that column -serves as the key. If there is no INTEGER PRIMARY KEY column then -the key is a unique integer. The key for a row can be accessed in -a SELECT statement or used in a WHERE or ORDER BY clause using any -of the names "ROWID", "OID", or "_ROWID_".

- -

When you do an insert into a table that does not have an INTEGER PRIMARY -KEY column, or if the table does have an INTEGER PRIMARY KEY but the value -for that column is not specified in the VALUES clause of the insert, then -the key is automatically generated. You can find the value of the key -for the most recent INSERT statement using the -sqlite_last_insert_rowid API function.

- -

3.2 The number of rows that changed

- -

The sqlite_changes API function returns the number of rows -that have been inserted, deleted, or modified since the database was -last quiescent. A "quiescent" database is one in which there are -no outstanding calls to sqlite_exec and no VMs created by -sqlite_compile that have not been finalized by sqlite_finalize. -In common usage, sqlite_changes returns the number -of rows inserted, deleted, or modified by the most recent sqlite_exec -call or since the most recent sqlite_compile. But if you have -nested calls to sqlite_exec (that is, if the callback routine -of one sqlite_exec invokes another sqlite_exec) or if -you invoke sqlite_compile to create a new VM while there is -still another VM in existence, then -the meaning of the number returned by sqlite_changes is more -complex. -The number reported includes any changes -that were later undone by a ROLLBACK or ABORT. But rows that are -deleted because of a DROP TABLE are not counted.

- -

SQLite implements the command "DELETE FROM table" (without -a WHERE clause) by dropping the table then recreating it. -This is much faster than deleting the elements of the table individually. -But it also means that the value returned from sqlite_changes -will be zero regardless of the number of elements that were originally -in the table. If an accurate count of the number of elements deleted -is necessary, use "DELETE FROM table WHERE 1" instead.

- -

3.3 Querying into memory obtained from malloc()

- -

The sqlite_get_table function is a wrapper around -sqlite_exec that collects all the information from successive -callbacks and writes it into memory obtained from malloc(). This -is a convenience function that allows the application to get the -entire result of a database query with a single function call.

- -

The main result from sqlite_get_table is an array of pointers -to strings. There is one element in this array for each column of -each row in the result. NULL results are represented by a NULL -pointer. In addition to the regular data, there is an added row at the -beginning of the array that contains the name of each column of the -result.

- -

As an example, consider the following query:

- -
-SELECT employee_name, login, host FROM users WHERE login LIKE 'd%'; -
- -

This query will return the name, login and host computer name -for every employee whose login begins with the letter "d". If this -query is submitted to sqlite_get_table the result might -look like this:

- -
-nrow = 2
-ncolumn = 3
-result[0] = "employee_name"
-result[1] = "login"
-result[2] = "host"
-result[3] = "dummy"
-result[4] = "No such user"
-result[5] = 0
-result[6] = "D. Richard Hipp"
-result[7] = "drh"
-result[8] = "zadok" -
- -

Notice that the "host" value for the "dummy" record is NULL so -the result[] array contains a NULL pointer at that slot.

- -

If the result set of a query is empty, then by default -sqlite_get_table will set nrow to 0 and leave its -result parameter set to NULL. But if the EMPTY_RESULT_CALLBACKS -pragma is ON then the result parameter is initialized to the names -of the columns only. For example, consider this query which has -an empty result set:

- -
-SELECT employee_name, login, host FROM users WHERE employee_name IS NULL; -
- -

-The default behavior gives this result: -

- -
-nrow = 0
-ncolumn = 0
-result = 0
-
- -

-But if the EMPTY_RESULT_CALLBACKS pragma is ON, then the following -is returned: -

- -
-nrow = 0
-ncolumn = 3
-result[0] = "employee_name"
-result[1] = "login"
-result[2] = "host"
-
- -

Memory to hold the information returned by sqlite_get_table -is obtained from malloc(). But the calling function should not try -to free this information directly. Instead, pass the complete table -to sqlite_free_table when the table is no longer needed. -It is safe to call sqlite_free_table with a NULL pointer such -as would be returned if the result set is empty.

- -

The sqlite_get_table routine returns the same integer -result code as sqlite_exec.

- -

3.4 Interrupting an SQLite operation

- -

The sqlite_interrupt function can be called from a -different thread or from a signal handler to cause the current database -operation to exit at its first opportunity. When this happens, -the sqlite_exec routine (or the equivalent) that started -the database operation will return SQLITE_INTERRUPT.

- -

3.5 Testing for a complete SQL statement

- -

The next interface routine to SQLite is a convenience function used -to test whether or not a string forms a complete SQL statement. -If the sqlite_complete function returns true when its input -is a string, then the argument forms a complete SQL statement. -There are no guarantees that the syntax of that statement is correct, -but we at least know the statement is complete. If sqlite_complete -returns false, then more text is required to complete the SQL statement.

- -

For the purpose of the sqlite_complete function, an SQL -statement is complete if it ends in a semicolon.

- -

The sqlite command-line utility uses the sqlite_complete -function to know when it needs to call sqlite_exec. After each -line of input is received, sqlite calls sqlite_complete -on all input in its buffer. If sqlite_complete returns true, -then sqlite_exec is called and the input buffer is reset. If -sqlite_complete returns false, then the prompt is changed to -the continuation prompt and another line of text is read and added to -the input buffer.

- -

3.6 Library version string

- -

The SQLite library exports the string constant named -sqlite_version which contains the version number of the -library. The header file contains a macro SQLITE_VERSION -with the same information. If desired, a program can compare -the SQLITE_VERSION macro against the sqlite_version -string constant to verify that the version number of the -header file and the library match.

- -

3.7 Library character encoding

- -

By default, SQLite assumes that all data uses a fixed-size -8-bit character (iso8859). But if you give the --enable-utf8 option -to the configure script, then the library assumes UTF-8 variable -sized characters. This makes a difference for the LIKE and GLOB -operators and the LENGTH() and SUBSTR() functions. The static -string sqlite_encoding will be set to either "UTF-8" or -"iso8859" to indicate how the library was compiled. In addition, -the sqlite.h header file will define one of the -macros SQLITE_UTF8 or SQLITE_ISO8859, as appropriate.

- -

Note that the character encoding mechanism used by SQLite cannot -be changed at run-time. This is a compile-time option only. The -sqlite_encoding character string just tells you how the library -was compiled.

- -

3.8 Changing the library's response to locked files

- -

The sqlite_busy_handler procedure can be used to register -a busy callback with an open SQLite database. The busy callback will -be invoked whenever SQLite tries to access a database that is locked. -The callback will typically do some other useful work, or perhaps sleep, -in order to give the lock a chance to clear. If the callback returns -non-zero, then SQLite tries again to access the database and the cycle -repeats. If the callback returns zero, then SQLite aborts the current -operation and returns SQLITE_BUSY.

- -

The arguments to sqlite_busy_handler are the opaque -structure returned from sqlite_open, a pointer to the busy -callback function, and a generic pointer that will be passed as -the first argument to the busy callback. When SQLite invokes the -busy callback, it sends it three arguments: the generic pointer -that was passed in as the third argument to sqlite_busy_handler, -the name of the database table or index that the library is trying -to access, and the number of times that the library has attempted to -access the database table or index.

- -

For the common case where we want the busy callback to sleep, -the SQLite library provides a convenience routine sqlite_busy_timeout. -The first argument to sqlite_busy_timeout is a pointer to -an open SQLite database and the second argument is a number of milliseconds. -After sqlite_busy_timeout has been executed, the SQLite library -will wait for the lock to clear for at least the number of milliseconds -specified before it returns SQLITE_BUSY. Specifying zero milliseconds for -the timeout restores the default behavior.

- -

3.9 Using the _printf() wrapper functions

- -

The four utility functions

- -

-

-

- -

implement the same query functionality as sqlite_exec -and sqlite_get_table. But instead of taking a complete -SQL statement as their second argument, the four _printf -routines take a printf-style format string. The SQL statement to -be executed is generated from this format string and from whatever -additional arguments are attached to the end of the function call.

- -

There are two advantages to using the SQLite printf -functions instead of sprintf. First of all, with the -SQLite printf routines, there is never a danger of overflowing a -static buffer as there is with sprintf. The SQLite -printf routines automatically allocate (and later free) -as much memory as is -necessary to hold the SQL statements generated.

- -

The second advantage the SQLite printf routines have over -sprintf is two new formatting options specifically designed -to support string literals in SQL. Within the format string, -the %q formatting option works very much like %s in that it -reads a null-terminated string from the argument list and inserts -it into the result. But %q translates the inserted string by -making two copies of every single-quote (') character in the -substituted string. This has the effect of escaping the end-of-string -meaning of single-quote within a string literal. The %Q formatting -option works similarly; it translates the single-quotes like %q and -additionally encloses the resulting string in single-quotes. -If the argument for the %Q formatting option is a NULL pointer, -the resulting string is NULL without single quotes. -

- -

Consider an example. Suppose you are trying to insert a string -value into a database table where the string value was obtained from -user input. Suppose the string to be inserted is stored in a variable -named zString. The code to do the insertion might look like this:

- -
-sqlite_exec_printf(db,
-  "INSERT INTO table1 VALUES('%s')",
-  0, 0, 0, zString);
-
- -

If the zString variable holds text like "Hello", then this statement -will work just fine. But suppose the user enters a string like -"Hi y'all!". The SQL statement generated reads as follows: - -

-INSERT INTO table1 VALUES('Hi y'all!')
-
- -

This is not valid SQL because of the apostrophe in the word "y'all". -But if the %q formatting option is used instead of %s, like this:

- -
-sqlite_exec_printf(db,
-  "INSERT INTO table1 VALUES('%q')",
-  0, 0, 0, zString);
-
- -

Then the generated SQL will look like the following:

- -
-INSERT INTO table1 VALUES('Hi y''all!')
-
- -

Here the apostrophe has been escaped and the SQL statement is well-formed. -When generating SQL on-the-fly from data that might contain a -single-quote character ('), it is always a good idea to use the -SQLite printf routines and the %q formatting option instead of sprintf. -

- -

If the %Q formatting option is used instead of %q, like this:

- -
-sqlite_exec_printf(db,
-  "INSERT INTO table1 VALUES(%Q)",
-  0, 0, 0, zString);
-
- -

Then the generated SQL will look like the following:

- -
-INSERT INTO table1 VALUES('Hi y''all!')
-
- -

If the value of the zString variable is NULL, the generated SQL -will look like the following:

- -
-INSERT INTO table1 VALUES(NULL)
-
- -

All of the _printf() routines above are built around the following -two functions:

- -
-char *sqlite_mprintf(const char *zFormat, ...);
-char *sqlite_vmprintf(const char *zFormat, va_list);
-
- -

The sqlite_mprintf() routine works like the standard library -sprintf() except that it writes its results into memory obtained -from malloc() and returns a pointer to the malloced buffer. -sqlite_mprintf() also understands the %q and %Q extensions described -above. The sqlite_vmprintf() is a varargs version of the same -routine. The string pointer that these routines return should be freed -by passing it to sqlite_freemem(). -

- -

3.10 Performing background jobs during large queries

- -

The sqlite_progress_handler() routine can be used to register a -callback routine with an SQLite database to be invoked periodically during long -running calls to sqlite_exec(), sqlite_step() and the various -wrapper functions. -

- -

The callback is invoked every N virtual machine operations, where N is -supplied as the second argument to sqlite_progress_handler(). The third -and fourth arguments to sqlite_progress_handler() are a pointer to the -routine to be invoked and a void pointer to be passed as the first argument to -it. -

- -

The time taken to execute each virtual machine operation can vary based on -many factors. A typical value for a 1 GHz PC is between half a million and three million -per second but may be much higher or lower, depending on the query. As such it -is difficult to schedule background operations based on virtual machine -operations. Instead, it is recommended that a callback be scheduled relatively -frequently (say every 1000 instructions) and external timer routines used to -determine whether or not background jobs need to be run. -

- - -

4.0 Adding New SQL Functions

- -

Beginning with version 2.4.0, SQLite allows the SQL language to be -extended with new functions implemented as C code. The following interface -is used: -

- -
-typedef struct sqlite_func sqlite_func;
-
-int sqlite_create_function(
-  sqlite *db,
-  const char *zName,
-  int nArg,
-  void (*xFunc)(sqlite_func*,int,const char**),
-  void *pUserData
-);
-int sqlite_create_aggregate(
-  sqlite *db,
-  const char *zName,
-  int nArg,
-  void (*xStep)(sqlite_func*,int,const char**),
-  void (*xFinalize)(sqlite_func*),
-  void *pUserData
-);
-
-char *sqlite_set_result_string(sqlite_func*,const char*,int);
-void sqlite_set_result_int(sqlite_func*,int);
-void sqlite_set_result_double(sqlite_func*,double);
-void sqlite_set_result_error(sqlite_func*,const char*,int);
-
-void *sqlite_user_data(sqlite_func*);
-void *sqlite_aggregate_context(sqlite_func*, int nBytes);
-int sqlite_aggregate_count(sqlite_func*);
-
- -

-The sqlite_create_function() interface is used to create -regular functions and sqlite_create_aggregate() is used to -create new aggregate functions. In both cases, the db -parameter is an open SQLite database on which the functions should -be registered, zName is the name of the new function, -nArg is the number of arguments, and pUserData is -a pointer which is passed through unchanged to the C implementation -of the function. Both routines return 0 on success and non-zero -if there are any errors. -

- -

-The length of a function name may not exceed 255 characters. -Any attempt to create a function whose name exceeds 255 characters -in length will result in an error. -

- -

-For regular functions, the xFunc callback is invoked once -for each function call. The implementation of xFunc should call -one of the sqlite_set_result_... interfaces to return its -result. The sqlite_user_data() routine can be used to -retrieve the pUserData pointer that was passed in when the -function was registered. -

- -

-For aggregate functions, the xStep callback is invoked once -for each row in the result and then xFinalize is invoked at the -end to compute a final answer. The xStep routine can use the -sqlite_aggregate_context() interface to allocate memory that -will be unique to that particular instance of the SQL function. -This memory will be automatically deleted after xFinalize is called. -The sqlite_aggregate_count() routine can be used to find out -how many rows of data were passed to the aggregate. The xFinalize -callback should invoke one of the sqlite_set_result_... -interfaces to set the final result of the aggregate. -

- -

-SQLite now implements all of its built-in functions using this -interface. For additional information and examples on how to create -new SQL functions, review the SQLite source code in the file -func.c. -

- -

5.0 Multi-Threading And SQLite

- -

-If SQLite is compiled with the THREADSAFE preprocessor macro set to 1, -then it is safe to use SQLite from two or more threads of the same process -at the same time. But each thread should have its own sqlite* -pointer returned from sqlite_open. It is never safe for two -or more threads to access the same sqlite* pointer at the same time. -

- -

-In precompiled SQLite libraries available on the website, the Unix -versions are compiled with THREADSAFE turned off but the Windows -versions are compiled with THREADSAFE turned on. If you need something -different than this you will have to recompile. -

- -

-Under Unix, an sqlite* pointer should not be carried across a -fork() system call into the child process. The child process -should open its own copy of the database after the fork(). -

- -

6.0 Usage Examples

- -

For examples of how the SQLite C/C++ interface can be used, -refer to the source code for the sqlite program in the -file src/shell.c of the source tree. -Additional information about sqlite is available at -sqlite.html. -See also the sources to the Tcl interface for SQLite in -the source file src/tclsqlite.c.

-} -footer $rcsid DELETED capi3.tcl Index: capi3.tcl ================================================================== --- capi3.tcl +++ /dev/null @@ -1,516 +0,0 @@ -set rcsid {$Id: capi3.tcl,v 1.10 2007/04/27 17:16:22 drh Exp $} -source common.tcl -header {C/C++ Interface For SQLite Version 3} - -proc AddHyperlinks {txt} { - regsub -all {([^:alnum:>])(sqlite3_\w+)(\([^\)]*\))} $txt \ - {\1\2\3} t2 - puts $t2 -} - -AddHyperlinks { -

C/C++ Interface For SQLite Version 3

- -

1.0 Overview

- -

-SQLite version 3.0 is a new version of SQLite, derived from -the SQLite 2.8.13 code base, but with an incompatible file format -and API. -SQLite version 3.0 was created to answer demand for the following features: -

- - - -

-It was necessary to move to version 3.0 to implement these features because -each requires incompatible changes to the database file format. Other -incompatible changes, such as a cleanup of the API, were introduced at the -same time under the theory that it is best to get your incompatible changes -out of the way all at once. -

- -

-The API for version 3.0 is similar to the version 2.X API, -but with some important changes. Most noticeably, the "sqlite_" -prefix that occurs at the beginning of all API functions and data -structures is changed to "sqlite3_". -This avoids confusion between the two APIs and allows linking against both -SQLite 2.X and SQLite 3.0 at the same time. -

- -

-There is no agreement on what the C datatype for a UTF-16 -string should be. Therefore, SQLite uses a generic type of void* -to refer to UTF-16 strings. Client software can cast the void* -to whatever datatype is appropriate for their system. -

- -

2.0 C/C++ Interface

- -

-The API for SQLite 3.0 includes 83 separate functions in addition -to several data structures and #defines. (A complete -API reference is provided as a separate document.) -Fortunately, the interface is not nearly as complex as its size implies. -Simple programs can still make do with only 3 functions: -sqlite3_open(), -sqlite3_exec(), and -sqlite3_close(). -More control over the execution of the database engine is provided -using -sqlite3_prepare() -to compile an SQLite statement into byte code and -sqlite3_step() -to execute that bytecode. -A family of routines with names beginning with -sqlite3_column_ -is used to extract information about the result set of a query. -Many interface functions come in pairs, with both a UTF-8 and -UTF-16 version. And there is a collection of routines -used to implement user-defined SQL functions and user-defined -text collating sequences. -

- - -

2.1 Opening and closing a database

- -
-   typedef struct sqlite3 sqlite3;
-   int sqlite3_open(const char*, sqlite3**);
-   int sqlite3_open16(const void*, sqlite3**);
-   int sqlite3_close(sqlite3*);
-   const char *sqlite3_errmsg(sqlite3*);
-   const void *sqlite3_errmsg16(sqlite3*);
-   int sqlite3_errcode(sqlite3*);
-
- -

-The sqlite3_open() routine returns an integer error code rather than -a pointer to the sqlite3 structure as the version 2 interface did. -The difference between sqlite3_open() -and sqlite3_open16() is that sqlite3_open16() takes UTF-16 (in host native -byte order) for the name of the database file. If a new database file -needs to be created, then sqlite3_open16() sets the internal text -representation to UTF-16 whereas sqlite3_open() sets the text -representation to UTF-8. -

- -

-The opening and/or creating of the database file is deferred until the -file is actually needed. This allows options and parameters, such -as the native text representation and default page size, to be -set using PRAGMA statements. -

- -

-The sqlite3_errcode() routine returns a result code for the most -recent major API call. sqlite3_errmsg() returns an English-language -text error message for the most recent error. The error message is -represented in UTF-8 and will be ephemeral - it could disappear on -the next call to any SQLite API function. sqlite3_errmsg16() works like -sqlite3_errmsg() except that it returns the error message represented -as UTF-16 in host native byte order. -

- -

-The error codes for SQLite version 3 are unchanged from version 2. -They are as follows: -

- -
-#define SQLITE_OK           0   /* Successful result */
-#define SQLITE_ERROR        1   /* SQL error or missing database */
-#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
-#define SQLITE_PERM         3   /* Access permission denied */
-#define SQLITE_ABORT        4   /* Callback routine requested an abort */
-#define SQLITE_BUSY         5   /* The database file is locked */
-#define SQLITE_LOCKED       6   /* A table in the database is locked */
-#define SQLITE_NOMEM        7   /* A malloc() failed */
-#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
-#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
-#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
-#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
-#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
-#define SQLITE_FULL        13   /* Insertion failed because database is full */
-#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
-#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
-#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
-#define SQLITE_SCHEMA      17   /* The database schema changed */
-#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
-#define SQLITE_CONSTRAINT  19   /* Abort due to contraint violation */
-#define SQLITE_MISMATCH    20   /* Data type mismatch */
-#define SQLITE_MISUSE      21   /* Library used incorrectly */
-#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
-#define SQLITE_AUTH        23   /* Authorization denied */
-#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
-#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
-
- -

2.2 Executing SQL statements

- -
-   typedef int (*sqlite_callback)(void*,int,char**, char**);
-   int sqlite3_exec(sqlite3*, const char *sql, sqlite_callback, void*, char**);
-
- -

-The sqlite3_exec function works much as it did in SQLite version 2. -Zero or more SQL statements specified in the second parameter are compiled -and executed. Query results are returned to a callback routine. -See the API reference for additional -information. -

- -

-In SQLite version 3, the sqlite3_exec routine is just a wrapper around -calls to the prepared statement interface. -

- -
-   typedef struct sqlite3_stmt sqlite3_stmt;
-   int sqlite3_prepare(sqlite3*, const char*, int, sqlite3_stmt**, const char**);
-   int sqlite3_prepare16(sqlite3*, const void*, int, sqlite3_stmt**, const void**);
-   int sqlite3_finalize(sqlite3_stmt*);
-   int sqlite3_reset(sqlite3_stmt*);
-
- -

-The sqlite3_prepare interface compiles a single SQL statement into byte code -for later execution. This interface is now the preferred way of accessing -the database. -

- -

-The SQL statement is a UTF-8 string for sqlite3_prepare(). -The sqlite3_prepare16() works the same way except -that it expects a UTF-16 string as SQL input. -Only the first SQL statement in the input string is compiled. -The fourth parameter is filled in with a pointer to the next (uncompiled) -SQLite statement in the input string, if any. -The sqlite3_finalize() routine deallocates a prepared SQL statement. -All prepared statements must be finalized before the database can be -closed. -The sqlite3_reset() routine resets a prepared SQL statement so that it -can be executed again. -

- -

-The SQL statement may contain tokens of the form "?" or "?nnn" or ":aaa" -where "nnn" is an integer and "aaa" is an identifier. -Such tokens represent unspecified literal values (or "wildcards") -to be filled in later by the -sqlite3_bind interface. -Each wildcard has an associated number which is its sequence in the -statement or the "nnn" in the case of a "?nnn" form. -It is allowed for the same wildcard -to occur more than once in the same SQL statement, in which case -all instance of that wildcard will be filled in with the same value. -Unbound wildcards have a value of NULL. -

- -
-   int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
-   int sqlite3_bind_double(sqlite3_stmt*, int, double);
-   int sqlite3_bind_int(sqlite3_stmt*, int, int);
-   int sqlite3_bind_int64(sqlite3_stmt*, int, long long int);
-   int sqlite3_bind_null(sqlite3_stmt*, int);
-   int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
-   int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
-   int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
-
- -

-There is an assortment of sqlite3_bind routines used to assign values -to wildcards in a prepared SQL statement. Unbound wildcards -are interpreted as NULLs. Bindings are not reset by sqlite3_reset(). -But wildcards can be rebound to new values after an sqlite3_reset(). -

- -

-After an SQL statement has been prepared (and optionally bound), it -is executed using: -

- -
-   int sqlite3_step(sqlite3_stmt*);
-
- -

-The sqlite3_step() routine return SQLITE_ROW if it is returning a single -row of the result set, or SQLITE_DONE if execution has completed, either -normally or due to an error. It might also return SQLITE_BUSY if it is -unable to open the database file. If the return value is SQLITE_ROW, then -the following routines can be used to extract information about that row -of the result set: -

- -
-   const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
-   int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
-   int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
-   int sqlite3_column_count(sqlite3_stmt*);
-   const char *sqlite3_column_decltype(sqlite3_stmt *, int iCol);
-   const void *sqlite3_column_decltype16(sqlite3_stmt *, int iCol);
-   double sqlite3_column_double(sqlite3_stmt*, int iCol);
-   int sqlite3_column_int(sqlite3_stmt*, int iCol);
-   long long int sqlite3_column_int64(sqlite3_stmt*, int iCol);
-   const char *sqlite3_column_name(sqlite3_stmt*, int iCol);
-   const void *sqlite3_column_name16(sqlite3_stmt*, int iCol);
-   const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
-   const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
-   int sqlite3_column_type(sqlite3_stmt*, int iCol);
-
- -

-The -sqlite3_column_count() -function returns the number of columns in -the results set. sqlite3_column_count() can be called at any time after -sqlite3_prepare(). -sqlite3_data_count() -works similarly to -sqlite3_column_count() except that it only works following sqlite3_step(). -If the previous call to sqlite3_step() returned SQLITE_DONE or an error code, -then sqlite3_data_count() will return 0 whereas sqlite3_column_count() will -continue to return the number of columns in the result set. -

- -

Returned data is examined using the other sqlite3_column_***() functions, -all of which take a column number as their second parameter. Columns are -zero-indexed from left to right. Note that this is different to parameters, -which are indexed starting at one. -

- -

-The sqlite3_column_type() function returns the -datatype for the value in the Nth column. The return value is one -of these: -

- -
-   #define SQLITE_INTEGER  1
-   #define SQLITE_FLOAT    2
-   #define SQLITE_TEXT     3
-   #define SQLITE_BLOB     4
-   #define SQLITE_NULL     5
-
- -

-The sqlite3_column_decltype() routine returns text which is the -declared type of the column in the CREATE TABLE statement. For an -expression, the return type is an empty string. sqlite3_column_name() -returns the name of the Nth column. sqlite3_column_bytes() returns -the number of bytes in a column that has type BLOB or the number of bytes -in a TEXT string with UTF-8 encoding. sqlite3_column_bytes16() returns -the same value for BLOBs but for TEXT strings returns the number of bytes -in a UTF-16 encoding. -sqlite3_column_blob() return BLOB data. -sqlite3_column_text() return TEXT data as UTF-8. -sqlite3_column_text16() return TEXT data as UTF-16. -sqlite3_column_int() return INTEGER data in the host machines native -integer format. -sqlite3_column_int64() returns 64-bit INTEGER data. -Finally, sqlite3_column_double() return floating point data. -

- -

-It is not necessary to retrieve data in the format specify by -sqlite3_column_type(). If a different format is requested, the data -is converted automatically. -

- -

-Data format conversions can invalidate the pointer returned by -prior calls to sqlite3_column_blob(), sqlite3_column_text(), and/or -sqlite3_column_text16(). Pointers might be invalided in the following -cases: -

- -

-Note that conversions between UTF-16be and UTF-16le -are always done in place and do -not invalidate a prior pointer, though of course the content of the buffer -that the prior pointer points to will have been modified. Other kinds -of conversion are done in place when it is possible, but sometime it is -not possible and in those cases prior pointers are invalidated. -

- -

-The safest and easiest to remember policy is this: assume that any -result from -

-is invalided by subsequent calls to - -This means that you should always call sqlite3_column_bytes() or -sqlite3_column_bytes16() before calling sqlite3_column_blob(), -sqlite3_column_text(), or sqlite3_column_text16(). -

- -

2.3 User-defined functions

- -

-User defined functions can be created using the following routine: -

- -
-   typedef struct sqlite3_value sqlite3_value;
-   int sqlite3_create_function(
-     sqlite3 *,
-     const char *zFunctionName,
-     int nArg,
-     int eTextRep,
-     void*,
-     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
-     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
-     void (*xFinal)(sqlite3_context*)
-   );
-   int sqlite3_create_function16(
-     sqlite3*,
-     const void *zFunctionName,
-     int nArg,
-     int eTextRep,
-     void*,
-     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
-     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
-     void (*xFinal)(sqlite3_context*)
-   );
-   #define SQLITE_UTF8     1
-   #define SQLITE_UTF16    2
-   #define SQLITE_UTF16BE  3
-   #define SQLITE_UTF16LE  4
-   #define SQLITE_ANY      5
-
- -

-The nArg parameter specifies the number of arguments to the function. -A value of 0 indicates that any number of arguments is allowed. The -eTextRep parameter specifies what representation text values are expected -to be in for arguments to this function. The value of this parameter should -be one of the parameters defined above. SQLite version 3 allows multiple -implementations of the same function using different text representations. -The database engine chooses the function that minimization the number -of text conversions required. -

- -

-Normal functions specify only xFunc and leave xStep and xFinal set to NULL. -Aggregate functions specify xStep and xFinal and leave xFunc set to NULL. -There is no separate sqlite3_create_aggregate() API. -

- -

-The function name is specified in UTF-8. A separate sqlite3_create_function16() -API works the same as sqlite_create_function() -except that the function name is specified in UTF-16 host byte order. -

- -

-Notice that the parameters to functions are now pointers to sqlite3_value -structures instead of pointers to strings as in SQLite version 2.X. -The following routines are used to extract useful information from these -"values": -

- -
-   const void *sqlite3_value_blob(sqlite3_value*);
-   int sqlite3_value_bytes(sqlite3_value*);
-   int sqlite3_value_bytes16(sqlite3_value*);
-   double sqlite3_value_double(sqlite3_value*);
-   int sqlite3_value_int(sqlite3_value*);
-   long long int sqlite3_value_int64(sqlite3_value*);
-   const unsigned char *sqlite3_value_text(sqlite3_value*);
-   const void *sqlite3_value_text16(sqlite3_value*);
-   int sqlite3_value_type(sqlite3_value*);
-
- -

-Function implementations use the following APIs to acquire context and -to report results: -

- -
-   void *sqlite3_aggregate_context(sqlite3_context*, int nbyte);
-   void *sqlite3_user_data(sqlite3_context*);
-   void sqlite3_result_blob(sqlite3_context*, const void*, int n, void(*)(void*));
-   void sqlite3_result_double(sqlite3_context*, double);
-   void sqlite3_result_error(sqlite3_context*, const char*, int);
-   void sqlite3_result_error16(sqlite3_context*, const void*, int);
-   void sqlite3_result_int(sqlite3_context*, int);
-   void sqlite3_result_int64(sqlite3_context*, long long int);
-   void sqlite3_result_null(sqlite3_context*);
-   void sqlite3_result_text(sqlite3_context*, const char*, int n, void(*)(void*));
-   void sqlite3_result_text16(sqlite3_context*, const void*, int n, void(*)(void*));
-   void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
-   void *sqlite3_get_auxdata(sqlite3_context*, int);
-   void sqlite3_set_auxdata(sqlite3_context*, int, void*, void (*)(void*));
-
- -

2.4 User-defined collating sequences

- -

-The following routines are used to implement user-defined -collating sequences: -

- -
-   sqlite3_create_collation(sqlite3*, const char *zName, int eTextRep, void*,
-      int(*xCompare)(void*,int,const void*,int,const void*));
-   sqlite3_create_collation16(sqlite3*, const void *zName, int eTextRep, void*,
-      int(*xCompare)(void*,int,const void*,int,const void*));
-   sqlite3_collation_needed(sqlite3*, void*, 
-      void(*)(void*,sqlite3*,int eTextRep,const char*));
-   sqlite3_collation_needed16(sqlite3*, void*,
-      void(*)(void*,sqlite3*,int eTextRep,const void*));
-
- -

-The sqlite3_create_collation() function specifies a collating sequence name -and a comparison function to implement that collating sequence. The -comparison function is only used for comparing text values. The eTextRep -parameter is one of SQLITE_UTF8, SQLITE_UTF16LE, SQLITE_UTF16BE, or -SQLITE_ANY to specify which text representation the comparison function works -with. Separate comparison functions can exist for the same collating -sequence for each of the UTF-8, UTF-16LE and UTF-16BE text representations. -The sqlite3_create_collation16() works like sqlite3_create_collation() except -that the collation name is specified in UTF-16 host byte order instead of -in UTF-8. -

- -

-The sqlite3_collation_needed() routine registers a callback which the -database engine will invoke if it encounters an unknown collating sequence. -The callback can lookup an appropriate comparison function and invoke -sqlite_3_create_collation() as needed. The fourth parameter to the callback -is the name of the collating sequence in UTF-8. For sqlite3_collation_need16() -the callback sends the collating sequence name in UTF-16 host byte order. -

-} -footer $rcsid DELETED changes.tcl Index: changes.tcl ================================================================== --- changes.tcl +++ /dev/null @@ -1,1880 +0,0 @@ -# -# Run this script to generated a changes.html output file -# -source common.tcl -header {SQLite changes} -puts { -

-This page provides a high-level summary of changes to SQLite. -For more detail, refer the the checkin logs generated by -CVS at - -http://www.sqlite.org/cvstrac/timeline. -

- -
-} - - -proc chng {date desc} { - if {[regexp {\(([0-9.]+)\)} $date all vers]} { - set label [string map {. _} $vers] - puts "" - } - puts "
$date
" - regsub -all {[Tt]icket #(\d+)} $desc \ - {
\0} desc - puts "

    $desc

" - puts "
" -} - -chng {2007 Nov 05 (3.5.2)} { -
  • Dropped support for the SQLITE_OMIT_MEMORY_ALLOCATION compile-time -option. -
  • Always open files using FILE_FLAG_RANDOM_ACCESS under windows. -
  • The 3rd parameter of the built-in SUBSTR() function is now optional. -
  • Bug fix: do not invoke the authorizer when reparsing the schema after -a schema change. -
  • Added the experimental malloc-free memory allocator in mem3.c. -
  • Virtual machine stores 64-bit integer and floating point constants -in binary instead of text for a performance boost. -
  • Fix a race condition in test_async.c. -
  • Added the ".timer" command to the CLI -} - -chng {2007 Oct 04 (3.5.1)} { -
  • Nota Bene: We are not using terms "alpha" or "beta" on this - release because the code is stable and because if we use those terms, - nobody will upgrade. However, we still reserve the right to make - incompatible changes to the new VFS interface in future releases.
  • - -
  • Fix a bug in the handling of SQLITE_FULL errors that could lead - to database corruption. Ticket #2686. -
  • The test_async.c drive now does full file locking and works correctly - when used simultaneously by multiple processes on the same database. -
  • The CLI ignores whitespace (including comments) at the end of lines -
  • Make sure the query optimizer checks dependences on all terms of - a compound SELECT statement. Ticket #2640. -
  • Add demonstration code showing how to build a VFS for a raw - mass storage without a filesystem. -
  • Added an output buffer size parameter to the xGetTempname() method - of the VFS layer. -
  • Sticky SQLITE_FULL or SQLITE_IOERR errors in the pager are reset - when a new transaction is started. -} - - -chng {2007 Sep 04 (3.5.0) alpha} { -
  • Redesign the OS interface layer. See - 34to35.html for details. - *** Potentially incompatible change *** -
  • The - sqlite3_release_memory(), - - sqlite3_soft_heap_limit(), - and - sqlite3_enable_shared_cache() interfaces now work cross all - threads in the process, not just the single thread in which they - are invoked. - *** Potentially incompatible change *** -
  • Added the - sqlite3_open_v2() - interface. -
  • Reimplemented the memory allocation subsystem and made it - replacable at compile-time. -
  • Created a new mutex subsystem and made it replacable at - compile-time. -
  • The same database connection may now be used simultaneously by - separate threads. -} - - -chng {2007 August 13 (3.4.2)} { -
  • Fix a database corruption bug that might occur if a ROLLBACK command -is executed in auto-vacuum mode -and a very small -soft_heap_limit is set. -Ticket #2565. - -
  • Add the ability to run a full regression test with a small -soft_heap_limit. - -
  • Fix other minor problems with using small soft heap limits. - -
  • Work-around for -GCC bug 32575. - -
  • Improved error detection of misused aggregate functions. - -
  • Improvements to the amalgamation generator script so that all symbols -are prefixed with either SQLITE_PRIVATE or SQLITE_API. -} - -chng {2007 July 20 (3.4.1)} { -
  • Fix a bug in VACUUM that can lead to - - database corruption if two - processes are connected to the database at the same time and one - VACUUMs then the other then modifies the database.
  • -
  • The expression "+column" is now considered the same as "column" - when computing the collating sequence to use on the expression.
  • -
  • In the TCL language interface, - "@variable" instead of "$variable" always binds as a blob.
  • -
  • Added PRAGMA freelist_count - for determining the current size of the freelist.
  • -
  • The - PRAGMA auto_vacuum=incremental setting is now persistent.
  • -
  • Add FD_CLOEXEC to all open files under unix.
  • -
  • Fix a bug in the - min()/max() optimization when applied to - descending indices.
  • -
  • Make sure the TCL language interface works correctly with 64-bit - integers on 64-bit machines.
  • -
  • Allow the value -9223372036854775808 as an integer literal in SQL - statements.
  • -
  • Add the capability of "hidden" columns in virtual tables.
  • -
  • Use the macro SQLITE_PRIVATE (defaulting to "static") on all - internal functions in the amalgamation.
  • -
  • Add pluggable tokenizers and ICU - tokenization support to FTS2
  • -
  • Other minor bug fixes and documentation enhancements
  • -} - -chng {2007 June 18 (3.4.0)} { -
  • Fix a bug that can lead to database corruption if an SQLITE_BUSY error - occurs in the middle of an explicit transaction and that transaction - is later committed. - Ticket #2409. - See the - - CorruptionFollowingBusyError wiki page for details. -
  • Fix a bug that can lead to database corruption if autovacuum mode is - on and a malloc() failure follows a CREATE TABLE or CREATE INDEX statement - which itself follows a cache overflow inside a transaction. See - ticket #2418. -
  • -
  • Added explicit upper bounds on the sizes and - quantities of things SQLite can process. This change might cause - compatibility problems for - applications that use SQLite in the extreme, which is why the current - release is 3.4.0 instead of 3.3.18.
  • -
  • Added support for - Incremental BLOB I/O.
  • -
  • Added the zeroblob API - and the zeroblob() SQL function.
  • -
  • Added support for - Incremental Vacuum.
  • -
  • Added the SQLITE_MIXED_ENDIAN_64BIT_FLOAT compile-time option to suppport - ARM7 processors with goofy endianness.
  • -
  • Removed all instances of sprintf() and strcpy() from the core library.
  • -
  • Added support for - International Components for Unicode (ICU) to the full-text search - extensions. -

    -

      -
    • In the windows OS driver, reacquire a SHARED lock if an attempt to - acquire an EXCLUSIVE lock fails. Ticket #2354
    • -
    • Fix the REPLACE() function so that it returns NULL if the second argument - is an empty string. Ticket #2324.
    • -
    • Document the hazards of type coversions in - sqlite3_column_blob() - and related APIs. Fix unnecessary type conversions. Ticket #2321.
    • -
    • Internationalization of the TRIM() function. Ticket #2323
    • -
    • Use memmove() instead of memcpy() when moving between memory regions - that might overlap. Ticket #2334
    • -
    • Fix an optimizer bug involving subqueries in a compound SELECT that has - both an ORDER BY and a LIMIT clause. Ticket #2339.
    • -
    • Make sure the sqlite3_snprintf() - interface does not zero-terminate the buffer if the buffer size is - less than 1. Ticket #2341
    • -
    • Fix the built-in printf logic so that it prints "NaN" not "Inf" for - floating-point NaNs. Ticket #2345
    • -
    • When converting BLOB to TEXT, use the text encoding of the main database. - Ticket #2349
    • -
    • Keep the full precision of integers (if possible) when casting to - NUMERIC. Ticket #2364
    • -
    • Fix a bug in the handling of UTF16 codepoint 0xE000
    • -
    • Consider explicit collate clauses when matching WHERE constraints - to indices in the query optimizer. Ticket #2391
    • -
    • Fix the query optimizer to correctly handle constant expressions in - the ON clause of a LEFT JOIN. Ticket #2403
    • -
    • Fix the query optimizer to handle rowid comparisions to NULL - correctly. Ticket #2404
    • -
    • Fix many potental segfaults that could be caused by malicious SQL - statements.
    • -} - -chng {2007 April 25 (3.3.17)} { -
    • When the "write_version" value of the database header is larger than - what the library understands, make the database read-only instead of - unreadable.
    • -
    • Other minor bug fixes
    • -} - -chng {2007 April 18 (3.3.16)} { -
    • Fix a bug that caused VACUUM to fail if NULLs appeared in a - UNIQUE column.
    • -
    • Reinstate performance improvements that were added in 3.3.14 - but regressed in 3.3.15.
    • -
    • Fix problems with the handling of ORDER BY expressions on - compound SELECT statements in subqueries.
    • -
    • Fix a potential segfault when destroying locks on WinCE in - a multi-threaded environment.
    • -
    • Documentation updates.
    • -} - -chng {2007 April 9 (3.3.15)} { -
    • Fix a bug introduced in 3.3.14 that caused a rollback of - CREATE TEMP TABLE to leave the database connection wedged.
    • -
    • Fix a bug that caused an extra NULL row to be returned when - a descending query was interrupted by a change to the database.
    • -
    • The FOR EACH STATEMENT clause on a trigger now causes a syntax - error. It used to be silently ignored.
    • -
    • Fix an obscure and relatively harmless problem that might have caused - a resource leak following an I/O error.
    • -
    • Many improvements to the test suite. Test coverage now exceeded 98%
    • -} - -chng {2007 April 2 (3.3.14)} { -
    • Fix a bug - in 3.3.13 that could cause a segfault when the IN operator - is used one one term of a two-column index and the right-hand side of - the IN operator contains a NULL.
    • -
    • Added a new OS interface method for determining the sector size - of underlying media: sqlite3OsSectorSize().
    • -
    • A new algorithm for statements of the form - INSERT INTO table1 SELECT * FROM table2 - is faster and reduces fragmentation. VACUUM uses statements of - this form and thus runs faster and defragments better.
    • -
    • Performance enhancements through reductions in disk I/O: -
        -
      • Do not read the last page of an overflow chain when - deleting the row - just add that page to the freelist.
      • -
      • Do not store pages being deleted in the - rollback journal.
      • -
      • Do not read in the (meaningless) content of - pages extracted from the freelist.
      • -
      • Do not flush the page cache (and thus avoiding - a cache refill) unless another process changes the underlying - database file.
      • -
      • Truncate rather than delete the rollback journal when committing - a transaction in exclusive access mode, or when committing the TEMP - database.
      • -
    • -
    • Added support for exclusive access mode using - - "PRAGMA locking_mode=EXCLUSIVE"
    • -
    • Use heap space instead of stack space for large buffers in the - pager - useful on embedded platforms with stack-space - limitations.
    • -
    • Add a makefile target "sqlite3.c" that builds an amalgamation containing - the core SQLite library C code in a single file.
    • -
    • Get the library working correctly when compiled - with GCC option "-fstrict-aliasing".
    • -
    • Removed the vestigal SQLITE_PROTOCOL error.
    • -
    • Improvements to test coverage, other minor bugs fixed, - memory leaks plugged, - code refactored and/or recommented in places for easier reading.
    • -} - -chng {2007 February 13 (3.3.13)} { -
    • Add a "fragmentation" measurement in the output of sqlite3_analyzer.
    • -
    • Add the COLLATE operator used to explicitly set the collating sequence -used by an expression. This feature is considered experimental pending -additional testing.
    • -
    • Allow up to 64 tables in a join - the old limit was 32.
    • -
    • Added two new experimental functions: -randomBlob() and -hex(). -Their intended use is to facilitate generating -UUIDs. -
    • -
    • Fix a problem where -PRAGMA count_changes was -causing incorrect results for updates on tables with triggers
    • -
    • Fix a bug in the ORDER BY clause optimizer for joins where the -left-most table in the join is constrained by a UNIQUE index.
    • -
    • Fixed a bug in the "copy" method of the TCL interface.
    • -
    • Bug fixes in fts1 and fts2 modules.
    • -} - -chng {2007 January 27 (3.3.12)} { -
    • Fix another bug in the IS NULL optimization that was added in -version 3.3.9.
    • -
    • Fix a assertion fault that occurred on deeply nested views.
    • -
    • Limit the amount of output that -PRAGMA integrity_check -generates.
    • -
    • Minor syntactic changes to support a wider variety of compilers.
    • -} - -chng {2007 January 22 (3.3.11)} { -
    • Fix another bug in the implementation of the new -sqlite3_prepare_v2() API. -We'll get it right eventually...
    • -
    • Fix a bug in the IS NULL optimization that was added in version 3.3.9 - -the bug was causing incorrect results on certain LEFT JOINs that included -in the WHERE clause an IS NULL constraint for the right table of the -LEFT JOIN.
    • -
    • Make AreFileApisANSI() a no-op macro in winCE since winCE does not -support this function.
    • -} - -chng {2007 January 9 (3.3.10)} { -
    • Fix bugs in the implementation of the new -sqlite3_prepare_v2() API -that can lead to segfaults.
    • -
    • Fix 1-second round-off errors in the - -strftime() function
    • -
    • Enhance the windows OS layer to provide detailed error codes
    • -
    • Work around a win2k problem so that SQLite can use single-character -database file names
    • -
    • The -user_version and -schema_version pragmas -correctly set their column names in the result set
    • -
    • Documentation updates
    • -} - -chng {2007 January 4 (3.3.9)} { -
    • Fix bugs in pager.c that could lead to database corruption if two -processes both try to recover a hot journal at the same instant
    • -
    • Added the sqlite3_prepare_v2() -API.
    • -
    • Fixed the ".dump" command in the command-line shell to show -indices, triggers and views again.
    • -
    • Change the table_info pragma so that it returns NULL for the default -value if there is no default value
    • -
    • Support for non-ASCII characters in win95 filenames
    • -
    • Query optimizer enhancements: -
        -
      • Optimizer does a better job of using indices to satisfy ORDER BY -clauses that sort on the integer primary key
      • -
      • Use an index to satisfy an IS NULL operator in the WHERE clause
      • -
      • Fix a bug that was causing the optimizer to miss an OR optimization -opportunity
      • -
      • The optimizer has more freedom to reorder tables in the FROM clause -even in there are LEFT joins.
      • -
      -
    • Extension loading supported added to winCE
    • -
    • Allow constraint names on the DEFAULT clause in a table definition
    • -
    • Added the ".bail" command to the command-line shell
    • -
    • Make CSV (comma separate value) output from the command-line shell -more closely aligned to accepted practice
    • -
    • Experimental FTS2 module added
    • -
    • Use sqlite3_mprintf() instead of strdup() to avoid libc dependencies
    • -
    • VACUUM uses a temporary file in the official TEMP folder, not in the -same directory as the original database
    • -
    • The prefix on temporary filenames on windows is changed from "sqlite" -to "etilqs".
    • -} - -chng {2006 October 9 (3.3.8)} { -
    • Support for full text search using the -FTS1 module -(beta)
    • -
    • Added OS-X locking patches (beta - disabled by default)
    • -
    • Introduce extended error codes and add error codes for various -kinds of I/O errors.
    • -
    • Added support for IF EXISTS on CREATE/DROP TRIGGER/VIEW
    • -
    • Fix the regression test suite so that it works with Tcl8.5
    • -
    • Enhance sqlite3_set_authorizer() to provide notification of calls to - SQL functions.
    • -
    • Added experimental API: sqlite3_auto_extension()
    • -
    • Various minor bug fixes
    • -} - -chng {2006 August 12 (3.3.7)} { -
    • Added support for -virtual tables -(beta)
    • -
    • Added support for - -dynamically loaded extensions (beta)
    • -
    • The -sqlite3_interrupt() -routine can be called for a different thread
    • -
    • Added the MATCH operator.
    • -
    • The default file format is now 1. -} - -chng {2006 June 6 (3.3.6)} { -
    • Plays better with virus scanners on windows
    • -
    • Faster :memory: databases
    • -
    • Fix an obscure segfault in UTF-8 to UTF-16 conversions
    • -
    • Added driver for OS/2
    • -
    • Correct column meta-information returned for aggregate queries
    • -
    • Enhanced output from EXPLAIN QUERY PLAN
    • -
    • LIMIT 0 now works on subqueries
    • -
    • Bug fixes and performance enhancements in the query optimizer
    • -
    • Correctly handle NULL filenames in ATTACH and DETACH
    • -
    • Inproved syntax error messages in the parser
    • -
    • Fix type coercion rules for the IN operator
    • -} - -chng {2006 April 5 (3.3.5)} { -
    • CHECK constraints use conflict resolution algorithms correctly.
    • -
    • The SUM() function throws an error on integer overflow.
    • -
    • Choose the column names in a compound query from the left-most SELECT - instead of the right-most.
    • -
    • The sqlite3_create_collation() function - honors the SQLITE_UTF16_ALIGNED flag.
    • -
    • SQLITE_SECURE_DELETE compile-time option causes deletes to overwrite - old data with zeros.
    • -
    • Detect integer overflow in abs().
    • -
    • The random() function provides 64 bits of randomness instead of - only 32 bits.
    • -
    • Parser detects and reports automaton stack overflow.
    • -
    • Change the round() function to return REAL instead of TEXT.
    • -
    • Allow WHERE clause terms on the left table of a LEFT OUTER JOIN to - contain aggregate subqueries.
    • -
    • Skip over leading spaces in text to numeric conversions.
    • -
    • Various minor bug and documentation typo fixes and - performance enhancements.
    • -} - -chng {2006 February 11 (3.3.4)} { -
    • Fix a blunder in the Unix mutex implementation that can lead to -deadlock on multithreaded systems.
    • -
    • Fix an alignment problem on 64-bit machines
    • -
    • Added the fullfsync pragma.
    • -
    • Fix an optimizer bug that could have caused some unusual LEFT OUTER JOINs -to give incorrect results.
    • -
    • The SUM function detects integer overflow and converts to accumulating -an approximate result using floating point numbers
    • -
    • Host parameter names can begin with '@' for compatibility with SQL Server. -
    • -
    • Other miscellaneous bug fixes
    • -} - -chng {2006 January 31 (3.3.3)} { -
    • Removed support for an ON CONFLICT clause on CREATE INDEX - it never -worked correctly so this should not present any backward compatibility -problems.
    • -
    • Authorizer callback now notified of ALTER TABLE ADD COLUMN commands
    • -
    • After any changes to the TEMP database schema, all prepared statements -are invalidated and must be recreated using a new call to -sqlite3_prepare()
    • -
    • Other minor bug fixes in preparation for the first stable release -of version 3.3
    • -} - -chng {2006 January 24 (3.3.2 beta)} { -
    • Bug fixes and speed improvements. Improved test coverage.
    • -
    • Changes to the OS-layer interface: mutexes must now be recursive.
    • -
    • Discontinue the use of thread-specific data for out-of-memory -exception handling
    • -} - -chng {2006 January 16 (3.3.1 alpha)} { -
    • Countless bug fixes
    • -
    • Speed improvements
    • -
    • Database connections can now be used by multiple threads, not just -the thread in which they were created.
    • -} - -chng {2006 January 10 (3.3.0 alpha)} { -
    • CHECK constraints
    • -
    • IF EXISTS and IF NOT EXISTS clauses on CREATE/DROP TABLE/INDEX.
    • -
    • DESC indices
    • -
    • More efficient encoding of boolean values resulting in smaller database -files
    • -
    • More aggressive SQLITE_OMIT_FLOATING_POINT
    • -
    • Separate INTEGER and REAL affinity
    • -
    • Added a virtual function layer for the OS interface
    • -
    • "exists" method added to the TCL interface
    • -
    • Improved response to out-of-memory errors
    • -
    • Database cache can be optionally shared between connections -in the same thread
    • -
    • Optional READ UNCOMMITTED isolation (instead of the default -isolation level of SERIALIZABLE) and table level locking when -database connections share a common cache.
    • -} - -chng {2005 December 19 (3.2.8)} { -
    • Fix an obscure bug that can cause database corruption under the -following unusual circumstances: A large INSERT or UPDATE statement which -is part of an even larger transaction fails due to a uniqueness constraint -but the containing transaction commits.
    • -} - -chng {2005 December 19 (2.8.17)} { -
    • Fix an obscure bug that can cause database corruption under the -following unusual circumstances: A large INSERT or UPDATE statement which -is part of an even larger transaction fails due to a uniqueness constraint -but the containing transaction commits.
    • -} - -chng {2005 September 24 (3.2.7)} { -
    • GROUP BY now considers NULLs to be equal again, as it should -
    • -
    • Now compiles on Solaris and OpenBSD and other Unix variants -that lack the fdatasync() function
    • -
    • Now compiles on MSVC++6 again
    • -
    • Fix uninitialized variables causing malfunctions for various obscure -queries
    • -
    • Correctly compute a LEFT OUTER JOIN that is constrained on the -left table only
    • -} - -chng {2005 September 17 (3.2.6)} { -
    • Fix a bug that can cause database corruption if a VACUUM (or - autovacuum) fails and is rolled back on a database that is - larger than 1GiB
    • -
    • LIKE optimization now works for columns with COLLATE NOCASE
    • -
    • ORDER BY and GROUP BY now use bounded memory
    • -
    • Added support for COUNT(DISTINCT expr)
    • -
    • Change the way SUM() handles NULL values in order to comply with - the SQL standard
    • -
    • Use fdatasync() instead of fsync() where possible in order to speed - up commits slightly
    • -
    • Use of the CROSS keyword in a join turns off the table reordering - optimization
    • -
    • Added the experimental and undocumented EXPLAIN QUERY PLAN capability
    • -
    • Use the unicode API in windows
    • -} - -chng {2005 August 27 (3.2.5)} { -
    • Fix a bug affecting DELETE and UPDATE statements that changed -more than 40960 rows.
    • -
    • Change the makefile so that it no longer requires GNUmake extensions
    • -
    • Fix the --enable-threadsafe option on the configure script
    • -
    • Fix a code generator bug that occurs when the left-hand side of an IN -operator is constant and the right-hand side is a SELECT statement
    • -
    • The PRAGMA synchronous=off statement now disables syncing of the -master journal file in addition to the normal rollback journals
    • -} - -chng {2005 August 24 (3.2.4)} { -
    • Fix a bug introduced in the previous release -that can cause a segfault while generating code -for complex WHERE clauses.
    • -
    • Allow floating point literals to begin or end with a decimal point.
    • -} - -chng {2005 August 21 (3.2.3)} { -
    • Added support for the CAST operator
    • -
    • Tcl interface allows BLOB values to be transferred to user-defined -functions
    • -
    • Added the "transaction" method to the Tcl interface
    • -
    • Allow the DEFAULT value of a column to call functions that have constant -operands
    • -
    • Added the ANALYZE command for gathering statistics on indices and -using those statistics when picking an index in the optimizer
    • -
    • Remove the limit (formerly 100) on the number of terms in the -WHERE clause
    • -
    • The right-hand side of the IN operator can now be a list of expressions -instead of just a list of constants
    • -
    • Rework the optimizer so that it is able to make better use of indices
    • -
    • The order of tables in a join is adjusted automatically to make -better use of indices
    • -
    • The IN operator is now a candidate for optimization even if the left-hand -side is not the left-most term of the index. Multiple IN operators can be -used with the same index.
    • -
    • WHERE clause expressions using BETWEEN and OR are now candidates -for optimization
    • -
    • Added the "case_sensitive_like" pragma and the SQLITE_CASE_SENSITIVE_LIKE -compile-time option to set its default value to "on".
    • -
    • Use indices to help with GLOB expressions and LIKE expressions too -when the case_sensitive_like pragma is enabled
    • -
    • Added support for grave-accent quoting for compatibility with MySQL
    • -
    • Improved test coverage
    • -
    • Dozens of minor bug fixes
    • -} - -chng {2005 June 13 (3.2.2)} { -
    • Added the sqlite3_db_handle() API
    • -
    • Added the sqlite3_get_autocommit() API
    • -
    • Added a REGEXP operator to the parser. There is no function to back -up this operator in the standard build but users can add their own using -sqlite3_create_function()
    • -
    • Speed improvements and library footprint reductions.
    • -
    • Fix byte alignment problems on 64-bit architectures.
    • -
    • Many, many minor bug fixes and documentation updates.
    • -} - -chng {2005 March 29 (3.2.1)} { -
    • Fix a memory allocation error in the new ADD COLUMN comment.
    • -
    • Documentation updates
    • -} - -chng {2005 March 21 (3.2.0)} { -
    • Added support for ALTER TABLE ADD COLUMN.
    • -
    • Added support for the "T" separator in ISO-8601 date/time strings.
    • -
    • Improved support for Cygwin.
    • -
    • Numerous bug fixes and documentation updates.
    • -} - -chng {2005 March 16 (3.1.6)} { -
    • Fix a bug that could cause database corruption when inserting a - record into tables with around 125 columns.
    • -
    • sqlite3_step() is now much more likely to invoke the busy handler - and less likely to return SQLITE_BUSY.
    • -
    • Fix memory leaks that used to occur after a malloc() failure.
    • -} - -chng {2005 March 11 (3.1.5)} { -
    • The ioctl on OS-X to control syncing to disk is F_FULLFSYNC, - not F_FULLSYNC. The previous release had it wrong.
    • -} - -chng {2005 March 10 (3.1.4)} { -
    • Fix a bug in autovacuum that could cause database corruption if -a CREATE UNIQUE INDEX fails because of a constraint violation. -This problem only occurs if the new autovacuum feature introduced in -version 3.1 is turned on.
    • -
    • The F_FULLSYNC ioctl (currently only supported on OS-X) is disabled -if the synchronous pragma is set to something other than "full".
    • -
    • Add additional forward compatibility to the future version 3.2 database -file format.
    • -
    • Fix a bug in WHERE clauses of the form (rowid<'2')
    • -
    • New SQLITE_OMIT_... compile-time options added
    • -
    • Updates to the man page
    • -
    • Remove the use of strcasecmp() from the shell
    • -
    • Windows DLL exports symbols Tclsqlite_Init and Sqlite_Init
    • -} - -chng {2005 February 19 (3.1.3)} { -
    • Fix a problem with VACUUM on databases from which tables containing -AUTOINCREMENT have been dropped.
    • -
    • Add forward compatibility to the future version 3.2 database file -format.
    • -
    • Documentation updates
    • -} - -chng {2005 February 15 (3.1.2)} { -
    • Fix a bug that can lead to database corruption if there are two -open connections to the same database and one connection does a VACUUM -and the second makes some change to the database.
    • -
    • Allow "?" parameters in the LIMIT clause.
    • -
    • Fix VACUUM so that it works with AUTOINCREMENT.
    • -
    • Fix a race condition in AUTOVACUUM that can lead to corrupt databases
    • -
    • Add a numeric version number to the sqlite3.h include file.
    • -
    • Other minor bug fixes and performance enhancements.
    • -} - -chng {2005 February 15 (2.8.16)} { -
    • Fix a bug that can lead to database corruption if there are two -open connections to the same database and one connection does a VACUUM -and the second makes some change to the database.
    • -
    • Correctly handle quoted names in CREATE INDEX statements.
    • -
    • Fix a naming conflict between sqlite.h and sqlite3.h.
    • -
    • Avoid excess heap usage when copying expressions.
    • -
    • Other minor bug fixes.
    • -} - -chng {2005 February 1 (3.1.1 BETA)} { -
    • Automatic caching of prepared statements in the TCL interface
    • -
    • ATTACH and DETACH as well as some other operations cause existing - prepared statements to expire.
    • -
    • Numerous minor bug fixes
    • -} - -chng {2005 January 21 (3.1.0 ALPHA)} { -
    • Autovacuum support added
    • -
    • CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP added
    • -
    • Support for the EXISTS clause added.
    • -
    • Support for correlated subqueries added.
    • -
    • Added the ESCAPE clause on the LIKE operator.
    • -
    • Support for ALTER TABLE ... RENAME TABLE ... added
    • -
    • AUTOINCREMENT keyword supported on INTEGER PRIMARY KEY
    • -
    • Many SQLITE_OMIT_ macros inserted to omit features at compile-time - and reduce the library footprint.
    • -
    • The REINDEX command was added.
    • -
    • The engine no longer consults the main table if it can get - all the information it needs from an index.
    • -
    • Many nuisance bugs fixed.
    • -} - -chng {2004 October 11 (3.0.8)} { -
    • Add support for DEFERRED, IMMEDIATE, and EXCLUSIVE transactions.
    • -
    • Allow new user-defined functions to be created when there are -already one or more precompiled SQL statements.
    • -
    • Fix portability problems for Mingw/MSYS.
    • -
    • Fix a byte alignment problem on 64-bit Sparc machines.
    • -
    • Fix the ".import" command of the shell so that it ignores \r -characters at the end of lines.
    • -
    • The "csv" mode option in the shell puts strings inside double-quotes.
    • -
    • Fix typos in documentation.
    • -
    • Convert array constants in the code to have type "const".
    • -
    • Numerous code optimizations, specially optimizations designed to -make the code footprint smaller.
    • -} - -chng {2004 September 18 (3.0.7)} { -
    • The BTree module allocates large buffers using malloc() instead of - off of the stack, in order to play better on machines with limited - stack space.
    • -
    • Fixed naming conflicts so that versions 2.8 and 3.0 can be - linked and used together in the same ANSI-C source file.
    • -
    • New interface: sqlite3_bind_parameter_index()
    • -
    • Add support for wildcard parameters of the form: "?nnn"
    • -
    • Fix problems found on 64-bit systems.
    • -
    • Removed encode.c file (containing unused routines) from the - version 3.0 source tree.
    • -
    • The sqlite3_trace() callbacks occur before each statement - is executed, not when the statement is compiled.
    • -
    • Makefile updates and miscellaneous bug fixes.
    • -} - -chng {2004 September 02 (3.0.6 beta)} { -
    • Better detection and handling of corrupt database files.
    • -
    • The sqlite3_step() interface returns SQLITE_BUSY if it is unable - to commit a change because of a lock
    • -
    • Combine the implementations of LIKE and GLOB into a single - pattern-matching subroutine.
    • -
    • Miscellaneous code size optimizations and bug fixes
    • -} - -chng {2004 August 29 (3.0.5 beta)} { -
    • Support for ":AAA" style bind parameter names.
    • -
    • Added the new sqlite3_bind_parameter_name() interface.
    • -
    • Support for TCL variable names embedded in SQL statements in the - TCL bindings.
    • -
    • The TCL bindings transfer data without necessarily doing a conversion - to a string.
    • -
    • The database for TEMP tables is not created until it is needed.
    • -
    • Add the ability to specify an alternative temporary file directory - using the "sqlite_temp_directory" global variable.
    • -
    • A compile-time option (SQLITE_BUSY_RESERVED_LOCK) causes the busy - handler to be called when there is contention for a RESERVED lock.
    • -
    • Various bug fixes and optimizations
    • -} - -chng {2004 August 8 (3.0.4 beta)} { -
    • CREATE TABLE and DROP TABLE now work correctly as prepared statements.
    • -
    • Fix a bug in VACUUM and UNIQUE indices.
    • -
    • Add the ".import" command to the command-line shell.
    • -
    • Fix a bug that could cause index corruption when an attempt to - delete rows of a table is blocked by a pending query.
    • -
    • Library size optimizations.
    • -
    • Other minor bug fixes.
    • -} - -chng {2004 July 22 (2.8.15)} { -
    • This is a maintenance release only. Various minor bugs have been -fixed and some portability enhancements are added.
    • -} - -chng {2004 July 22 (3.0.3 beta)} { -
    • The second beta release for SQLite 3.0.
    • -
    • Add support for "PRAGMA page_size" to adjust the page size of -the database.
    • -
    • Various bug fixes and documentation updates.
    • -} - -chng {2004 June 30 (3.0.2 beta)} { -
    • The first beta release for SQLite 3.0.
    • -} - -chng {2004 June 22 (3.0.1 alpha)} { -
    • - *** Alpha Release - Research And Testing Use Only *** -
    • Lots of bug fixes.
    • -} - -chng {2004 June 18 (3.0.0 alpha)} { -
    • - *** Alpha Release - Research And Testing Use Only *** -
    • Support for internationalization including UTF-8, UTF-16, and - user defined collating sequences.
    • -
    • New file format that is 25% to 35% smaller for typical use.
    • -
    • Improved concurrency.
    • -
    • Atomic commits for ATTACHed databases.
    • -
    • Remove cruft from the APIs.
    • -
    • BLOB support.
    • -
    • 64-bit rowids.
    • -
    • More information. -} - -chng {2004 June 9 (2.8.14)} { -
    • Fix the min() and max() optimizer so that it works when the FROM - clause consists of a subquery.
    • -
    • Ignore extra whitespace at the end of "." commands in the shell.
    • -
    • Bundle sqlite_encode_binary() and sqlite_decode_binary() with the - library.
    • -
    • The TEMP_STORE and DEFAULT_TEMP_STORE pragmas now work.
    • -
    • Code changes to compile cleanly using OpenWatcom.
    • -
    • Fix VDBE stack overflow problems with INSTEAD OF triggers and - NULLs in IN operators.
    • -
    • Add the global variable sqlite_temp_directory which if set defines the - directory in which temporary files are stored.
    • -
    • sqlite_interrupt() plays well with VACUUM.
    • -
    • Other minor bug fixes.
    • -} - -chng {2004 March 8 (2.8.13)} { -
    • Refactor parts of the code in order to make the code footprint - smaller. The code is now also a little bit faster.
    • -
    • sqlite_exec() is now implemented as a wrapper around sqlite_compile() - and sqlite_step().
    • -
    • The built-in min() and max() functions now honor the difference between - NUMERIC and TEXT datatypes. Formerly, min() and max() always assumed - their arguments were of type NUMERIC.
    • -
    • New HH:MM:SS modifier to the built-in date/time functions.
    • -
    • Experimental sqlite_last_statement_changes() API added. Fixed the - the last_insert_rowid() function so that it works correctly with - triggers.
    • -
    • Add functions prototypes for the database encryption API.
    • -
    • Fix several nuisance bugs.
    • -} - -chng {2004 February 8 (2.8.12)} { -
    • Fix a bug that might corrupt the rollback journal if a power failure - or external program halt occurs in the middle of a COMMIT. The corrupt - journal can lead to database corruption when it is rolled back.
    • -
    • Reduce the size and increase the speed of various modules, especially - the virtual machine.
    • -
    • Allow "<expr> IN <table>" as a shorthand for - "<expr> IN (SELECT * FROM <table>".
    • -
    • Optimizations to the sqlite_mprintf() routine.
    • -
    • Make sure the MIN() and MAX() optimizations work within subqueries.
    • -} - -chng {2004 January 14 (2.8.11)} { -
    • Fix a bug in how the IN operator handles NULLs in subqueries. The bug - was introduced by the previous release.
    • -} - -chng {2004 January 13 (2.8.10)} { -
    • Fix a potential database corruption problem on Unix caused by the fact - that all posix advisory locks are cleared whenever you close() a file. - The work around it to embargo all close() calls while locks are - outstanding.
    • -
    • Performance enhancements on some corner cases of COUNT(*).
    • -
    • Make sure the in-memory backend responds sanely if malloc() fails.
    • -
    • Allow sqlite_exec() to be called from within user-defined SQL - functions.
    • -
    • Improved accuracy of floating-point conversions using "long double".
    • -
    • Bug fixes in the experimental date/time functions.
    • -} - -chng {2004 January 5 (2.8.9)} { -
    • Fix a 32-bit integer overflow problem that could result in corrupt - indices in a database if large negative numbers (less than -2147483648) - were inserted into an indexed numeric column.
    • -
    • Fix a locking problem on multi-threaded Linux implementations.
    • -
    • Always use "." instead of "," as the decimal point even if the locale - requests ",".
    • -
    • Added UTC to localtime conversions to the experimental date/time - functions.
    • -
    • Bug fixes to date/time functions.
    • -} - -chng {2003 December 17 (2.8.8)} { -
    • Fix a critical bug introduced into 2.8.0 which could cause - database corruption.
    • -
    • Fix a problem with 3-way joins that do not use indices
    • -
    • The VACUUM command now works with the non-callback API
    • -
    • Improvements to the "PRAGMA integrity_check" command
    • -} - -chng {2003 December 4 (2.8.7)} { -
    • Added experimental sqlite_bind() and sqlite_reset() APIs.
    • -
    • If the name of the database is an empty string, open a new database - in a temporary file that is automatically deleted when the database - is closed.
    • -
    • Performance enhancements in the lemon-generated parser
    • -
    • Experimental date/time functions revised.
    • -
    • Disallow temporary indices on permanent tables.
    • -
    • Documentation updates and typo fixes
    • -
    • Added experimental sqlite_progress_handler() callback API
    • -
    • Removed support for the Oracle8 outer join syntax.
    • -
    • Allow GLOB and LIKE operators to work as functions.
    • -
    • Other minor documentation and makefile changes and bug fixes.
    • -} - -chng {2003 August 21 (2.8.6)} { -
    • Moved the CVS repository to www.sqlite.org
    • -
    • Update the NULL-handling documentation.
    • -
    • Experimental date/time functions added.
    • -
    • Bug fix: correctly evaluate a view of a view without segfaulting.
    • -
    • Bug fix: prevent database corruption if you dropped a - trigger that had the same name as a table.
    • -
    • Bug fix: allow a VACUUM (without segfaulting) on an empty - database after setting the EMPTY_RESULT_CALLBACKS pragma.
    • -
    • Bug fix: if an integer value will not fit in a 32-bit int, store it in - a double instead.
    • -
    • Bug fix: Make sure the journal file directory entry is committed to disk - before writing the database file.
    • -} - -chng {2003 July 22 (2.8.5)} { -
    • Make LIMIT work on a compound SELECT statement.
    • -
    • LIMIT 0 now shows no rows. Use LIMIT -1 to see all rows.
    • -
    • Correctly handle comparisons between an INTEGER PRIMARY KEY and - a floating point number.
    • -
    • Fix several important bugs in the new ATTACH and DETACH commands.
    • -
    • Updated the NULL-handling document.
    • -
    • Allow NULL arguments in sqlite_compile() and sqlite_step().
    • -
    • Many minor bug fixes
    • -} - -chng {2003 June 29 (2.8.4)} { -
    • Enhanced the "PRAGMA integrity_check" command to verify indices.
    • -
    • Added authorization hooks for the new ATTACH and DETACH commands.
    • -
    • Many documentation updates
    • -
    • Many minor bug fixes
    • -} - -chng {2003 June 4 (2.8.3)} { -
    • Fix a problem that will corrupt the indices on a table if you - do an INSERT OR REPLACE or an UPDATE OR REPLACE on a table that - contains an INTEGER PRIMARY KEY plus one or more indices.
    • -
    • Fix a bug in windows locking code so that locks work correctly - when simultaneously accessed by Win95 and WinNT systems.
    • -
    • Add the ability for INSERT and UPDATE statements to refer to the - "rowid" (or "_rowid_" or "oid") columns.
    • -
    • Other important bug fixes
    • -} - -chng {2003 May 17 (2.8.2)} { -
    • Fix a problem that will corrupt the database file if you drop a - table from the main database that has a TEMP index.
    • -} - -chng {2003 May 16 (2.8.1)} { -
    • Reactivated the VACUUM command that reclaims unused disk space in - a database file.
    • -
    • Added the ATTACH and DETACH commands to allow interacting with multiple - database files at the same time.
    • -
    • Added support for TEMP triggers and indices.
    • -
    • Added support for in-memory databases.
    • -
    • Removed the experimental sqlite_open_aux_file(). Its function is - subsumed in the new ATTACH command.
    • -
    • The precedence order for ON CONFLICT clauses was changed so that - ON CONFLICT clauses on BEGIN statements have a higher precedence than - ON CONFLICT clauses on constraints. -
    • Many, many bug fixes and compatibility enhancements.
    • -} - -chng {2003 Feb 16 (2.8.0)} { -
    • Modified the journal file format to make it more resistant to corruption - that can occur after an OS crash or power failure.
    • -
    • Added a new C/C++ API that does not use callback for returning data.
    • -} - -chng {2003 Jan 25 (2.7.6)} { -
    • Performance improvements. The library is now much faster.
    • -
    • Added the sqlite_set_authorizer() API. Formal documentation has - not been written - see the source code comments for instructions on - how to use this function.
    • -
    • Fix a bug in the GLOB operator that was preventing it from working - with upper-case letters.
    • -
    • Various minor bug fixes.
    • -} - -chng {2002 Dec 27 (2.7.5)} { -
    • Fix an uninitialized variable in pager.c which could (with a probability - of about 1 in 4 billion) result in a corrupted database.
    • -} - -chng {2002 Dec 17 (2.7.4)} { -
    • Database files can now grow to be up to 2^41 bytes. The old limit - was 2^31 bytes.
    • -
    • The optimizer will now scan tables in the reverse if doing so will - satisfy an ORDER BY ... DESC clause.
    • -
    • The full pathname of the database file is now remembered even if - a relative path is passed into sqlite_open(). This allows - the library to continue operating correctly after a chdir().
    • -
    • Speed improvements in the VDBE.
    • -
    • Lots of little bug fixes.
    • -} - -chng {2002 Oct 30 (2.7.3)} { -
    • Various compiler compatibility fixes.
    • -
    • Fix a bug in the "expr IN ()" operator.
    • -
    • Accept column names in parentheses.
    • -
    • Fix a problem with string memory management in the VDBE
    • -
    • Fix a bug in the "table_info" pragma"
    • -
    • Export the sqlite_function_type() API function in the Windows DLL
    • -
    • Fix locking behavior under windows
    • -
    • Fix a bug in LEFT OUTER JOIN
    • -} - -chng {2002 Sep 25 (2.7.2)} { -
    • Prevent journal file overflows on huge transactions.
    • -
    • Fix a memory leak that occurred when sqlite_open() failed.
    • -
    • Honor the ORDER BY and LIMIT clause of a SELECT even if the - result set is used for an INSERT.
    • -
    • Do not put write locks on the file used to hold TEMP tables.
    • -
    • Added documentation on SELECT DISTINCT and on how SQLite handles NULLs.
    • -
    • Fix a problem that was causing poor performance when many thousands - of SQL statements were executed by a single sqlite_exec() call.
    • -} - -chng {2002 Aug 31 (2.7.1)} { -
    • Fix a bug in the ORDER BY logic that was introduced in version 2.7.0
    • -
    • C-style comments are now accepted by the tokenizer.
    • -
    • INSERT runs a little faster when the source is a SELECT statement.
    • -} - -chng {2002 Aug 25 (2.7.0)} { -
    • Make a distinction between numeric and text values when sorting. - Text values sort according to memcmp(). Numeric values sort in - numeric order.
    • -
    • Allow multiple simultaneous readers under windows by simulating - the reader/writers locks that are missing from Win95/98/ME.
    • -
    • An error is now returned when trying to start a transaction if - another transaction is already active.
    • -} - -chng {2002 Aug 12 (2.6.3)} { -
    • Add the ability to read both little-endian and big-endian databases. - So database created under SunOS or MacOSX can be read and written - under Linux or Windows and vice versa.
    • -
    • Convert to the new website: http://www.sqlite.org/
    • -
    • Allow transactions to span Linux Threads
    • -
    • Bug fix in the processing of the ORDER BY clause for GROUP BY queries
    • -} - -chng {2002 Jly 30 (2.6.2)} { -
    • Text files read by the COPY command can now have line terminators - of LF, CRLF, or CR.
    • -
    • SQLITE_BUSY is handled correctly if encountered during database - initialization.
    • -
    • Fix to UPDATE triggers on TEMP tables.
    • -
    • Documentation updates.
    • -} - -chng {2002 Jly 19 (2.6.1)} { -
    • Include a static string in the library that responds to the RCS - "ident" command and which contains the library version number.
    • -
    • Fix an assertion failure that occurred when deleting all rows of - a table with the "count_changes" pragma turned on.
    • -
    • Better error reporting when problems occur during the automatic - 2.5.6 to 2.6.0 database format upgrade.
    • -} - -chng {2002 Jly 17 (2.6.0)} { -
    • Change the format of indices to correct a design flaw that originated - with version 2.1.0. *** This is an incompatible - file format change *** When version 2.6.0 or later of the - library attempts to open a database file created by version 2.5.6 or - earlier, it will automatically and irreversibly convert the file format. - Make backup copies of older database files before opening them with - version 2.6.0 of the library. -
    • -} - -chng {2002 Jly 7 (2.5.6)} { -
    • Fix more problems with rollback. Enhance the test suite to exercise - the rollback logic extensively in order to prevent any future problems. -
    • -} - -chng {2002 Jly 6 (2.5.5)} { -
    • Fix a bug which could cause database corruption during a rollback. - This bugs was introduced in version 2.4.0 by the freelist - optimization of checking [410].
    • -
    • Fix a bug in aggregate functions for VIEWs.
    • -
    • Other minor changes and enhancements.
    • -} - -chng {2002 Jly 1 (2.5.4)} { -
    • Make the "AS" keyword optional again.
    • -
    • The datatype of columns now appear in the 4th argument to the - callback.
    • -
    • Added the sqlite_open_aux_file() API, though it is still - mostly undocumented and untested.
    • -
    • Added additional test cases and fixed a few bugs that those - test cases found.
    • -} - -chng {2002 Jun 24 (2.5.3)} { -
    • Bug fix: Database corruption can occur due to the optimization - that was introduced in version 2.4.0 (check-in [410]). The problem - should now be fixed. The use of versions 2.4.0 through 2.5.2 is - not recommended.
    • -} - -chng {2002 Jun 24 (2.5.2)} { -
    • Added the new SQLITE_TEMP_MASTER table which records the schema - for temporary tables in the same way that SQLITE_MASTER does for - persistent tables.
    • -
    • Added an optimization to UNION ALL
    • -
    • Fixed a bug in the processing of LEFT OUTER JOIN
    • -
    • The LIMIT clause now works on subselects
    • -
    • ORDER BY works on subselects
    • -
    • There is a new TypeOf() function used to determine if an expression - is numeric or text.
    • -
    • Autoincrement now works for INSERT from a SELECT.
    • -} - -chng {2002 Jun 19 (2.5.1)} { -
    • The query optimizer now attempts to implement the ORDER BY clause - using an index. Sorting is still used if no suitable index is - available.
    • -} - -chng {2002 Jun 17 (2.5.0)} { -
    • Added support for row triggers.
    • -
    • Added SQL-92 compliant handling of NULLs.
    • -
    • Add support for the full SQL-92 join syntax and LEFT OUTER JOINs.
    • -
    • Double-quoted strings interpreted as column names not text literals.
    • -
    • Parse (but do not implement) foreign keys.
    • -
    • Performance improvements in the parser, pager, and WHERE clause code - generator.
    • -
    • Make the LIMIT clause work on subqueries. (ORDER BY still does not - work, though.)
    • -
    • Added the "%Q" expansion to sqlite_*_printf().
    • -
    • Bug fixes too numerous to mention (see the change log).
    • -} - -chng {2002 May 09 (2.4.12)} { -
    • Added logic to detect when the library API routines are called out - of sequence.
    • -} - -chng {2002 May 08 (2.4.11)} { -
    • Bug fix: Column names in the result set were not being generated - correctly for some (rather complex) VIEWs. This could cause a - segfault under certain circumstances.
    • -} - -chng {2002 May 02 (2.4.10)} { -
    • Bug fix: Generate correct column headers when a compound SELECT is used - as a subquery.
    • -
    • Added the sqlite_encode_binary() and sqlite_decode_binary() functions to - the source tree. But they are not yet linked into the library.
    • -
    • Documentation updates.
    • -
    • Export the sqlite_changes() function from windows DLLs.
    • -
    • Bug fix: Do not attempt the subquery flattening optimization on queries - that lack a FROM clause. To do so causes a segfault.
    • -} - -chng {2002 Apr 21 (2.4.9)} { -
    • Fix a bug that was causing the precompiled binary of SQLITE.EXE to - report "out of memory" under Windows 98.
    • -} - -chng {2002 Apr 20 (2.4.8)} { -
    • Make sure VIEWs are created after their corresponding TABLEs in the - output of the .dump command in the shell.
    • -
    • Speed improvements: Do not do synchronous updates on TEMP tables.
    • -
    • Many improvements and enhancements to the shell.
    • -
    • Make the GLOB and LIKE operators functions that can be overridden - by a programmer. This allows, for example, the LIKE operator to - be changed to be case sensitive.
    • -} - -chng {2002 Apr 06 (2.4.7)} { -
    • Add the ability to put TABLE.* in the column list of a - SELECT statement.
    • -
    • Permit SELECT statements without a FROM clause.
    • -
    • Added the last_insert_rowid() SQL function.
    • -
    • Do not count rows where the IGNORE conflict resolution occurs in - the row count.
    • -
    • Make sure functions expressions in the VALUES clause of an INSERT - are correct.
    • -
    • Added the sqlite_changes() API function to return the number - of rows that changed in the most recent operation.
    • -} - -chng {2002 Apr 02 (2.4.6)} { -
    • Bug fix: Correctly handle terms in the WHERE clause of a join that - do not contain a comparison operator.
    • -} - -chng {2002 Apr 01 (2.4.5)} { -
    • Bug fix: Correctly handle functions that appear in the WHERE clause - of a join.
    • -
    • When the PRAGMA vdbe_trace=ON is set, correctly print the P3 operand - value when it is a pointer to a structure rather than a pointer to - a string.
    • -
    • When inserting an explicit NULL into an INTEGER PRIMARY KEY, convert - the NULL value into a unique key automatically.
    • -} - -chng {2002 Mar 24 (2.4.4)} { -
    • Allow "VIEW" to be a column name
    • -
    • Added support for CASE expressions (patch from Dan Kennedy)
    • -
    • Added RPMS to the delivery (patches from Doug Henry)
    • -
    • Fix typos in the documentation
    • -
    • Cut over configuration management to a new CVS repository with - its own CVSTrac bug tracking system.
    • -} - -chng {2002 Mar 22 (2.4.3)} { -
    • Fix a bug in SELECT that occurs when a compound SELECT is used as a - subquery in the FROM of a SELECT.
    • -
    • The sqlite_get_table() function now returns an error if you - give it two or more SELECTs that return different numbers of columns.
    • -} - -chng {2002 Mar 14 (2.4.2)} { -
    • Bug fix: Fix an assertion failure that occurred when ROWID was a column - in a SELECT statement on a view.
    • -
    • Bug fix: Fix an uninitialized variable in the VDBE that could cause an - assert failure.
    • -
    • Make the os.h header file more robust in detecting when the compile is - for windows and when it is for unix.
    • -} - -chng {2002 Mar 13 (2.4.1)} { -
    • Using an unnamed subquery in a FROM clause would cause a segfault.
    • -
    • The parser now insists on seeing a semicolon or the end of input before - executing a statement. This avoids an accidental disaster if the - WHERE keyword is misspelled in an UPDATE or DELETE statement.
    • -} - - -chng {2002 Mar 10 (2.4.0)} { -
    • Change the name of the sanity_check PRAGMA to integrity_check - and make it available in all compiles.
    • -
    • SELECT min() or max() of an indexed column with no WHERE or GROUP BY - clause is handled as a special case which avoids a complete table scan.
    • -
    • Automatically generated ROWIDs are now sequential.
    • -
    • Do not allow dot-commands of the command-line shell to occur in the - middle of a real SQL command.
    • -
    • Modifications to the "lemon" parser generator so that the parser tables - are 4 times smaller.
    • -
    • Added support for user-defined functions implemented in C.
    • -
    • Added support for new functions: coalesce(), lower(), - upper(), and random() -
    • Added support for VIEWs.
    • -
    • Added the subquery flattening optimizer.
    • -
    • Modified the B-Tree and Pager modules so that disk pages that do not - contain real data (free pages) are not journaled and are not - written from memory back to the disk when they change. This does not - impact database integrity, since the - pages contain no real data, but it does make large INSERT operations - about 2.5 times faster and large DELETEs about 5 times faster.
    • -
    • Made the CACHE_SIZE pragma persistent
    • -
    • Added the SYNCHRONOUS pragma
    • -
    • Fixed a bug that was causing updates to fail inside of transactions when - the database contained a temporary table.
    • -} - -chng {2002 Feb 18 (2.3.3)} { -
    • Allow identifiers to be quoted in square brackets, for compatibility - with MS-Access.
    • -
    • Added support for sub-queries in the FROM clause of a SELECT.
    • -
    • More efficient implementation of sqliteFileExists() under Windows. - (by Joel Luscy)
    • -
    • The VALUES clause of an INSERT can now contain expressions, including - scalar SELECT clauses.
    • -
    • Added support for CREATE TABLE AS SELECT
    • -
    • Bug fix: Creating and dropping a table all within a single - transaction was not working.
    • -} - -chng {2002 Feb 14 (2.3.2)} { -
    • Bug fix: There was an incorrect assert() in pager.c. The real code was - all correct (as far as is known) so everything should work OK if you - compile with -DNDEBUG=1. When asserts are not disabled, there - could be a fault.
    • -} - -chng {2002 Feb 13 (2.3.1)} { -
    • Bug fix: An assertion was failing if "PRAGMA full_column_names=ON;" was - set and you did a query that used a rowid, like this: - "SELECT rowid, * FROM ...".
    • -} - -chng {2002 Jan 30 (2.3.0)} { -
    • Fix a serious bug in the INSERT command which was causing data to go - into the wrong columns if the data source was a SELECT and the INSERT - clauses specified its columns in some order other than the default.
    • -
    • Added the ability to resolve constraint conflicts in ways other than - an abort and rollback. See the documentation on the "ON CONFLICT" - clause for details.
    • -
    • Temporary files are now automatically deleted by the operating system - when closed. There are no more dangling temporary files on a program - crash. (If the OS crashes, fsck will delete the file after reboot - under Unix. I do not know what happens under Windows.)
    • -
    • NOT NULL constraints are honored.
    • -
    • The COPY command puts NULLs in columns whose data is '\N'.
    • -
    • In the COPY command, backslash can now be used to escape a newline.
    • -
    • Added the SANITY_CHECK pragma.
    • -} - -chng {2002 Jan 28 (2.2.5)} { -
    • Important bug fix: the IN operator was not working if either the - left-hand or right-hand side was derived from an INTEGER PRIMARY KEY.
    • -
    • Do not escape the backslash '\' character in the output of the - sqlite command-line access program.
    • -} - -chng {2002 Jan 22 (2.2.4)} { -
    • The label to the right of an AS in the column list of a SELECT can now - be used as part of an expression in the WHERE, ORDER BY, GROUP BY, and/or - HAVING clauses.
    • -
    • Fix a bug in the -separator command-line option to the sqlite - command.
    • -
    • Fix a problem with the sort order when comparing upper-case strings against - characters greater than 'Z' but less than 'a'.
    • -
    • Report an error if an ORDER BY or GROUP BY expression is constant.
    • -} - -chng {2002 Jan 16 (2.2.3)} { -
    • Fix warning messages in VC++ 7.0. (Patches from nicolas352001)
    • -
    • Make the library thread-safe. (The code is there and appears to work - but has not been stressed.)
    • -
    • Added the new sqlite_last_insert_rowid() API function.
    • -} - -chng {2002 Jan 13 (2.2.2)} { -
    • Bug fix: An assertion was failing when a temporary table with an index - had the same name as a permanent table created by a separate process.
    • -
    • Bug fix: Updates to tables containing an INTEGER PRIMARY KEY and an - index could fail.
    • -} - -chng {2002 Jan 9 (2.2.1)} { -
    • Bug fix: An attempt to delete a single row of a table with a WHERE - clause of "ROWID=x" when no such rowid exists was causing an error.
    • -
    • Bug fix: Passing in a NULL as the 3rd parameter to sqlite_open() - would sometimes cause a coredump.
    • -
    • Bug fix: DROP TABLE followed by a CREATE TABLE with the same name all - within a single transaction was causing a coredump.
    • -
    • Makefile updates from A. Rottmann
    • -} - -chng {2001 Dec 22 (2.2.0)} { -
    • Columns of type INTEGER PRIMARY KEY are actually used as the primary - key in underlying B-Tree representation of the table.
    • -
    • Several obscure, unrelated bugs were found and fixed while - implementing the integer primary key change of the previous bullet.
    • -
    • Added the ability to specify "*" as part of a larger column list in - the result section of a SELECT statement. For example: - "SELECT rowid, * FROM table1;".
    • -
    • Updates to comments and documentation.
    • -} - -chng {2001 Dec 14 (2.1.7)} { -
    • Fix a bug in CREATE TEMPORARY TABLE which was causing the - table to be initially allocated in the main database file instead - of in the separate temporary file. This bug could cause the library - to suffer an assertion failure and it could cause "page leaks" in the - main database file. -
    • Fix a bug in the b-tree subsystem that could sometimes cause the first - row of a table to be repeated during a database scan.
    • -} - -chng {2001 Dec 14 (2.1.6)} { -
    • Fix the locking mechanism yet again to prevent - sqlite_exec() from returning SQLITE_PROTOCOL - unnecessarily. This time the bug was a race condition in - the locking code. This change affects both POSIX and Windows users.
    • -} - -chng {2001 Dec 6 (2.1.5)} { -
    • Fix for another problem (unrelated to the one fixed in 2.1.4) - that sometimes causes sqlite_exec() to return SQLITE_PROTOCOL - unnecessarily. This time the bug was - in the POSIX locking code and should not affect windows users.
    • -} - -chng {2001 Dec 4 (2.1.4)} { -
    • Sometimes sqlite_exec() would return SQLITE_PROTOCOL when it - should have returned SQLITE_BUSY.
    • -
    • The fix to the previous bug uncovered a deadlock which was also - fixed.
    • -
    • Add the ability to put a single .command in the second argument - of the sqlite shell
    • -
    • Updates to the FAQ
    • -} - -chng {2001 Nov 23 (2.1.3)} { -
    • Fix the behavior of comparison operators - (ex: "<", "==", etc.) - so that they are consistent with the order of entries in an index.
    • -
    • Correct handling of integers in SQL expressions that are larger than - what can be represented by the machine integer.
    • -} - -chng {2001 Nov 22 (2.1.2)} { -
    • Changes to support 64-bit architectures.
    • -
    • Fix a bug in the locking protocol.
    • -
    • Fix a bug that could (rarely) cause the database to become - unreadable after a DROP TABLE due to corruption to the SQLITE_MASTER - table.
    • -
    • Change the code so that version 2.1.1 databases that were rendered - unreadable by the above bug can be read by this version of - the library even though the SQLITE_MASTER table is (slightly) - corrupted.
    • -} - -chng {2001 Nov 13 (2.1.1)} { -
    • Bug fix: Sometimes arbitrary strings were passed to the callback - function when the actual value of a column was NULL.
    • -} - -chng {2001 Nov 12 (2.1.0)} { -
    • Change the format of data records so that records up to 16MB in size - can be stored.
    • -
    • Change the format of indices to allow for better query optimization.
    • -
    • Implement the "LIMIT ... OFFSET ..." clause on SELECT statements.
    • -} - -chng {2001 Nov 3 (2.0.8)} { -
    • Made selected parameters in API functions const. This should - be fully backwards compatible.
    • -
    • Documentation updates
    • -
    • Simplify the design of the VDBE by restricting the number of sorters - and lists to 1. - In practice, no more than one sorter and one list was ever used anyhow. -
    • -} - -chng {2001 Oct 21 (2.0.7)} { -
    • Any UTF-8 character or ISO8859 character can be used as part of - an identifier.
    • -
    • Patches from Christian Werner to improve ODBC compatibility and to - fix a bug in the round() function.
    • -
    • Plug some memory leaks that used to occur if malloc() failed. - We have been and continue to be memory leak free as long as - malloc() works.
    • -
    • Changes to some test scripts so that they work on Windows in - addition to Unix.
    • -} - -chng {2001 Oct 19 (2.0.6)} { -
    • Added the EMPTY_RESULT_CALLBACKS pragma
    • -
    • Support for UTF-8 and ISO8859 characters in column and table names.
    • -
    • Bug fix: Compute correct table names when the FULL_COLUMN_NAMES pragma - is turned on.
    • -} - -chng {2001 Oct 14 (2.0.5)} { -
    • Added the COUNT_CHANGES pragma.
    • -
    • Changes to the FULL_COLUMN_NAMES pragma to help out the ODBC driver.
    • -
    • Bug fix: "SELECT count(*)" was returning NULL for empty tables. - Now it returns 0.
    • -} - -chng {2001 Oct 13 (2.0.4)} { -
    • Bug fix: an obscure and relatively harmless bug was causing one of - the tests to fail when gcc optimizations are turned on. This release - fixes the problem.
    • -} - -chng {2001 Oct 13 (2.0.3)} { -
    • Bug fix: the sqlite_busy_timeout() function was delaying 1000 - times too long before failing.
    • -
    • Bug fix: an assertion was failing if the disk holding the database - file became full or stopped accepting writes for some other reason. - New tests were added to detect similar problems in the future.
    • -
    • Added new operators: & (bitwise-and) - | (bitwise-or), ~ (ones-complement), - << (shift left), >> (shift right).
    • -
    • Added new functions: round() and abs().
    • -} - -chng {2001 Oct 9 (2.0.2)} { -
    • Fix two bugs in the locking protocol. (One was masking the other.)
    • -
    • Removed some unused "#include " that were causing problems - for VC++.
    • -
    • Fixed sqlite.h so that it is usable from C++
    • -
    • Added the FULL_COLUMN_NAMES pragma. When set to "ON", the names of - columns are reported back as TABLE.COLUMN instead of just COLUMN.
    • -
    • Added the TABLE_INFO() and INDEX_INFO() pragmas to help support the - ODBC interface.
    • -
    • Added support for TEMPORARY tables and indices.
    • -} - -chng {2001 Oct 2 (2.0.1)} { -
    • Remove some C++ style comments from btree.c so that it will compile - using compilers other than gcc.
    • -
    • The ".dump" output from the shell does not work if there are embedded - newlines anywhere in the data. This is an old bug that was carried - forward from version 1.0. To fix it, the ".dump" output no longer - uses the COPY command. It instead generates INSERT statements.
    • -
    • Extend the expression syntax to support "expr NOT NULL" (with a - space between the "NOT" and the "NULL") in addition to "expr NOTNULL" - (with no space).
    • -} - -chng {2001 Sep 28 (2.0.0)} { -
    • Automatically build binaries for Linux and Windows and put them on - the website.
    • -} - -chng {2001 Sep 28 (2.0-alpha-4)} { -
    • Incorporate makefile patches from A. Rottmann to use LIBTOOL
    • -} - -chng {2001 Sep 27 (2.0-alpha-3)} { -
    • SQLite now honors the UNIQUE keyword in CREATE UNIQUE INDEX. Primary - keys are required to be unique.
    • -
    • File format changed back to what it was for alpha-1
    • -
    • Fixes to the rollback and locking behavior
    • -} - -chng {2001 Sep 20 (2.0-alpha-2)} { -
    • Initial release of version 2.0. The idea of renaming the library - to "SQLus" was abandoned in favor of keeping the "SQLite" name and - bumping the major version number.
    • -
    • The pager and btree subsystems added back. They are now the only - available backend.
    • -
    • The Dbbe abstraction and the GDBM and memory drivers were removed.
    • -
    • Copyright on all code was disclaimed. The library is now in the - public domain.
    • -} - -chng {2001 Jul 23 (1.0.32)} { -
    • Pager and btree subsystems removed. These will be used in a follow-on - SQL server library named "SQLus".
    • -
    • Add the ability to use quoted strings as table and column names in - expressions.
    • -} - -chng {2001 Apr 14 (1.0.31)} { -
    • Pager subsystem added but not yet used.
    • -
    • More robust handling of out-of-memory errors.
    • -
    • New tests added to the test suite.
    • -} - -chng {2001 Apr 6 (1.0.30)} { -
    • Remove the sqlite_encoding TCL variable that was introduced - in the previous version.
    • -
    • Add options -encoding and -tcl-uses-utf to the - sqlite TCL command.
    • -
    • Add tests to make sure that tclsqlite was compiled using Tcl header - files and libraries that match.
    • -} - -chng {2001 Apr 5 (1.0.29)} { -
    • The library now assumes data is stored as UTF-8 if the --enable-utf8 - option is given to configure. The default behavior is to assume - iso8859-x, as it has always done. This only makes a difference for - LIKE and GLOB operators and the LENGTH and SUBSTR functions.
    • -
    • If the library is not configured for UTF-8 and the Tcl library - is one of the newer ones that uses UTF-8 internally, - then a conversion from UTF-8 to iso8859 and - back again is done inside the TCL interface.
    • -} - -chng {2001 Apr 4 (1.0.28)} { -
    • Added limited support for transactions. At this point, transactions - will do table locking on the GDBM backend. There is no support (yet) - for rollback or atomic commit.
    • -
    • Added special column names ROWID, OID, and _ROWID_ that refer to the - unique random integer key associated with every row of every table.
    • -
    • Additional tests added to the regression suite to cover the new ROWID - feature and the TCL interface bugs mentioned below.
    • -
    • Changes to the "lemon" parser generator to help it work better when - compiled using MSVC.
    • -
    • Bug fixes in the TCL interface identified by Oleg Oleinick.
    • -} - -chng {2001 Mar 20 (1.0.27)} { -
    • When doing DELETE and UPDATE, the library used to write the record - numbers of records to be deleted or updated into a temporary file. - This is changed so that the record numbers are held in memory.
    • -
    • The DELETE command without a WHERE clause just removes the database - files from the disk, rather than going through and deleting record - by record.
    • -} - -chng {2001 Mar 20 (1.0.26)} { -
    • A serious bug fixed on Windows. Windows users should upgrade. - No impact to Unix.
    • -} - -chng {2001 Mar 15 (1.0.25)} { -
    • Modify the test scripts to identify tests that depend on system - load and processor speed and - to warn the user that a failure of one of those (rare) tests does - not necessarily mean the library is malfunctioning. No changes to - code. -
    • -} - -chng {2001 Mar 14 (1.0.24)} { -
    • Fix a bug which was causing - the UPDATE command to fail on systems where "malloc(0)" returns - NULL. The problem does not appear on Windows, Linux, or HPUX but does - cause the library to fail on QNX. -
    • -} - -chng {2001 Feb 19 (1.0.23)} { -
    • An unrelated (and minor) bug from Mark Muranwski fixed. The algorithm - for figuring out where to put temporary files for a "memory:" database - was not working quite right. -
    • -} - -chng {2001 Feb 19 (1.0.22)} { -
    • The previous fix was not quite right. This one seems to work better. -
    • -} - -chng {2001 Feb 19 (1.0.21)} { -
    • The UPDATE statement was not working when the WHERE clause contained - some terms that could be satisfied using indices and other terms that - could not. Fixed.
    • -} - -chng {2001 Feb 11 (1.0.20)} { -
    • Merge development changes into the main trunk. Future work toward - using a BTree file structure will use a separate CVS source tree. This - CVS tree will continue to support the GDBM version of SQLite only.
    • -} - -chng {2001 Feb 6 (1.0.19)} { -
    • Fix a strange (but valid) C declaration that was causing problems - for QNX. No logical changes.
    • -} - -chng {2001 Jan 4 (1.0.18)} { -
    • Print the offending SQL statement when an error occurs.
    • -
    • Do not require commas between constraints in CREATE TABLE statements.
    • -
    • Added the "-echo" option to the shell.
    • -
    • Changes to comments.
    • -} - -chng {2000 Dec 10 (1.0.17)} { -
    • Rewrote sqlite_complete() to make it faster.
    • -
    • Minor tweaks to other code to make it run a little faster.
    • -
    • Added new tests for sqlite_complete() and for memory leaks.
    • -} - -chng {2000 Dec 4 (1.0.16)} { -
    • Documentation updates. Mostly fixing of typos and spelling errors.
    • -} - -chng {2000 Oct 23 (1.0.15)} { -
    • Documentation updates
    • -
    • Some sanity checking code was removed from the inner loop of vdbe.c - to help the library to run a little faster. The code is only - removed if you compile with -DNDEBUG.
    • -} - -chng {2000 Oct 19 (1.0.14)} { -
    • Added a "memory:" backend driver that stores its database in an - in-memory hash table.
    • -} - -chng {2000 Oct 18 (1.0.13)} { -
    • Break out the GDBM driver into a separate file in anticipation - of adding new drivers.
    • -
    • Allow the name of a database to be prefixed by the driver type. - For now, the only driver type is "gdbm:".
    • -} - -chng {2000 Oct 16 (1.0.12)} { -
    • Fixed an off-by-one error that was causing a coredump in - the '%q' format directive of the new - sqlite_..._printf() routines.
    • -
    • Added the sqlite_interrupt() interface.
    • -
    • In the shell, sqlite_interrupt() is invoked when the - user presses Control-C
    • -
    • Fixed some instances where sqlite_exec() was - returning the wrong error code.
    • -} - -chng {2000 Oct 11 (1.0.10)} { -
    • Added notes on how to compile for Windows95/98.
    • -
    • Removed a few variables that were not being used. Etc.
    • -} - -chng {2000 Oct 8 (1.0.9)} { -
    • Added the sqlite_..._printf() interface routines.
    • -
    • Modified the sqlite shell program to use the new interface - routines.
    • -
    • Modified the sqlite shell program to print the schema for - the built-in SQLITE_MASTER table, if explicitly requested.
    • -} - -chng {2000 Sep 30 (1.0.8)} { -
    • Begin writing documentation on the TCL interface.
    • -} - -chng {2000 Sep 29 (Not Released)} { -
    • Added the sqlite_get_table() API
    • -
    • Updated the documentation due to the above change.
    • -
    • Modified the sqlite shell to make use of the new - sqlite_get_table() API in order to print a list of tables - in multiple columns, similar to the way "ls" prints filenames.
    • -
    • Modified the sqlite shell to print a semicolon at the - end of each CREATE statement in the output of the ".schema" command.
    • -} - -chng {2000 Sep 21 (Not Released)} { -
    • Change the tclsqlite "eval" method to return a list of results if - no callback script is specified.
    • -
    • Change tclsqlite.c to use the Tcl_Obj interface
    • -
    • Add tclsqlite.c to the libsqlite.a library
    • -} - -chng {2000 Sep 13 (Version 1.0.5)} { -
    • Changed the print format for floating point values from "%g" to "%.15g". -
    • -
    • Changed the comparison function so that numbers in exponential notation - (ex: 1.234e+05) sort in numerical order.
    • -} - -chng {2000 Aug 28 (Version 1.0.4)} { -
    • Added functions length() and substr().
    • -
    • Fix a bug in the sqlite shell program that was causing - a coredump when the output mode was "column" and the first row - of data contained a NULL.
    • -} - -chng {2000 Aug 22 (Version 1.0.3)} { -
    • In the sqlite shell, print the "Database opened READ ONLY" message - to stderr instead of stdout.
    • -
    • In the sqlite shell, now print the version number on initial startup.
    • -
    • Add the sqlite_version[] string constant to the library
    • -
    • Makefile updates
    • -
    • Bug fix: incorrect VDBE code was being generated for the following - circumstance: a query on an indexed table containing a WHERE clause with - an IN operator that had a subquery on its right-hand side.
    • -} - -chng {2000 Aug 18 (Version 1.0.1)} { -
    • Fix a bug in the configure script.
    • -
    • Minor revisions to the website.
    • -} - -chng {2000 Aug 17 (Version 1.0)} { -
    • Change the sqlite program so that it can read - databases for which it lacks write permission. (It used to - refuse all access if it could not write.)
    • -} - -chng {2000 Aug 9} { -
    • Treat carriage returns as white space.
    • -} - -chng {2000 Aug 8} { -
    • Added pattern matching to the ".table" command in the "sqlite" -command shell.
    • -} - -chng {2000 Aug 4} { -
    • Documentation updates
    • -
    • Added "busy" and "timeout" methods to the Tcl interface
    • -} - -chng {2000 Aug 3} { -
    • File format version number was being stored in sqlite_master.tcl - multiple times. This was harmless, but unnecessary. It is now fixed.
    • -} - -chng {2000 Aug 2} { -
    • The file format for indices was changed slightly in order to work - around an inefficiency that can sometimes come up with GDBM when - there are large indices having many entries with the same key. - ** Incompatible Change **
    • -} - -chng {2000 Aug 1} { -
    • The parser's stack was overflowing on a very long UPDATE statement. - This is now fixed.
    • -} - -chng {2000 July 31} { -
    • Finish the VDBE tutorial.
    • -
    • Added documentation on compiling to WindowsNT.
    • -
    • Fix a configuration program for WindowsNT.
    • -
    • Fix a configuration problem for HPUX.
    • -} - -chng {2000 July 29} { -
    • Better labels on column names of the result.
    • -} - -chng {2000 July 28} { -
    • Added the sqlite_busy_handler() - and sqlite_busy_timeout() interface.
    • -} - -chng {2000 June 23} { -
    • Begin writing the VDBE tutorial.
    • -} - -chng {2000 June 21} { -
    • Clean up comments and variable names. Changes to documentation. - No functional changes to the code.
    • -} - -chng {2000 June 19} { -
    • Column names in UPDATE statements were case sensitive. - This mistake has now been fixed.
    • -} - -chng {2000 June 16} { -
    • Added the concatenate string operator (||)
    • -} - -chng {2000 June 12} { -
    • Added the fcnt() function to the SQL interpreter. The fcnt() function - returns the number of database "Fetch" operations that have occurred. - This function is designed for use in test scripts to verify that - queries are efficient and appropriately optimized. Fcnt() has no other - useful purpose, as far as I know.
    • -
    • Added a bunch more tests that take advantage of the new fcnt() function. - The new tests did not uncover any new problems.
    • -} - -chng {2000 June 8} { -
    • Added lots of new test cases
    • -
    • Fix a few bugs discovered while adding test cases
    • -
    • Begin adding lots of new documentation
    • -} - -chng {2000 June 6} { -
    • Added compound select operators: UNION, UNION ALL, -INTERSECT, and EXCEPT
    • -
    • Added support for using (SELECT ...) within expressions
    • -
    • Added support for IN and BETWEEN operators
    • -
    • Added support for GROUP BY and HAVING
    • -
    • NULL values are now reported to the callback as a NULL pointer - rather than an empty string.
    • -} - -chng {2000 June 3} { -
    • Added support for default values on columns of a table.
    • -
    • Improved test coverage. Fixed a few obscure bugs found by the -improved tests.
    • -} - -chng {2000 June 2} { -
    • All database files to be modified by an UPDATE, INSERT or DELETE are -now locked before any changes are made to any files. -This makes it safe (I think) to access -the same database simultaneously from multiple processes.
    • -
    • The code appears stable so we are now calling it "beta".
    • -} - -chng {2000 June 1} { -
    • Better support for file locking so that two or more processes -(or threads) -can access the same database simultaneously. More work needed in -this area, though.
    • -} - -chng {2000 May 31} { -
    • Added support for aggregate functions (Ex: COUNT(*), MIN(...)) -to the SELECT statement.
    • -
    • Added support for SELECT DISTINCT ...
    • -} - -chng {2000 May 30} { -
    • Added the LIKE operator.
    • -
    • Added a GLOB operator: similar to LIKE -but it uses Unix shell globbing wildcards instead of the '%' -and '_' wildcards of SQL.
    • -
    • Added the COPY command patterned after -PostgreSQL so that SQLite -can now read the output of the pg_dump database dump utility -of PostgreSQL.
    • -
    • Added a VACUUM command that calls the -gdbm_reorganize() function on the underlying database -files.
    • -
    • And many, many bug fixes...
    • -} - -chng {2000 May 29} { -
    • Initial Public Release of Alpha code
    • -} - -puts { -
  • -} -footer {$Id:} DELETED common.tcl Index: common.tcl ================================================================== --- common.tcl +++ /dev/null @@ -1,90 +0,0 @@ -# This file contains TCL procedures used to generate standard parts of -# web pages. -# - -proc header {txt} { - puts "$txt" - puts {
    } - puts \ -{ - - - - - - - - -
    - - - - - - -
    - - -
    } - puts
    -} - -proc footer {{rcsid {}}} { - puts { - - -
    } - set date [lrange $rcsid 3 4] - if {$date!=""} { - puts "This page last modified on $date" - } - puts {} -} - - -# The following proc is used to ensure consistent formatting in the -# HTML generated by lang.tcl and pragma.tcl. -# -proc Syntax {args} { - puts {} - foreach {rule body} $args { - puts "" - regsub -all < $body {%LT} body - regsub -all > $body {%GT} body - regsub -all %LT $body {} body - regsub -all %GT $body {} body - regsub -all {[]|[*?]} $body {&} body - regsub -all "\n" [string trim $body] "
    \n" body - regsub -all "\n *" $body "\n\\ \\ \\ \\ " body - regsub -all {[|,.*()]} $body {&} body - regsub -all { = } $body { = } body - regsub -all {STAR} $body {*} body - ## These metacharacters must be handled to undo being - ## treated as SQL punctuation characters above. - regsub -all {RPPLUS} $body {
    )+} body - regsub -all {LP} $body {(} body - regsub -all {RP} $body {)} body - ## Place the left-hand side of the rule in the 2nd table column. - puts "" - } - puts {
    " - puts "$rule ::=$body
    } -} DELETED compile.tcl Index: compile.tcl ================================================================== --- compile.tcl +++ /dev/null @@ -1,278 +0,0 @@ -# -# Run this Tcl script to generate the compile.html file. -# -set rcsid {$Id: compile.tcl,v 1.5 2005/03/19 15:10:45 drh Exp $ } -source common.tcl -header {Compilation Options For SQLite} - -puts { -

    Compilation Options For SQLite

    - -

    -For most purposes, SQLite can be built just fine using the default -compilation options. However, if required, the compile-time options -documented below can be used to -omit SQLite features (resulting in -a smaller compiled library size) or to change the -default values of some parameters. -

    -

    -Every effort has been made to ensure that the various combinations -of compilation options work harmoniously and produce a working library. -Nevertheless, it is strongly recommended that the SQLite test-suite -be executed to check for errors before using an SQLite library built -with non-standard compilation options. -

    - -

    Options To Set Default Parameter Values

    - -

    SQLITE_DEFAULT_AUTOVACUUM=<1 or 0>
    -This macro determines if SQLite creates databases with the -auto-vacuum -flag set by default. The default value is 0 (do not create auto-vacuum -databases). In any case the compile-time default may be overridden by the -"PRAGMA auto_vacuum" command. -

    - -

    SQLITE_DEFAULT_CACHE_SIZE=<pages>
    -This macro sets the default size of the page-cache for each attached -database, in pages. This can be overridden by the "PRAGMA cache_size" -comamnd. The default value is 2000. -

    - -

    SQLITE_DEFAULT_PAGE_SIZE=<bytes>
    -This macro is used to set the default page-size used when a -database is created. The value assigned must be a power of 2. The -default value is 1024. The compile-time default may be overridden at -runtime by the "PRAGMA page_size" command. -

    - -

    SQLITE_DEFAULT_TEMP_CACHE_SIZE=<pages>
    -This macro sets the default size of the page-cache for temporary files -created by SQLite to store intermediate results, in pages. It does -not affect the page-cache for the temp database, where tables created -using "CREATE TEMP TABLE" are stored. The default value is 500. -

    - -

    SQLITE_MAX_PAGE_SIZE=<bytes>
    -This is used to set the maximum allowable page-size that can -be specified by the "PRAGMA page_size" command. The default value -is 8192. -

    - - -

    Options To Omit Features

    - -

    The following options are used to reduce the size of the compiled -library by omiting optional features. This is probably only useful -in embedded systems where space is especially tight, as even with all -features included the SQLite library is relatively small. Don't forget -to tell your compiler to optimize for binary size! (the -Os option if -using GCC).

    - -

    The macros in this section do not require values. The following -compilation switches all have the same effect:
    --DSQLITE_OMIT_ALTERTABLE
    --DSQLITE_OMIT_ALTERTABLE=1
    --DSQLITE_OMIT_ALTERTABLE=0 -

    - -

    If any of these options are defined, then the same set of SQLITE_OMIT_XXX -options must also be defined when using the 'lemon' tool to generate a parse.c -file. Because of this, these options may only used when the library is built -from source, not from the collection of pre-packaged C files provided for -non-UNIX like platforms on the website. -

    - -

    SQLITE_OMIT_ALTERTABLE
    -When this option is defined, the -ALTER TABLE command is not included in the -library. Executing an ALTER TABLE statement causes a parse error. -

    - -

    SQLITE_OMIT_AUTHORIZATION
    -Defining this option omits the authorization callback feature from the -library. The -sqlite3_set_authorizer() API function is not present in the library. -

    - -

    SQLITE_OMIT_AUTOVACUUM
    -If this option is defined, the library cannot create or write to -databases that support -auto-vacuum. Executing a -"PRAGMA auto_vacuum" statement is not an error, but does not return a value -or modify the auto-vacuum flag in the database file. If a database that -supports auto-vacuum is opened by a library compiled with this option, it -is automatically opened in read-only mode. -

    - -

    SQLITE_OMIT_AUTOINCREMENT
    -This option is used to omit the AUTOINCREMENT functionality. When this -is macro is defined, columns declared as "INTEGER PRIMARY KEY AUTOINCREMENT" -behave in the same way as columns declared as "INTEGER PRIMARY KEY" when a -NULL is inserted. The sqlite_sequence system table is neither created, nor -respected if it already exists. -

    -

    TODO: Need a link here - AUTOINCREMENT is not yet documented

    - -

    SQLITE_OMIT_BLOB_LITERAL
    -When this option is defined, it is not possible to specify a blob in -an SQL statement using the X'ABCD' syntax.

    -} -#

    WARNING: The VACUUM command depends on this syntax for vacuuming databases -#that contain blobs, so disabling this functionality may render a database -#unvacuumable. -#

    -#

    TODO: Need a link here - is that syntax documented anywhere?

    -puts { - -

    SQLITE_OMIT_COMPLETE
    -This option causes the -sqlite3_complete API to be omitted. -

    - -

    SQLITE_OMIT_COMPOUND_SELECT
    -This option is used to omit the compound SELECT functionality. -SELECT statements that use the -UNION, UNION ALL, INTERSECT or EXCEPT compound SELECT operators will -cause a parse error. -

    - -

    SQLITE_OMIT_CONFLICT_CLAUSE
    -In the future, this option will be used to omit the -ON CONFLICT clause from the library. -

    - -

    SQLITE_OMIT_DATETIME_FUNCS
    -If this option is defined, SQLite's built-in date and time manipulation -functions are omitted. Specifically, the SQL functions julianday(), date(), -time(), datetime() and strftime() are not available. The default column -values CURRENT_TIME, CURRENT_DATE and CURRENT_DATETIME are still available. -

    - -

    SQLITE_OMIT_EXPLAIN
    -Defining this option causes the EXPLAIN command to be omitted from the -library. Attempting to execute an EXPLAIN statement will cause a parse -error. -

    - -

    SQLITE_OMIT_FLOATING_POINT
    -This option is used to omit floating-point number support from the SQLite -library. When specified, specifying a floating point number as a literal -(i.e. "1.01") results in a parse error. -

    -

    In the future, this option may also disable other floating point -functionality, for example the sqlite3_result_double(), -sqlite3_bind_double(), sqlite3_value_double() and sqlite3_column_double() -API functions. -

    - -

    SQLITE_OMIT_FOREIGN_KEY
    -If this option is defined, FOREIGN KEY clauses in column declarations are -ignored. -

    - -

    SQLITE_OMIT_INTEGRITY_CHECK
    -This option may be used to omit the -"PRAGMA integrity_check" -command from the compiled library. -

    - -

    SQLITE_OMIT_MEMORYDB
    -When this is defined, the library does not respect the special database -name ":memory:" (normally used to create an in-memory database). If -":memory:" is passed to sqlite3_open(), a file with this name will be -opened or created. -

    - -

    SQLITE_OMIT_PAGER_PRAGMAS
    -Defining this option omits pragmas related to the pager subsystem from -the build. Currently, the -default_cache_size and -cache_size pragmas are omitted. -

    - -

    SQLITE_OMIT_PRAGMA
    -This option is used to omit the PRAGMA command -from the library. Note that it is useful to define the macros that omit -specific pragmas in addition to this, as they may also remove supporting code -in other sub-systems. This macro removes the PRAGMA command only. -

    - -

    SQLITE_OMIT_PROGRESS_CALLBACK
    -This option may be defined to omit the capability to issue "progress" -callbacks during long-running SQL statements. The -sqlite3_progress_handler() -API function is not present in the library. - -

    SQLITE_OMIT_REINDEX
    -When this option is defined, the REINDEX -command is not included in the library. Executing a REINDEX statement causes -a parse error. -

    - -

    SQLITE_OMIT_SCHEMA_PRAGMAS
    -Defining this option omits pragmas for querying the database schema from -the build. Currently, the -table_info, -index_info, -index_list and -database_list -pragmas are omitted. -

    - -

    SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
    -Defining this option omits pragmas for querying and modifying the -database schema version and user version from the build. Specifically, the -schema_version and -user_version -pragmas are omitted. - -

    SQLITE_OMIT_SUBQUERY
    -

    If defined, support for sub-selects and the IN() operator are omitted. -

    - -

    SQLITE_OMIT_TCL_VARIABLE
    -

    If this macro is defined, then the special "$" syntax -used to automatically bind SQL variables to TCL variables is omitted. -

    - -

    SQLITE_OMIT_TRIGGER
    -Defining this option omits support for VIEW objects. Neither the -CREATE TRIGGER or -DROP TRIGGER -commands are available in this case, attempting to execute either will result -in a parse error. -

    -

    -WARNING: If this macro is defined, it will not be possible to open a database -for which the schema contains TRIGGER objects. -

    - -

    SQLITE_OMIT_UTF16
    -This macro is used to omit support for UTF16 text encoding. When this is -defined all API functions that return or accept UTF16 encoded text are -unavailable. These functions can be identified by the fact that they end -with '16', for example sqlite3_prepare16(), sqlite3_column_text16() and -sqlite3_bind_text16(). -

    - -

    SQLITE_OMIT_VACUUM
    -When this option is defined, the VACUUM -command is not included in the library. Executing a VACUUM statement causes -a parse error. -

    - -

    SQLITE_OMIT_VIEW
    -Defining this option omits support for VIEW objects. Neither the -CREATE VIEW or -DROP VIEW -commands are available in this case, attempting to execute either will result -in a parse error. -

    -

    -WARNING: If this macro is defined, it will not be possible to open a database -for which the schema contains VIEW objects. -

    -} -footer $rcsid DELETED conflict.tcl Index: conflict.tcl ================================================================== --- conflict.tcl +++ /dev/null @@ -1,91 +0,0 @@ -# -# Run this Tcl script to generate the constraint.html file. -# -set rcsid {$Id: conflict.tcl,v 1.4 2004/10/10 17:24:55 drh Exp $ } -source common.tcl -header {Constraint Conflict Resolution in SQLite} -puts { -

    Constraint Conflict Resolution in SQLite

    - -

    -In most SQL databases, if you have a UNIQUE constraint on -a table and you try to do an UPDATE or INSERT that violates -the constraint, the database will abort the operation in -progress, back out any prior changes associated with -UPDATE or INSERT command, and return an error. -This is the default behavior of SQLite. -Beginning with version 2.3.0, though, SQLite allows you to -define alternative ways for dealing with constraint violations. -This article describes those alternatives and how to use them. -

    - -

    Conflict Resolution Algorithms

    - -

    -SQLite defines five constraint conflict resolution algorithms -as follows: -

    - -
    -
    ROLLBACK
    -

    When a constraint violation occurs, an immediate ROLLBACK -occurs, thus ending the current transaction, and the command aborts -with a return code of SQLITE_CONSTRAINT. If no transaction is -active (other than the implied transaction that is created on every -command) then this algorithm works the same as ABORT.

    - -
    ABORT
    -

    When a constraint violation occurs, the command backs out -any prior changes it might have made and aborts with a return code -of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes -from prior commands within the same transaction -are preserved. This is the default behavior for SQLite.

    - -
    FAIL
    -

    When a constraint violation occurs, the command aborts with a -return code SQLITE_CONSTRAINT. But any changes to the database that -the command made prior to encountering the constraint violation -are preserved and are not backed out. For example, if an UPDATE -statement encountered a constraint violation on the 100th row that -it attempts to update, then the first 99 row changes are preserved -by change to rows 100 and beyond never occur.

    - -
    IGNORE
    -

    When a constraint violation occurs, the one row that contains -the constraint violation is not inserted or changed. But the command -continues executing normally. Other rows before and after the row that -contained the constraint violation continue to be inserted or updated -normally. No error is returned.

    - -
    REPLACE
    -

    When a UNIQUE constraint violation occurs, the pre-existing row -that caused the constraint violation is removed prior to inserting -or updating the current row. Thus the insert or update always occurs. -The command continues executing normally. No error is returned.

    -
    - -

    Why So Many Choices?

    - -

    SQLite provides multiple conflict resolution algorithms for a -couple of reasons. First, SQLite tries to be roughly compatible with as -many other SQL databases as possible, but different SQL database -engines exhibit different conflict resolution strategies. For -example, PostgreSQL always uses ROLLBACK, Oracle always uses ABORT, and -MySQL usually uses FAIL but can be instructed to use IGNORE or REPLACE. -By supporting all five alternatives, SQLite provides maximum -portability.

    - -

    Another reason for supporting multiple algorithms is that sometimes -it is useful to use an algorithm other than the default. -Suppose, for example, you are -inserting 1000 records into a database, all within a single -transaction, but one of those records is malformed and causes -a constraint error. Under PostgreSQL or Oracle, none of the -1000 records would get inserted. In MySQL, some subset of the -records that appeared before the malformed record would be inserted -but the rest would not. Neither behavior is especially helpful. -What you really want is to use the IGNORE algorithm to insert -all but the malformed record.

    - -} -footer $rcsid DELETED copyright-release.html Index: copyright-release.html ================================================================== --- copyright-release.html +++ /dev/null @@ -1,109 +0,0 @@ - - -

    -Copyright Release for
    -Contributions To SQLite -

    - -

    -SQLite is software that implements an embeddable SQL database engine. -SQLite is available for free download from http://www.sqlite.org/. -The principal author and maintainer of SQLite has disclaimed all -copyright interest in his contributions to SQLite -and thus released his contributions into the public domain. -In order to keep the SQLite software unencumbered by copyright -claims, the principal author asks others who may from time to -time contribute changes and enhancements to likewise disclaim -their own individual copyright interest. -

    - -

    -Because the SQLite software found at http://www.sqlite.org/ is in the -public domain, anyone is free to download the SQLite software -from that website, make changes to the software, use, distribute, -or sell the modified software, under either the original name or -under some new name, without any need to obtain permission, pay -royalties, acknowledge the original source of the software, or -in any other way compensate, identify, or notify the original authors. -Nobody is in any way compelled to contribute their SQLite changes and -enhancements back to the SQLite website. This document concerns -only changes and enhancements to SQLite that are intentionally and -deliberately contributed back to the SQLite website. -

    - -

    -For the purposes of this document, "SQLite software" shall mean any -computer source code, documentation, makefiles, test scripts, or -other information that is published on the SQLite website, -http://www.sqlite.org/. Precompiled binaries are excluded from -the definition of "SQLite software" in this document because the -process of compiling the software may introduce information from -outside sources which is not properly a part of SQLite. -

    - -

    -The header comments on the SQLite source files exhort the reader to -share freely and to never take more than one gives. -In the spirit of that exhortation I make the following declarations: -

    - -
      -
    1. -I dedicate to the public domain -any and all copyright interest in the SQLite software that -was publicly available on the SQLite website (http://www.sqlite.org/) prior -to the date of the signature below and any changes or enhancements to -the SQLite software -that I may cause to be published on that website in the future. -I make this dedication for the benefit of the public at large and -to the detriment of my heirs and successors. I intend this -dedication to be an overt act of relinquishment in perpetuity of -all present and future rights to the SQLite software under copyright -law. -

    2. - -
    3. -To the best of my knowledge and belief, the changes and enhancements that -I have contributed to SQLite are either originally written by me -or are derived from prior works which I have verified are also -in the public domain and are not subject to claims of copyright -by other parties. -

    4. - -
    5. -To the best of my knowledge and belief, no individual, business, organization, -government, or other entity has any copyright interest -in the SQLite software as it existed on the -SQLite website as of the date on the signature line below. -

    6. - -
    7. -I agree never to publish any additional information -to the SQLite website (by CVS, email, scp, FTP, or any other means) unless -that information is an original work of authorship by me or is derived from -prior published versions of SQLite. -I agree never to copy and paste code into the SQLite code base from -other sources. -I agree never to publish on the SQLite website any information that -would violate a law or breach a contract. -

    8. -
    - -

    - - - - - -
    -Signature: -

     

    -

     

    -

     

    -
    -Date: -
    -Name (printed): -
    - - DELETED copyright-release.pdf Index: copyright-release.pdf ================================================================== --- copyright-release.pdf +++ /dev/null cannot compute difference between binary files DELETED copyright.tcl Index: copyright.tcl ================================================================== --- copyright.tcl +++ /dev/null @@ -1,126 +0,0 @@ -set rcsid {$Id: copyright.tcl,v 1.7 2007/05/06 21:20:43 drh Exp $} -source common.tcl -header {SQLite Copyright} -puts { -

    SQLite Copyright

    - - - -
    -
    -SQLite is in the
    -Public Domain -
    - -

    -All of the deliverable code in SQLite has been dedicated to the -public domain -by the authors. -All code authors, and representatives of the companies they work for, -have signed affidavits dedicating their contributions to -the public domain and originals of -those signed affidavits are stored in a firesafe at the main offices -of Hwaci. -Anyone is free to copy, modify, publish, use, compile, sell, or distribute -the original SQLite code, either in source code form or as a compiled binary, -for any purpose, commercial or non-commercial, and by any means. -

    - -

    -The previous paragraph applies to the deliverable code in SQLite - -those parts of the SQLite library that you actually bundle and -ship with a larger application. Portions of the documentation and -some code used as part of the build process might fall under -other licenses. The details here are unclear. We do not worry -about the licensing of the documentation and build code so much -because none of these things are part of the core deliverable -SQLite library. -

    - -

    -All of the deliverable code in SQLite has been written from scratch. -No code has been taken from other projects or from the open -internet. Every line of code can be traced back to its original -author, and all of those authors have public domain dedications -on file. So the SQLite code base is clean and is -uncontaminated with licensed code from other projects. -

    - -

    Obtaining An Explicit License To Use SQLite

    - -

    -Even though SQLite is in the public domain and does not require -a license, some users want to obtain a license anyway. Some reasons -for obtaining a license include: -

    - -
      -
    • You are using SQLite in a jurisdiction that does not recognize - the public domain.
    • -
    • You are using SQLite in a jurisdiction that does not recognize - the right of an author to dedicate their work to the public - domain.
    • -
    • You want to hold a tangible legal document - as evidence that you have the legal right to use and distribute - SQLite.
    • -
    • Your legal department tells you that you have to purchase a license. -
    • -
    - -

    -If you feel like you really have to purchase a license for SQLite, -Hwaci, the company that employs -the architect and principal developers of SQLite, will sell you -one. -Please contact: -

    - -
    -D. Richard Hipp
    -Hwaci - Applied Software Research
    -704.948.4565
    -drh@hwaci.com -
    - -

    Contributed Code

    - -

    -In order to keep SQLite completely free and unencumbered by copyright, -all new contributors to the SQLite code base are asked to dedicate -their contributions to the public domain. -If you want to send a patch or enhancement for possible inclusion in the -SQLite source tree, please accompany the patch with the following statement: -

    - -
    -The author or authors of this code dedicate any and all copyright interest -in this code to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and successors. -We intend this dedication to be an overt act of relinquishment in -perpetuity of all present and future rights to this code under copyright law. -
    - -

    -We are not able to accept patches or changes to -SQLite that are not accompanied by a statement such as the above. -In addition, if you make -changes or enhancements as an employee, then a simple statement such as the -above is insufficient. You must also send by surface mail a copyright release -signed by a company officer. -A signed original of the copyright release should be mailed to:

    - -
    -Hwaci
    -6200 Maple Cove Lane
    -Charlotte, NC 28269
    -USA -
    - -

    -A template copyright release is available -in PDF or -HTML. -You can use this release to make future changes. -

    -} -footer $rcsid DELETED datatype3.tcl Index: datatype3.tcl ================================================================== --- datatype3.tcl +++ /dev/null @@ -1,440 +0,0 @@ -set rcsid {$Id: datatype3.tcl,v 1.17 2007/06/20 16:13:23 drh Exp $} -source common.tcl -header {Datatypes In SQLite Version 3} -puts { -

    Datatypes In SQLite Version 3

    - -

    1. Storage Classes

    - -

    Version 2 of SQLite stores all column values as ASCII text. -Version 3 enhances this by providing the ability to store integer and -real numbers in a more compact format and the capability to store -BLOB data.

    - -

    Each value stored in an SQLite database (or manipulated by the -database engine) has one of the following storage classes:

    -
      -
    • NULL. The value is a NULL value.

      -
    • INTEGER. The value is a signed integer, stored in 1, - 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.

      -
    • REAL. The value is a floating point value, stored as - an 8-byte IEEE floating point number.

      -
    • TEXT. The value is a text string, stored using the - database encoding (UTF-8, UTF-16BE or UTF-16-LE).

      -
    • BLOB. The value is a blob of data, stored exactly as - it was input.

      -
    - -

    As in SQLite version 2, any column in a version 3 database except an INTEGER -PRIMARY KEY may be used to store any type of value. The exception to -this rule is described below under 'Strict Affinity Mode'.

    - -

    All values supplied to SQLite, whether as literals embedded in SQL -statements or values bound to pre-compiled SQL statements -are assigned a storage class before the SQL statement is executed. -Under circumstances described below, the -database engine may convert values between numeric storage classes -(INTEGER and REAL) and TEXT during query execution. -

    - -

    Storage classes are initially assigned as follows:

    -
      -
    • Values specified as literals as part of SQL statements are - assigned storage class TEXT if they are enclosed by single or double - quotes, INTEGER if the literal is specified as an unquoted number - with no decimal point or exponent, REAL if the literal is an - unquoted number with a decimal point or exponent and NULL if the - value is a NULL. Literals with storage class BLOB are specified - using the X'ABCD' notation.

      -
    • Values supplied using the sqlite3_bind_* APIs are assigned - the storage class that most closely matches the native type bound - (i.e. sqlite3_bind_blob() binds a value with storage class BLOB).

      -
    -

    The storage class of a value that is the result of an SQL scalar -operator depends on the outermost operator of the expression. -User-defined functions may return values with any storage class. It -is not generally possible to determine the storage class of the -result of an expression at compile time.

    - - -

    2. Column Affinity

    - -

    -In SQLite version 3, the type of a value is associated with the value -itself, not with the column or variable in which the value is stored. -(This is sometimes called - -manifest typing.) -All other SQL databases engines that we are aware of use the more -restrictive system of static typing where the type is associated with -the container, not the value. -

    - -

    -In order to maximize compatibility between SQLite and other database -engines, SQLite support the concept of "type affinity" on columns. -The type affinity of a column is the recommended type for data stored -in that column. The key here is that the type is recommended, not -required. Any column can still store any type of data, in theory. -It is just that some columns, given the choice, will prefer to use -one storage class over another. The preferred storage class for -a column is called its "affinity". -

    - -

    Each column in an SQLite 3 database is assigned one of the -following type affinities:

    -
      -
    • TEXT
    • -
    • NUMERIC
    • -
    • INTEGER
    • -
    • REAL
    • -
    • NONE
    • -
    - -

    A column with TEXT affinity stores all data using storage classes -NULL, TEXT or BLOB. If numerical data is inserted into a column with -TEXT affinity it is converted to text form before being stored.

    - -

    A column with NUMERIC affinity may contain values using all five -storage classes. When text data is inserted into a NUMERIC column, an -attempt is made to convert it to an integer or real number before it -is stored. If the conversion is successful, then the value is stored -using the INTEGER or REAL storage class. If the conversion cannot be -performed the value is stored using the TEXT storage class. No -attempt is made to convert NULL or blob values.

    - -

    A column that uses INTEGER affinity behaves in the same way as a -column with NUMERIC affinity, except that if a real value with no -floating point component (or text value that converts to such) is -inserted it is converted to an integer and stored using the INTEGER -storage class.

    - -

    A column with REAL affinity behaves like a column with NUMERIC -affinity except that it forces integer values into floating point -representation. (As an optimization, integer values are stored on -disk as integers in order to take up less space and are only converted -to floating point as the value is read out of the table.)

    - -

    A column with affinity NONE does not prefer one storage class over -another. It makes no attempt to coerce data before -it is inserted.

    - -

    2.1 Determination Of Column Affinity

    - -

    The type affinity of a column is determined by the declared type -of the column, according to the following rules:

    -
      -
    1. If the datatype contains the string "INT" then it - is assigned INTEGER affinity.

      - -
    2. If the datatype of the column contains any of the strings - "CHAR", "CLOB", or "TEXT" then that - column has TEXT affinity. Notice that the type VARCHAR contains the - string "CHAR" and is thus assigned TEXT affinity.

      - -
    3. If the datatype for a column - contains the string "BLOB" or if - no datatype is specified then the column has affinity NONE.

      - -
    4. If the datatype for a column - contains any of the strings "REAL", "FLOA", - or "DOUB" then the column has REAL affinity

      - -
    5. Otherwise, the affinity is NUMERIC.

      -
    - -

    If a table is created using a "CREATE TABLE <table> AS -SELECT..." statement, then all columns have no datatype specified -and they are given no affinity.

    - -

    2.2 Column Affinity Example

    - -
    -
    CREATE TABLE t1(
    -    t  TEXT,
    -    nu NUMERIC, 
    -    i  INTEGER,
    -    no BLOB
    -);
    -
    --- Storage classes for the following row:
    --- TEXT, REAL, INTEGER, TEXT
    -INSERT INTO t1 VALUES('500.0', '500.0', '500.0', '500.0');
    -
    --- Storage classes for the following row:
    --- TEXT, REAL, INTEGER, REAL
    -INSERT INTO t1 VALUES(500.0, 500.0, 500.0, 500.0);
    -
    -
    - - -

    3. Comparison Expressions

    - -

    Like SQLite version 2, version 3 -features the binary comparison operators '=', -'<', '<=', '>=' and '!=', an operation to test for set -membership, 'IN', and the ternary comparison operator 'BETWEEN'.

    -

    The results of a comparison depend on the storage classes of the -two values being compared, according to the following rules:

    -
      -
    • A value with storage class NULL is considered less than any - other value (including another value with storage class NULL).

      - -
    • An INTEGER or REAL value is less than any TEXT or BLOB value. - When an INTEGER or REAL is compared to another INTEGER or REAL, a - numerical comparison is performed.

      - -
    • A TEXT value is less than a BLOB value. When two TEXT values - are compared, the C library function memcmp() is usually used to - determine the result. However this can be overridden, as described - under 'User-defined collation Sequences' below.

      - -
    • When two BLOB values are compared, the result is always - determined using memcmp().

      -
    - -

    SQLite may attempt to convert values between the numeric storage -classes (INTEGER and REAL) and TEXT before performing a comparison. -For binary comparisons, this is done in the cases enumerated below. -The term "expression" used in the bullet points below means any -SQL scalar expression or literal other than a column value. Note that -if X and Y.Z are a column names, then +X and +Y.Z are considered -expressions.

    -
      -
    • When a column value is compared to the result of an - expression, the affinity of the column is applied to the result of - the expression before the comparison takes place.

      - -
    • When two column values are compared, if one column has - INTEGER or REAL or NUMERIC affinity and the other does not, - then NUMERIC affinity is applied to any values with storage - class TEXT extracted from the non-NUMERIC column.

      - -
    • When the results of two expressions are compared, no - conversions occur. The results are compared as is. If a string - is compared to a number, the number will always be less than the - string.

      -
    - -

    -In SQLite, the expression "a BETWEEN b AND c" is equivalent to "a >= b -AND a <= c", even if this means that different affinities are applied to -'a' in each of the comparisons required to evaluate the expression. -

    - -

    Expressions of the type "a IN (SELECT b ....)" are handled by the three -rules enumerated above for binary comparisons (e.g. in a -similar manner to "a = b"). For example if 'b' is a column value -and 'a' is an expression, then the affinity of 'b' is applied to 'a' -before any comparisons take place.

    - -

    SQLite treats the expression "a IN (x, y, z)" as equivalent to "a = +x OR -a = +y OR a = +z". The values to the right of the IN operator (the "x", "y", -and "z" values in this example) are considered to be expressions, even if they -happen to be column values. If the value of the left of the IN operator is -a column, then the affinity of that column is used. If the value is an -expression then no conversions occur. -

    - -

    3.1 Comparison Example

    - -
    -
    -CREATE TABLE t1(
    -    a TEXT,
    -    b NUMERIC,
    -    c BLOB
    -);
    -
    --- Storage classes for the following row:
    --- TEXT, REAL, TEXT
    -INSERT INTO t1 VALUES('500', '500', '500');
    -
    --- 60 and 40 are converted to '60' and '40' and values are compared as TEXT.
    -SELECT a < 60, a < 40 FROM t1;
    -1|0
    -
    --- Comparisons are numeric. No conversions are required.
    -SELECT b < 60, b < 600 FROM t1;
    -0|1
    -
    --- Both 60 and 600 (storage class NUMERIC) are less than '500'
    --- (storage class TEXT).
    -SELECT c < 60, c < 600 FROM t1;
    -0|0
    -
    -
    -

    4. Operators

    - -

    All mathematical operators (which is to say, all operators other -than the concatenation operator "||") apply NUMERIC -affinity to all operands prior to being carried out. If one or both -operands cannot be converted to NUMERIC then the result of the -operation is NULL.

    - -

    For the concatenation operator, TEXT affinity is applied to both -operands. If either operand cannot be converted to TEXT (because it -is NULL or a BLOB) then the result of the concatenation is NULL.

    - -

    5. Sorting, Grouping and Compound SELECTs

    - -

    When values are sorted by an ORDER by clause, values with storage -class NULL come first, followed by INTEGER and REAL values -interspersed in numeric order, followed by TEXT values usually in -memcmp() order, and finally BLOB values in memcmp() order. No storage -class conversions occur before the sort.

    - -

    When grouping values with the GROUP BY clause values with -different storage classes are considered distinct, except for INTEGER -and REAL values which are considered equal if they are numerically -equal. No affinities are applied to any values as the result of a -GROUP by clause.

    - -

    The compound SELECT operators UNION, -INTERSECT and EXCEPT perform implicit comparisons between values. -Before these comparisons are performed an affinity may be applied to -each value. The same affinity, if any, is applied to all values that -may be returned in a single column of the compound SELECT result set. -The affinity applied is the affinity of the column returned by the -left most component SELECTs that has a column value (and not some -other kind of expression) in that position. If for a given compound -SELECT column none of the component SELECTs return a column value, no -affinity is applied to the values from that column before they are -compared.

    - -

    6. Other Affinity Modes

    - -

    The above sections describe the operation of the database engine -in 'normal' affinity mode. SQLite version 3 will feature two other affinity -modes, as follows:

    -
      -
    • Strict affinity mode. In this mode if a conversion - between storage classes is ever required, the database engine - returns an error and the current statement is rolled back.

      - -
    • No affinity mode. In this mode no conversions between - storage classes are ever performed. Comparisons between values of - different storage classes (except for INTEGER and REAL) are always - false.

      -
    - -
    -

    7. User-defined Collation Sequences

    - -

    -By default, when SQLite compares two text values, the result of the -comparison is determined using memcmp(), regardless of the encoding of the -string. SQLite v3 provides the ability for users to supply arbitrary -comparison functions, known as user-defined collation sequences, to be used -instead of memcmp(). -

    -

    -Aside from the default collation sequence BINARY, implemented using -memcmp(), SQLite features one extra built-in collation sequence -intended for testing purposes, the NOCASE collation: -

    -
      -
    • BINARY - Compares string data using memcmp(), regardless - of text encoding.
    • -
    • NOCASE - The same as binary, except the 26 upper case - characters used by the English language are - folded to their lower case equivalents before - the comparison is performed.
    - - -

    7.1 Assigning Collation Sequences from SQL

    - -

    -Each column of each table has a default collation type. If a collation type -other than BINARY is required, a COLLATE clause is specified as part of the -column definition to define it. -

    - -

    -Whenever two text values are compared by SQLite, a collation sequence is -used to determine the results of the comparison according to the following -rules. Sections 3 and 5 of this document describe the circumstances under -which such a comparison takes place. -

    - -

    -For binary comparison operators (=, <, >, <= and >=) if either operand is a -column, then the default collation type of the column determines the -collation sequence to use for the comparison. If both operands are columns, -then the collation type for the left operand determines the collation -sequence used. If neither operand is a column, then the BINARY collation -sequence is used. For the purposes of this paragraph, a column name -preceded by one or more unary "+" operators is considered a column name. -

    - -

    -The expression "x BETWEEN y and z" is equivalent to "x >= y AND x <= -z". The expression "x IN (SELECT y ...)" is handled in the same way as the -expression "x = y" for the purposes of determining the collation sequence -to use. The collation sequence used for expressions of the form "x IN (y, z -...)" is the default collation type of x if x is a column, or BINARY -otherwise. -

    - -

    -An ORDER BY clause that is part of a SELECT -statement may be assigned a collation sequence to be used for the sort -operation explicitly. In this case the explicit collation sequence is -always used. Otherwise, if the expression sorted by an ORDER BY clause is -a column, then the default collation type of the column is used to -determine sort order. If the expression is not a column, then the BINARY -collation sequence is used. -

    - -

    7.2 Collation Sequences Example

    -

    -The examples below identify the collation sequences that would be used to -determine the results of text comparisons that may be performed by various -SQL statements. Note that a text comparison may not be required, and no -collation sequence used, in the case of numeric, blob or NULL values. -

    -
    -
    -CREATE TABLE t1(
    -    a,                 -- default collation type BINARY
    -    b COLLATE BINARY,  -- default collation type BINARY
    -    c COLLATE REVERSE, -- default collation type REVERSE
    -    d COLLATE NOCASE   -- default collation type NOCASE
    -);
    -
    --- Text comparison is performed using the BINARY collation sequence.
    -SELECT (a = b) FROM t1;
    -
    --- Text comparison is performed using the NOCASE collation sequence.
    -SELECT (d = a) FROM t1;
    -
    --- Text comparison is performed using the BINARY collation sequence.
    -SELECT (a = d) FROM t1;
    -
    --- Text comparison is performed using the REVERSE collation sequence.
    -SELECT ('abc' = c) FROM t1;
    -
    --- Text comparison is performed using the REVERSE collation sequence.
    -SELECT (c = 'abc') FROM t1;
    -
    --- Grouping is performed using the NOCASE collation sequence (i.e. values
    --- 'abc' and 'ABC' are placed in the same group).
    -SELECT count(*) GROUP BY d FROM t1;
    -
    --- Grouping is performed using the BINARY collation sequence.
    -SELECT count(*) GROUP BY (d || '') FROM t1;
    -
    --- Sorting is performed using the REVERSE collation sequence.
    -SELECT * FROM t1 ORDER BY c;
    -
    --- Sorting is performed using the BINARY collation sequence.
    -SELECT * FROM t1 ORDER BY (c || '');
    -
    --- Sorting is performed using the NOCASE collation sequence.
    -SELECT * FROM t1 ORDER BY c COLLATE NOCASE;
    -
    -
    -
    - -} -footer $rcsid DELETED datatypes.tcl Index: datatypes.tcl ================================================================== --- datatypes.tcl +++ /dev/null @@ -1,243 +0,0 @@ -# -# Run this script to generated a datatypes.html output file -# -set rcsid {$Id: datatypes.tcl,v 1.8 2004/10/10 17:24:55 drh Exp $} -source common.tcl -header {Datatypes In SQLite version 2} -puts { -

    Datatypes In SQLite Version 2

    - -

    1.0   Typelessness

    -

    -SQLite is "typeless". This means that you can store any -kind of data you want in any column of any table, regardless of the -declared datatype of that column. -(See the one exception to this rule in section 2.0 below.) -This behavior is a feature, not -a bug. A database is supposed to store and retrieve data and it -should not matter to the database what format that data is in. -The strong typing system found in most other SQL engines and -codified in the SQL language spec is a misfeature - -it is an example of the implementation showing through into the -interface. SQLite seeks to overcome this misfeature by allowing -you to store any kind of data into any kind of column and by -allowing flexibility in the specification of datatypes. -

    - -

    -A datatype to SQLite is any sequence of zero or more names -optionally followed by a parenthesized lists of one or two -signed integers. Notice in particular that a datatype may -be zero or more names. That means that an empty -string is a valid datatype as far as SQLite is concerned. -So you can declare tables where the datatype of each column -is left unspecified, like this: -

    - -
    -CREATE TABLE ex1(a,b,c);
    -
    - -

    -Even though SQLite allows the datatype to be omitted, it is -still a good idea to include it in your CREATE TABLE statements, -since the data type often serves as a good hint to other -programmers about what you intend to put in the column. And -if you ever port your code to another database engine, that -other engine will probably require a datatype of some kind. -SQLite accepts all the usual datatypes. For example: -

    - -
    -CREATE TABLE ex2(
    -  a VARCHAR(10),
    -  b NVARCHAR(15),
    -  c TEXT,
    -  d INTEGER,
    -  e FLOAT,
    -  f BOOLEAN,
    -  g CLOB,
    -  h BLOB,
    -  i TIMESTAMP,
    -  j NUMERIC(10,5)
    -  k VARYING CHARACTER (24),
    -  l NATIONAL VARYING CHARACTER(16)
    -);
    -
    - -

    -And so forth. Basically any sequence of names optionally followed by -one or two signed integers in parentheses will do. -

    - -

    2.0   The INTEGER PRIMARY KEY

    - -

    -One exception to the typelessness of SQLite is a column whose type -is INTEGER PRIMARY KEY. (And you must use "INTEGER" not "INT". -A column of type INT PRIMARY KEY is typeless just like any other.) -INTEGER PRIMARY KEY columns must contain a 32-bit signed integer. Any -attempt to insert non-integer data will result in an error. -

    - -

    -INTEGER PRIMARY KEY columns can be used to implement the equivalent -of AUTOINCREMENT. If you try to insert a NULL into an INTEGER PRIMARY -KEY column, the column will actually be filled with an integer that is -one greater than the largest key already in the table. Or if the -largest key is 2147483647, then the column will be filled with a -random integer. Either way, the INTEGER PRIMARY KEY column will be -assigned a unique integer. You can retrieve this integer using -the sqlite_last_insert_rowid() API function or using the -last_insert_rowid() SQL function in a subsequent SELECT statement. -

    - -

    3.0   Comparison and Sort Order

    - -

    -SQLite is typeless for the purpose of deciding what data is allowed -to be stored in a column. But some notion of type comes into play -when sorting and comparing data. For these purposes, a column or -an expression can be one of two types: numeric and text. -The sort or comparison may give different results depending on which -type of data is being sorted or compared. -

    - -

    -If data is of type text then the comparison is determined by -the standard C data comparison functions memcmp() or -strcmp(). The comparison looks at bytes from two inputs one -by one and returns the first non-zero difference. -Strings are '\000' terminated so shorter -strings sort before longer strings, as you would expect. -

    - -

    -For numeric data, this situation is more complex. If both inputs -look like well-formed numbers, then they are converted -into floating point values using atof() and compared numerically. -If one input is not a well-formed number but the other is, then the -number is considered to be less than the non-number. If neither input -is a well-formed number, then strcmp() is used to do the -comparison. -

    - -

    -Do not be confused by the fact that a column might have a "numeric" -datatype. This does not mean that the column can contain only numbers. -It merely means that if the column does contain a number, that number -will sort in numerical order. -

    - -

    -For both text and numeric values, NULL sorts before any other value. -A comparison of any value against NULL using operators like "<" or -">=" is always false. -

    - -

    4.0   How SQLite Determines Datatypes

    - -

    -For SQLite version 2.6.3 and earlier, all values used the numeric datatype. -The text datatype appears in version 2.7.0 and later. In the sequel it -is assumed that you are using version 2.7.0 or later of SQLite. -

    - -

    -For an expression, the datatype of the result is often determined by -the outermost operator. For example, arithmetic operators ("+", "*", "%") -always return a numeric results. The string concatenation operator -("||") returns a text result. And so forth. If you are ever in doubt -about the datatype of an expression you can use the special typeof() -SQL function to determine what the datatype is. For example: -

    - -
    -sqlite> SELECT typeof('abc'+123);
    -numeric
    -sqlite> SELECT typeof('abc'||123);
    -text
    -
    - -

    -For table columns, the datatype is determined by the type declaration -of the CREATE TABLE statement. The datatype is text if and only if -the type declaration contains one or more of the following strings: -

    - -
    -BLOB
    -CHAR
    -CLOB
    -TEXT -
    - -

    -The search for these strings in the type declaration is case insensitive, -of course. If any of the above strings occur anywhere in the type -declaration, then the datatype of the column is text. Notice that -the type "VARCHAR" contains "CHAR" as a substring so it is considered -text.

    - -

    If none of the strings above occur anywhere in the type declaration, -then the datatype is numeric. Note in particular that the datatype for columns -with an empty type declaration is numeric. -

    - -

    5.0   Examples

    - -

    -Consider the following two command sequences: -

    - -
    -CREATE TABLE t1(a INTEGER UNIQUE);        CREATE TABLE t2(b TEXT UNIQUE);
    -INSERT INTO t1 VALUES('0');               INSERT INTO t2 VALUES(0);
    -INSERT INTO t1 VALUES('0.0');             INSERT INTO t2 VALUES(0.0);
    -
    - -

    In the sequence on the left, the second insert will fail. In this case, -the strings '0' and '0.0' are treated as numbers since they are being -inserted into a numeric column but 0==0.0 which violates the uniqueness -constraint. However, the second insert in the right-hand sequence works. In -this case, the constants 0 and 0.0 are treated as strings which means that -they are distinct.

    - -

    SQLite always converts numbers into double-precision (64-bit) floats -for comparison purposes. This means that a long sequence of digits that -differ only in insignificant digits will compare equal if they -are in a numeric column but will compare unequal if they are in a text -column. We have:

    - -
    -INSERT INTO t1                            INSERT INTO t2
    -   VALUES('12345678901234567890');           VALUES(12345678901234567890);
    -INSERT INTO t1                            INSERT INTO t2
    -   VALUES('12345678901234567891');           VALUES(12345678901234567891);
    -
    - -

    As before, the second insert on the left will fail because the comparison -will convert both strings into floating-point number first and the only -difference in the strings is in the 20-th digit which exceeds the resolution -of a 64-bit float. In contrast, the second insert on the right will work -because in that case, the numbers being inserted are strings and are -compared using memcmp().

    - -

    -Numeric and text types make a difference for the DISTINCT keyword too: -

    - -
    -CREATE TABLE t3(a INTEGER);               CREATE TABLE t4(b TEXT);
    -INSERT INTO t3 VALUES('0');               INSERT INTO t4 VALUES(0);
    -INSERT INTO t3 VALUES('0.0');             INSERT INTO t4 VALUES(0.0);
    -SELECT DISTINCT * FROM t3;                SELECT DISTINCT * FROM t4;
    -
    - -

    -The SELECT statement on the left returns a single row since '0' and '0.0' -are treated as numbers and are therefore indistinct. But the SELECT -statement on the right returns two rows since 0 and 0.0 are treated -as strings which are different.

    -} -footer $rcsid DELETED different.tcl Index: different.tcl ================================================================== --- different.tcl +++ /dev/null @@ -1,224 +0,0 @@ -set rcsid {$Id: different.tcl,v 1.8 2006/12/18 14:12:21 drh Exp $} -source common.tcl -header {Distinctive Features Of SQLite} -puts { -

    -This page highlights some of the characteristics of SQLite that are -unusual and which make SQLite different from many other SQL -database engines. -

    -} -proc feature {tag name text} { - puts "" - puts "

    $name

    \n" - puts "
    $text
    \n" -} - -feature zeroconfig {Zero-Configuration} { - SQLite does not need to be "installed" before it is used. - There is no "setup" procedure. There is no - server process that needs to be started, stopped, or configured. - There is - no need for an administrator to create a new database instance or assign - access permissions to users. - SQLite uses no configuration files. - Nothing needs to be done to tell the system that SQLite is running. - No actions are required to recover after a system crash or power failure. - There is nothing to troubleshoot. -

    - SQLite just works. -

    - Other more familiar database engines run great once you get them going. - But doing the initial installation and configuration can be - intimidatingly complex. -} - -feature serverless {Serverless} { - Most SQL database engines are implemented as a separate server - process. Programs that want to access the database communicate - with the server using some kind of interprocess communication - (typically TCP/IP) to send requests to the server and to receive - back results. SQLite does not work this way. With SQLite, the - process that wants to access the database reads and writes - directly from the database files on disk. There is no intermediary - server process.

    - There are advantages and disadvantages to being serverless. The - main advantage is that there is no separate server process - to install, setup, configure, initialize, manage, and troubleshoot. - This is one reason why SQLite is a "zero-configuration" database - engine. Programs that use SQLite require no administrative support - for setting up the database engine before they are run. Any program - that is able to access the disk is able to use an SQLite database. -

    - On the other hand, a database engine that uses a server can provide - better protection from bugs in the client application - stray pointers - in a client cannot corrupt memory on the server. And because a server - is a single persistent process, it is able to control database access with - more precision, allowing for finer grain locking and better concurrency.

    - Most SQL database engines are client/server based. Of those that are - serverless, SQLite is the only one that this author knows of that - allows multiple applications to access the same database at the same time. -} - -feature onefile {Single Database File} { - An SQLite database is a single ordinary disk file that can be located - anywhere in the directory hierarchy. If SQLite can read - the disk file then it can read anything in the database. If the disk - file and its directory are writable, then SQLite can change anything - in the database. Database files can easily be copied onto a USB - memory stick or emailed for sharing. -

    - Other SQL database engines tend to store data as a large collection of - files. Often these files are in a standard location that only the - database engine itself can access. This makes the data more secure, - but also makes it harder to access. Some SQL database engines provide - the option of writing directly to disk and bypassing the filesystem - all together. This provides added performance, but at the cost of - considerable setup and maintenance complexity. -} - -feature small {Compact} { - When optimized for size, the whole SQLite library with everything enabled - is less than 225KiB in size (as measured on an ix86 using the "size" - utility from the GNU compiler suite.) Unneeded features can be disabled - at compile-time to further reduce the size of the library to under - 170KiB if desired. -

    - Most other SQL database engines are much larger than this. IBM boasts - that it's recently released CloudScape database engine is "only" a 2MiB - jar file - 10 times larger than SQLite even after it is compressed! - Firebird boasts that it's client-side library is only 350KiB. That's - 50% larger than SQLite and does not even contain the database engine. - The Berkeley DB library from Sleepycat is 450KiB and it omits SQL - support, providing the programmer with only simple key/value pairs. -} - -feature typing {Manifest typing} { - Most SQL database engines use static typing. A datatype is associated - with each column in a table and only values of that particular datatype - are allowed to be stored in that column. SQLite relaxes this restriction - by using manifest typing. - In manifest typing, the datatype is a property of the value itself, not - of the column in which the value is stored. - SQLite thus allows the user to store - any value of any datatype into any column regardless of the declared type - of that column. (There are some exceptions to this rule: An INTEGER - PRIMARY KEY column may only store integers. And SQLite attempts to coerce - values into the declared datatype of the column when it can.) -

    - As far as we can tell, the SQL language specification allows the use - of manifest typing. Nevertheless, most other SQL database engines are - statically typed and so some people - feel that the use of manifest typing is a bug in SQLite. But the authors - of SQLite feel very strongly that this is a feature. The use of manifest - typing in SQLite is a deliberate design decision which has proven in practice - to make SQLite more reliable and easier to use, especially when used in - combination with dynamically typed programming languages such as Tcl and - Python. -} - -feature flex {Variable-length records} { - Most other SQL database engines allocated a fixed amount of disk space - for each row in most tables. They play special tricks for handling - BLOBs and CLOBs which can be of wildly varying length. But for most - tables, if you declare a column to be a VARCHAR(100) then the database - engine will allocate - 100 bytes of disk space regardless of how much information you actually - store in that column. -

    - SQLite, in contrast, uses only the amount of disk space actually - needed to store the information in a row. If you store a single - character in a VARCHAR(100) column, then only a single byte of disk - space is consumed. (Actually two bytes - there is some overhead at - the beginning of each column to record its datatype and length.) -

    - The use of variable-length records by SQLite has a number of advantages. - It results in smaller database files, obviously. It also makes the - database run faster, since there is less information to move to and from - disk. And, the use of variable-length records makes it possible for - SQLite to employ manifest typing instead of static typing. -} - -feature readable {Readable source code} { - The source code to SQLite is designed to be readable and accessible to - the average programmer. All procedures and data structures and many - automatic variables are carefully commented with useful information about - what they do. Boilerplate commenting is omitted. -} - -feature vdbe {SQL statements compile into virtual machine code} { - Every SQL database engine compiles each SQL statement into some kind of - internal data structure which is then used to carry out the work of the - statement. But in most SQL engines that internal data structure is a - complex web of interlinked structures and objects. In SQLite, the compiled - form of statements is a short program in a machine-language like - representation. Users of the database can view this - virtual machine language - by prepending the EXPLAIN keyword - to a query. -

    - The use of a virtual machine in SQLite has been a great benefit to - library's development. The virtual machine provides a crisp, well-defined - junction between the front-end of SQLite (the part that parses SQL - statements and generates virtual machine code) and the back-end (the - part that executes the virtual machine code and computes a result.) - The virtual machine allows the developers to see clearly and in an - easily readable form what SQLite is trying to do with each statement - it compiles, which is a tremendous help in debugging. - Depending on how it is compiled, SQLite also has the capability of - tracing the execution of the virtual machine - printing each - virtual machine instruction and its result as it executes. -} - -#feature binding {Tight bindings to dynamic languages} { -# Because it is embedded, SQLite can have a much tighter and more natural -# binding to high-level dynamic languages such as Tcl, Perl, Python, -# PHP, and Ruby. -# For example, -#} - -feature license {Public domain} { - The source code for SQLite is in the public domain. No claim of copyright - is made on any part of the core source code. (The documentation and test - code is a different matter - some sections of documentation and test logic - are governed by open-sources licenses.) All contributors to the - SQLite core software have signed affidavits specifically disavowing any - copyright interest in the code. This means that anybody is able to legally - do anything they want with the SQLite source code. -

    - There are other SQL database engines with liberal licenses that allow - the code to be broadly and freely used. But those other engines are - still governed by copyright law. SQLite is different in that copyright - law simply does not apply. -

    - The source code files for other SQL database engines typically begin - with a comment describing your license rights to view and copy that file. - The SQLite source code contains no license since it is not governed by - copyright. Instead of a license, the SQLite source code offers a blessing: -

    - May you do good and not evil
    - May you find forgiveness for yourself and forgive others
    - May you share freely, never taking more than you give.
    -
    -} - -feature extensions {SQL language extensions} { - SQLite provides a number of enhancements to the SQL language - not normally found in other database engines. - The EXPLAIN keyword and manifest typing have already been mentioned - above. SQLite also provides statements such as - REPLACE and the - ON CONFLICT clause that allow for - added control over the resolution of constraint conflicts. - SQLite supports ATTACH and - DETACH commands that allow multiple - independent databases to be used together in the same query. - And SQLite defines APIs that allows the user to add new - SQL functions - and collating sequences. -} - - -footer $rcsid DELETED direct1b.gif Index: direct1b.gif ================================================================== --- direct1b.gif +++ /dev/null cannot compute difference between binary files DELETED docs.tcl Index: docs.tcl ================================================================== --- docs.tcl +++ /dev/null @@ -1,159 +0,0 @@ -# This script generates the "docs.html" page that describes various -# sources of documentation available for SQLite. -# -set rcsid {$Id: docs.tcl,v 1.15 2007/10/04 00:29:29 drh Exp $} -source common.tcl -header {SQLite Documentation} -puts { -

    Available Documentation

    - -} - -proc doc {name url desc} { - puts {" - puts {} - puts {} -} - -doc {Appropriate Uses For SQLite} {whentouse.html} { - This document describes situations where SQLite is an approriate - database engine to use versus situations where a client/server - database engine might be a better choice. -} - -doc {Distinctive Features} {different.html} { - This document enumerates and describes some of the features of - SQLite that make it different from other SQL database engines. -} - -doc {SQLite In 5 Minutes Or Less} {quickstart.html} { - A very quick introduction to programming with SQLite. -} - -doc {SQL Syntax} {lang.html} { - This document describes the SQL language that is understood by - SQLite. -} -doc {Version 3 C/C++ API
    Reference} {capi3ref.html} { - This document describes each API function separately. -} -doc {Sharing Cache Mode} {sharedcache.html} { - Version 3.3.0 and later supports the ability for two or more - database connections to share the same page and schema cache. - This feature is useful for certain specialized applications. -} -doc {Tcl API} {tclsqlite.html} { - A description of the TCL interface bindings for SQLite. -} - -doc {How SQLite Implements Atomic Commit} {ac/atomiccommit.html} { - A description of the logic within SQLite that implements - transactions with atomic commit, even in the face of power - failures. -} -doc {Moving From SQLite 3.4 to 3.5} {34to35.html} { - A document describing the differences between SQLite version 3.4.2 - and 3.5.0. -} - -doc {Pragma commands} {pragma.html} { - This document describes SQLite performance tuning options and other - special purpose database commands. -} -doc {SQLite Version 3} {version3.html} { - A summary of of the changes between SQLite version 2.8 and SQLite version 3.0. -} -doc {Version 3 C/C++ API} {capi3.html} { - A description of the C/C++ interface bindings for SQLite version 3.0.0 - and following. -} -doc {Version 3 DataTypes } {datatype3.html} { - SQLite version 3 introduces the concept of manifest typing, where the - type of a value is associated with the value itself, not the column that - it is stored in. - This page describes data typing for SQLite version 3 in further detail. -} - -doc {Locking And Concurrency
    In SQLite Version 3} {lockingv3.html} { - A description of how the new locking code in version 3 increases - concurrancy and decreases the problem of writer starvation. -} - -doc {Overview Of The Optimizer} {optoverview.html} { - A quick overview of the various query optimizations that are - attempted by the SQLite code generator. -} - - -doc {Null Handling} {nulls.html} { - Different SQL database engines handle NULLs in different ways. The - SQL standards are ambiguous. This document describes how SQLite handles - NULLs in comparison with other SQL database engines. -} - -doc {Copyright} {copyright.html} { - SQLite is in the public domain. This document describes what that means - and the implications for contributors. -} - -doc {Unsupported SQL} {omitted.html} { - This page describes features of SQL that SQLite does not support. -} - -doc {Version 2 C/C++ API} {c_interface.html} { - A description of the C/C++ interface bindings for SQLite through version - 2.8 -} - - -doc {Version 2 DataTypes } {datatypes.html} { - A description of how SQLite version 2 handles SQL datatypes. - Short summary: Everything is a string. -} - -doc {Release History} {changes.html} { - A chronology of SQLite releases going back to version 1.0.0 -} - - -doc {Speed Comparison} {speed.html} { - The speed of version 2.7.6 of SQLite is compared against PostgreSQL and - MySQL. -} - -doc {Architecture} {arch.html} { - An architectural overview of the SQLite library, useful for those who want - to hack the code. -} - -doc {VDBE Tutorial} {vdbe.html} { - The VDBE is the subsystem within SQLite that does the actual work of - executing SQL statements. This page describes the principles of operation - for the VDBE in SQLite version 2.7. This is essential reading for anyone - who want to modify the SQLite sources. -} - -doc {VDBE Opcodes} {opcode.html} { - This document is an automatically generated description of the various - opcodes that the VDBE understands. 
Programmers can use this document as - a reference to better understand the output of EXPLAIN listings from - SQLite. -} - -doc {Compilation Options} {compile.html} { - This document describes the compile time options that may be set to - modify the default behaviour of the library or omit optional features - in order to reduce binary size. -} - -doc {Backwards Compatibility} {formatchng.html} { - This document details all of the incompatible changes to the SQLite - file format that have occurred since version 1.0.0. -} - -puts {
    } - regsub -all { +} $name {\ } name - puts "$name} - puts $desc - puts {
    } -footer $rcsid DELETED download.tcl Index: download.tcl ================================================================== --- download.tcl +++ /dev/null @@ -1,236 +0,0 @@ -# -# Run this TCL script to generate HTML for the download.html file. -# -set rcsid {$Id: download.tcl,v 1.27 2007/05/08 18:30:36 drh Exp $} -source common.tcl -header {SQLite Download Page} - -puts { -

    SQLite Download Page

    - -} - -proc Product {pattern desc} { - regsub {V[23]} $pattern {*} p3 - regsub V2 $pattern {(2[0-9a-z._]+)} pattern - regsub V3 $pattern {(3[0-9a-z._]+)} pattern - set p2 [string map {* .*} $pattern] - set flist [glob -nocomplain $p3] - foreach file [lsort -dict $flist] { - if {![regexp ^$p2\$ $file all version]} continue - regsub -all _ $version . version - set size [file size $file] - set units bytes - if {$size>1024*1024} { - set size [format %.2f [expr {$size/(1024.0*1024.0)}]] - set units MiB - } elseif {$size>1024} { - set size [format %.2f [expr {$size/(1024.0)}]] - set units KiB - } - puts "" - puts "" - puts "" - regsub -all VERSION $desc $version d2 - puts "" - } -} -cd doc - -proc Heading {title} { - puts "" -} - -Heading {Precompiled Binaries for Linux} - -Product sqlite3-V3.bin.gz { - A command-line program for accessing and modifying - SQLite version 3.* databases. - See the documentation for additional information. -} - -Product sqlite-V3.bin.gz { - A command-line program for accessing and modifying - SQLite databases. - See the documentation for additional information. -} - -Product tclsqlite-V3.so.gz { - Bindings for Tcl/Tk. - You can import this shared library into either - tclsh or wish to get SQLite database access from Tcl/Tk. - See the documentation for details. -} - -Product sqlite-V3.so.gz { - A precompiled shared-library for Linux without the TCL bindings. -} - -Product fts1-V3.so.gz { - A precompiled - FTS1 Module - for Linux. -} - -Product fts2-V3.so.gz { - A precompiled - FTS2 Module - for Linux. -} - -Product sqlite-devel-V3.i386.rpm { - RPM containing documentation, header files, and static library for - SQLite version VERSION. -} -Product sqlite-V3-1.i386.rpm { - RPM containing shared libraries and the sqlite command-line - program for SQLite version VERSION. -} - -Product sqlite*_analyzer-V3.bin.gz { - An analysis program for database files compatible with SQLite - version VERSION and later. 
-} - -Heading {Precompiled Binaries For Windows} - -Product sqlite-V3.zip { - A command-line program for accessing and modifing SQLite databases. - See the documentation for additional information. -} -Product tclsqlite-V3.zip { - Bindings for Tcl/Tk. - You can import this shared library into either - tclsh or wish to get SQLite database access from Tcl/Tk. - See the documentation for details. -} -Product sqlitedll-V3.zip { - This is a DLL of the SQLite library without the TCL bindings. - The only external dependency is MSVCRT.DLL. -} - -Product fts1dll-V3.zip { - A precompiled - FTS1 Module - for win32. -} - -Product fts2dll-V3.zip { - A precompiled - FTS2 Module - for win32. -} - -Product sqlite*_analyzer-V3.zip { - An analysis program for database files compatible with SQLite version - VERSION and later. -} - - -Heading {Source Code} - -Product {sqlite-V3.tar.gz} { - A tarball of the complete source tree for SQLite version VERSION - including all of the documentation. -} - -Product {sqlite-source-V3.zip} { - This ZIP archive contains preprocessed C code for the SQLite library as - individual source files. - Unlike the tarballs below, all of the preprocessing and automatic - code generation has already been done on these C code files, so they - can be converted to object code directly with any ordinary C compiler. -} - -Product {sqlite-amalgamation-V3.zip} { - This ZIP archive contains all preprocessed C code combined into a - single source file (the - - amalgamation). -} - -Product {sqlite-V3-tea.tar.gz} { - A tarball of proprocessed source code together with a - Tcl Extension Architecture (TEA) - compatible configure script and makefile. -} - -Product {sqlite-V3.src.rpm} { - An RPM containing complete source code for SQLite version VERSION -} - -Heading {Cross-Platform Binaries} - -Product {sqlite-V3.kit} { - A starkit containing - precompiled SQLite binaries and Tcl bindings for Linux-x86, Windows, - and Mac OS-X ppc and x86. 
-} - -Heading {Historical Binaries And Source Code} - -Product sqlite-V2.bin.gz { - A command-line program for accessing and modifying - SQLite version 2.* databases on Linux-x86. -} -Product sqlite-V2.zip { - A command-line program for accessing and modifying - SQLite version 2.* databases on win32. -} - -Product sqlite*_analyzer-V2.bin.gz { - An analysis program for version 2.* database files on Linux-x86 -} -Product sqlite*_analyzer-V2.zip { - An analysis program for version 2.* database files on win32. -} -Product {sqlite-source-V2.zip} { - This ZIP archive contains C source code for the SQLite library - version VERSION. -} - - - - -puts { -
    " - puts "$file
    ($size $units)
    [string trim $d2]
    $title
    - - -

    Direct Access To The Sources Via Anonymous CVS

    - -

    -All SQLite source code is maintained in a -CVS repository that is -available for read-only access by anyone. You can -interactively view the -repository contents and download individual files -by visiting - -http://www.sqlite.org/cvstrac/dir?d=sqlite. -To access the repository directly, use the following -commands: -

    - -
    -cvs -d :pserver:anonymous@www.sqlite.org:/sqlite login
    -cvs -d :pserver:anonymous@www.sqlite.org:/sqlite checkout sqlite
    -
    - -

    -When the first command prompts you for a password, enter "anonymous". -

    - -

    -To access the SQLite version 2.8 sources, begin by getting the 3.0 -tree as described above. Then update to the "version_2" branch -as follows: -

    - -
    -cvs update -r version_2
    -
    - -} - -footer $rcsid DELETED dynload.tcl Index: dynload.tcl ================================================================== --- dynload.tcl +++ /dev/null @@ -1,70 +0,0 @@ -# -# Run this Tcl script to generate the dynload.html file. -# -set rcsid {$Id: dynload.tcl,v 1.1 2001/02/11 16:58:22 drh Exp $} - -puts { - - How to build a dynamically loaded Tcl extension for SQLite - - -

    -How To Build A Dynamically Loaded Tcl Extension -

    } -puts {

    -This note was contributed by -Bill Saunders. Thanks, Bill! - -

    -To compile the SQLite Tcl extension into a dynamically loaded module -I did the following: -

    - -
      -
    1. Do a standard compile -(I had a dir called bld at the same level as sqlite ie - /root/bld - /root/sqlite -I followed the directions and did a standard build in the bld -directory)

    2. - -
    3. -Now do the following in the bld directory -

      -gcc -shared -I. -lgdbm ../sqlite/src/tclsqlite.c libsqlite.a -o sqlite.so
      -

    4. - -
    5. -This should produce the file sqlite.so in the bld directory

    6. - -
    7. -Create a pkgIndex.tcl file that contains this line - -

      -package ifneeded sqlite 1.0 [list load [file join $dir sqlite.so]]
      -

    8. - -
    9. -To use this put sqlite.so and pkgIndex.tcl in the same directory

    10. - -
    11. -From that directory start wish

    12. - -
    13. -Execute the following tcl command (tells tcl where to fine loadable -modules) -

      -lappend auto_path [exec pwd]
      -

    14. - -
    15. -Load the package -

      -package require sqlite
      -

    16. - -
    17. -Have fun....

    18. - - -} DELETED faq.tcl Index: faq.tcl ================================================================== --- faq.tcl +++ /dev/null @@ -1,463 +0,0 @@ -# -# Run this script to generated a faq.html output file -# -set rcsid {$Id: faq.tcl,v 1.40 2007/09/04 01:58:27 drh Exp $} -source common.tcl -header {SQLite Frequently Asked Questions} - -set cnt 1 -proc faq {question answer} { - set ::faq($::cnt) [list [string trim $question] [string trim $answer]] - incr ::cnt -} - -############# -# Enter questions and answers here. - -faq { - How do I create an AUTOINCREMENT field. -} { -

      Short answer: A column declared INTEGER PRIMARY KEY will - autoincrement.

      - -

      Here is the long answer: - If you declare a column of a table to be INTEGER PRIMARY KEY, then - whenever you insert a NULL - into that column of the table, the NULL is automatically converted - into an integer which is one greater than the largest value of that - column over all other rows in the table, or 1 if the table is empty. - (If the largest possible integer key, 9223372036854775807, then an - unused key value is chosen at random.) - For example, suppose you have a table like this: -

      -CREATE TABLE t1(
      -  a INTEGER PRIMARY KEY,
      -  b INTEGER
      -);
      -
      -

      With this table, the statement

      -
      -INSERT INTO t1 VALUES(NULL,123);
      -
      -

      is logically equivalent to saying:

      -
      -INSERT INTO t1 VALUES((SELECT max(a) FROM t1)+1,123);
      -
      - -

      There is a new API function named - - sqlite3_last_insert_rowid() which will return the integer key - for the most recent insert operation.

      - -

      Note that the integer key is one greater than the largest - key that was in the table just prior to the insert. The new key - will be unique over all keys currently in the table, but it might - overlap with keys that have been previously deleted from the - table. To create keys that are unique over the lifetime of the - table, add the AUTOINCREMENT keyword to the INTEGER PRIMARY KEY - declaration. Then the key chosen will be one more than than the - largest key that has ever existed in that table. If the largest - possible key has previously existed in that table, then the INSERT - will fail with an SQLITE_FULL error code.

      -} - -faq { - What datatypes does SQLite support? -} { -

      See http://www.sqlite.org/datatype3.html.

      -} - -faq { - SQLite lets me insert a string into a database column of type integer! -} { -

      This is a feature, not a bug. SQLite does not enforce data type - constraints. Any data can be - inserted into any column. You can put arbitrary length strings into - integer columns, floating point numbers in boolean columns, or dates - in character columns. The datatype you assign to a column in the - CREATE TABLE command does not restrict what data can be put into - that column. Every column is able to hold - an arbitrary length string. (There is one exception: Columns of - type INTEGER PRIMARY KEY may only hold a 64-bit signed integer. - An error will result - if you try to put anything other than an integer into an - INTEGER PRIMARY KEY column.)

      - -

      But SQLite does use the declared type of a column as a hint - that you prefer values in that format. So, for example, if a - column is of type INTEGER and you try to insert a string into - that column, SQLite will attempt to convert the string into an - integer. If it can, it inserts the integer instead. If not, - it inserts the string. This feature is sometimes - call type or column affinity. -

      -} - -faq { - Why doesn't SQLite allow me to use '0' and '0.0' as the primary - key on two different rows of the same table? -} { -

      Your primary key must have a numeric type. Change the datatype of - your primary key to TEXT and it should work.

      - -

      Every row must have a unique primary key. For a column with a - numeric type, SQLite thinks that '0' and '0.0' are the - same value because they compare equal to one another numerically. - (See the previous question.) Hence the values are not unique.

      -} - - -faq { - Can multiple applications or multiple instances of the same - application access a single database file at the same time? -} { -

      Multiple processes can have the same database open at the same - time. Multiple processes can be doing a SELECT - at the same time. But only one process can be making changes to - the database at any moment in time, however.

      - -

      SQLite uses reader/writer locks to control access to the database. - (Under Win95/98/ME which lacks support for reader/writer locks, a - probabilistic simulation is used instead.) - But use caution: this locking mechanism might - not work correctly if the database file is kept on an NFS filesystem. - This is because fcntl() file locking is broken on many NFS implementations. - You should avoid putting SQLite database files on NFS if multiple - processes might try to access the file at the same time. On Windows, - Microsoft's documentation says that locking may not work under FAT - filesystems if you are not running the Share.exe daemon. People who - have a lot of experience with Windows tell me that file locking of - network files is very buggy and is not dependable. If what they - say is true, sharing an SQLite database between two or more Windows - machines might cause unexpected problems.

      - -

      We are aware of no other embedded SQL database engine that - supports as much concurrancy as SQLite. SQLite allows multiple processes - to have the database file open at once, and for multiple processes to - read the database at once. When any process wants to write, it must - lock the entire database file for the duration of its update. But that - normally only takes a few milliseconds. Other processes just wait on - the writer to finish then continue about their business. Other embedded - SQL database engines typically only allow a single process to connect to - the database at once.

      - -

      However, client/server database engines (such as PostgreSQL, MySQL, - or Oracle) usually support a higher level of concurrency and allow - multiple processes to be writing to the same database at the same time. - This is possible in a client/server database because there is always a - single well-controlled server process available to coordinate access. - If your application has a need for a lot of concurrency, then you should - consider using a client/server database. But experience suggests that - most applications need much less concurrency than their designers imagine. -

      - -

      When SQLite tries to access a file that is locked by another - process, the default behavior is to return SQLITE_BUSY. You can - adjust this behavior from C code using the - sqlite3_busy_handler() or - sqlite3_busy_timeout() - API functions.

      -} - -faq { - Is SQLite threadsafe? -} { -

      Yes. Sometimes. In order to be thread-safe, SQLite must be compiled - with the SQLITE_THREADSAFE preprocessor macro set to 1. Both the windows - and linux precompiled binaries in the distribution are compiled this way. - If you are unsure if the SQLite library you are linking against is compiled - to be threadsafe you can call the - sqlite3_threadsafe() - interface to find out. -

      - -

      Prior to version 3.3.1, - an sqlite3 structure could only be used in the same thread - that called sqlite3_open - to create it. - You could not open a - database in one thread then pass the handle off to another thread for - it to use. This was due to limitations (bugs?) in many common threading - implementations such as on RedHat9. Specifically, an fcntl() lock - created by one thread cannot be removed or modified by a different - thread on the troublesome systems. And since SQLite uses fcntl() - locks heavily for concurrency control, serious problems arose if you - start moving database connections across threads.

      - -

      The restriction on moving database connections across threads - was relaxed somewhat in version 3.3.1. With that and subsequent - versions, it is safe to move a connection handle across threads - as long as the connection is not holding any fcntl() locks. You - can safely assume that no locks are being held if no - transaction is pending and all statements have been finalized.

      - -

      Under UNIX, you should not carry an open SQLite database across - a fork() system call into the child process. Problems will result - if you do.

      -} - -faq { - How do I list all tables/indices contained in an SQLite database -} { -

      If you are running the sqlite3 command-line access program - you can type ".tables" to get a list of all tables. Or you - can type ".schema" to see the complete database schema including - all tables and indices. Either of these commands can be followed by - a LIKE pattern that will restrict the tables that are displayed.

      - -

      From within a C/C++ program (or a script using Tcl/Ruby/Perl/Python - bindings) you can get access to table and index names by doing a SELECT - on a special table named "SQLITE_MASTER". Every SQLite database - has an SQLITE_MASTER table that defines the schema for the database. - The SQLITE_MASTER table looks like this:

      -
      -CREATE TABLE sqlite_master (
      -  type TEXT,
      -  name TEXT,
      -  tbl_name TEXT,
      -  rootpage INTEGER,
      -  sql TEXT
      -);
      -
      -

      For tables, the type field will always be 'table' and the - name field will be the name of the table. So to get a list of - all tables in the database, use the following SELECT command:

      -
      -SELECT name FROM sqlite_master
      -WHERE type='table'
      -ORDER BY name;
      -
      -

      For indices, type is equal to 'index', name is the - name of the index and tbl_name is the name of the table to which - the index belongs. For both tables and indices, the sql field is - the text of the original CREATE TABLE or CREATE INDEX statement that - created the table or index. For automatically created indices (used - to implement the PRIMARY KEY or UNIQUE constraints) the sql field - is NULL.

      - -

      The SQLITE_MASTER table is read-only. You cannot change this table - using UPDATE, INSERT, or DELETE. The table is automatically updated by - CREATE TABLE, CREATE INDEX, DROP TABLE, and DROP INDEX commands.

      - -

      Temporary tables do not appear in the SQLITE_MASTER table. Temporary - tables and their indices and triggers occur in another special table - named SQLITE_TEMP_MASTER. SQLITE_TEMP_MASTER works just like SQLITE_MASTER - except that it is only visible to the application that created the - temporary tables. To get a list of all tables, both permanent and - temporary, one can use a command similar to the following: -

      -SELECT name FROM 
      -   (SELECT * FROM sqlite_master UNION ALL
      -    SELECT * FROM sqlite_temp_master)
      -WHERE type='table'
      -ORDER BY name
      -
      -} - -faq { - Are there any known size limits to SQLite databases? -} { -

      See limits.html for a full discussion of - the limits of SQLite.

      -} - -faq { - What is the maximum size of a VARCHAR in SQLite? -} { -

      SQLite does not enforce the length of a VARCHAR. You can declare - a VARCHAR(10) and SQLite will be happy to let you put 500 characters - in it. And it will keep all 500 characters intact - it never truncates. -

      -} - -faq { - Does SQLite support a BLOB type? -} { -

      SQLite versions 3.0 and later allow you to store BLOB data in any - column, even columns that are declared to hold some other type.

      -} - -faq { - How do I add or delete columns from an existing table in SQLite. -} { -

      SQLite has limited - ALTER TABLE support that you can - use to add a column to the end of a table or to change the name of - a table. - If you what make more complex changes the structure of a table, - you will have to recreate the - table. You can save existing data to a temporary table, drop the - old table, create the new table, then copy the data back in from - the temporary table.

      - -

      For example, suppose you have a table named "t1" with columns - names "a", "b", and "c" and that you want to delete column "c" from - this table. The following steps illustrate how this could be done: -

      - -
      -BEGIN TRANSACTION;
      -CREATE TEMPORARY TABLE t1_backup(a,b);
      -INSERT INTO t1_backup SELECT a,b FROM t1;
      -DROP TABLE t1;
      -CREATE TABLE t1(a,b);
      -INSERT INTO t1 SELECT a,b FROM t1_backup;
      -DROP TABLE t1_backup;
      -COMMIT;
      -
      -} - -faq { - I deleted a lot of data but the database file did not get any - smaller. Is this a bug? -} { -

      No. When you delete information from an SQLite database, the - unused disk space is added to an internal "free-list" and is reused - the next time you insert data. The disk space is not lost. But - neither is it returned to the operating system.

      - -

      If you delete a lot of data and want to shrink the database file, - run the VACUUM command. - VACUUM will reconstruct - the database from scratch. This will leave the database with an empty - free-list and a file that is minimal in size. Note, however, that the - VACUUM can take some time to run (around a half second per megabyte - on the Linux box where SQLite is developed) and it can use up to twice - as much temporary disk space as the original file while it is running. -

      - -

      As of SQLite version 3.1, an alternative to using the VACUUM command - is auto-vacuum mode, enabled using the - auto_vacuum pragma.

      -} - -faq { - Can I use SQLite in my commercial product without paying royalties? -} { -

      Yes. SQLite is in the - public domain. No claim of ownership is made - to any part of the code. You can do anything you want with it.

      -} - -faq { - How do I use a string literal that contains an embedded single-quote (') - character? -} { -

      The SQL standard specifies that single-quotes in strings are escaped - by putting two single quotes in a row. SQL works like the Pascal programming - language in the regard. SQLite follows this standard. Example: -

      - -
      -    INSERT INTO xyz VALUES('5 O''clock');
      -  
      -} - -faq {What is an SQLITE_SCHEMA error, and why am I getting one?} { -

      An SQLITE_SCHEMA error is returned when a - prepared SQL statement is no longer valid and cannot be executed. - When this occurs, the statement must be recompiled from SQL using - the - sqlite3_prepare() API. - In SQLite version 3, an SQLITE_SCHEMA error can - only occur when using the - sqlite3_prepare()/sqlite3_step()/sqlite3_finalize() - API to execute SQL, not when using the - sqlite3_exec(). This was not - the case in version 2.

      - -

      The most common reason for a prepared statement to become invalid - is that the schema of the database was modified after the SQL was - prepared (possibly by another process). The other reasons this can - happen are:

      -
        -
      • A database was DETACHed. -
      • The database was VACUUMed -
      • A user-function definition was deleted or changed. -
      • A collation sequence definition was deleted or changed. -
      • The authorization function was changed. -
      - -

      In all cases, the solution is to recompile the statement from SQL - and attempt to execute it again. Because a prepared statement can be - invalidated by another process changing the database schema, all code - that uses the - sqlite3_prepare()/sqlite3_step()/sqlite3_finalize() - API should be prepared to handle SQLITE_SCHEMA errors. An example - of one approach to this follows:

      - -
      -
      -    int rc;
      -    sqlite3_stmt *pStmt;
      -    char zSql[] = "SELECT .....";
      -
      -    do {
      -      /* Compile the statement from SQL. Assume success. */
      -      sqlite3_prepare(pDb, zSql, -1, &pStmt, 0);
      -
      -      while( SQLITE_ROW==sqlite3_step(pStmt) ){
      -        /* Do something with the row of available data */
      -      }
      -
      -      /* Finalize the statement. If an SQLITE_SCHEMA error has
      -      ** occured, then the above call to sqlite3_step() will have
      -      ** returned SQLITE_ERROR. sqlite3_finalize() will return
      -      ** SQLITE_SCHEMA. In this case the loop will execute again.
      -      */
      -      rc = sqlite3_finalize(pStmt);
      -    } while( rc==SQLITE_SCHEMA );
      -    
      -  
      -} - -faq {Why does ROUND(9.95,1) return 9.9 instead of 10.0? - Shouldn't 9.95 round up?} { -

      SQLite uses binary arithmetic and in binary, there is no - way to write 9.95 in a finite number of bits. The closest to - you can get to 9.95 in a 64-bit IEEE float (which is what - SQLite uses) is 9.949999999999999289457264239899814128875732421875. - So when you type "9.95", SQLite really understands the number to be - the much longer value shown above. And that value rounds down.

      - -

      This kind of problem comes up all the time when dealing with - floating point binary numbers. The general rule to remember is - that most fractional numbers that have a finite representation in decimal - (a.k.a "base-10") - do not have a finite representation in binary (a.k.a "base-2"). - And so they are - approximated using the closest binary number available. That - approximation is usually very close, but it will be slightly off - and in some cases can cause your results to be a little different - from what you might expect.

      -} - -# End of questions and answers. -############# - -puts {

      Frequently Asked Questions

      } - -# puts {
      } -# for {set i 1} {$i<$cnt} {incr i} { -# puts "
      ($i)
      " -# puts "
      [lindex $faq($i) 0]
      " -# } -# puts {
      } -puts {
        } -for {set i 1} {$i<$cnt} {incr i} { - puts "
      1. [lindex $faq($i) 0]
      2. " -} -puts {
      } - -for {set i 1} {$i<$cnt} {incr i} { - puts "
      " - puts "

      ($i) [lindex $faq($i) 0]

      \n" - puts "
      [lindex $faq($i) 1]
      \n" -} - -puts {
    } -footer $rcsid DELETED fileformat.tcl Index: fileformat.tcl ================================================================== --- fileformat.tcl +++ /dev/null @@ -1,785 +0,0 @@ -# -# Run this script to generated a fileformat.html output file -# -set rcsid {$Id: fileformat.tcl,v 1.13 2004/10/10 17:24:55 drh Exp $} -source common.tcl -header {SQLite Database File Format (Version 2)} -puts { -

    SQLite 2.X Database File Format

    - -

    -This document describes the disk file format for SQLite versions 2.1 -through 2.8. SQLite version 3.0 and following uses a very different -format which is described separately. -

    - -

    1.0   Layers

    - -

    -SQLite is implemented in layers. -(See the architecture description.) -The format of database files is determined by three different -layers in the architecture. -

    - -
      -
    • The schema layer implemented by the VDBE.
    • -
    • The b-tree layer implemented by btree.c
    • -
    • The pager layer implemented by pager.c
    • -
    - -

    -We will describe each layer beginning with the bottom (pager) -layer and working upwards. -

    - -

    2.0   The Pager Layer

    - -

    -An SQLite database consists of -"pages" of data. Each page is 1024 bytes in size. -Pages are numbered beginning with 1. -A page number of 0 is used to indicate "no such page" in the -B-Tree and Schema layers. -

    - -

    -The pager layer is responsible for implementing transactions -with atomic commit and rollback. It does this using a separate -journal file. Whenever a new transaction is started, a journal -file is created that records the original state of the database. -If the program terminates before completing the transaction, the next -process to open the database can use the journal file to restore -the database to its original state. -

    - -

    -The journal file is located in the same directory as the database -file and has the same name as the database file but with the -characters "-journal" appended. -

    - -

    -The pager layer does not impose any content restrictions on the -main database file. As far as the pager is concerned, each page -contains 1024 bytes of arbitrary data. But there is structure to -the journal file. -

    - -

    -A journal file begins with 8 bytes as follows: -0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd6. -Processes that are attempting to rollback a journal use these 8 bytes -as a sanity check to make sure the file they think is a journal really -is a valid journal. Prior version of SQLite used different journal -file formats. The magic numbers for these prior formats are different -so that if a new version of the library attempts to rollback a journal -created by an earlier version, it can detect that the journal uses -an obsolete format and make the necessary adjustments. This article -describes only the newest journal format - supported as of version -2.8.0. -

    - -

    -Following the 8 byte prefix is a three 4-byte integers that tell us -the number of pages that have been committed to the journal, -a magic number used for -sanity checking each page, and the -original size of the main database file before the transaction was -started. The number of committed pages is used to limit how far -into the journal to read. The use of the checksum magic number is -described below. -The original size of the database is used to restore the database -file back to its original size. -The size is expressed in pages (1024 bytes per page). -

    - -

    -All three integers in the journal header and all other multi-byte -numbers used in the journal file are big-endian. -That means that the most significant byte -occurs first. That way, a journal file that is -originally created on one machine can be rolled back by another -machine that uses a different byte order. So, for example, a -transaction that failed to complete on your big-endian SparcStation -can still be rolled back on your little-endian Linux box. -

    - -

    -After the 8-byte prefix and the three 4-byte integers, the -journal file consists of zero or more page records. Each page -record is a 4-byte (big-endian) page number followed by 1024 bytes -of data and a 4-byte checksum. -The data is the original content of the database page -before the transaction was started. So to roll back the transaction, -the data is simply written into the corresponding page of the -main database file. Pages can appear in the journal in any order, -but they are guaranteed to appear only once. All page numbers will be -between 1 and the maximum specified by the page size integer that -appeared at the beginning of the journal. -

    - -

    -The so-called checksum at the end of each record is not really a -checksum - it is the sum of the page number and the magic number which -was the second integer in the journal header. The purpose of this -value is to try to detect journal corruption that might have occurred -because of a power loss or OS crash that occurred which the journal -file was being written to disk. It could have been the case that the -meta-data for the journal file, specifically the size of the file, had -been written to the disk so that when the machine reboots it appears that -file is large enough to hold the current record. But even though the -file size has changed, the data for the file might not have made it to -the disk surface at the time of the OS crash or power loss. This means -that after reboot, the end of the journal file will contain quasi-random -garbage data. The checksum is an attempt to detect such corruption. If -the checksum does not match, that page of the journal is not rolled back. -

    - -

    -Here is a summary of the journal file format: -

    - -
      -
    • 8 byte prefix: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd6
    • -
    • 4 byte number of records in journal
    • -
    • 4 byte magic number used for page checksums
    • -
    • 4 byte initial database page count
    • -
    • Zero or more instances of the following: -
        -
      • 4 byte page number
      • -
      • 1024 bytes of original data for the page
      • -
      • 4 byte checksum
      • -
      -
    • -
    - -

    3.0   The B-Tree Layer

    - -

    -The B-Tree layer builds on top of the pager layer to implement -one or more separate b-trees all in the same disk file. The -algorithms used are taken from Knuth's The Art Of Computer -Programming.

    - -

    -Page 1 of a database contains a header string used for sanity -checking, a few 32-bit words of configuration data, and a pointer -to the beginning of a list of unused pages in the database. -All other pages in the -database are either pages of a b-tree, overflow pages, or unused -pages on the freelist. -

    - -

    -Each b-tree page contains zero or more database entries. -Each entry has an unique key of one or more bytes and data of -zero or more bytes. -Both the key and data are arbitrary byte sequences. The combination -of key and data are collectively known as "payload". The current -implementation limits the amount of payload in a single entry to -1048576 bytes. This limit can be raised to 16777216 by adjusting -a single #define in the source code and recompiling. But most entries -contain less than a hundred bytes of payload so a megabyte limit seems -more than enough. -

    - -

    -Up to 238 bytes of payload for an entry can be held directly on -a b-tree page. Any additional payload is contained on a linked list -of overflow pages. This limit on the amount of payload held directly -on b-tree pages guarantees that each b-tree page can hold at least -4 entries. In practice, most entries are smaller than 238 bytes and -thus most pages can hold more than 4 entries. -

    - -

    -A single database file can hold any number of separate, independent b-trees. -Each b-tree is identified by its root page, which never changes. -Child pages of the b-tree may change as entries are added and removed -and pages split and combine. But the root page always stays the same. -The b-tree itself does not record which pages are root pages and which -are not. That information is handled entirely at the schema layer. -

    - -

    3.1   B-Tree Page 1 Details

    - -

    -Page 1 begins with the following 48-byte string: -

    - -
    -** This file contains an SQLite 2.1 database **
    -
    - -

    -If you count the number of characters in the string above, you will -see that there are only 47. A '\000' terminator byte is added to -bring the total to 48. -

    - -

    -A frequent question is why the string says version 2.1 when (as -of this writing) we are up to version 2.7.0 of SQLite and any -change to the second digit of the version is supposed to represent -a database format change. The answer to this is that the B-tree -layer has not changed any since version 2.1. There have been -database format changes since version 2.1 but those changes have -all been in the schema layer. Because the format of the b-tree -layer is unchanged since version 2.1.0, the header string still -says version 2.1. -

    - -

    -After the format string is a 4-byte integer used to determine the -byte-order of the database. The integer has a value of -0xdae37528. If this number is expressed as 0xda, 0xe3, 0x75, 0x28, then -the database is in a big-endian format and all 16 and 32-bit integers -elsewhere in the b-tree layer are also big-endian. If the number is -expressed as 0x28, 0x75, 0xe3, and 0xda, then the database is in a -little-endian format and all other multi-byte numbers in the b-tree -layer are also little-endian. -Prior to version 2.6.3, the SQLite engine was only able to read databases -that used the same byte order as the processor they were running on. -But beginning with 2.6.3, SQLite can read or write databases in any -byte order. -

    - -

    -After the byte-order code are six 4-byte integers. Each integer is in the -byte order determined by the byte-order code. The first integer is the -page number for the first page of the freelist. If there are no unused -pages in the database, then this integer is 0. The second integer is -the number of unused pages in the database. The last 4 integers are -not used by the b-tree layer. These are the so-called "meta" values that -are passed up to the schema layer -and used there for configuration and format version information. -All bytes of page 1 past beyond the meta-value integers are unused -and are initialized to zero. -

    - -

    -Here is a summary of the information contained on page 1 in the b-tree layer: -

    - -
      -
    • 48 byte header string
    • -
    • 4 byte integer used to determine the byte-order
    • -
    • 4 byte integer which is the first page of the freelist
    • -
    • 4 byte integer which is the number of pages on the freelist
    • -
    • 36 bytes of meta-data arranged as nine 4-byte integers
    • -
    • 928 bytes of unused space
    • -
    - -

    3.2   Structure Of A Single B-Tree Page

    - -

    -Conceptually, a b-tree page contains N database entries and N+1 pointers -to other b-tree pages. -

    - -
    - - - - - - - - - - - -
    Ptr
    0
    Entry
    0
    Ptr
    1
    Entry
    1
    ...Ptr
    N-1
    Entry
    N-1
    Ptr
    N
    -
    - -

    -The entries are arranged in increasing order. That is, the key to -Entry 0 is less than the key to Entry 1, and the key to Entry 1 is -less than the key of Entry 2, and so forth. The pointers point to -pages containing additional entries that have keys in between the -entries on either side. So Ptr 0 points to another b-tree page that -contains entries that all have keys less than Key 0, and Ptr 1 -points to a b-tree pages where all entries have keys greater than Key 0 -but less than Key 1, and so forth. -

    - -

    -Each b-tree page in SQLite consists of a header, zero or more "cells" -each holding a single entry and pointer, and zero or more "free blocks" -that represent unused space on the page. -

    - -

    -The header on a b-tree page is the first 8 bytes of the page. -The header contains the value -of the right-most pointer (Ptr N) and the byte offset into the page -of the first cell and the first free block. The pointer is a 32-bit -value and the offsets are each 16-bit values. We have: -

    - -
    - - - - - - - - - - - - - - - - -
    01234567
    Ptr NCell 0Freeblock 0
    -
    - -

    -The 1016 bytes of a b-tree page that come after the header contain -cells and freeblocks. All 1016 bytes are covered by either a cell -or a freeblock. -

    - -

    -The cells are connected in a linked list. Cell 0 contains Ptr 0 and -Entry 0. Bytes 4 and 5 of the header point to Cell 0. Cell 0 then -points to Cell 1 which contains Ptr 1 and Entry 1. And so forth. -Cells vary in size. Every cell has a 12-byte header and at least 4 -bytes of payload space. Space is allocated to payload in increments -of 4 bytes. Thus the minimum size of a cell is 16 bytes and up to -63 cells can fit on a single page. The size of a cell is always a multiple -of 4 bytes. -A cell can have up to 238 bytes of payload space. If -the payload is more than 238 bytes, then an additional 4 byte page -number is appended to the cell which is the page number of the first -overflow page containing the additional payload. The maximum size -of a cell is thus 254 bytes, meaning that at least 4 cells can fit into -the 1016 bytes of space available on a b-tree page. -An average cell is usually around 52 to 100 bytes in size with about -10 or 20 cells to a page. -

    - -

    -The data layout of a cell looks like this: -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    0123456789101112 ... 249250251252253
    PtrKeysize
    (low)
    NextKsz
    (hi)
    Dsz
    (hi)
    Datasize
    (low)
    PayloadOverflow
    Pointer
    -
    - -

    -The first four bytes are the pointer. The size of the key is a 24-bit value -where the upper 8 bits are taken from byte 8 and the lower 16 bits are -taken from bytes 4 and 5 (or bytes 5 and 4 on little-endian machines.) -The size of the data is another 24-bit value where the upper 8 bits -are taken from byte 9 and the lower 16 bits are taken from bytes 10 and -11 or 11 and 10, depending on the byte order. Bytes 6 and 7 are the -offset to the next cell in the linked list of all cells on the current -page. This offset is 0 for the last cell on the page. -

    - -

    -The payload itself can be any number of bytes between 1 and 1048576. -But space to hold the payload is allocated in 4-byte chunks up to -238 bytes. If the entry contains more than 238 bytes of payload, then -additional payload data is stored on a linked list of overflow pages. -A 4 byte page number is appended to the cell that contains the first -page of this linked list. -

    - -

    -Each overflow page begins with a 4-byte value which is the -page number of the next overflow page in the list. This value is -0 for the last page in the list. The remaining -1020 bytes of the overflow page are available for storing payload. -Note that a full page is allocated regardless of the number of overflow -bytes stored. Thus, if the total payload for an entry is 239 bytes, -the first 238 are stored in the cell and the overflow page stores just -one byte. -

    - -

    -The structure of an overflow page looks like this: -

    - -
    - - - - - - - - - - - - -
    01234 ... 1023
    Next PageOverflow Data
    -
    - -

    -All space on a b-tree page which is not used by the header or by cells -is filled by freeblocks. Freeblocks, like cells, are variable in size. -The size of a freeblock is at least 4 bytes and is always a multiple of -4 bytes. -The first 4 bytes contain a header and the remaining bytes -are unused. The structure of the freeblock is as follows: -

    - -
    - - - - - - - - - - - - - -
    01234 ... 1015
    SizeNextUnused
    -
    - -

    -Freeblocks are stored in a linked list in increasing order. That is -to say, the first freeblock occurs at a lower index into the page than -the second free block, and so forth. The first 2 bytes of the header -are an integer which is the total number of bytes in the freeblock. -The second 2 bytes are the index into the page of the next freeblock -in the list. The last freeblock has a Next value of 0. -

    - -

    -When a new b-tree is created in a database, the root page of the b-tree -consist of a header and a single 1016 byte freeblock. As entries are -added, space is carved off of that freeblock and used to make cells. -When b-tree entries are deleted, the space used by their cells is converted -into freeblocks. Adjacent freeblocks are merged, but the page can still -become fragmented. The b-tree code will occasionally try to defragment -the page by moving all cells to the beginning and constructing a single -freeblock at the end to take up all remaining space. -

    - -

    3.3   The B-Tree Free Page List

    - -

    -When information is removed from an SQLite database such that one or -more pages are no longer needed, those pages are added to a list of -free pages so that they can be reused later when new information is -added. This subsection describes the structure of this freelist. -

    - -

    -The 32-bit integer beginning at byte-offset 52 in page 1 of the database -contains the address of the first page in a linked list of free pages. -If there are no free pages available, this integer has a value of 0. -The 32-bit integer at byte-offset 56 in page 1 contains the number of -free pages on the freelist. -

    - -

    -The freelist contains a trunk and many branches. The trunk of -the freelist is composed of overflow pages. That is to say, each page -contains a single 32-bit integer at byte offset 0 which -is the page number of the next page on the freelist trunk. -The payload area -of each trunk page is used to record pointers to branch pages. -The first 32-bit integer in the payload area of a trunk page -is the number of branch pages to follow (between 0 and 254) -and each subsequent 32-bit integer is a page number for a branch page. -The following diagram shows the structure of a trunk freelist page: -

    - -
    - - - - - - - - - - - - - - - - - -
    012345678 ... 1023
    Next trunk page# of branch pagesPage numbers for branch pages
    -
    - -

    -It is important to note that only the pages on the trunk of the freelist -contain pointers to other pages. The branch pages contain no -data whatsoever. The fact that the branch pages are completely -blank allows for an important optimization in the paging layer. When -a branch page is removed from the freelist to be reused, it is not -necessary to write the original content of that page into the rollback -journal. The branch page contained no data to begin with, so there is -no need to restore the page in the event of a rollback. Similarly, -when a page is no longer needed and is added to the freelist as a branch -page, it is not necessary to write the content of that page -into the database file. -Again, the page contains no real data so it is not necessary to record the -content of that page. By reducing the amount of disk I/O required, -these two optimizations allow some database operations -to go four to six times faster than they would otherwise. -

    - -

    4.0   The Schema Layer

    - -

    -The schema layer implements an SQL database on top of one or more -b-trees and keeps track of the root page numbers for all b-trees. -Where the b-tree layer provides only unformatted data storage with -a unique key, the schema layer allows each entry to contain multiple -columns. The schema layer also allows indices and non-unique key values. -

    - -

    -The schema layer implements two separate data storage abstractions: -tables and indices. Each table and each index uses its own b-tree -but they use the b-tree capabilities in different ways. For a table, -the b-tree key is a unique 4-byte integer and the b-tree data is the -content of the table row, encoded so that columns can be separately -extracted. For indices, the b-tree key varies in size depending on the -size of the fields being indexed and the b-tree data is empty. -

    - -

    4.1   SQL Table Implementation Details

    - -

    Each row of an SQL table is stored in a single b-tree entry. -The b-tree key is a 4-byte big-endian integer that is the ROWID -or INTEGER PRIMARY KEY for that table row. -The key is stored in a big-endian format so -that keys will sort in numerical order using the memcmp() function.

    - -

    The content of a table row is stored in the data portion of -the corresponding b-tree table. The content is encoded to allow -individual columns of the row to be extracted as necessary. Assuming -that the table has N columns, the content is encoded as N+1 offsets -followed by N column values, as follows: -

    - -
    - - - - - - - - - - - - -
    offset 0offset 1...offset N-1offset Nvalue 0value 1...value N-1
    -
    - -

    -The offsets can be either 8-bit, 16-bit, or 24-bit integers depending -on how much data is to be stored. If the total size of the content -is less than 256 bytes then 8-bit offsets are used. If the total size -of the b-tree data is less than 65536 then 16-bit offsets are used. -24-bit offsets are used otherwise. Offsets are always little-endian, -which means that the least significant byte occurs first. -

    - -

    -Data is stored as a nul-terminated string. Any empty string consists -of just the nul terminator. A NULL value is an empty string with no -nul-terminator. Thus a NULL value occupies zero bytes and an empty string -occupies 1 byte. -

    - -

    -Column values are stored in the order that they appear in the CREATE TABLE -statement. The offsets at the beginning of the record contain the -byte index of the corresponding column value. Thus, Offset 0 contains -the byte index for Value 0, Offset 1 contains the byte offset -of Value 1, and so forth. The number of bytes in a column value can -always be found by subtracting offsets. This allows NULLs to be -recovered from the record unambiguously. -

    - -

    -Most columns are stored in the b-tree data as described above. -The one exception is a column that has type INTEGER PRIMARY KEY. -INTEGER PRIMARY KEY columns correspond to the 4-byte b-tree key. -When an SQL statement attempts to read the INTEGER PRIMARY KEY, -the 4-byte b-tree key is read rather than information out of the -b-tree data. But there is still an Offset associated with the -INTEGER PRIMARY KEY, just like any other column. But the Value -associated with that offset is always NULL. -

    - -

    4.2   SQL Index Implementation Details

    - -

    -SQL indices are implemented using a b-tree in which the key is used -but the data is always empty. The purpose of an index is to map -one or more column values into the ROWID for the table entry that -contains those column values. -

    - -

    -Each b-tree key in an index consists of one or more column values followed -by a 4-byte ROWID. Each column value is nul-terminated (even NULL values) -and begins with a single character that indicates the datatype for that -column value. Only three datatypes are supported: NULL, Number, and -Text. NULL values are encoded as the character 'a' followed by the -nul terminator. Numbers are encoded as the character 'b' followed by -a string that has been crafted so that sorting the string using memcmp() -will sort the corresponding numbers in numerical order. (See the -sqliteRealToSortable() function in util.c of the SQLite sources for -additional information on this encoding.) Numbers are also nul-terminated. -Text values consist of the character 'c' followed by a copy of the -text string and a nul-terminator. These encoding rules result in -NULLs being sorted first, followed by numerical values in numerical -order, followed by text values in lexicographical order. -

    - -

    4.3   SQL Schema Storage And Root B-Tree Page Numbers

    - -

    -The database schema is stored in the database in a special table named -"sqlite_master" which always has a root b-tree page number of 2. -This table contains the original CREATE TABLE, -CREATE INDEX, CREATE VIEW, and CREATE TRIGGER statements used to define -the database to begin with. Whenever an SQLite database is opened, -the sqlite_master table is scanned from beginning to end and -all the original CREATE statements are played back through the parser -in order to reconstruct an in-memory representation of the database -schema for use in subsequent command parsing. For each CREATE TABLE -and CREATE INDEX statement, the root page number for the corresponding -b-tree is also recorded in the sqlite_master table so that SQLite will -know where to look for the appropriate b-tree. -

    - -

    -SQLite users can query the sqlite_master table just like any other table -in the database. But the sqlite_master table cannot be directly written. -The sqlite_master table is automatically updated in response to CREATE -and DROP statements but it cannot be changed using INSERT, UPDATE, or -DELETE statements as that would risk corrupting the database. -

    - -

    -SQLite stores temporary tables and indices in a separate -file from the main database file. The temporary table database file -is the same structure as the main database file. The schema table -for the temporary tables is stored on page 2 just as in the main -database. But the schema table for the temporary database is named -"sqlite_temp_master" instead of "sqlite_master". Other than the -name change, it works exactly the same. -

    - -

    4.4   Schema Version Numbering And Other Meta-Information

    - -

    -The nine 32-bit integers that are stored beginning at byte offset -60 of Page 1 in the b-tree layer are passed up into the schema layer -and used for versioning and configuration information. The meaning -of the first four integers is shown below. The other five are currently -unused. -

    - -
      -
    1. The schema version number
    2. -
    3. The format version number
    4. -
    5. The recommended pager cache size
    6. -
    7. The safety level
    8. -
    - -

    -The first meta-value, the schema version number, is used to detect when -the schema of the database is changed by a CREATE or DROP statement. -Recall that when a database is first opened the sqlite_master table is -scanned and an internal representation of the tables, indices, views, -and triggers for the database is built in memory. This internal -representation is used for all subsequent SQL command parsing and -execution. But what if another process were to change the schema -by adding or removing a table, index, view, or trigger? If the original -process were to continue using the old schema, it could potentially -corrupt the database by writing to a table that no longer exists. -To avoid this problem, the schema version number is changed whenever -a CREATE or DROP statement is executed. Before each command is -executed, the current schema version number for the database file -is compared against the schema version number from when the sqlite_master -table was last read. If those numbers are different, the internal -schema representation is erased and the sqlite_master table is reread -to reconstruct the internal schema representation. -(Calls to sqlite_exec() generally return SQLITE_SCHEMA when this happens.) -

    - -

    -The second meta-value is the schema format version number. This -number tells what version of the schema layer should be used to -interpret the file. There have been changes to the schema layer -over time and this number is used to detect when an older database -file is being processed by a newer version of the library. -As of this writing (SQLite version 2.7.0) the current format version -is "4". -

    - -

    -The third meta-value is the recommended pager cache size as set -by the DEFAULT_CACHE_SIZE pragma. If the value is positive it -means that synchronous behavior is enabled (via the DEFAULT_SYNCHRONOUS -pragma) and if negative it means that synchronous behavior is -disabled. -

    - -

    -The fourth meta-value is safety level added in version 2.8.0. -A value of 1 corresponds to a SYNCHRONOUS setting of OFF. In other -words, SQLite does not pause to wait for journal data to reach the disk -surface before overwriting pages of the database. A value of 2 corresponds -to a SYNCHRONOUS setting of NORMAL. A value of 3 corresponds to a -SYNCHRONOUS setting of FULL. If the value is 0, that means it has not -been initialized so the default synchronous setting of NORMAL is used. -

    -} -footer $rcsid DELETED formatchng.tcl Index: formatchng.tcl ================================================================== --- formatchng.tcl +++ /dev/null @@ -1,285 +0,0 @@ -# -# Run this Tcl script to generate the formatchng.html file. -# -set rcsid {$Id: formatchng.tcl,v 1.20 2007/09/03 20:32:45 drh Exp $ } -source common.tcl -header {File Format Changes in SQLite} -puts { -

    File Format Changes in SQLite

    - -

    -Every effort is made to keep SQLite fully backwards compatible from -one release to the next. Rarely, however, some -enhancements or bug fixes may require a change to -the underlying file format. When this happens, you -must convert the contents of your -databases into a portable ASCII representation using the old version -of the library then reload the data using the new version of the -library. -

    - -

    -You can tell if you should reload your databases by comparing the -version numbers of the old and new libraries. If the first digit -of the version number is different, then a reload of the database will -be required. If the second digit changes, newer versions of SQLite -will be able to read and write older database files, but older versions -of the library may have difficulty reading or writing newer database -files. -For example, upgrading from -version 2.8.14 to 3.0.0 requires a reload. Going from -version 3.0.8 to 3.1.0 is backwards compatible but not necessarily -forwards compatible. -

    - -

    -The following table summarizes the SQLite file format changes that have -occurred since version 1.0.0: -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Version ChangeApprox. DateDescription Of File Format Change
    1.0.32 to 2.0.02001-Sep-20Version 1.0.X of SQLite used the GDBM library as its backend - interface to the disk. Beginning in version 2.0.0, GDBM was replaced - by a custom B-Tree library written especially for SQLite. The new - B-Tree backend is twice as fast as GDBM, supports atomic commits and - rollback, and stores an entire database in a single disk file instead - using a separate file for each table as GDBM does. The two - file formats are not even remotely similar.
    2.0.8 to 2.1.02001-Nov-12The same basic B-Tree format is used but the details of the - index keys were changed in order to provide better query - optimization opportunities. Some of the headers were also changed in order - to increase the maximum size of a row from 64KB to 24MB.

    - - This change is an exception to the version number rule described above - in that it is neither forwards or backwards compatible. A complete - reload of the database is required. This is the only exception.

    2.1.7 to 2.2.02001-Dec-21Beginning with version 2.2.0, SQLite no longer builds an index for - an INTEGER PRIMARY KEY column. Instead, it uses that column as the actual - B-Tree key for the main table.

    Version 2.2.0 and later of the library - will automatically detect when it is reading a 2.1.x database and will - disable the new INTEGER PRIMARY KEY feature. In other words, version - 2.2.x is backwards compatible to version 2.1.x. But version 2.1.x is not - forward compatible with version 2.2.x. If you try to open - a 2.2.x database with an older 2.1.x library and that database contains - an INTEGER PRIMARY KEY, you will likely get a coredump. If the database - schema does not contain any INTEGER PRIMARY KEYs, then the version 2.1.x - and version 2.2.x database files will be identical and completely - interchangeable.

    -
    2.2.5 to 2.3.02002-Jan-30Beginning with version 2.3.0, SQLite supports some additional syntax - (the "ON CONFLICT" clause) in the CREATE TABLE and CREATE INDEX statements - that are stored in the SQLITE_MASTER table. If you create a database that - contains this new syntax, then try to read that database using version 2.2.5 - or earlier, the parser will not understand the new syntax and you will get - an error. Otherwise, databases for 2.2.x and 2.3.x are interchangeable.
    2.3.3 to 2.4.02002-Mar-10Beginning with version 2.4.0, SQLite added support for views. - Information about views is stored in the SQLITE_MASTER table. If an older - version of SQLite attempts to read a database that contains VIEW information - in the SQLITE_MASTER table, the parser will not understand the new syntax - and initialization will fail. Also, the - way SQLite keeps track of unused disk blocks in the database file - changed slightly. - If an older version of SQLite attempts to write a database that - was previously written by version 2.4.0 or later, then it may leak disk - blocks.
    2.4.12 to 2.5.02002-Jun-17Beginning with version 2.5.0, SQLite added support for triggers. - Information about triggers is stored in the SQLITE_MASTER table. If an older - version of SQLite attempts to read a database that contains a CREATE TRIGGER - in the SQLITE_MASTER table, the parser will not understand the new syntax - and initialization will fail. -
    2.5.6 to 2.6.02002-July-17A design flaw in the layout of indices required a file format change - to correct. This change appeared in version 2.6.0.

    - - If you use version 2.6.0 or later of the library to open a database file - that was originally created by version 2.5.6 or earlier, an attempt to - rebuild the database into the new format will occur automatically. - This can take some time for a large database. (Allow 1 or 2 seconds - per megabyte of database under Unix - longer under Windows.) This format - conversion is irreversible. It is strongly suggested - that you make a backup copy of older database files prior to opening them - with version 2.6.0 or later of the library, in case there are errors in - the format conversion logic.

    - - Version 2.6.0 or later of the library cannot open read-only database - files from version 2.5.6 or earlier, since read-only files cannot be - upgraded to the new format.

    -
    2.6.3 to 2.7.02002-Aug-13

    Beginning with version 2.7.0, SQLite understands two different - datatypes: text and numeric. Text data sorts in memcmp() order. - Numeric data sorts in numerical order if it looks like a number, - or in memcmp() order if it does not.

    - -

    When SQLite version 2.7.0 or later opens a 2.6.3 or earlier database, - it assumes all columns of all tables have type "numeric". For 2.7.0 - and later databases, columns have type "text" if their datatype - string contains the substrings "char" or "clob" or "blob" or "text". - Otherwise they are of type "numeric".

    - -

    Because "text" columns have a different sort order from numeric, - indices on "text" columns occur in a different order for version - 2.7.0 and later database. Hence version 2.6.3 and earlier of SQLite - will be unable to read a 2.7.0 or later database. But version 2.7.0 - and later of SQLite will read earlier databases.

    -
    2.7.6 to 2.8.02003-Feb-14

    Version 2.8.0 introduces a change to the format of the rollback - journal file. The main database file format is unchanged. Versions - 2.7.6 and earlier can read and write 2.8.0 databases and vice versa. - Version 2.8.0 can rollback a transaction that was started by version - 2.7.6 and earlier. But version 2.7.6 and earlier cannot rollback a - transaction started by version 2.8.0 or later.

    - -

    The only time this would ever be an issue is when you have a program - using version 2.8.0 or later that crashes with an incomplete - transaction, then you try to examine the database using version 2.7.6 or - earlier. The 2.7.6 code will not be able to read the journal file - and thus will not be able to rollback the incomplete transaction - to restore the database.

    -
    2.8.14 to 3.0.02004-Jun-18

    Version 3.0.0 is a major upgrade for SQLite that incorporates - support for UTF-16, BLOBs, and a more compact encoding that results - in database files that are typically 25% to 50% smaller. The new file - format is very different and is completely incompatible with the - version 2 file format.

    -
    3.0.8 to 3.1.02005-Jan-21

    Version 3.1.0 adds support for - autovacuum mode. - Prior versions of SQLite will be able to read an autovacuumed - database but will not be able to write it. If autovaccum is disabled - (which is the default condition) - then databases are fully forwards and backwards compatible.

    -
    3.1.6 to 3.2.02005-Mar-19

    Version 3.2.0 adds support for the - ALTER TABLE ADD COLUMN - command. A database that has been modified by this command can - not be read by a version of SQLite prior to 3.1.4. Running - VACUUM - after the ALTER TABLE - restores the database to a format such that it can be read by earlier - SQLite versions.

    -
    3.2.8 to 3.3.02006-Jan-10

    Version 3.3.0 adds support for descending indices and - uses a new encoding for boolean values that requires - less disk space. Version 3.3.0 can read and write database - files created by prior versions of SQLite. But prior versions - of SQLite will not be able to read or write databases created - by Version 3.3.0

    -

    If you need backwards and forwards capatibility, you can - compile with -DSQLITE_DEFAULT_FILE_FORMAT=1. Or at runtime - you can say "PRAGMA legacy_file_format=ON" prior to creating - a new database file

    -

    Once a database file is created, its format is fixed. So - a database file created by SQLite 3.2.8 and merely modified - by version 3.3.0 or later will retain the old format. Except, - the VACUUM command recreates the database so running VACUUM - on 3.3.0 or later will change the file format to the latest - edition.

    -
    3.3.6 to 3.3.72006-Aug-12

    The previous file format change has caused so much - grief that the default behavior has been changed back to - the original file format. This means that DESC option on - indices is ignored by default that the more efficient encoding - of boolean values is not used. In that way, older versions - of SQLite can read and write databases created by newer - versions. If the new features are desired, they can be - enabled using pragma: "PRAGMA legacy_file_format=OFF".

    -

    To be clear: both old and new file formats continue to - be understood and continue to work. But the old file format - is used by default instead of the new. This might change - again in some future release - we may go back to generating - the new file format by default - but probably not until - all users have upgraded to a version of SQLite that will - understand the new file format. That might take several - years.

    3.4.2 to 3.5.02007-Sep-3

    The design of the OS interface layer was changed for - release 3.5.0. Applications that implemented a custom OS - interface will need to be modified in order to upgrade. - There are also some subtly different semantics a few obscure - APIs. An article is avilable which - describing the changes in detail.

    - -

    The on-disk file format is unchanged.

    -
    -
    - -

    -To perform a database reload, have ready versions of the -sqlite command-line utility for both the old and new -version of SQLite. Call these two executables "sqlite-old" -and "sqlite-new". Suppose the name of your old database -is "old.db" and you want to create a new database with -the same information named "new.db". The command to do -this is as follows: -

    - -
    - sqlite-old old.db .dump | sqlite-new new.db -
    -} -footer $rcsid DELETED fullscanb.gif Index: fullscanb.gif ================================================================== --- fullscanb.gif +++ /dev/null cannot compute difference between binary files ADDED images/arch.gif Index: images/arch.gif ================================================================== --- /dev/null +++ images/arch.gif cannot compute difference between binary files ADDED images/arch.png Index: images/arch.png ================================================================== --- /dev/null +++ images/arch.png cannot compute difference between binary files ADDED images/arch2.gif Index: images/arch2.gif ================================================================== --- /dev/null +++ images/arch2.gif cannot compute difference between binary files ADDED images/direct1b.gif Index: images/direct1b.gif ================================================================== --- /dev/null +++ images/direct1b.gif cannot compute difference between binary files ADDED images/foreignlogos/adobe.gif Index: images/foreignlogos/adobe.gif ================================================================== --- /dev/null +++ images/foreignlogos/adobe.gif cannot compute difference between binary files ADDED images/foreignlogos/apple.gif Index: images/foreignlogos/apple.gif ================================================================== --- /dev/null +++ images/foreignlogos/apple.gif cannot compute difference between binary files ADDED images/foreignlogos/firefox.gif Index: images/foreignlogos/firefox.gif ================================================================== --- /dev/null +++ images/foreignlogos/firefox.gif cannot compute difference between binary files ADDED images/foreignlogos/ge.gif Index: images/foreignlogos/ge.gif ================================================================== --- /dev/null +++ images/foreignlogos/ge.gif cannot compute difference between binary files ADDED images/foreignlogos/google.gif Index: 
images/foreignlogos/google.gif ================================================================== --- /dev/null +++ images/foreignlogos/google.gif cannot compute difference between binary files ADDED images/foreignlogos/mcaffee.gif Index: images/foreignlogos/mcaffee.gif ================================================================== --- /dev/null +++ images/foreignlogos/mcaffee.gif cannot compute difference between binary files ADDED images/foreignlogos/microsoft.gif Index: images/foreignlogos/microsoft.gif ================================================================== --- /dev/null +++ images/foreignlogos/microsoft.gif cannot compute difference between binary files ADDED images/foreignlogos/monotone.gif Index: images/foreignlogos/monotone.gif ================================================================== --- /dev/null +++ images/foreignlogos/monotone.gif cannot compute difference between binary files ADDED images/foreignlogos/philips.gif Index: images/foreignlogos/philips.gif ================================================================== --- /dev/null +++ images/foreignlogos/philips.gif cannot compute difference between binary files ADDED images/foreignlogos/php.gif Index: images/foreignlogos/php.gif ================================================================== --- /dev/null +++ images/foreignlogos/php.gif cannot compute difference between binary files ADDED images/foreignlogos/python.gif Index: images/foreignlogos/python.gif ================================================================== --- /dev/null +++ images/foreignlogos/python.gif cannot compute difference between binary files ADDED images/foreignlogos/realbasic.gif Index: images/foreignlogos/realbasic.gif ================================================================== --- /dev/null +++ images/foreignlogos/realbasic.gif cannot compute difference between binary files ADDED images/foreignlogos/skype.gif Index: images/foreignlogos/skype.gif 
================================================================== --- /dev/null +++ images/foreignlogos/skype.gif cannot compute difference between binary files ADDED images/foreignlogos/sunmicro.gif Index: images/foreignlogos/sunmicro.gif ================================================================== --- /dev/null +++ images/foreignlogos/sunmicro.gif cannot compute difference between binary files ADDED images/foreignlogos/symbian.gif Index: images/foreignlogos/symbian.gif ================================================================== --- /dev/null +++ images/foreignlogos/symbian.gif cannot compute difference between binary files ADDED images/foreignlogos/toshiba.gif Index: images/foreignlogos/toshiba.gif ================================================================== --- /dev/null +++ images/foreignlogos/toshiba.gif cannot compute difference between binary files ADDED images/fullscanb.gif Index: images/fullscanb.gif ================================================================== --- /dev/null +++ images/fullscanb.gif cannot compute difference between binary files ADDED images/index-ex1-x-b.gif Index: images/index-ex1-x-b.gif ================================================================== --- /dev/null +++ images/index-ex1-x-b.gif cannot compute difference between binary files ADDED images/indirect1b1.gif Index: images/indirect1b1.gif ================================================================== --- /dev/null +++ images/indirect1b1.gif cannot compute difference between binary files ADDED images/ne.png Index: images/ne.png ================================================================== --- /dev/null +++ images/ne.png cannot compute difference between binary files ADDED images/nocopy.gif Index: images/nocopy.gif ================================================================== --- /dev/null +++ images/nocopy.gif cannot compute difference between binary files ADDED images/nw.png Index: images/nw.png 
================================================================== --- /dev/null +++ images/nw.png cannot compute difference between binary files ADDED images/se.png Index: images/se.png ================================================================== --- /dev/null +++ images/se.png cannot compute difference between binary files ADDED images/shared.gif Index: images/shared.gif ================================================================== --- /dev/null +++ images/shared.gif cannot compute difference between binary files ADDED images/sw.png Index: images/sw.png ================================================================== --- /dev/null +++ images/sw.png cannot compute difference between binary files ADDED images/table-ex1b2.gif Index: images/table-ex1b2.gif ================================================================== --- /dev/null +++ images/table-ex1b2.gif cannot compute difference between binary files DELETED index-ex1-x-b.gif Index: index-ex1-x-b.gif ================================================================== --- index-ex1-x-b.gif +++ /dev/null cannot compute difference between binary files DELETED index.tcl Index: index.tcl ================================================================== --- index.tcl +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/tclsh -source common.tcl -header {SQLite home page} -puts { - - - - -
    -

    About SQLite

    -

    -
    - - -
    -SQLite is a small -C library that implements a self-contained, embeddable, -zero-configuration -SQL database engine. -Features include: -

    - -

      -
    • Transactions are atomic, consistent, isolated, and durable (ACID) - even after system crashes and power failures. -
    • Zero-configuration - no setup or administration needed.
    • -
    • Implements most of SQL92. - (Features not supported)
    • -
    • A complete database is stored in a single disk file.
    • -
    • Database files can be freely shared between machines with - different byte orders.
    • -
    • Supports terabyte-sized databases and gigabyte-sized strings - and blobs. (See limits.html.) -
    • Small code footprint: - - less than 250KiB fully configured or less - than 150KiB with optional features omitted.
    • -
    • Faster than popular client/server database - engines for most common operations.
    • -
    • Simple, easy to use API.
    • -
    • TCL bindings included. - Bindings for many other languages - - available separately.
    • -
    • Well-commented source code with over 98% test coverage.
    • -
    • Available as a - - single ANSI-C source-code file that you can easily drop into - another project. -
    • Self-contained: no external dependencies.
    • -
    • Sources are in the public domain. - Use for any purpose.
    • -
    -

    - -

    -The SQLite distribution comes with a standalone command-line -access program (sqlite) that can -be used to administer an SQLite database and which serves as -an example of how to use the SQLite library. -

    - -
    -

    News

    -} - -proc newsitem {date title text} { - puts "

    $date - $title

    " - regsub -all "\n( *\n)+" $text "

    \n\n

    " txt - puts "

    $txt

    " - puts "
    " -} - -newsitem {2007-Nov-05} {Version 3.5.2} { - This is an incremental release that fixes several minor problems, - adds some obscure features, and provides some performance tweaks. - Upgrading is optional. - - The experimental compile-time option - SQLITE_OMIT_MEMORY_ALLOCATION is no longer supported. On the other - hand, it is now possible to compile SQLite so that it uses a static - array for all its dynamic memory allocation needs and never calls - malloc. Expect to see additional radical changes to the memory - allocation subsystem in future releases. -} - -newsitem {2007-Oct-04} {Version 3.5.1} { - Fix a long-standing bug that might cause database corruption if a - disk-full error occurs in the middle of a transaction and that - transaction is not rolled back. - Ticket #2686. - - The new VFS layer is stable. However, we still reserve the right to - make tweaks to the interface definition of the VFS if necessary. -} - -newsitem {2007-Sep-04} {Version 3.5.0 alpha} { - The OS interface layer and the memory allocation subsystems in - SQLite have been reimplemented. The published API is largely unchanged - but the (unpublished) OS interface has been modified extensively. - Applications that implement their own OS interface will require - modification. See - 34to35.html for details.

    - - This is a large change. Approximately 10% of the source code was - modified. We are calling this first release "alpha" in order to give - the user community time to test and evaluate the changes before we - freeze the new design. -} - -puts { -

    Old news...

    -
    -} -footer {$Id: index.tcl,v 1.165 2007/11/05 18:11:18 drh Exp $} DELETED indirect1b1.gif Index: indirect1b1.gif ================================================================== --- indirect1b1.gif +++ /dev/null cannot compute difference between binary files DELETED lang.tcl Index: lang.tcl ================================================================== --- lang.tcl +++ /dev/null @@ -1,2207 +0,0 @@ -# -# Run this Tcl script to generate the lang-*.html files. -# -set rcsid {$Id: lang.tcl,v 1.137 2007/10/12 19:11:55 drh Exp $} -source common.tcl - -if {[llength $argv]>0} { - set outputdir [lindex $argv 0] -} else { - set outputdir "" -} - -header {Query Language Understood by SQLite} -puts { -

    SQL As Understood By SQLite

    - -

    The SQLite library understands most of the standard SQL -language. But it does omit some features -while at the same time -adding a few features of its own. This document attempts to -describe precisely what parts of the SQL language SQLite does -and does not support. A list of keywords is -also provided.

    - -

    In all of the syntax diagrams that follow, literal text is shown in -bold blue. Non-terminal symbols are shown in italic red. Operators -that are part of the syntactic markup itself are shown in black roman.

    - -

    This document is just an overview of the SQL syntax implemented -by SQLite. Many low-level productions are omitted. For detailed information -on the language that SQLite understands, refer to the source code and -the grammar file "parse.y".

    - -
    -

    SQLite implements the follow syntax:

    -

      -} - -proc slink {label} { - if {[string match *.html $label]} { - return $label - } - if {[string length $::outputdir]==0} { - return #$label - } else { - return lang_$label.html - } -} - -foreach {section} [lsort -index 0 -dictionary { - {{CREATE TABLE} createtable} - {{CREATE VIRTUAL TABLE} createvtab} - {{CREATE INDEX} createindex} - {VACUUM vacuum} - {{DROP TABLE} droptable} - {{DROP INDEX} dropindex} - {INSERT insert} - {REPLACE replace} - {DELETE delete} - {UPDATE update} - {SELECT select} - {comment comment} - {COPY copy} - {EXPLAIN explain} - {expression expr} - {{BEGIN TRANSACTION} transaction} - {{COMMIT TRANSACTION} transaction} - {{END TRANSACTION} transaction} - {{ROLLBACK TRANSACTION} transaction} - {PRAGMA pragma.html} - {{ON CONFLICT clause} conflict} - {{CREATE VIEW} createview} - {{DROP VIEW} dropview} - {{CREATE TRIGGER} createtrigger} - {{DROP TRIGGER} droptrigger} - {{ATTACH DATABASE} attach} - {{DETACH DATABASE} detach} - {REINDEX reindex} - {{ALTER TABLE} altertable} - {{ANALYZE} analyze} -}] { - foreach {s_title s_tag} $section {} - puts "
    • $s_title
    • " -} -puts {

    -
    - -

    Details on the implementation of each command are provided in -the sequel.

    -} - -proc Operator {name} { - return "$name" -} -proc Nonterminal {name} { - return "$name" -} -proc Keyword {name} { - return "$name" -} -proc Example {text} { - puts "
    $text
    " -} - -proc Section {name label} { - global outputdir - - if {[string length $outputdir]!=0} { - if {[llength [info commands puts_standard]]>0} { - footer $::rcsid - } - - if {[string length $label]>0} { - rename puts puts_standard - proc puts {str} { - regsub -all {href="#([a-z]+)"} $str {href="lang_\1.html"} str - puts_standard $::section_file $str - } - rename footer footer_standard - proc footer {id} { - footer_standard $id - rename footer "" - rename puts "" - rename puts_standard puts - rename footer_standard footer - } - set ::section_file [open [file join $outputdir lang_$label.html] w] - header "Query Language Understood by SQLite: $name" - puts "

    SQL As Understood By SQLite

    " - puts "\[Contents\]" - puts "

    $name

    " - return - } - } - puts "\n
    " - if {$label!=""} { - puts "" - } - puts "

    $name

    \n" -} - -Section {ALTER TABLE} altertable - -Syntax {sql-statement} { -ALTER TABLE [ .] -} {alteration} { -RENAME TO -} {alteration} { -ADD [COLUMN] -} - -puts { -

    SQLite's version of the ALTER TABLE command allows the user to -rename or add a new column to an existing table. It is not possible -to remove a column from a table. -

    - -

    The RENAME TO syntax is used to rename the table identified by -[database-name.]table-name to new-table-name. This command -cannot be used to move a table between attached databases, only to rename -a table within the same database.

    - -

    If the table being renamed has triggers or indices, then these remain -attached to the table after it has been renamed. However, if there are -any view definitions, or statements executed by triggers that refer to -the table being renamed, these are not automatically modified to use the new -table name. If this is required, the triggers or view definitions must be -dropped and recreated to use the new table name by hand. -

    - -

    The ADD [COLUMN] syntax is used to add a new column to an existing table. -The new column is always appended to the end of the list of existing columns. -Column-def may take any of the forms permissable in a CREATE TABLE -statement, with the following restrictions: -

      -
    • The column may not have a PRIMARY KEY or UNIQUE constraint.
    • -
    • The column may not have a default value of CURRENT_TIME, CURRENT_DATE - or CURRENT_TIMESTAMP.
    • -
    • If a NOT NULL constraint is specified, then the column must have a - default value other than NULL. -
    - -

    The execution time of the ALTER TABLE command is independent of -the amount of data in the table. The ALTER TABLE command runs as quickly -on a table with 10 million rows as it does on a table with 1 row. -

    - -

    After ADD COLUMN has been run on a database, that database will not -be readable by SQLite version 3.1.3 and earlier until the database -is VACUUMed.

    -} - -Section {ANALYZE} analyze - -Syntax {sql-statement} { - ANALYZE -} -Syntax {sql-statement} { - ANALYZE -} -Syntax {sql-statement} { - ANALYZE [ .] -} - -puts { -

    The ANALYZE command gathers statistics about indices and stores them -in a special tables in the database where the query optimizer can use -them to help make better index choices. -If no arguments are given, all indices in all attached databases are -analyzed. If a database name is given as the argument, all indices -in that one database are analyzed. If the argument is a table name, -then only indices associated with that one table are analyzed.

    - -

    The initial implementation stores all statistics in a single -table named sqlite_stat1. Future enhancements may create -additional tables with the same name pattern except with the "1" -changed to a different digit. The sqlite_stat1 table cannot -be DROPped, -but all the content can be DELETEd which has the -same effect.

    -} - -Section {ATTACH DATABASE} attach - -Syntax {sql-statement} { -ATTACH [DATABASE] AS -} - -puts { -

    The ATTACH DATABASE statement adds another database -file to the current database connection. If the filename contains -punctuation characters it must be quoted. The names 'main' and -'temp' refer to the main database and the database used for -temporary tables. These cannot be detached. Attached databases -are removed using the DETACH DATABASE -statement.

    - -

    You can read from and write to an attached database and you -can modify the schema of the attached database. This is a new -feature of SQLite version 3.0. In SQLite 2.8, schema changes -to attached databases were not allowed.

    - -

    You cannot create a new table with the same name as a table in -an attached database, but you can attach a database which contains -tables whose names are duplicates of tables in the main database. It is -also permissible to attach the same database file multiple times.

    - -

    Tables in an attached database can be referred to using the syntax -database-name.table-name. If an attached table doesn't have -a duplicate table name in the main database, it doesn't require a -database name prefix. When a database is attached, all of its -tables which don't have duplicate names become the default table -of that name. Any tables of that name attached afterwards require the table -prefix. If the default table of a given name is detached, then -the last table of that name attached becomes the new default.

    - -

    -Transactions involving multiple attached databases are atomic, -assuming that the main database is not ":memory:". If the main -database is ":memory:" then -transactions continue to be atomic within each individual -database file. But if the host computer crashes in the middle -of a COMMIT where two or more database files are updated, -some of those files might get the changes where others -might not. -Atomic commit of attached databases is a new feature of SQLite version 3.0. -In SQLite version 2.8, all commits to attached databases behaved as if -the main database were ":memory:". -

    - -

    There is a compile-time limit of 10 attached database files.

    -} - - -Section {BEGIN TRANSACTION} transaction - -Syntax {sql-statement} { -BEGIN [ DEFERRED | IMMEDIATE | EXCLUSIVE ] [TRANSACTION []] -} -Syntax {sql-statement} { -END [TRANSACTION []] -} -Syntax {sql-statement} { -COMMIT [TRANSACTION []] -} -Syntax {sql-statement} { -ROLLBACK [TRANSACTION []] -} - -puts { - -

    -No changes can be made to the database except within a transaction. -Any command that changes the database (basically, any SQL command -other than SELECT) will automatically start a transaction if -one is not already in effect. Automatically started transactions -are committed at the conclusion of the command. -

    - -

    -Transactions can be started manually using the BEGIN -command. Such transactions usually persist until the next -COMMIT or ROLLBACK command. But a transaction will also -ROLLBACK if the database is closed or if an error occurs -and the ROLLBACK conflict resolution algorithm is specified. -See the documentation on the ON CONFLICT -clause for additional information about the ROLLBACK -conflict resolution algorithm. -

    - -

    -END TRANSACTION is an alias for COMMIT. -

    - -

    The optional transaction name is current ignored. SQLite -does not recognize nested transactions at this time. -However, future versions of SQLite may be enhanced to support nested -transactions and the transaction name would then become significant. -Application are advised not to use the transaction name in order -to avoid future compatibility problems.

    - -

    -Transactions can be deferred, immediate, or exclusive. -The default transaction behavior is deferred. -Deferred means that no locks are acquired -on the database until the database is first accessed. Thus with a -deferred transaction, the BEGIN statement itself does nothing. Locks -are not acquired until the first read or write operation. The first read -operation against a database creates a SHARED lock and the first -write operation creates a RESERVED lock. Because the acquisition of -locks is deferred until they are needed, it is possible that another -thread or process could create a separate transaction and write to -the database after the BEGIN on the current thread has executed. -If the transaction is immediate, then RESERVED locks -are acquired on all databases as soon as the BEGIN command is -executed, without waiting for the -database to be used. After a BEGIN IMMEDIATE, you are guaranteed that -no other thread or process will be able to write to the database or -do a BEGIN IMMEDIATE or BEGIN EXCLUSIVE. Other processes can continue -to read from the database, however. An exclusive transaction causes -EXCLUSIVE locks to be acquired on all databases. After a BEGIN -EXCLUSIVE, you are guaranteed that no other thread or process will -be able to read or write the database until the transaction is -complete. -

    - -

    -A description of the meaning of SHARED, RESERVED, and EXCLUSIVE locks -is available separately. -

    - -

    -The COMMIT command does not actually perform a commit until all -pending SQL commands finish. Thus if two or more SELECT statements -are in the middle of processing and a COMMIT is executed, the commit -will not actually occur until all SELECT statements finish. -

    - -

    -An attempt to execute COMMIT might result in an SQLITE_BUSY return code. -This indicates that another thread or process had a read lock on the database -that prevented the database from being updated. When COMMIT fails in this -way, the transaction remains active and the COMMIT can be retried later -after the reader has had a chance to clear. -

    - -

    Response To Errors Within A Transaction

    - -

    If certain kinds of errors occur within a transaction, the -transaction may or may not be rolled back automatically. The -errors that cause the behavior include:

    - -
      -
    • SQLITE_FULL: database or disk full -
    • SQLITE_IOERR: disk I/O error -
    • SQLITE_BUSY: database in use by another process -
    • SQLITE_NOMEM: out or memory -
    • SQLITE_INTERRUPT: processing interrupted by user request -
    - -

    -For all of these errors, SQLite attempts to undo just the one statement -it was working on and leave changes from prior statements within the -same transaction intact and continue with the transaction. However, -depending on the statement being evaluated and the point at which the -error occurs, it might be necessary for SQLite to rollback and -cancel the transaction. An application can tell which -course of action SQLite took by using the -sqlite3_get_autocommit() -C-language interface.

    - -

    It is recommended that applications respond to the errors -listed above by explicitly issuing a ROLLBACK command. If the -transaction has already been rolled back automatically -by the error response, then the ROLLBACK command will fail with an -error, but no harm is caused by this.

    - -

    Future versions of SQLite may extend the list of errors which -might cause automatic transaction rollback. Future versions of -SQLite might change the error response. In particular, we may -choose to simplify the interface in future versions of SQLite by -causing the errors above to force an unconditional rollback.

    -} - - -Section comment comment - -Syntax {comment} { | -} {SQL-comment} {-- -} {C-comment} {/STAR [STAR/] -} - -puts { -

    Comments aren't SQL commands, but can occur in SQL queries. They are -treated as whitespace by the parser. They can begin anywhere whitespace -can be found, including inside expressions that span multiple lines. -

    - -

    SQL comments only extend to the end of the current line.

    - -

    C comments can span any number of lines. If there is no terminating -delimiter, they extend to the end of the input. This is not treated as -an error. A new SQL statement can begin on a line after a multiline -comment ends. C comments can be embedded anywhere whitespace can occur, -including inside expressions, and in the middle of other SQL statements. -C comments do not nest. SQL comments inside a C comment will be ignored. -

    -} - - -Section COPY copy - -Syntax {sql-statement} { -COPY [ OR ] [ .] FROM -[ USING DELIMITERS ] -} - -puts { -

    The COPY command is available in SQLite version 2.8 and earlier. -The COPY command has been removed from SQLite version 3.0 due to -complications in trying to support it in a mixed UTF-8/16 environment. -In version 3.0, the command-line shell -contains a new command .import that can be used as a substitute -for COPY. -

    - -

    The COPY command is an extension used to load large amounts of -data into a table. It is modeled after a similar command found -in PostgreSQL. In fact, the SQLite COPY command is specifically -designed to be able to read the output of the PostgreSQL dump -utility pg_dump so that data can be easily transferred from -PostgreSQL into SQLite.

    - -

    The table-name is the name of an existing table which is to -be filled with data. The filename is a string or identifier that -names a file from which data will be read. The filename can be -the STDIN to read data from standard input.

    - -

    Each line of the input file is converted into a single record -in the table. Columns are separated by tabs. If a tab occurs as -data within a column, then that tab is preceded by a baskslash "\" -character. A baskslash in the data appears as two backslashes in -a row. The optional USING DELIMITERS clause can specify a delimiter -other than tab.

    - -

    If a column consists of the character "\N", that column is filled -with the value NULL.

    - -

    The optional conflict-clause allows the specification of an alternative -constraint conflict resolution algorithm to use for this one command. -See the section titled -ON CONFLICT for additional information.

    - -

    When the input data source is STDIN, the input can be terminated -by a line that contains only a baskslash and a dot:} -puts "\"[Operator \\.]\".

    " - - -Section {CREATE INDEX} createindex - -Syntax {sql-statement} { -CREATE [UNIQUE] INDEX [IF NOT EXISTS] [ .] -ON ( [, ]* ) -} {column-name} { - [ COLLATE ] [ ASC | DESC ] -} - -puts { -

    The CREATE INDEX command consists of the keywords "CREATE INDEX" followed -by the name of the new index, the keyword "ON", the name of a previously -created table that is to be indexed, and a parenthesized list of names of -columns in the table that are used for the index key. -Each column name can be followed by one of the "ASC" or "DESC" keywords -to indicate sort order, but the sort order is ignored in the current -implementation. Sorting is always done in ascending order.

    - -

    The COLLATE clause following each column name defines a collating -sequence used for text entires in that column. The default collating -sequence is the collating sequence defined for that column in the -CREATE TABLE statement. Or if no collating sequence is otherwise defined, -the built-in BINARY collating sequence is used.

    - -

    There are no arbitrary limits on the number of indices that can be -attached to a single table, nor on the number of columns in an index.

    - -

    If the UNIQUE keyword appears between CREATE and INDEX then duplicate -index entries are not allowed. Any attempt to insert a duplicate entry -will result in an error.

    - -

    The exact text -of each CREATE INDEX statement is stored in the sqlite_master -or sqlite_temp_master table, depending on whether the table -being indexed is temporary. Every time the database is opened, -all CREATE INDEX statements -are read from the sqlite_master table and used to regenerate -SQLite's internal representation of the index layout.

    - -

    If the optional IF NOT EXISTS clause is present and another index -with the same name aleady exists, then this command becomes a no-op.

    - -

    Indexes are removed with the DROP INDEX -command.

    -} - - -Section {CREATE TABLE} {createtable} - -Syntax {sql-command} { -CREATE [TEMP | TEMPORARY] TABLE [IF NOT EXISTS] [ .] ( - [, ]* - [, ]* -) -} {sql-command} { -CREATE [TEMP | TEMPORARY] TABLE [.] AS -} {column-def} { - [] [[CONSTRAINT ] ]* -} {type} { - | - ( ) | - ( , ) -} {column-constraint} { -NOT NULL [ ] | -PRIMARY KEY [] [ ] [AUTOINCREMENT] | -UNIQUE [ ] | -CHECK ( ) | -DEFAULT | -COLLATE -} {constraint} { -PRIMARY KEY ( ) [ ] | -UNIQUE ( ) [ ] | -CHECK ( ) -} {conflict-clause} { -ON CONFLICT -} - -puts { -

    A CREATE TABLE statement is basically the keywords "CREATE TABLE" -followed by the name of a new table and a parenthesized list of column -definitions and constraints. The table name can be either an identifier -or a string. Tables names that begin with "sqlite_" are reserved -for use by the engine.

    - -

    Each column definition is the name of the column followed by the -datatype for that column, then one or more optional column constraints. -The datatype for the column does not restrict what data may be put -in that column. -See Datatypes In SQLite Version 3 for -additional information. -The UNIQUE constraint causes an index to be created on the specified -columns. This index must contain unique keys. -The COLLATE clause specifies what text -collating function to use when comparing text entries for the column. -The built-in BINARY collating function is used by default. -

    -The DEFAULT constraint specifies a default value to use when doing an INSERT. -The value may be NULL, a string constant or a number. Starting with version -3.1.0, the default value may also be one of the special case-independent -keywords CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP. If the value is -NULL, a string constant or number, it is literally inserted into the column -whenever an INSERT statement that does not specify a value for the column is -executed. If the value is CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP, then -the current UTC date and/or time is inserted into the columns. For -CURRENT_TIME, the format is HH:MM:SS. For CURRENT_DATE, YYYY-MM-DD. The format -for CURRENT_TIMESTAMP is "YYYY-MM-DD HH:MM:SS". -

    - -

    Specifying a PRIMARY KEY normally just creates a UNIQUE index -on the corresponding columns. However, if primary key is on a single column -that has datatype INTEGER, then that column is used internally -as the actual key of the B-Tree for the table. This means that the column -may only hold unique integer values. (Except for this one case, -SQLite ignores the datatype specification of columns and allows -any kind of data to be put in a column regardless of its declared -datatype.) If a table does not have an INTEGER PRIMARY KEY column, -then the B-Tree key will be an automatically generated integer. - The -B-Tree key for a row can always be accessed using one of the -special names "ROWID", "OID", or "_ROWID_". -This is true regardless of whether or not there is an INTEGER -PRIMARY KEY. An INTEGER PRIMARY KEY column can also include the -keyword AUTOINCREMENT. The AUTOINCREMENT keyword modifies the way -that B-Tree keys are automatically generated. Additional detail -on automatic B-Tree key generation is available -separately.

    - -

    According to the SQL standard, PRIMARY KEY should imply NOT NULL. -Unfortunately, due to a long-standing coding oversight, this is not -the case in SQLite. SQLite allows NULL values -in a PRIMARY KEY column. We could change SQLite to conform to the -standard (and we might do so in the future), but by the time the -oversight was discovered, SQLite was in such wide use that we feared -breaking legacy code if we fixed the problem. So for now we have -chosen to continue allowing NULLs in PRIMARY KEY columns. -Developers should be aware, however, that we may change SQLite to -conform to the SQL standard in future and should design new programs -accordingly.

    - -

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" -and "TABLE" then the table that is created is only visible -within that same database connection -and is automatically deleted when -the database connection is closed. Any indices created on a temporary table -are also temporary. Temporary tables and indices are stored in a -separate file distinct from the main database file.

    - -

    If a <database-name> is specified, then the table is created in -the named database. It is an error to specify both a <database-name> -and the TEMP keyword, unless the <database-name> is "temp". If no -database name is specified, and the TEMP keyword is not present, -the table is created in the main database.

    - -

    The optional conflict-clause following each constraint -allows the specification of an alternative default -constraint conflict resolution algorithm for that constraint. -The default is ABORT. Different constraints within the same -table may have different default conflict resolution algorithms. -If a COPY, INSERT, or UPDATE command specifies a different conflict -resolution algorithm, then that algorithm is used in place of the -default algorithm specified in the CREATE TABLE statement. -See the section titled -ON CONFLICT for additional information.

    - -

    CHECK constraints are supported as of version 3.3.0. Prior -to version 3.3.0, CHECK constraints were parsed but not enforced.

    - -

    There are no arbitrary limits on the number -of columns or on the number of constraints in a table. -The total amount of data in a single row is limited to about -1 megabyte in version 2.8. In version 3.0 there is no arbitrary -limit on the amount of data in a row.

    - - -

    The CREATE TABLE AS form defines the table to be -the result set of a query. The names of the table columns are -the names of the columns in the result.

    - -

    The exact text -of each CREATE TABLE statement is stored in the sqlite_master -table. Every time the database is opened, all CREATE TABLE statements -are read from the sqlite_master table and used to regenerate -SQLite's internal representation of the table layout. -If the original command was a CREATE TABLE AS then an equivalent -CREATE TABLE statement is synthesized and stored in sqlite_master -in place of the original command. -The text of CREATE TEMPORARY TABLE statements are stored in the -sqlite_temp_master table. -

    - -

    If the optional IF NOT EXISTS clause is present and another table -with the same name already exists, then this command becomes a no-op.

    - -

    Tables are removed using the DROP TABLE -statement.

    -} - - -Section {CREATE TRIGGER} createtrigger - -Syntax {sql-statement} { -CREATE [TEMP | TEMPORARY] TRIGGER [IF NOT EXISTS] [ BEFORE | AFTER ] - ON [ .] - -} - -Syntax {sql-statement} { -CREATE [TEMP | TEMPORARY] TRIGGER [IF NOT EXISTS] INSTEAD OF - ON [ .] - -} - -Syntax {database-event} { -DELETE | -INSERT | -UPDATE | -UPDATE OF -} - -Syntax {trigger-action} { -[ FOR EACH ROW ] [ WHEN ] -BEGIN - ; [ ; ]* -END -} - -Syntax {trigger-step} { - | | - | -} - -puts { -

    The CREATE TRIGGER statement is used to add triggers to the -database schema. Triggers are database operations (the trigger-action) -that are automatically performed when a specified database event (the -database-event) occurs.

    - -

    A trigger may be specified to fire whenever a DELETE, INSERT or UPDATE of a -particular database table occurs, or whenever an UPDATE of one or more -specified columns of a table are updated.

    - -

    At this time SQLite supports only FOR EACH ROW triggers, not FOR EACH -STATEMENT triggers. Hence explicitly specifying FOR EACH ROW is optional. FOR -EACH ROW implies that the SQL statements specified as trigger-steps -may be executed (depending on the WHEN clause) for each database row being -inserted, updated or deleted by the statement causing the trigger to fire.

    - -

    Both the WHEN clause and the trigger-steps may access elements of -the row being inserted, deleted or updated using references of the form -"NEW.column-name" and "OLD.column-name", where -column-name is the name of a column from the table that the trigger -is associated with. OLD and NEW references may only be used in triggers on -trigger-events for which they are relevant, as follows:

    - - - - - - - - - - - - - - -
    INSERTNEW references are valid
    UPDATENEW and OLD references are valid
    DELETEOLD references are valid
    -

    - -

    If a WHEN clause is supplied, the SQL statements specified as trigger-steps are only executed for rows for which the WHEN clause is true. If no WHEN clause is supplied, the SQL statements are executed for all rows.

    - -

    The specified trigger-time determines when the trigger-steps -will be executed relative to the insertion, modification or removal of the -associated row.

    - -

    An ON CONFLICT clause may be specified as part of an UPDATE or INSERT -trigger-step. However if an ON CONFLICT clause is specified as part of -the statement causing the trigger to fire, then this conflict handling -policy is used instead.

    - -

    Triggers are automatically dropped when the table that they are -associated with is dropped.

    - -

    Triggers may be created on views, as well as ordinary tables, by specifying -INSTEAD OF in the CREATE TRIGGER statement. If one or more ON INSERT, ON DELETE -or ON UPDATE triggers are defined on a view, then it is not an error to execute -an INSERT, DELETE or UPDATE statement on the view, respectively. Thereafter, -executing an INSERT, DELETE or UPDATE on the view causes the associated - triggers to fire. The real tables underlying the view are not modified - (except possibly explicitly, by a trigger program).

    - -

    Example:

    - -

    Assuming that customer records are stored in the "customers" table, and -that order records are stored in the "orders" table, the following trigger -ensures that all associated orders are redirected when a customer changes -his or her address:

    -} -Example { -CREATE TRIGGER update_customer_address UPDATE OF address ON customers - BEGIN - UPDATE orders SET address = new.address WHERE customer_name = old.name; - END; -} -puts { -

    With this trigger installed, executing the statement:

    -} - -Example { -UPDATE customers SET address = '1 Main St.' WHERE name = 'Jack Jones'; -} -puts { -

    causes the following to be automatically executed:

    -} -Example { -UPDATE orders SET address = '1 Main St.' WHERE customer_name = 'Jack Jones'; -} - -puts { -

    Note that currently, triggers may behave oddly when created on tables - with INTEGER PRIMARY KEY fields. If a BEFORE trigger program modifies the - INTEGER PRIMARY KEY field of a row that will be subsequently updated by the - statement that causes the trigger to fire, then the update may not occur. - The workaround is to declare the table with a PRIMARY KEY column instead - of an INTEGER PRIMARY KEY column.

    -} - -puts { -

    A special SQL function RAISE() may be used within a trigger-program, with the following syntax

    -} -Syntax {raise-function} { -RAISE ( ABORT, ) | -RAISE ( FAIL, ) | -RAISE ( ROLLBACK, ) | -RAISE ( IGNORE ) -} -puts { -

    When one of the first three forms is called during trigger-program execution, the specified ON CONFLICT processing is performed (either ABORT, FAIL or - ROLLBACK) and the current query terminates. An error code of SQLITE_CONSTRAINT is returned to the user, along with the specified error message.

    - -

    When RAISE(IGNORE) is called, the remainder of the current trigger program, -the statement that caused the trigger program to execute and any subsequent - trigger programs that would have been executed are abandoned. No database - changes are rolled back. If the statement that caused the trigger program - to execute is itself part of a trigger program, then that trigger program - resumes execution at the beginning of the next step. -

    - -

    Triggers are removed using the DROP TRIGGER -statement.

    -} - - -Section {CREATE VIEW} {createview} - -Syntax {sql-command} { -CREATE [TEMP | TEMPORARY] VIEW [IF NOT EXISTS] [.] AS -} - -puts { -

    The CREATE VIEW command assigns a name to a pre-packaged -SELECT -statement. Once the view is created, it can be used in the FROM clause -of another SELECT in place of a table name. -

    - -

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" -and "VIEW" then the view that is created is only visible to the -process that opened the database and is automatically deleted when -the database is closed.

    - -

    If a <database-name> is specified, then the view is created in -the named database. It is an error to specify both a <database-name> -and the TEMP keyword, unless the <database-name> is "temp". If no -database name is specified, and the TEMP keyword is not present, -the table is created in the main database.

    - -

    You cannot COPY, DELETE, INSERT or UPDATE a view. Views are read-only -in SQLite. However, in many cases you can use a -TRIGGER on the view to accomplish the same thing. Views are removed -with the DROP VIEW -command.

    -} - -Section {CREATE VIRTUAL TABLE} {createvtab} - -Syntax {sql-command} { -CREATE VIRTUAL TABLE [ .] USING [( )] -} - -puts { -

    A virtual table is an interface to an external storage or computation -engine that appears to be a table but does not actually store information -in the database file.

    - -

    In general, you can do anything with a virtual table that can be done -with an ordinary table, except that you cannot create triggers on a -virtual table. Some virtual table implementations might impose additional -restrictions. For example, many virtual tables are read-only.

    - -

    The <module-name> is the name of an object that implements -the virtual table. The <module-name> must be registered with -the SQLite database connection using -sqlite3_create_module -prior to issuing the CREATE VIRTUAL TABLE statement. -The module takes zero or more comma-separated arguments. -The arguments can be just about any text as long as it has balanced -parentheses. The argument syntax is sufficiently general that the -arguments can be made to appear as column definitions in a traditional -CREATE TABLE statement. -SQLite passes the module arguments directly -to the module without any interpretation. It is the responsibility -of the module implementation to parse and interpret its own arguments.

    - -

    A virtual table is destroyed using the ordinary -DROP TABLE statement. There is no -DROP VIRTUAL TABLE statement.

    -} - -Section DELETE delete - -Syntax {sql-statement} { -DELETE FROM [ .] [WHERE ] -} - -puts { -

    The DELETE command is used to remove records from a table. -The command consists of the "DELETE FROM" keywords followed by -the name of the table from which records are to be removed. -

    - -

    Without a WHERE clause, all rows of the table are removed. -If a WHERE clause is supplied, then only those rows that match -the expression are removed.

    -} - - -Section {DETACH DATABASE} detach - -Syntax {sql-command} { -DETACH [DATABASE] -} - -puts { -

    This statement detaches an additional database connection previously -attached using the ATTACH DATABASE statement. It -is possible to have the same database file attached multiple times using -different names, and detaching one connection to a file will leave the -others intact.

    - -

    This statement will fail if SQLite is in the middle of a transaction.

    -} - - -Section {DROP INDEX} dropindex - -Syntax {sql-command} { -DROP INDEX [IF EXISTS] [ .] -} - -puts { -

    The DROP INDEX statement removes an index added -with the -CREATE INDEX statement. The index named is completely removed from -the disk. The only way to recover the index is to reenter the -appropriate CREATE INDEX command.

    - -

    The DROP INDEX statement does not reduce the size of the database -file in the default mode. -Empty space in the database is retained for later INSERTs. To -remove free space in the database, use the VACUUM -command. If AUTOVACUUM mode is enabled for a database then space -will be freed automatically by DROP INDEX.

    -} - - -Section {DROP TABLE} droptable - -Syntax {sql-command} { -DROP TABLE [IF EXISTS] [.] -} - -puts { -

    The DROP TABLE statement removes a table added with the CREATE TABLE statement. The name specified is the -table name. It is completely removed from the database schema and the -disk file. The table can not be recovered. All indices associated -with the table are also deleted.

    - -

    The DROP TABLE statement does not reduce the size of the database -file in the default mode. Empty space in the database is retained for -later INSERTs. To -remove free space in the database, use the VACUUM -command. If AUTOVACUUM mode is enabled for a database then space -will be freed automatically by DROP TABLE.

    - -

    The optional IF EXISTS clause suppresses the error that would normally -result if the table does not exist.

    -} - - -Section {DROP TRIGGER} droptrigger -Syntax {sql-statement} { -DROP TRIGGER [IF EXISTS] [ .] -} -puts { -

    The DROP TRIGGER statement removes a trigger created by the -CREATE TRIGGER statement. The trigger is -deleted from the database schema. Note that triggers are automatically -dropped when the associated table is dropped.

    -} - - -Section {DROP VIEW} dropview - -Syntax {sql-command} { -DROP VIEW [IF EXISTS] -} - -puts { -

    The DROP VIEW statement removes a view created by the CREATE VIEW statement. The name specified is the -view name. It is removed from the database schema, but no actual data -in the underlying base tables is modified.

    -} - - -Section EXPLAIN explain - -Syntax {sql-statement} { -EXPLAIN -} - -puts { -

    The EXPLAIN command modifier is a non-standard extension. The -idea comes from a similar command found in PostgreSQL, but the operation -is completely different.

    - -

    If the EXPLAIN keyword appears before any other SQLite SQL command -then instead of actually executing the command, the SQLite library will -report back the sequence of virtual machine instructions it would have -used to execute the command had the EXPLAIN keyword not been present. -For additional information about virtual machine instructions see -the architecture description or the documentation -on available opcodes for the virtual machine.

    -} - - -Section expression expr - -Syntax {expr} { - | - [NOT] [ESCAPE ] | - | -( ) | - | - . | - . . | - | - | - ( | STAR ) | - ISNULL | - NOTNULL | - [NOT] BETWEEN AND | - [NOT] IN ( ) | - [NOT] IN ( ) | - [NOT] IN [ .] | -[EXISTS] ( ) | -CASE [] LP WHEN THEN RPPLUS [ELSE ] END | -CAST ( AS ) | - COLLATE -} {like-op} { -LIKE | GLOB | REGEXP | MATCH -} - -puts { -

    This section is different from the others. Most other sections of -this document talks about a particular SQL command. This section does -not talk about a standalone command but about "expressions" which are -subcomponents of most other commands.

    - -

    SQLite understands the following binary operators, in order from -highest to lowest precedence:

    - -
    -||
    -*    /    %
    -+    -
    -<<   >>   &    |
    -<    <=   >    >=
    -=    ==   !=   <>   IN
    -AND   
    -OR
    -
    - -

    Supported unary prefix operators are these:

    - -
    --    +    !    ~    NOT
    -
    - -

    The COLLATE operator can be thought of as a unary postfix -operator. The COLLATE operator has the highest precedence. -It always binds more tightly than any prefix unary operator or -any binary operator.

    - -

    The unary operator [Operator +] is a no-op. It can be applied -to strings, numbers, or blobs and it always gives as its result the -value of the operand.

    - -

    Note that there are two variations of the equals and not equals -operators. Equals can be either} -puts "[Operator =] or [Operator ==]. -The non-equals operator can be either -[Operator !=] or [Operator {<>}]. -The [Operator ||] operator is \"concatenate\" - it joins together -the two strings of its operands. -The operator [Operator %] outputs the remainder of its left -operand modulo its right operand.

    - -

    The result of any binary operator is a numeric value, except -for the [Operator ||] concatenation operator which gives a string -result.

    " - -puts { - - -

    -A literal value is an integer number or a floating point number. -Scientific notation is supported. The "." character is always used -as the decimal point even if the locale setting specifies "," for -this role - the use of "," for the decimal point would result in -syntactic ambiguity. A string constant is formed by enclosing the -string in single quotes ('). A single quote within the string can -be encoded by putting two single quotes in a row - as in Pascal. -C-style escapes using the backslash character are not supported because -they are not standard SQL. -BLOB literals are string literals containing hexadecimal data and -preceded by a single "x" or "X" character. For example:

    - -
    -X'53514C697465'
    -
    - -

    -A literal value can also be the token "NULL". -

    - -

    -A parameter specifies a placeholder in the expression for a literal -value that is filled in at runtime using the -sqlite3_bind API. -Parameters can take several forms: -

    - - - - - - - - - - - - - - - - - - - - - -
    ?NNNA question mark followed by a number NNN holds a spot for the -NNN-th parameter. NNN must be between 1 and 999.
    ?A question mark that is not followed by a number holds a spot for -the next unused parameter.
    :AAAAA colon followed by an identifier name holds a spot for a named -parameter with the name AAAA. Named parameters are also numbered. -The number assigned is the next unused number. To avoid confusion, -it is best to avoid mixing named and numbered parameters.
    @AAAAAn "at" sign works exactly like a colon.
    $AAAAA dollar-sign followed by an identifier name also holds a spot for a named -parameter with the name AAAA. The identifier name in this case can include -one or more occurrences of "::" and a suffix enclosed in "(...)" containing -any text at all. This syntax is the form of a variable name in the Tcl -programming language.
    - - -

    Parameters that are not assigned values using -sqlite3_bind are treated -as NULL.

    - - -

    The LIKE operator does a pattern matching comparison. The operand -to the right contains the pattern, the left hand operand contains the -string to match against the pattern. -} -puts "A percent symbol [Operator %] in the pattern matches any -sequence of zero or more characters in the string. An underscore -[Operator _] in the pattern matches any single character in the -string. Any other character matches itself or its lower/upper case -equivalent (i.e. case-insensitive matching). (A bug: SQLite only -understands upper/lower case for 7-bit Latin characters. Hence the -LIKE operator is case sensitive for 8-bit iso8859 characters or UTF-8 -characters. For example, the expression 'a' LIKE 'A' -is TRUE but 'æ' LIKE 'Æ' is FALSE.).

    " - -puts { -

    If the optional ESCAPE clause is present, then the expression -following the ESCAPE keyword must evaluate to a string consisting of -a single character. This character may be used in the LIKE pattern -to include literal percent or underscore characters. The escape -character followed by a percent symbol, underscore or itself matches a -literal percent symbol, underscore or escape character in the string, -respectively. The infix LIKE operator is implemented by calling the -user function like(X,Y).

    -} - -puts { -The LIKE operator is not case sensitive and will match upper case -characters on one side against lower case characters on the other. -(A bug: SQLite only understands upper/lower case for 7-bit Latin -characters. Hence the LIKE operator is case sensitive for 8-bit -iso8859 characters or UTF-8 characters. For example, the expression -'a' LIKE 'A' is TRUE but -'æ' LIKE 'Æ' is FALSE.).

    - -

    The infix LIKE -operator is implemented by calling the user function -like(X,Y). If an ESCAPE clause is present, it adds -a third parameter to the function call. The functionality of LIKE can be -overridden by defining an alternative implementation of the -like() SQL function.

    -

    - - -

    The GLOB operator is similar to LIKE but uses the Unix -file globbing syntax for its wildcards. Also, GLOB is case -sensitive, unlike LIKE. Both GLOB and LIKE may be preceded by -the NOT keyword to invert the sense of the test. The infix GLOB -operator is implemented by calling the user function -glob(X,Y) and can be modified by overriding -that function.

    - - -

    The REGEXP operator is a special syntax for the regexp() -user function. No regexp() user function is defined by default -and so use of the REGEXP operator will normally result in an -error message. If a user-defined function named "regexp" -is added at run-time, that function will be called in order -to implement the REGEXP operator.

    - - -

    The MATCH operator is a special syntax for the match() -user function. The default match() function implementation -raises an exception and is not really useful for anything. -But extensions can override the match() function with more -helpful logic.

    - -

    A column name can be any of the names defined in the CREATE TABLE -statement or one of the following special identifiers: "ROWID", -"OID", or "_ROWID_". -These special identifiers all describe the -unique integer key (the "row key") associated with every -row of every table. -The special identifiers only refer to the row key if the CREATE TABLE -statement does not define a real column with the same name. Row keys -act like read-only columns. A row key can be used anywhere a regular -column can be used, except that you cannot change the value -of a row key in an UPDATE or INSERT statement. -"SELECT * ..." does not return the row key.

    - -

    SELECT statements can appear in expressions as either the -right-hand operand of the IN operator, as a scalar quantity, or -as the operand of an EXISTS operator. -As a scalar quantity or the operand of an IN operator, -the SELECT should have only a single column in its -result. Compound SELECTs (connected with keywords like UNION or -EXCEPT) are allowed. -With the EXISTS operator, the columns in the result set of the SELECT are -ignored and the expression returns TRUE if one or more rows exist -and FALSE if the result set is empty. -If no terms in the SELECT expression refer to value in the containing -query, then the expression is evaluated once prior to any other -processing and the result is reused as necessary. If the SELECT expression -does contain variables from the outer query, then the SELECT is reevaluated -every time it is needed.

    - -

    When a SELECT is the right operand of the IN operator, the IN -operator returns TRUE if the result of the left operand is any of -the values generated by the select. The IN operator may be preceded -by the NOT keyword to invert the sense of the test.

    - -

    When a SELECT appears within an expression but is not the right -operand of an IN operator, then the first row of the result of the -SELECT becomes the value used in the expression. If the SELECT yields -more than one result row, all rows after the first are ignored. If -the SELECT yields no rows, then the value of the SELECT is NULL.

    - -

    A CAST expression changes the datatype of the expression into the -type specified by <type>. -<type> can be any non-empty type name that is valid -for the type in a column definition of a CREATE TABLE statement.

    - -

    Both simple and aggregate functions are supported. A simple -function can be used in any expression. Simple functions return -a result immediately based on their inputs. Aggregate functions -may only be used in a SELECT statement. Aggregate functions compute -their result across all rows of the result set.

    - - -Core Functions - -

    The core functions shown below are available by default. Additional -functions may be written in C and added to the database engine using -the sqlite3_create_function() -API.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    abs(X)Return the absolute value of argument X.
    coalesce(X,Y,...)Return a copy of the first non-NULL argument. If -all arguments are NULL then NULL is returned. There must be at least -2 arguments.
    - -glob(X,Y)This function is used to implement the -"X GLOB Y" syntax of SQLite. The -sqlite3_create_function() -interface can -be used to override this function and thereby change the operation -of the GLOB operator.
    ifnull(X,Y)Return a copy of the first non-NULL argument. If -both arguments are NULL then NULL is returned. This behaves the same as -coalesce() above.
    - -hex(X)The argument is interpreted as a BLOB. The result -is a hexadecimal rendering of the content of that blob.
    last_insert_rowid()Return the ROWID -of the last row insert from this -connection to the database. This is the same value that would be returned -from the sqlite_last_insert_rowid() API function.
    length(X)Return the string length of X in characters. -If SQLite is configured to support UTF-8, then the number of UTF-8 -characters is returned, not the number of bytes.
    - -like(X,Y)
    -like(X,Y,Z)
    -This function is used to implement the "X LIKE Y [ESCAPE Z]" -syntax of SQL. If the optional ESCAPE clause is present, then the -user-function is invoked with three arguments. Otherwise, it is -invoked with two arguments only. The - -sqlite_create_function() interface can be used to override this -function and thereby change the operation of the LIKE operator. When doing this, it may be important -to override both the two and three argument versions of the like() -function. Otherwise, different code may be called to implement the -LIKE operator depending on whether or not an ESCAPE clause was -specified.
    load_extension(X)
    -load_extension(X,Y)
    Load SQLite extensions out of the shared library -file named X using the entry point Y. The result -is a NULL. If Y is omitted then the default entry point -of sqlite3_extension_init is used. This function raises -an exception if the extension fails to load or initialize correctly. - -

    This function will fail if the extension attempts to modify -or delete a SQL function or collating sequence. The -extension can add new functions or collating sequences, but cannot -modify or delete existing functions or collating sequences because -those functions and/or collating sequences might be used elsewhere -in the currently running SQL statement. To load an extension that -changes or deletes functions or collating sequences, use the -sqlite3_load_extension() -C-language API.

    -
    lower(X)Return a copy of string X with all characters -converted to lower case. The C library tolower() routine is used -for the conversion, which means that this function might not -work correctly on UTF-8 characters.
    - -ltrim(X)
    ltrim(X,Y)
    Return a string formed by removing any and all -characters that appear in Y from the left side of X. -If the Y argument is omitted, spaces are removed.
    max(X,Y,...)Return the argument with the maximum value. Arguments -may be strings in addition to numbers. The maximum value is determined -by the usual sort order. Note that max() is a simple function when -it has 2 or more arguments but converts to an aggregate function if given -only a single argument.
    min(X,Y,...)Return the argument with the minimum value. Arguments -may be strings in addition to numbers. The minimum value is determined -by the usual sort order. Note that min() is a simple function when -it has 2 or more arguments but converts to an aggregate function if given -only a single argument.
    nullif(X,Y)Return the first argument if the arguments are different, -otherwise return NULL.
    quote(X)This routine returns a string which is the value of -its argument suitable for inclusion into another SQL statement. -Strings are surrounded by single-quotes with escapes on interior quotes -as needed. BLOBs are encoded as hexadecimal literals. -The current implementation of VACUUM uses this function. The function -is also useful when writing triggers to implement undo/redo functionality. -
    random(*)Return a pseudo-random integer -between -9223372036854775808 and +9223372036854775807.
    - -replace(X,Y,Z)Return a string formed by substituting string Z for -every occurrance of string Y in string X. The BINARY -collating sequence is used for comparisons.
    - -randomblob(N)Return a N-byte blob containing pseudo-random bytes. -N should be a postive integer.
    round(X)
    round(X,Y)
    Round off the number X to Y digits to the -right of the decimal point. If the Y argument is omitted, 0 is -assumed.
    - -rtrim(X)
    rtrim(X,Y)
    Return a string formed by removing any and all -characters that appear in Y from the right side of X. -If the Y argument is omitted, spaces are removed.
    soundex(X)Compute the soundex encoding of the string X. -The string "?000" is returned if the argument is NULL. -This function is omitted from SQLite by default. -It is only available the -DSQLITE_SOUNDEX=1 compiler option -is used when SQLite is built.
    sqlite_version(*)Return the version string for the SQLite library -that is running. Example: "2.8.0"
    - substr(X,Y,Z)
    - substr(X,Y)
    Return a substring of input string X that begins -with the Y-th character and which is Z characters long. -If Z is omitted then all character through the end of the string -are returned. -The left-most character of X is number 1. If Y is negative -the the first character of the substring is found by counting from the -right rather than the left. If X is string -then characters indices refer to actual UTF-8 characters. If -X is a BLOB then the indices refer to bytes.
    - -trim(X)
    trim(X,Y)
    Return a string formed by removing any and all -characters that appear in Y from both ends of X. -If the Y argument is omitted, spaces are removed.
    typeof(X)Return the type of the expression X. The only -return values are "null", "integer", "real", "text", and "blob". -SQLite's type handling is -explained in Datatypes in SQLite Version 3.
    upper(X)Return a copy of input string X converted to all -upper-case letters. The implementation of this function uses the C library -routine toupper() which means it may not work correctly on -UTF-8 strings.
    zeroblob(N) -Return a BLOB consisting of N bytes of 0x00. SQLite -manages these zeroblobs very efficiently. Zeroblobs can be used to -reserve space for a BLOB that is later written using -incremental BLOB I/O.
    - -Date And Time Functions - -

    Date and time functions are documented in the - -SQLite Wiki.

    - - -Aggregate Functions - -

    -The aggregate functions shown below are available by default. Additional -aggregate functions written in C may be added using the -sqlite3_create_function() -API.

    - -

    -In any aggregate function that takes a single argument, that argument -can be preceeded by the keyword DISTINCT. In such cases, duplicate -elements are filtered before being passed into the aggregate function. -For example, the function "count(distinct X)" will return the number -of distinct values of column X instead of the total number of non-null -values in column X. -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    avg(X)Return the average value of all non-NULL X within a -group. String and BLOB values that do not look like numbers are -interpreted as 0. -The result of avg() is always a floating point value even if all -inputs are integers.

    count(X)
    count(*)
    The first form return a count of the number of times -that X is not NULL in a group. The second form (with no argument) -returns the total number of rows in the group.
    max(X)Return the maximum value of all values in the group. -The usual sort order is used to determine the maximum.
    min(X)Return the minimum non-NULL value of all values in the group. -The usual sort order is used to determine the minimum. NULL is only returned -if all values in the group are NULL.
    sum(X)
    total(X)
    Return the numeric sum of all non-NULL values in the group. - If there are no non-NULL input rows then sum() returns - NULL but total() returns 0.0. - NULL is not normally a helpful result for the sum of no rows - but the SQL standard requires it and most other - SQL database engines implement sum() that way so SQLite does it in the - same way in order to be compatible. The non-standard total() function - is provided as a convenient way to work around this design problem - in the SQL language.

    - -

    The result of total() is always a floating point value. - The result of sum() is an integer value if all non-NULL inputs are integers. - If any input to sum() is neither an integer or a NULL - then sum() returns a floating point value - which might be an approximation to the true sum.

    - -

    Sum() will throw an "integer overflow" exception if all inputs - are integers or NULL - and an integer overflow occurs at any point during the computation. - Total() never throws an exception.

    -
    -} - - -Section INSERT insert - -Syntax {sql-statement} { -INSERT [OR ] INTO [ .] [()] VALUES() | -INSERT [OR ] INTO [ .] [()] -} - -puts { -

    The INSERT statement comes in two basic forms. The first form -(with the "VALUES" keyword) creates a single new row in an existing table. -If no column-list is specified then the number of values must -be the same as the number of columns in the table. If a column-list -is specified, then the number of values must match the number of -specified columns. Columns of the table that do not appear in the -column list are filled with the default value, or with NULL if no -default value is specified. -

    - -

    The second form of the INSERT statement takes it data from a -SELECT statement. The number of columns in the result of the -SELECT must exactly match the number of columns in the table if -no column list is specified, or it must match the number of columns -name in the column list. A new entry is made in the table -for every row of the SELECT result. The SELECT may be simple -or compound.

    - -

    The optional conflict-clause allows the specification of an alternative -constraint conflict resolution algorithm to use during this one command. -See the section titled -ON CONFLICT for additional information. -For compatibility with MySQL, the parser allows the use of the -single keyword REPLACE as an alias for "INSERT OR REPLACE". -

    -} - - -Section {ON CONFLICT clause} conflict - -Syntax {conflict-clause} { -ON CONFLICT -} {conflict-algorithm} { -ROLLBACK | ABORT | FAIL | IGNORE | REPLACE -} - -puts { -

    The ON CONFLICT clause is not a separate SQL command. It is a -non-standard clause that can appear in many other SQL commands. -It is given its own section in this document because it is not -part of standard SQL and therefore might not be familiar.

    - -

    The syntax for the ON CONFLICT clause is as shown above for -the CREATE TABLE command. For the INSERT and -UPDATE commands, the keywords "ON CONFLICT" are replaced by "OR", to make -the syntax seem more natural. For example, instead of -"INSERT ON CONFLICT IGNORE" we have "INSERT OR IGNORE". -The keywords change but the meaning of the clause is the same -either way.

    - -

    The ON CONFLICT clause specifies an algorithm used to resolve -constraint conflicts. There are five choices: ROLLBACK, ABORT, -FAIL, IGNORE, and REPLACE. The default algorithm is ABORT. This -is what they mean:

    - -
    -
    ROLLBACK
    -

    When a constraint violation occurs, an immediate ROLLBACK -occurs, thus ending the current transaction, and the command aborts -with a return code of SQLITE_CONSTRAINT. If no transaction is -active (other than the implied transaction that is created on every -command) then this algorithm works the same as ABORT.

    - -
    ABORT
    -

    When a constraint violation occurs, the command backs out -any prior changes it might have made and aborts with a return code -of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes -from prior commands within the same transaction -are preserved. This is the default behavior.

    - -
    FAIL
    -

    When a constraint violation occurs, the command aborts with a -return code SQLITE_CONSTRAINT. But any changes to the database that -the command made prior to encountering the constraint violation -are preserved and are not backed out. For example, if an UPDATE -statement encountered a constraint violation on the 100th row that -it attempts to update, then the first 99 row changes are preserved -but changes to rows 100 and beyond never occur.

    - -
    IGNORE
    -

    When a constraint violation occurs, the one row that contains -the constraint violation is not inserted or changed. But the command -continues executing normally. Other rows before and after the row that -contained the constraint violation continue to be inserted or updated -normally. No error is returned.

    - -
    REPLACE
    -

    When a UNIQUE constraint violation occurs, the pre-existing rows -that are causing the constraint violation are removed prior to inserting -or updating the current row. Thus the insert or update always occurs. -The command continues executing normally. No error is returned. -If a NOT NULL constraint violation occurs, the NULL value is replaced -by the default value for that column. If the column has no default -value, then the ABORT algorithm is used. If a CHECK constraint violation -occurs then the IGNORE algorithm is used.

    - -

    When this conflict resolution strategy deletes rows in order to -satisfy a constraint, it does not invoke delete triggers on those -rows. This behavior might change in a future release.

    -
    - -

    The algorithm specified in the OR clause of a INSERT or UPDATE -overrides any algorithm specified in a CREATE TABLE. -If no algorithm is specified anywhere, the ABORT algorithm is used.

    -} - -Section REINDEX reindex - -Syntax {sql-statement} { - REINDEX -} -Syntax {sql-statement} { - REINDEX [ .] -} - -puts { -

    The REINDEX command is used to delete and recreate indices from scratch. -This is useful when the definition of a collation sequence has changed. -

    - -

    In the first form, all indices in all attached databases that use the -named collation sequence are recreated. In the second form, if -[database-name.]table/index-name identifies a table, then all indices -associated with the table are rebuilt. If an index is identified, then only -this specific index is deleted and recreated. -

    - -

    If no database-name is specified and there exists both a table or -index and a collation sequence of the specified name, then indices associated -with the collation sequence only are reconstructed. This ambiguity may be -dispelled by always specifying a database-name when reindexing a -specific table or index. -} - -Section REPLACE replace - -Syntax {sql-statement} { -REPLACE INTO [ .] [( )] VALUES ( ) | -REPLACE INTO [ .] [( )] -} - -puts { -

    The REPLACE command is an alias for the "INSERT OR REPLACE" variant -of the INSERT command. This alias is provided for -compatibility with MySQL. See the -INSERT command documentation for additional -information.

    -} - - -Section SELECT select - -Syntax {sql-statement} { -SELECT [ALL | DISTINCT] [FROM ] -[WHERE ] -[GROUP BY ] -[HAVING ] -[
    [
    ]* -} {table} { - [AS ] | -(
    - - - - - - -
    'keyword'A keyword in single quotes is interpreted as a literal string - if it occurs in a context where a string literal is allowed, otherwise - it is understood as an identifier.
    "keyword"A keyword in double-quotes is interpreted as an identifier if - it matches a known identifier. Otherwise it is interpreted as a - string literal.
    [keyword]A keyword enclosed in square brackets is always understood as - an identifier. This is not standard SQL. This quoting mechanism - is used by MS Access and SQL Server and is included in SQLite for - compatibility.
    - -

    - -

    Quoted keywords are unaesthetic. -To help you avoid them, SQLite allows many keywords to be used unquoted -as the names of databases, tables, indices, triggers, views, columns, -user-defined functions, collations, attached databases, and virtual -function modules. -In the list of keywords that follows, those that can be used as identifiers -are shown in an italic font. Keywords that must be quoted in order to be -used as identifiers are shown in bold.

    - -

    -SQLite adds new keywords from time to time when it take on new features. -So to prevent your code from being broken by future enhancements, you should -normally quote any indentifier that is an English language word, even if -you do not have to. -

    - -

    -The following are the keywords currently recognized by SQLite: -

    - -
    - - -
    -} - -set n [llength $keyword_list] -set nCol 5 -set nRow [expr {($n+$nCol-1)/$nCol}] -set i 0 -foreach word $keyword_list { - if {[string index $word end]=="*"} { - set word [string range $word 0 end-1] - set font i - } else { - set font b - } - if {$i==$nRow} { - puts "" - set i 1 - } else { - incr i - } - puts "<$font>$word
    " -} - -puts { -
    - -

    Special names

    - -

    The following are not keywords in SQLite, but are used as names of -system objects. They can be used as an identifier for a different -type of object.

    - -
    - _ROWID_
    - MAIN
    - OID
    - ROWID
    - SQLITE_MASTER
    - SQLITE_SEQUENCE
    - SQLITE_TEMP_MASTER
    - TEMP
    -
    -} - -puts {
    } -footer $rcsid -if {[string length $outputdir]} { - footer $rcsid -} -puts {
    } DELETED limits.tcl Index: limits.tcl ================================================================== --- limits.tcl +++ /dev/null @@ -1,318 +0,0 @@ -# -# Run this script to generate the limits.html output file -# -set rcsid {$Id: limits.tcl,v 1.5 2007/08/09 00:00:26 drh Exp $} -source common.tcl -header {Implementation Limits For SQLite} -puts { -

    Limits In SQLite

    - -

    -"Limits" in the context of this article means sizes or -quantities that can not be exceeded. We are concerned -with things like the maximum number of bytes in a -BLOB or the maximum number of columns in a table. -

    - -

    -SQLite was originally designed with a policy of avoiding -arbitrary limits. -Of course, every program that runs on a machine with finite -memory and disk space has limits of some kind. But in SQLite, -those limits -were not well defined. The policy was that if it would fit -in memory and you could count it with a 32-bit integer, then -it should work. -

    - -

    -Unfortunately, the no-limits policy has been shown to create -problems. Because the upper bounds were not well -defined, they were not tested, and bugs (including possible -security exploits) were often found when pushing SQLite to -extremes. For this reason, newer versions of SQLite have -well-defined limits and those limits are tested as part of -the test suite. -

    - -

    -This article defines what the limits of SQLite are and how they -can be customized for specific applications. The default settings -for limits are normally quite large and adequate for almost every -application. Some applications may what to increase a limit here -or there, but we expect such needs to be rare. More commonly, -an application might want to recompile SQLite with much lower -limits to avoid excess resource utilization in the event of -bug in higher-level SQL statement generators or to help thwart -attackers who inject malicious SQL statements. -

    -} -proc limititem {title text} { - puts "
  • $title

    \n$text
  • " -} -puts { -
      -} - -limititem {Maximum length of a string or BLOB} { -

      -The maximum number of bytes in a string or BLOB in SQLite is defined -by the preprocessor macro SQLITE_MAX_LENGTH. The default value -of this macro is 1 billion (1 thousand million or 1,000,000,000). -You can raise or lower this value at compile-time using a command-line -option like this: -

      - -
      -DSQLITE_MAX_LENGTH=123456789
      - -

      -The current implementation will only support a string or BLOB -length up to 231-1 or 2147483647. And -some built-in functions such as hex() might fail well before that -point. In security-sensitive applications it is best not to -try to increase the maximum string and blob length. In fact, -you might do well to lower the maximum string and blob length -to something more in the range of a few million if that is -possible. -

      - -

      -During part of SQLite's INSERT and SELECT processing, the complete -content of each row in the database is encoded as a single BLOB. -So the SQLITE_MAX_LENGTH parameter also determines the maximum -number of bytes in a row. -

      -} - -limititem {Maximum Number Of Columns} { -

      -The SQLITE_MAX_COLUMN compile-time parameter is used to set an upper -bound on: -

      - -
        -
      • The number of columns in a table
      • -
      • The number of columns in an index
      • -
      • The number of columns in a view
      • -
      • The number of terms in the SET clause of an UPDATE statement
      • -
      • The number of columns in the result set of a SELECT statement
      • -
      • The number of terms in a GROUP BY or ORDER BY clause
      • -
      • The number of values in an INSERT statement
      • -
      - -

      -The default setting for SQLITE_MAX_COLUMN is 2000. You can change it -at compile time to values as large as 32676. You might be able to -redefine this value to be as large as billions, though nobody has ever -tried doing that so we do not know if it will work. On the other hand, there -are people who will argue that a well-normalized database design -will never need a value larger than about 100. -

      - -

      -In most applications, the number of columns is small - a few dozen. -There are places in the SQLite code generator that use algorithms -that are O(N²) where N is the number of columns. -So if you redefine SQLITE_MAX_COLUMN to be a -really huge number and you generate SQL that uses a large number of -columns, you may find that -sqlite3_prepare_v2() -runs slowly. -} - -limititem {Maximum Length Of An SQL Statement} { -

      -The maximum number of bytes in the text of an SQL statement is -limited to SQLITE_MAX_SQL_LENGTH which defaults to 1000000. You -can redefine this limit to be as large as the smaller of SQLITE_MAX_LENGTH -and 1073741824. -

      - -

      -If an SQL statement is limited to be a million bytes in length, then -obviously you will not be able to insert multi-million byte strings -by embedding them as literals inside of INSERT statements. But -you should not do that anyway. Use host parameters -for your data. Prepare short SQL statements like this: -

      - -
      -INSERT INTO tab1 VALUES(?,?,?); -
      - -

      -Then use the -sqlite3_bind_XXXX() functions -to bind your large string values to the SQL statement. The use of binding -obviates the need to escape quote characters in the string, reducing the -risk of SQL injection attacks. It is also runs faster since the large -string does not need to be parsed or copied as much. -

      -} - -limititem {Maximum Number Of Tables In A Join} { -

      -SQLite does not support joins containing more than 64 tables. -This limit arises from the fact that the SQLite code generator -uses bitmaps with one bit per join-table in the query optimizer. -

      -} - -limititem {Maximum Depth Of An Expression Tree} { -

      -SQLite parses expressions into a tree for processing. During -code generation, SQLite walks this tree recursively. The depth -of expression trees is therefore limited in order to avoid -using too much stack space. -

      - -

      -The SQLITE_MAX_EXPR_DEPTH parameter determines the maximum expression -tree depth. If the value is 0, then no limit is enforced. The -current implementation has a default value of 1000. -

      -} - -limititem {Maximum Number Of Arguments On A Function} { -

      -The SQLITE_MAX_FUNCTION_ARG parameter determines the maximum number -of parameters that can be passed to an SQL function. The default value -of this limit is 100. We know of no -technical reason why SQLite would not work with functions that have -millions of parameters. However, we suspect that anybody who tries -to invoke a function with millions of parameters is really -trying to find security exploits in systems that use SQLite, -not do useful work, -and so for that reason we have set this parameter relatively low. -} - -limititem {Maximum Number Of Terms In A Compound SELECT Statement} { -

      -A compound SELECT statement is two or more SELECT statements connected -by operators UNION, UNION ALL, EXCEPT, or INTERSECT. We call each -individual SELECT statement within a compound SELECT a "term". -

      - -

      -The code generator in SQLite processes compound SELECT statements using -a recursive algorithm. In order to limit the size of the stack, we -therefore limit the number of terms in a compound SELECT. The maximum -number of terms is SQLITE_MAX_COMPOUND_SELECT which defaults to 500. -We think this is a generous allotment since in practice we almost -never see the number of terms in a compound select exceed single digits. -

      -} - -limititem {Maximum Length Of A LIKE Or GLOB Pattern} { -

      -The pattern matching algorithm used in the default LIKE and GLOB -implementation of SQLite can exhibit O(N²) performance (where -N is the number of characters in the pattern) for certain pathological -cases. To avoid denial-of-service attacks from miscreants who are able -to specify their own LIKE or GLOB patterns, the length of the LIKE -or GLOB pattern is limited to SQLITE_MAX_LIKE_PATTERN_LENGTH bytes. -The default value of this limit is 50000. A modern workstation can -evaluate even a pathological LIKE or GLOB pattern of 50000 bytes -relatively quickly. The denial of service problem only comes into -play when the pattern length gets into millions of bytes. Nevertheless, -since most useful LIKE or GLOB patterns are at most a few dozen bytes -in length, paranoid application developers may want to reduce this -parameter to something in the range of a few hundred if they know that -external users are able to generate arbitrary patterns. -

      -} - -limititem {Maximum Number Of Host Parameters In A Single SQL Statement} { -

      -A host parameter is a place-holder in an SQL statement that is filled -in using one of the -sqlite3_bind_XXXX() interfaces. -Many SQL programmers are familiar with using a question mark ("?") as a -host parameter. SQLite also supports named host parameters prefaced -by ":", "$", or "@" and numbered host parameters of the form "?123". -

      - -

      -Each host parameter in an SQLite statement is assigned a number. The -numbers normally begin with 1 and increase by one with each new -parameter. However, when the "?123" form is used, the host parameter -number is the number that follows the question mark. -

      - -

      -The maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER. -This setting defaults to 999. -

      -} - -limititem {Maximum Number Of Attached Databases} { -

      -The ATTACH statement is an SQLite extension -that allows two or more databases to be associated to the same database -connection and to operate as if they were a single database. The number -of simulataneously attached databases is limited to SQLITE_MAX_ATTACHED -which is set to 10 by default. -The code generator in SQLite uses bitmaps -to keep track of attached databases. That means that the number of -attached databases cannot be increased above 30 on a 32-bit machine -or 62 on a 64-bit machine. -} - -limititem {Maximum Database Page Size} { -

      -An SQLite database file is organized as pages. The size of each -page is a power of 2 between 512 and SQLITE_MAX_PAGE_SIZE. -The default value for SQLITE_MAX_PAGE_SIZE is 32768. The current -implementation will not support a larger value. -

      - -

      -It used to be the case that SQLite would allocate some stack -structures whose size was proportional to the maximum page size. -For this reason, SQLite would sometimes be compiled with a smaller -maximum page size on embedded devices with limited stack memory. But -more recent versions of SQLite put these large structures on the -heap, not on the stack, so reducing the maximum page size is no -longer necessary on embedded devices. -

      -} - -limititem {Maximum Number Of Pages In A Database File} { -

      -SQLite is able to limit the size of a database file to prevent -the database file from growing too large and consuming too much -disk or flash space. -The SQLITE_MAX_PAGE_COUNT parameter, which is normally set to -1073741823, is the maximum number of pages allowed in a single -database file. An attempt to insert new data that would cause -the database file to grow larger than this will return -SQLITE_FULL. -

      - -

      -The -max_page_count PRAGMA can be used to raise or lower this -limit at run-time. -

      - -

      -Note that the transaction processing in SQLite requires two bits -of heap memory for every page in the database file. For databases -of a few megabytes in size, this amounts to only a few hundred -bytes of heap memory. But for gigabyte-sized databases the amount -of heap memory required is getting into the kilobyte range and -for terabyte-sized databases, megabytes of heap memory must be -allocated and zeroed at each transaction. SQLite will -support very large databases in theory, but the current implementation -is optimized for the common SQLite use cases of embedded devices -and persistent stores for desktop applications. In other words, -SQLite is designed for use with databases sized in kilobytes or -megabytes not gigabytes. If you are building an application to -work with databases that are hundreds of gigabytes or more -in size, then you should perhaps consider using a different database -engine that is explicitly designed for such large data sets. -

      -} - -puts {
    } -footer $rcsid DELETED lockingv3.tcl Index: lockingv3.tcl ================================================================== --- lockingv3.tcl +++ /dev/null @@ -1,570 +0,0 @@ -# -# Run this script to generated a lockingv3.html output file -# -set rcsid {$Id: } -source common.tcl -header {File Locking And Concurrency In SQLite Version 3} - -proc HEADING {level title {label {}}} { - global pnum - incr pnum($level) - foreach i [array names pnum] { - if {$i>$level} {set pnum($i) 0} - } - set h [expr {$level+1}] - if {$h>6} {set h 6} - set n $pnum(1).$pnum(2) - for {set i 3} {$i<=$level} {incr i} { - append n .$pnum($i) - } - if {$label!=""} { - puts "" - } - puts "$n $title" -} -set pnum(1) 0 -set pnum(2) 0 -set pnum(3) 0 -set pnum(4) 0 -set pnum(5) 0 -set pnum(6) 0 -set pnum(7) 0 -set pnum(8) 0 - -HEADING 1 {File Locking And Concurrency In SQLite Version 3} - -puts { -

    Version 3 of SQLite introduces a more complex locking and journaling -mechanism designed to improve concurrency and reduce the writer starvation -problem. The new mechanism also allows atomic commits of transactions -involving multiple database files. -This document describes the new locking mechanism. -The intended audience is programmers who want to understand and/or modify -the pager code and reviewers working to verify the design -of SQLite version 3. -

    -} - -HEADING 1 {Overview} overview - -puts { -

    -Locking and concurrency control are handled by the the - -pager module. -The pager module is responsible for making SQLite "ACID" (Atomic, -Consistent, Isolated, and Durable). The pager module makes sure changes -happen all at once, that either all changes occur or none of them do, -that two or more processes do not try to access the database -in incompatible ways at the same time, and that once changes have been -written they persist until explicitly deleted. The pager also provides -an memory cache of some of the contents of the disk file.

    - -

    The pager is unconcerned -with the details of B-Trees, text encodings, indices, and so forth. -From the point of view of the pager the database consists of -a single file of uniform-sized blocks. Each block is called a -"page" and is usually 1024 bytes in size. The pages are numbered -beginning with 1. So the first 1024 bytes of the database are called -"page 1" and the second 1024 bytes are call "page 2" and so forth. All -other encoding details are handled by higher layers of the library. -The pager communicates with the operating system using one of several -modules -(Examples: - -os_unix.c, - -os_win.c) -that provides a uniform abstraction for operating system services. -

    - -

    The pager module effectively controls access for separate threads, or -separate processes, or both. Throughout this document whenever the -word "process" is written you may substitute the word "thread" without -changing the truth of the statement.

    -} - -HEADING 1 {Locking} locking - -puts { -

    -From the point of view of a single process, a database file -can be in one of five locking states: -

    - -

    - - - - - - - - - - - - - - - -
    UNLOCKED -No locks are held on the database. The database may be neither read nor -written. Any internally cached data is considered suspect and subject to -verification against the database file before being used. Other -processes can read or write the database as their own locking states -permit. This is the default state. -
    SHARED -The database may be read but not written. Any number of -processes can hold SHARED locks at the same time, hence there can be -many simultaneous readers. But no other thread or process is allowed -to write to the database file while one or more SHARED locks are active. -
    RESERVED -A RESERVED lock means that the process is planning on writing to the -database file at some point in the future but that it is currently just -reading from the file. Only a single RESERVED lock may be active at one -time, though multiple SHARED locks can coexist with a single RESERVED lock. -RESERVED differs from PENDING in that new SHARED locks can be acquired -while there is a RESERVED lock. -
    PENDING -A PENDING lock means that the process holding the lock wants to write -to the database as soon as possible and is just waiting on all current -SHARED locks to clear so that it can get an EXCLUSIVE lock. No new -SHARED locks are permitted against the database if -a PENDING lock is active, though existing SHARED locks are allowed to -continue. -
    EXCLUSIVE -An EXCLUSIVE lock is needed in order to write to the database file. -Only one EXCLUSIVE lock is allowed on the file and no other locks of -any kind are allowed to coexist with an EXCLUSIVE lock. In order to -maximize concurrency, SQLite works to minimize the amount of time that -EXCLUSIVE locks are held. -
    -

    - -

    -The operating system interface layer understands and tracks all five -locking states described above. -The pager module only tracks four of the five locking states. -A PENDING lock is always just a temporary -stepping stone on the path to an EXCLUSIVE lock and so the pager module -does not track PENDING locks. -

    -} - -HEADING 1 {The Rollback Journal} rollback - -puts { -

    Any time a process wants to make a changes to a database file, it -first records enough information in the rollback journal to -restore the database file back to its initial condition. Thus, before -altering any page of the database, the original contents of that page -must be written into the journal. The journal also records the initial -size of the database so that if the database file grows it can be truncated -back to its original size on a rollback.

    - -

    The rollback journal is a ordinary disk file that has the same name as -the database file with the suffix "-journal" added.

    - -

    If SQLite is working with multiple databases at the same time -(using the ATTACH command) then each database has its own journal. -But there is also a separate aggregate journal -called the master journal. -The master journal does not contain page data used for rolling back -changes. Instead the master journal contains the names of the -individual file journals for each of the ATTACHed databases. Each of -the individual file journals also contain the name of the master journal. -If there are no ATTACHed databases (or if none of the ATTACHed database -is participating in the current transaction) no master journal is -created and the normal rollback journal contains an empty string -in the place normally reserved for recording the name of the master -journal.

    - -

    An individual file journal is said to be hot
-if it needs to be rolled back
-in order to restore the integrity of its database.
-A hot journal is created when a process is in the middle of a database
-update and a program or operating system crash or power failure prevents
-the update from completing.
-Hot journals are an exception condition.
-Hot journals exist to recover from crashes and power failures.
-If everything is working correctly
-(that is, if there are no crashes or power failures)
-you will never get a hot journal.
-

    - -

    -If no master journal is involved, then -a journal is hot if it exists and its corresponding database file -does not have a RESERVED lock. -If a master journal is named in the file journal, then the file journal -is hot if its master journal exists and there is no RESERVED -lock on the corresponding database file. -It is important to understand when a journal is hot so the -preceding rules will be repeated in bullets: -

    - -
      -
    • A journal is hot if... -
        -
      • It exists, and
      • -
      • Its master journal exists or the master journal name is an
- empty string, and
      • -
      • There is no RESERVED lock on the corresponding database file.
      • -
      -
    • -
    -} - -HEADING 2 {Dealing with hot journals} hot_journals - -puts { -

    -Before reading from a database file, SQLite always checks to see if that
-database file has a hot journal. If the file does have a hot journal, then
-the journal is rolled back before the file is read. In this way, we ensure
-that the database file is in a consistent state before it is read.
-

    - -

    When a process wants to read from a database file, it follows
-the following sequence of steps:
-

    - -
      -
    1. Open the database file and obtain a SHARED lock. If the SHARED lock - cannot be obtained, fail immediately and return SQLITE_BUSY.
    2. -
    3. Check to see if the database file has a hot journal. If the file - does not have a hot journal, we are done. Return immediately. - If there is a hot journal, that journal must be rolled back by - the subsequent steps of this algorithm.
    4. -
    5. Acquire a PENDING lock then an EXCLUSIVE lock on the database file. - (Note: Do not acquire a RESERVED lock because that would make - other processes think the journal was no longer hot.) If we - fail to acquire these locks it means another process - is already trying to do the rollback. In that case, - drop all locks, close the database, and return SQLITE_BUSY.
    6. -
    7. Read the journal file and roll back the changes.
    8. -
    9. Wait for the rolled back changes to be written onto - the surface of the disk. This protects the integrity of the database - in case another power failure or crash occurs.
    10. -
    11. Delete the journal file.
    12. -
    13. Delete the master journal file if it is safe to do so. - This step is optional. It is here only to prevent stale - master journals from cluttering up the disk drive. - See the discussion below for details.
    14. -
    15. Drop the EXCLUSIVE and PENDING locks but retain the SHARED lock.
    16. -
    - -

    After the algorithm above completes successfully, it is safe to -read from the database file. Once all reading has completed, the -SHARED lock is dropped.

    -} - -HEADING 2 {Deleting stale master journals} stale_master_journals - -puts { -

    A stale master journal is a master journal that is no longer being -used for anything. There is no requirement that stale master journals -be deleted. The only reason for doing so is to free up disk space.

    - -

    A master journal is stale if no individual file journals are pointing -to it. To figure out if a master journal is stale, we first read the -master journal to obtain the names of all of its file journals. Then -we check each of those file journals. If any of the file journals named -in the master journal exists and points back to the master journal, then -the master journal is not stale. If all file journals are either missing -or refer to other master journals or no master journal at all, then the -master journal we are testing is stale and can be safely deleted.

    -} - -HEADING 1 {Writing to a database file} writing - -puts { -

    To write to a database, a process must first acquire a SHARED lock -as described above (possibly rolling back incomplete changes if there -is a hot journal). -After a SHARED lock is obtained, a RESERVED lock must be acquired. -The RESERVED lock signals that the process intends to write to the -database at some point in the future. Only one process at a time -can hold a RESERVED lock. But other processes can continue to read -the database while the RESERVED lock is held. -

    - -

    If the process that wants to write is unable to obtain a RESERVED -lock, it must mean that another process already has a RESERVED lock. -In that case, the write attempt fails and returns SQLITE_BUSY.

    - -

    After obtaining a RESERVED lock, the process that wants to write -creates a rollback journal. The header of the journal is initialized -with the original size of the database file. Space in the journal header -is also reserved for a master journal name, though the master journal -name is initially empty.

    - -

    Before making changes to any page of the database, the process writes -the original content of that page into the rollback journal. Changes -to pages are held in memory at first and are not written to the disk. -The original database file remains unaltered, which means that other -processes can continue to read the database.

    - -

    Eventually, the writing process will want to update the database -file, either because its memory cache has filled up or because it is -ready to commit its changes. Before this happens, the writer must -make sure no other process is reading the database and that the rollback -journal data is safely on the disk surface so that it can be used to -rollback incomplete changes in the event of a power failure. -The steps are as follows:

    - -
      -
    1. Make sure all rollback journal data has actually been written to - the surface of the disk (and is not just being held in the operating - system's or disk controllers cache) so that if a power failure occurs - the data will still be there after power is restored.
    2. -
    3. Obtain a PENDING lock and then an EXCLUSIVE lock on the database file.
- If other processes still have SHARED locks, the writer might have
- to wait until those SHARED locks clear before it is able to obtain
- an EXCLUSIVE lock.
    4. -
    5. Write all page modifications currently held in memory out to the - original database disk file.
    6. -
    - -

    -If the reason for writing to the database file is because the memory -cache was full, then the writer will not commit right away. Instead, -the writer might continue to make changes to other pages. Before -subsequent changes are written to the database file, the rollback -journal must be flushed to disk again. Note also that the EXCLUSIVE -lock that the writer obtained in order to write to the database initially -must be held until all changes are committed. That means that no other -processes are able to access the database from the -time the memory cache first spills to disk until the transaction -commits. -

    - -

    -When a writer is ready to commit its changes, it executes the following -steps: -

    - -
      -
    1. - Obtain an EXCLUSIVE lock on the database file and - make sure all memory changes have been written to the database file - using the algorithm of steps 1-3 above.
    2. -
    3. Flush all database file changes to the disk. Wait for those changes - to actually be written onto the disk surface.
    4. -
    5. Delete the journal file. This is the instant when the changes are - committed. Prior to deleting the journal file, if a power failure - or crash occurs, the next process to open the database will see that - it has a hot journal and will roll the changes back. - After the journal is deleted, there will no longer be a hot journal - and the changes will persist. -
    6. -
    7. Drop the EXCLUSIVE and PENDING locks from the database file. -
    8. -
    - -

    As soon as the PENDING lock is released from the database file, other
-processes can begin reading the database again. In the current implementation,
-the RESERVED lock is also released, but that is not essential. Future
-versions of SQLite might provide a "CHECKPOINT" SQL command that will
-commit all changes made so far within a transaction but retain the
-RESERVED lock so that additional changes can be made without giving
-any other process an opportunity to write.

    - -

    If a transaction involves multiple databases, then a more complex -commit sequence is used, as follows:

    - -
      -
    1. - Make sure all individual database files have an EXCLUSIVE lock and a - valid journal. -
    2. Create a master-journal. The name of the master-journal is arbitrary. - (The current implementation appends random suffixes to the name of the - main database file until it finds a name that does not previously exist.) - Fill the master journal with the names of all the individual journals - and flush its contents to disk. -
    3. Write the name of the master journal into - all individual journals (in space set aside for that purpose in the - headers of the individual journals) and flush the contents of the - individual journals to disk and wait for those changes to reach the - disk surface. -
    4. Flush all database file changes to the disk. Wait for those changes - to actually be written onto the disk surface.
    5. -
    6. Delete the master journal file. This is the instant when the changes are - committed. Prior to deleting the master journal file, if a power failure - or crash occurs, the individual file journals will be considered hot - and will be rolled back by the next process that - attempts to read them. After the master journal has been deleted, - the file journals will no longer be considered hot and the changes - will persist. -
    7. -
    8. Delete all individual journal files. -
    9. Drop the EXCLUSIVE and PENDING locks from all database files. -
    10. -
    -} - -HEADING 2 {Writer starvation} writer_starvation - -puts { -

    In SQLite version 2, if many processes are reading from the database, -it might be the case that there is never a time when there are -no active readers. And if there is always at least one read lock on the -database, no process would ever be able to make changes to the database -because it would be impossible to acquire a write lock. This situation -is called writer starvation.

    - -

    SQLite version 3 seeks to avoid writer starvation through the use of -the PENDING lock. The PENDING lock allows existing readers to continue -but prevents new readers from connecting to the database. So when a -process wants to write a busy database, it can set a PENDING lock which -will prevent new readers from coming in. Assuming existing readers do -eventually complete, all SHARED locks will eventually clear and the -writer will be given a chance to make its changes.

    -} - -HEADING 1 {How To Corrupt Your Database Files} how_to_corrupt - -puts { -

    The pager module is robust but it is not completely failsafe. -It can be subverted. This section attempts to identify and explain -the risks.

    - -

    -Clearly, a hardware or operating system fault that introduces incorrect data -into the middle of the database file or journal will cause problems. -Likewise, -if a rogue process opens a database file or journal and writes malformed -data into the middle of it, then the database will become corrupt. -There is not much that can be done about these kinds of problems -so they are given no further attention. -

    - -

    -SQLite uses POSIX advisory locks to implement locking on Unix. On -windows it uses the LockFile(), LockFileEx(), and UnlockFile() system -calls. SQLite assumes that these system calls all work as advertised. If -that is not the case, then database corruption can result. One should -note that POSIX advisory locking is known to be buggy or even unimplemented -on many NFS implementations (including recent versions of Mac OS X) -and that there are reports of locking problems -for network filesystems under windows. Your best defense is to not -use SQLite for files on a network filesystem. -

    - -

    -SQLite uses the fsync() system call to flush data to the disk under Unix and
-it uses the FlushFileBuffers() to do the same under windows. Once again,
-SQLite assumes that these operating system services function as advertised.
-But it has been reported that fsync() and FlushFileBuffers() do not always
-work correctly, especially with inexpensive IDE disks. Apparently some
-manufacturers of IDE disks have defective controller chips that report
-that data has reached the disk surface when in fact the data is still
-in volatile cache memory in the disk drive electronics. There are also
-reports that windows sometimes chooses to ignore FlushFileBuffers() for
-unspecified reasons. The author cannot verify any of these reports.
-But if they are true, it means that database corruption is a possibility
-following an unexpected power loss. These are hardware and/or operating
-system bugs that SQLite is unable to defend against.
-

    - -

    -If a crash or power failure occurs and results in a hot journal but that -journal is deleted, the next process to open the database will not -know that it contains changes that need to be rolled back. The rollback -will not occur and the database will be left in an inconsistent state. -Rollback journals might be deleted for any number of reasons: -

    - -
      -
    • An administrator might be cleaning up after an OS crash or power failure, - see the journal file, think it is junk, and delete it.
    • -
    • Someone (or some process) might rename the database file but fail to - also rename its associated journal.
    • -
    • If the database file has aliases (hard or soft links) and the file - is opened by a different alias than the one used to create the journal, - then the journal will not be found. To avoid this problem, you should - not create links to SQLite database files.
    • -
    • Filesystem corruption following a power failure might cause the - journal to be renamed or deleted.
    • -
    - -

    -The last (fourth) bullet above merits additional comment. When SQLite creates
-a journal file on Unix, it opens the directory that contains that file and
-calls fsync() on the directory, in an effort to push the directory information
-to disk. But suppose some other process is adding or removing unrelated
-files to the directory that contains the database and journal at the
-moment of a power failure. The supposedly unrelated actions of this other
-process might result in the journal file being dropped from the directory and
-moved into "lost+found". This is an unlikely scenario, but it could happen.
-The best defenses are to use a journaling filesystem or to keep the
-database and journal in a directory by themselves.
-

    - -

    -For a commit involving multiple databases and a master journal, if the -various databases were on different disk volumes and a power failure occurs -during the commit, then when the machine comes back up the disks might -be remounted with different names. Or some disks might not be mounted -at all. When this happens the individual file journals and the master -journal might not be able to find each other. The worst outcome from -this scenario is that the commit ceases to be atomic. -Some databases might be rolled back and others might not. -All databases will continue to be self-consistent. -To defend against this problem, keep all databases -on the same disk volume and/or remount disks using exactly the same names -after a power failure. -

    -} - -HEADING 1 {Transaction Control At The SQL Level} transaction_control - -puts { -

    -The changes to locking and concurrency control in SQLite version 3 also -introduce some subtle changes in the way transactions work at the SQL -language level. -By default, SQLite version 3 operates in autocommit mode. -In autocommit mode, -all changes to the database are committed as soon as all operations associated -with the current database connection complete.

    - -

    The SQL command "BEGIN TRANSACTION" (the TRANSACTION keyword
-is optional) is used to take SQLite out of autocommit mode.
-Note that the BEGIN command does not acquire any locks on the database.
-After a BEGIN command, a SHARED lock will be acquired when the first
-SELECT statement is executed. A RESERVED lock will be acquired when
-the first INSERT, UPDATE, or DELETE statement is executed. No EXCLUSIVE
-lock is acquired until either the memory cache fills up and must
-be spilled to disk or until the transaction commits. In this way,
-the system delays blocking read access to the file until the
-last possible moment.
-

    - -

    The SQL command "COMMIT" does not actually commit the changes to -disk. It just turns autocommit back on. Then, at the conclusion of -the command, the regular autocommit logic takes over and causes the -actual commit to disk to occur. -The SQL command "ROLLBACK" also operates by turning autocommit back on, -but it also sets a flag that tells the autocommit logic to rollback rather -than commit.

    - -

    If the SQL COMMIT command turns autocommit on and the autocommit logic -then tries to commit change but fails because some other process is holding -a SHARED lock, then autocommit is turned back off automatically. This -allows the user to retry the COMMIT at a later time after the SHARED lock -has had an opportunity to clear.

    - -

    If multiple commands are being executed against the same SQLite database -connection at the same time, the autocommit is deferred until the very -last command completes. For example, if a SELECT statement is being -executed, the execution of the command will pause as each row of the -result is returned. During this pause other INSERT, UPDATE, or DELETE -commands can be executed against other tables in the database. But none -of these changes will commit until the original SELECT statement finishes. -

    -} - - -footer $rcsid Index: main.mk ================================================================== --- main.mk +++ main.mk @@ -20,18 +20,12 @@ -e s/--VERSION-NUMBER--/`cat ${SRC}/VERSION | \ sed 's/[^0-9]/ /g' | \ $(NAWK) '{printf "%d%03d%03d",$$1,$$2,$$3}'`/ \ $(SRC)/src/sqlite.h.in >sqlite3.h - -# Rules used to build documentation -# -%.html: $(DOC)/%.tcl common.tcl - tclsh $< > $@ - -common.tcl: $(DOC)/common.tcl - cp $(DOC)/common.tcl . +wrap.tcl: $(DOC)/wrap.tcl + cp $(DOC)/wrap.tcl . lang.html: $(DOC)/lang.tcl tclsh $(DOC)/lang.tcl doc >lang.html opcode.html: $(DOC)/opcode.tcl $(SRC)/src/vdbe.c @@ -38,68 +32,19 @@ tclsh $(DOC)/opcode.tcl $(SRC)/src/vdbe.c >opcode.html capi3ref.html: $(DOC)/mkapidoc.tcl sqlite3.h tclsh $(DOC)/mkapidoc.tcl capi3ref.html -copyright-release.html: $(DOC)/copyright-release.html - cp $(DOC)/copyright-release.html . - -copyright-release.pdf: $(DOC)/copyright-release.pdf - cp $(DOC)/copyright-release.pdf . - -#%: $(DOC)/% -# cp $< $@ - -# Files to be published on the website. 
-# -DOCFILES = \ - arch.html \ - autoinc.html \ - c_interface.html \ - capi3.html \ - capi3ref.html \ - changes.html \ - compile.html \ - copyright.html \ - copyright-release.html \ - copyright-release.pdf \ - conflict.html \ - datatypes.html \ - datatype3.html \ - different.html \ - docs.html \ - download.html \ - faq.html \ - fileformat.html \ - formatchng.html \ - index.html \ - limits.html \ - lang.html \ - lockingv3.html \ - mingw.html \ - nulls.html \ - oldnews.html \ - omitted.html \ - opcode.html \ - optimizer.html \ - optoverview.html \ - pragma.html \ - quickstart.html \ - sharedcache.html \ - speed.html \ - sqlite.html \ - support.html \ - tclsqlite.html \ - vdbe.html \ - version3.html \ - whentouse.html \ - 34to35.html - -docdir: - mkdir -p doc - -doc: docdir $(DOCFILES) - mv $(DOCFILES) doc - cp $(DOC)/*.gif $(SRC)/art/*.gif doc +docdir: + mkdir -p doc + +doc: docdir always + rm -rf doc/images + cp -r $(DOC)/images doc + cp $(DOC)/rawpages/* doc + tclsh $(DOC)/wrap.tcl $(DOC) $(SRC) doc $(DOC)/pages/*.in + +always: + clean: rm -f doc sqlite3.h DELETED mingw.tcl Index: mingw.tcl ================================================================== --- mingw.tcl +++ /dev/null @@ -1,160 +0,0 @@ -# -# Run this Tcl script to generate the mingw.html file. -# -set rcsid {$Id: mingw.tcl,v 1.4 2003/03/30 18:58:58 drh Exp $} - -puts { - - Notes On How To Build MinGW As A Cross-Compiler - - -

    -Notes On How To Build MinGW As A Cross-Compiler -

    } -puts "

    -(This page was last modified on [lrange $rcsid 3 4] UTC) -

    " - -puts { -

    MinGW or -Minimalist GNU For Windows -is a version of the popular GCC compiler that builds Win95/Win98/WinNT -binaries. See the website for details.

    - -

    This page describes how you can build MinGW -from sources as a cross-compiler -running under Linux. Doing so will allow you to construct -WinNT binaries from the comfort and convenience of your -Unix desktop.

    -} - -proc Link {path {file {}}} { - if {$file!=""} { - set path $path/$file - } else { - set file $path - } - puts "$file" -} - -puts { -

    Here are the steps:

    - -
      -
    1. -

      Get a copy of source code. You will need the binutils, the -compiler, and the MinGW runtime. Each are available separately. -As of this writing, Mumit Khan has collected everything you need -together in one FTP site: -} -set ftpsite \ - ftp://ftp.nanotech.wisc.edu/pub/khan/gnu-win32/mingw32/snapshots/gcc-2.95.2-1 -Link $ftpsite -puts { -The three files you will need are:

      -
        -
      • } -Link $ftpsite binutils-19990818-1-src.tar.gz -puts
      • -Link $ftpsite gcc-2.95.2-1-src.tar.gz -puts
      • -Link $ftpsite mingw-20000203.zip -puts {
      • -
      - -

      Put all the downloads in a directory out of the way. The sequel -will assume all downloads are in a directory named -~/mingw/download.

      -
    2. - -
    3. -

      -Create a directory in which to install the new compiler suite and make -the new directory writable. -Depending on what directory you choose, you might need to become -root. The example shell commands that follow -will assume the installation directory is -/opt/mingw and that your user ID is drh.

      -
      -su
      -mkdir /opt/mingw
      -chown drh /opt/mingw
      -exit
      -
      -
    4. - -
    5. -

      Unpack the source tarballs into a separate directory.

      -
      -mkdir ~/mingw/src
      -cd ~/mingw/src
      -tar xzf ../download/binutils-*.tar.gz
      -tar xzf ../download/gcc-*.tar.gz
      -unzip ../download/mingw-*.zip
      -
      -
    6. - -
    7. -

      Create a directory in which to put all the build products.

      -
      -mkdir ~/mingw/bld
      -
      -
    8. - -
    9. -

      Configure and build binutils and add the results to your PATH.

      -
      -mkdir ~/mingw/bld/binutils
      -cd ~/mingw/bld/binutils
      -../../src/binutils/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      -make 2>&1 | tee make.out
      -make install 2>&1 | tee make-install.out
      -export PATH=$PATH:/opt/mingw/bin
      -
      -
    10. - -
    11. -

      Manually copy the runtime include files into the installation directory -before trying to build the compiler.

      -
      -mkdir /opt/mingw/i386-mingw32/include
      -cd ~/mingw/src/mingw-runtime*/mingw/include
      -cp -r * /opt/mingw/i386-mingw32/include
      -
      -
    12. - -
    13. -

      Configure and build the compiler

      -
      -mkdir ~/mingw/bld/gcc
      -cd ~/mingw/bld/gcc
      -../../src/gcc-*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      -cd gcc
      -make installdirs
      -cd ..
      -make 2>&1 | tee make.out
      -make install
      -
      -
    14. - -
    15. -

      Configure and build the MinGW runtime

      -
      -mkdir ~/mingw/bld/runtime
      -cd ~/mingw/bld/runtime
      -../../src/mingw-runtime*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      -make install-target-w32api
      -make install
      -
      -
    16. -
    - -

    And you are done...

    -} -puts { -


    -

    -Back to the SQLite Home Page -

    - -} DELETED mkapidoc.tcl Index: mkapidoc.tcl ================================================================== --- mkapidoc.tcl +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/tclsh -# -# Run this script redirecting the sqlite3.h file as standard -# inputs and this script will generate API documentation. -# -set rcsid {$Id: mkapidoc.tcl,v 1.2 2007/06/20 09:09:48 danielk1977 Exp $} -source common.tcl -header {C/C++ Interface For SQLite Version 3} -puts { -

    C/C++ Interface For SQLite Version 3

    -} - -# Scan standard input to extract the information we need -# to build the documentation. -# -set title {} -set type {} -set body {} -set code {} -set phase 0 -set content {} -while {![eof stdin]} { - set line [gets stdin] - if {$phase==0} { - # Looking for the CAPI3REF: keyword - if {[regexp {^\*\* CAPI3REF: +(.*)} $line all tx]} { - set title $tx - set phase 1 - } - } elseif {$phase==1} { - if {[string range $line 0 1]=="**"} { - set lx [string trim [string range $line 3 end]] - if {[regexp {^CATEGORY: +([a-z]*)} $lx all cx]} { - set type $cx - } elseif {[regexp {^KEYWORDS: +(.*)} $lx all kx]} { - foreach k $kx { - set keyword($k) 1 - } - } else { - append body $lx\n - } - } elseif {[string range $line 0 1]=="*/"} { - set phase 2 - } - } elseif {$phase==2} { - if {$line==""} { - set kwlist [lsort [array names keyword]] - unset -nocomplain keyword - set key $type:$kwlist - lappend content [list $key $title $type $kwlist $body $code] - set title {} - set keywords {} - set type {} - set body {} - set code {} - set phase 0 - } else { - if {[regexp {^#define (SQLITE_[A-Z0-9_]+)} $line all kx]} { - set type constant - set keyword($kx) 1 - } elseif {[regexp {^typedef .* (sqlite[0-9a-z_]+);} $line all kx]} { - set type datatype - set keyword($kx) 1 - } elseif {[regexp {^[a-z].*[ *](sqlite3_[a-z0-9_]+)\(} $line all kx]} { - set type function - set keyword($kx) 1 - } - append code $line\n - } - } -} - -# Output HTML that displays the given list in N columns -# -proc output_list {N lx} { - puts {} - set len [llength $lx] - set n [expr {($len + $N - 1)/$N}] - for {set i 0} {$i<$N} {incr i} { - set start [expr {$i*$n}] - set end [expr {($i+1)*$n}] - puts {} - } - puts {
      } - for {set j $start} {$j<$end} {incr j} { - set entry [lindex $lx $j] - if {$entry!=""} { - foreach {link label} $entry break - puts "
    • $label
    • " - } - } - puts {
    } -} - -# Do a table of contents for objects -# -set objlist {} -foreach c $content { - foreach {key title type keywords body code} $c break - if {$type!="datatype"} continue - set keywords [lsort $keywords] - set k [lindex $keywords 0] - foreach kw $keywords { - lappend objlist [list $k $kw] - } -} -puts {

    Datatypes:

    } -output_list 3 $objlist -puts {
    } - -# Do a table of contents for constants -# -set clist {} -foreach c $content { - foreach {key title type keywords body code} $c break - if {$type!="constant"} continue - set keywords [lsort $keywords] - set k [lindex $keywords 0] - foreach kw $keywords { - lappend clist [list $k $kw] - } -} -puts {

    Constants:

    } -set clist [lsort -index 1 $clist] -output_list 3 $clist -puts {
    } - - -# Do a table of contents for functions -# -set funclist {} -foreach c $content { - foreach {key title type keywords body code} $c break - if {$type!="function"} continue - set keywords [lsort $keywords] - set k [lindex $keywords 0] - foreach kw $keywords { - lappend funclist [list $k $kw] - } -} -puts {

    Functions:

    } -set funclist [lsort -index 1 $funclist] -output_list 3 $funclist -puts {
    } - -# Resolve links -# -proc resolve_links {args} { - set tag [lindex $args 0] - regsub -all {[^a-zA-Z0-9_]} $tag {} tag - set x "" - if {[llength $args]>2} { - append x [lrange $args 2 end] - } else { - append x [lindex $args 0] - } - return $x -} - -# Output all the records -# -foreach c [lsort $content] { - foreach {key title type keywords body code} $c break - foreach k $keywords { - puts "" - } - puts "

    $title

    " - puts "
    "
    -  puts "$code"
    -  puts "
    " - regsub -all "\n\n+" $body {

    \1

    } body - regsub -all {\[}

    $body

    {[resolve_links } body - set body [subst -novar -noback $body] - puts "$body" - puts "
    " -} DELETED nulls.tcl Index: nulls.tcl ================================================================== --- nulls.tcl +++ /dev/null @@ -1,329 +0,0 @@ -# -# Run this script to generated a nulls.html output file -# -set rcsid {$Id: nulls.tcl,v 1.8 2004/10/10 17:24:55 drh Exp $} -source common.tcl -header {NULL Handling in SQLite} -puts { -

    NULL Handling in SQLite Versus Other Database Engines

    - -

    -The goal is -to make SQLite handle NULLs in a standards-compliant way. -But the descriptions in the SQL standards on how to handle -NULLs seem ambiguous. -It is not clear from the standards documents exactly how NULLs should -be handled in all circumstances. -

    - -

    -So instead of going by the standards documents, various popular -SQL engines were tested to see how they handle NULLs. The idea -was to make SQLite work like all the other engines. -A SQL test script was developed and run by volunteers on various -SQL RDBMSes and the results of those tests were used to deduce -how each engine processed NULL values. -The original tests were run in May of 2002. -A copy of the test script is found at the end of this document. -

    - -

    -SQLite was originally coded in such a way that the answer to
-all questions in the chart below would be "Yes". But the
-experiments run on other SQL engines showed that none of them
-worked this way. So SQLite was modified to work the same as
-Oracle, PostgreSQL, and DB2. This involved making NULLs
-indistinct for the purposes of the SELECT DISTINCT statement and
-for the UNION operator in a SELECT. NULLs are still distinct
-in a UNIQUE column. This seems somewhat arbitrary, but the desire
-to be compatible with other engines outweighed that objection.
-

    - -

    -It is possible to make SQLite treat NULLs as distinct for the -purposes of the SELECT DISTINCT and UNION. To do so, one should -change the value of the NULL_ALWAYS_DISTINCT #define in the -sqliteInt.h source file and recompile. -

    - -
    -

    -Update 2003-07-13:
-Since this document was originally written some of the database engines
-tested have been updated and users have been kind enough to send in
-corrections to the chart below. The original data showed a wide variety
-of behaviors, but over time the range of behaviors has converged toward
-the PostgreSQL/Oracle model. The only significant difference
-is that Informix and MS-SQL both treat NULLs as
-indistinct in a UNIQUE column.
-

    - -

    -The fact that NULLs are distinct for UNIQUE columns but are indistinct for -SELECT DISTINCT and UNION continues to be puzzling. It seems that NULLs -should be either distinct everywhere or nowhere. And the SQL standards -documents suggest that NULLs should be distinct everywhere. Yet as of -this writing, no SQL engine tested treats NULLs as distinct in a SELECT -DISTINCT statement or in a UNION. -

    -
    - - -

    -The following table shows the results of the NULL handling experiments. -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      SQLitePostgreSQLOracleInformixDB2MS-SQLOCELOT
    Adding anything to null gives nullYesYesYesYesYesYesYes
    Multiplying null by zero gives nullYesYesYesYesYesYesYes
    nulls are distinct in a UNIQUE columnYesYesYesNo(Note 4)NoYes
    nulls are distinct in SELECT DISTINCTNoNoNoNoNoNoNo
    nulls are distinct in a UNIONNoNoNoNoNoNoNo
    "CASE WHEN null THEN 1 ELSE 0 END" is 0?YesYesYesYesYesYesYes
    "null OR true" is trueYesYesYesYesYesYesYes
    "not (null AND false)" is trueYesYesYesYesYesYesYes
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      MySQL
    3.23.41
    MySQL
    4.0.16
    FirebirdSQL
    Anywhere
    Borland
    Interbase
    Adding anything to null gives nullYesYesYesYesYes
    Multiplying null by zero gives nullYesYesYesYesYes
    nulls are distinct in a UNIQUE columnYesYesYes(Note 4)(Note 4)
    nulls are distinct in SELECT DISTINCTNoNoNo (Note 1)NoNo
    nulls are distinct in a UNION(Note 3)NoNo (Note 1)NoNo
    "CASE WHEN null THEN 1 ELSE 0 END" is 0?YesYesYesYes(Note 5)
    "null OR true" is trueYesYesYesYesYes
    "not (null AND false)" is trueNoYesYesYesYes
    - - - - - - - - - - - - - - - - - - - -
    Notes:  1. Older versions of Firebird omit all NULLs from SELECT DISTINCT -and from UNION.
    2. Test data unavailable.
    3. MySQL version 3.23.41 does not support UNION.
    4. DB2, SQL Anywhere, and Borland Interbase -do not allow NULLs in a UNIQUE column.
    5. Borland Interbase does not support CASE expressions.
    -
    - -

     

    -

    -The following script was used to gather information for the table -above. -

    - -
    --- I have about decided that SQL's treatment of NULLs is capricious and cannot be
    --- deduced by logic.  It must be discovered by experiment.  To that end, I have 
    --- prepared the following script to test how various SQL databases deal with NULL.
    --- My aim is to use the information gathered from this script to make SQLite as much
    --- like other databases as possible.
    ---
    --- If you could please run this script in your database engine and mail the results
    --- to me at drh@hwaci.com, that will be a big help.  Please be sure to identify the
    --- database engine you use for this test.  Thanks.
    ---
    --- If you have to change anything to get this script to run with your database
    --- engine, please send your revised script together with your results.
    ---
    -
    --- Create a test table with data
    -create table t1(a int, b int, c int);
    -insert into t1 values(1,0,0);
    -insert into t1 values(2,0,1);
    -insert into t1 values(3,1,0);
    -insert into t1 values(4,1,1);
    -insert into t1 values(5,null,0);
    -insert into t1 values(6,null,1);
    -insert into t1 values(7,null,null);
    -
    --- Check to see what CASE does with NULLs in its test expressions
    -select a, case when b<>0 then 1 else 0 end from t1;
    -select a+10, case when not b<>0 then 1 else 0 end from t1;
    -select a+20, case when b<>0 and c<>0 then 1 else 0 end from t1;
    -select a+30, case when not (b<>0 and c<>0) then 1 else 0 end from t1;
    -select a+40, case when b<>0 or c<>0 then 1 else 0 end from t1;
    -select a+50, case when not (b<>0 or c<>0) then 1 else 0 end from t1;
    -select a+60, case b when c then 1 else 0 end from t1;
    -select a+70, case c when b then 1 else 0 end from t1;
    -
    --- What happens when you multiply a NULL by zero?
    -select a+80, b*0 from t1;
    -select a+90, b*c from t1;
    -
    --- What happens to NULL for other operators?
    -select a+100, b+c from t1;
    -
    --- Test the treatment of aggregate operators
    -select count(*), count(b), sum(b), avg(b), min(b), max(b) from t1;
    -
    --- Check the behavior of NULLs in WHERE clauses
    -select a+110 from t1 where b<10;
    -select a+120 from t1 where not b>10;
    -select a+130 from t1 where b<10 OR c=1;
    -select a+140 from t1 where b<10 AND c=1;
    -select a+150 from t1 where not (b<10 AND c=1);
    -select a+160 from t1 where not (c=1 AND b<10);
    -
    --- Check the behavior of NULLs in a DISTINCT query
    -select distinct b from t1;
    -
    --- Check the behavior of NULLs in a UNION query
    -select b from t1 union select b from t1;
    -
    --- Create a new table with a unique column.  Check to see if NULLs are considered
    --- to be distinct.
    -create table t2(a int, b int unique);
    -insert into t2 values(1,1);
    -insert into t2 values(2,null);
    -insert into t2 values(3,null);
    -select * from t2;
    -
    -drop table t1;
    -drop table t2;
    -
    -} - -footer $rcsid DELETED oldnews.tcl Index: oldnews.tcl ================================================================== --- oldnews.tcl +++ /dev/null @@ -1,509 +0,0 @@ -#!/usr/bin/tclsh -source common.tcl -header {SQLite Older News} - -proc newsitem {date title text} { - puts "

    $date - $title

    " - regsub -all "\n( *\n)+" $text "

    \n\n

    " txt - puts "

    $txt

    " - puts "
    " -} - -newsitem {2007-Aug-13} {Version 3.4.2} { - While stress-testing the - soft_heap_limit - feature, a bug that could lead to - database - corruption was - discovered and fixed. - Though the consequences of this bug are severe, the chances of hitting - it in a typical application are remote. Upgrading is recommended - only if you use the - sqlite3_soft_heap_limit - interface. -} - -newsitem {2007-Jly-20} {Version 3.4.1} { - This release fixes a bug in VACUUM that - can lead to - database corruption. The bug was introduced in version - 3.3.14. - Upgrading is recommended for all users. Also included are a slew of - other more routine - enhancements and bug fixes. -} - -newsitem {2007-Jun-18} {Version 3.4.0} { - This release fixes two separate bugs either of which - can lead to database corruption. Upgrading - is strongly recommended. If you must continue using an older version - of SQLite, please at least read about how to avoid these bugs - at - - CorruptionFollowingBusyError and - ticket #2418 -

    - This release also adds explicit limits on the - sizes and quantities of things SQLite will handle. The new limits might - causes compatibility problems for existing applications that - use excessively large strings, BLOBs, tables, or SQL statements. - The new limits can be increased at compile-time to work around any problems - that arise. Nevertheless, the version number of this release is - 3.4.0 instead of 3.3.18 in order to call attention to the possible - incompatibility. -

    - There are also new features, including - incremental BLOB I/O and - incremental vacuum. - See the change log - for additional information. -} - -newsitem {2007-Apr-25} {Version 3.3.17} { - This version fixes a bug in the forwards-compatibility logic of SQLite - that was causing a database to become unreadable when it should have - been read-only. Upgrade from 3.3.16 only if you plan to deploy into - a product that might need to be upgraded in the future. For day to day - use, it probably does not matter. -} - -newsitem {2007-Apr-18} {Version 3.3.16} { - Performance improvements added in 3.3.14 but mistakenly turned off - in 3.3.15 have been reinstated. A bug has been fixed that prevented - VACUUM from running if a NULL value was in a UNIQUE column. -} - -newsitem {2007-Apr-09} {Version 3.3.15} { - An annoying bug introduced in 3.3.14 has been fixed. There are - also many enhancements to the test suite. -} - -newsitem {2007-Apr-02} {Version 3.3.14} { - This version focuses on performance improvements. If you recompile - - the amalgamation using GCC option -O3 (the precompiled binaries - use -O2) you may see performance - improvements of 35% or more over version 3.3.13 depending on your - workload. This version also - adds support for - exclusive access mode. -} - -newsitem {2007-Feb-13} {Version 3.3.13} { - This version fixes a subtle bug in the ORDER BY optimizer that can - occur when using joins. There are also a few minor enhancements. - Upgrading is recommended. -} - -newsitem {2007-Jan-27} {Version 3.3.12} { - The first published build of the previous version used the wrong - set of source files. Consequently, many people downloaded a build - that was labeled as "3.3.11" but was really 3.3.10. Version 3.3.12 - is released to clear up the ambiguity. A couple more bugs have - also been fixed and - PRAGMA integrity_check has been enhanced. 
-} - -newsitem {2007-Jan-22} {Version 3.3.11} { - Version 3.3.11 fixes for a few more problems in version 3.3.9 that - version 3.3.10 failed to catch. Upgrading is recommended. -} - -newsitem {2007-Jan-9} {Version 3.3.10} { - Version 3.3.10 fixes several bugs that were introduced by the previous - release. Upgrading is recommended. -} - -newsitem {2007-Jan-4} {Version 3.3.9} { - Version 3.3.9 fixes bugs that can lead to database corruption under - obscure and difficult to reproduce circumstances. See - - DatabaseCorruption in the - wiki for details. - This release also adds the new - sqlite3_prepare_v2() - API and includes important bug fixes in the command-line - shell and enhancements to the query optimizer. Upgrading is - recommended. -} - -newsitem {2006-Oct-9} {Version 3.3.8} { - Version 3.3.8 adds support for full-text search using the - FTS1 - module. There are also minor bug fixes. Upgrade only if - you want to try out the new full-text search capabilities or if - you are having problems with 3.3.7. -} - -newsitem {2006-Aug-12} {Version 3.3.7} { - Version 3.3.7 includes support for loadable extensions and virtual - tables. But both features are still considered "beta" and their - APIs are subject to change in a future release. This release is - mostly to make available the minor bug fixes that have accumulated - since 3.3.6. Upgrading is not necessary. Do so only if you encounter - one of the obscure bugs that have been fixed or if you want to try - out the new features. -} - -newsitem {2006-Jun-19} {New Book About SQLite} { - - The Definitive Guide to SQLite, a new book by - Mike Owens. - is now available from Apress. - The books covers the latest SQLite internals as well as - the native C interface and bindings for PHP, Python, - Perl, Ruby, Tcl, and Java. Recommended. -} - - -newsitem {2006-Jun-6} {Version 3.3.6} { - Changes include improved tolerance for windows virus scanners - and faster :memory: databases. 
There are also fixes for several - obscure bugs. Upgrade if you are having problems. -} - -newsitem {2006-Apr-5} {Version 3.3.5} { - This release fixes many minor bugs and documentation typos and - provides some minor new features and performance enhancements. - Upgrade only if you are having problems or need one of the new features. -} - -newsitem {2006-Feb-11} {Version 3.3.4} { - This release fixes several bugs, including a - a blunder that might cause a deadlock on multithreaded systems. - Anyone using SQLite in a multithreaded environment should probably upgrade. -} - -newsitem {2006-Jan-31} {Version 3.3.3 stable} { - There have been no major problems discovered in version 3.3.2, so - we hereby declare the new APIs and language features to be stable - and supported. -} - -newsitem {2006-Jan-24} {Version 3.3.2 beta} { - More bug fixes and performance improvements as we move closer to - a production-ready version 3.3.x. -} - -newsitem {2006-Jan-16} {Version 3.3.1 alpha} { - Many bugs found in last week's alpha release have now been fixed and - the library is running much faster again. - - Database connections can now be moved between threads as long as the - connection holds no locks at the time it is moved. Thus the common - paradigm of maintaining a pool of database connections and handing - them off to transient worker threads is now supported. - Please help test this new feature. - See - the MultiThreading wiki page for additional - information. -} - -newsitem {2006-Jan-10} {Version 3.3.0 alpha} { - Version 3.3.0 adds support for CHECK constraints, DESC indices, - separate REAL and INTEGER column affinities, a new OS interface layer - design, and many other changes. The code passed a regression - test but should still be considered alpha. Please report any - problems. - - The file format for version 3.3.0 has changed slightly to support - descending indices and - a more efficient encoding of boolean values. 
SQLite 3.3.0 will read and - write legacy databases created with any prior version of SQLite 3. But - databases created by version 3.3.0 will not be readable or writable - by earlier versions of the SQLite. The older file format can be - specified at compile-time for those rare cases where it is needed. -} - -newsitem {2005-Dec-19} {Versions 3.2.8 and 2.8.17} { - These versions contain one-line changes to 3.2.7 and 2.8.16 to fix a bug - that has been present since March of 2002 and version 2.4.0. - That bug might possibly cause database corruption if a large INSERT or - UPDATE statement within a multi-statement transaction fails due to a - uniqueness constraint but the containing transaction commits. -} - - -newsitem {2005-Sep-24} {Version 3.2.7} { - This version fixes several minor and obscure bugs. - Upgrade only if you are having problems. -} - -newsitem {2005-Sep-16} {Version 3.2.6 - Critical Bug Fix} { - This version fixes a bug that can result in database - corruption if a VACUUM of a 1 gibibyte or larger database fails - (perhaps do to running out of disk space or an unexpected power loss) - and is later rolled back. -

    - Also in this release: - The ORDER BY and GROUP BY processing was rewritten to use less memory. - Support for COUNT(DISTINCT) was added. The LIKE operator can now be - used by the optimizer on columns with COLLATE NOCASE. -} - -newsitem {2005-Aug-27} {Version 3.2.5} { - This release fixes a few more lingering bugs in the new code. - We expect that this release will be stable and ready for production use. -} - -newsitem {2005-Aug-24} {Version 3.2.4} { - This release fixes a bug in the new optimizer that can lead to segfaults - when parsing very complex WHERE clauses. -} - -newsitem {2005-Aug-21} {Version 3.2.3} { - This release adds the ANALYZE command, - the CAST operator, and many - very substantial improvements to the query optimizer. See the - change log for additional - information. -} - -newsitem {2005-Aug-2} {2005 Open Source Award for SQLite} { - SQLite and its primary author D. Richard Hipp have been honored with - a 2005 Open Source - Award from Google and O'Reilly.
    -} - - -newsitem {2005-Jun-13} {Version 3.2.2} { - This release includes numerous minor bug fixes, speed improvements, - and code size reductions. There is no reason to upgrade unless you - are having problems or unless you just want to. -} - -newsitem {2005-Mar-29} {Version 3.2.1} { - This release fixes a memory allocation problem in the new - ALTER TABLE ADD COLUMN - command. -} - -newsitem {2005-Mar-21} {Version 3.2.0} { - The primary purpose for version 3.2.0 is to add support for - ALTER TABLE ADD COLUMN. - The new ADD COLUMN capability is made - possible by AOL developers supporting and embracing great - open-source software. Thanks, AOL! - - Version 3.2.0 also fixes an obscure but serious bug that was discovered - just prior to release. If you have a multi-statement transaction and - within that transaction an UPDATE or INSERT statement fails due to a - constraint, then you try to rollback the whole transaction, the rollback - might not work correctly. See - Ticket #1171 - for details. Upgrading is recommended for all users. -} - -newsitem {2005-Mar-16} {Version 3.1.6} { - Version 3.1.6 fixes a critical bug that can cause database corruption - when inserting rows into tables with around 125 columns. This bug was - introduced in version 3.0.0. See - Ticket #1163 - for additional information. -} - -newsitem {2005-Mar-11} {Versions 3.1.4 and 3.1.5 Released} { - Version 3.1.4 fixes a critical bug that could cause database corruption - if the autovacuum mode of version 3.1.0 is turned on (it is off by - default) and a CREATE UNIQUE INDEX is executed within a transaction but - fails because the indexed columns are not unique. Anyone using the - autovacuum feature and unique indices should upgrade. - - Version 3.1.5 adds the ability to disable - the F_FULLFSYNC ioctl() in OS-X by setting "PRAGMA synchronous=on" instead - of the default "PRAGMA synchronous=full". There was an attempt to add - this capability in 3.1.4 but it did not work due to a spelling error. 
-} - -newsitem {2005-Feb-19} {Version 3.1.3 Released} { - Version 3.1.3 cleans up some minor issues discovered in version 3.1.2. -} - -newsitem {2005-Feb-15} {Versions 2.8.16 and 3.1.2 Released} { - A critical bug in the VACUUM command that can lead to database - corruption has been fixed in both the 2.x branch and the main - 3.x line. This bug has existed in all prior versions of SQLite. - Even though it is unlikely you will ever encounter this bug, - it is suggested that all users upgrade. See - - ticket #1116. for additional information. - - Version 3.1.2 is also the first stable release of the 3.1 - series. SQLite 3.1 features added support for correlated - subqueries, autovacuum, autoincrement, ALTER TABLE, and - other enhancements. See the - release notes - for version 3.1.0 for a detailed description of the - changes available in the 3.1 series. -} - -newsitem {2005-Feb-01} {Version 3.1.1 (beta) Released} { - Version 3.1.1 (beta) is now available on the - website. Verison 3.1.1 is fully backwards compatible with the 3.0 series - and features many new features including Autovacuum and correlated - subqueries. The - release notes - From version 3.1.0 apply equally to this release beta. A stable release - is expected within a couple of weeks. -} - -newsitem {2005-Jan-21} {Version 3.1.0 (alpha) Released} { - Version 3.1.0 (alpha) is now available on the - website. Verison 3.1.0 is fully backwards compatible with the 3.0 series - and features many new features including Autovacuum and correlated - subqueries. See the - release notes - for details. - - This is an alpha release. A beta release is expected in about a week - with the first stable release to follow after two more weeks. -} - -newsitem {2004-Nov-09} {SQLite at the 2004 International PHP Conference} { - There was a talk on the architecture of SQLite and how to optimize - SQLite queries at the 2004 International PHP Conference in Frankfurt, - Germany. - - Slides from that talk are available. 
-} - -newsitem {2004-Oct-11} {Version 3.0.8} { - Version 3.0.8 of SQLite contains several code optimizations and minor - bug fixes and adds support for DEFERRED, IMMEDIATE, and EXCLUSIVE - transactions. This is an incremental release. There is no reason - to upgrade from version 3.0.7 if that version is working for you. -} - - -newsitem {2004-Oct-10} {SQLite at the 11th -Annual Tcl/Tk Conference} { - There will be a talk on the use of SQLite in Tcl/Tk at the - 11th Tcl/Tk Conference this week in - New Orleans. Visit - http://www.tcl.tk/ for details. - - Slides from the talk are available. -} - -newsitem {2004-Sep-18} {Version 3.0.7} { - Version 3.0 has now been in use by multiple projects for several - months with no major difficulties. We consider it stable and - ready for production use. -} - -newsitem {2004-Sep-02} {Version 3.0.6 (beta)} { - Because of some important changes to sqlite3_step(), - we have decided to - do an additional beta release prior to the first "stable" release. - If no serious problems are discovered in this version, we will - release version 3.0 "stable" in about a week. -} - - -newsitem {2004-Aug-29} {Version 3.0.5 (beta)} { - The fourth beta release of SQLite version 3.0 is now available. - The next release is expected to be called "stable". -} - - -newsitem {2004-Aug-08} {Version 3.0.4 (beta)} { - The third beta release of SQLite version 3.0 is now available. - This new beta fixes several bugs including a database corruption - problem that can occur when doing a DELETE while a SELECT is pending. - Expect at least one more beta before version 3.0 goes final. -} - -newsitem {2004-July-22} {Version 3.0.3 (beta)} { - The second beta release of SQLite version 3.0 is now available. - This new beta fixes many bugs and adds support for databases with - varying page sizes. The next 3.0 release will probably be called - a final or stable release. - - Version 3.0 adds support for internationalization and a new - more compact file format. - Details. 
- The API and file format have been fixed since 3.0.2. All - regression tests pass (over 100000 tests) and the test suite - exercises over 95% of the code. - - SQLite version 3.0 is made possible in part by AOL - developers supporting and embracing great Open-Source Software. -} - -newsitem {2004-Jly-22} {Version 2.8.15} { - SQLite version 2.8.15 is a maintenance release for the version 2.8 - series. Version 2.8 continues to be maintained with bug fixes, but - no new features will be added to version 2.8. All the changes in - this release are minor. If you are not having problems, there is - there is no reason to upgrade. -} - -newsitem {2004-Jun-30} {Version 3.0.2 (beta) Released} { - The first beta release of SQLite version 3.0 is now available. - Version 3.0 adds support for internationalization and a new - more compact file format. - Details. - As of this release, the API and file format are frozen. All - regression tests pass (over 100000 tests) and the test suite - exercises over 95% of the code. - - SQLite version 3.0 is made possible in part by AOL - developers supporting and embracing great Open-Source Software. -} - - -newsitem {2004-Jun-25} {Website hacked} { - The www.sqlite.org website was hacked sometime around 2004-Jun-22 - because the lead SQLite developer failed to properly patch CVS. - Evidence suggests that the attacker was unable to elevate privileges - above user "cvs". Nevertheless, as a precaution the entire website - has been reconstructed from scratch on a fresh machine. All services - should be back to normal as of 2004-Jun-28. -} - - -newsitem {2004-Jun-18} {Version 3.0.0 (alpha) Released} { - The first alpha release of SQLite version 3.0 is available for - public review and comment. Version 3.0 enhances internationalization support - through the use of UTF-16 and user-defined text collating sequences. - BLOBs can now be stored directly, without encoding. 
- A new file format results in databases that are 25% smaller (depending - on content). The code is also a little faster. In spite of the many - new features, the library footprint is still less than 240KB - (x86, gcc -O1). - Additional information. - - Our intent is to freeze the file format and API on 2004-Jul-01. - Users are encouraged to review and evaluate this alpha release carefully - and submit any feedback prior to that date. - - The 2.8 series of SQLite will continue to be supported with bug - fixes for the foreseeable future. -} - -newsitem {2004-Jun-09} {Version 2.8.14 Released} { - SQLite version 2.8.14 is a patch release to the stable 2.8 series. - There is no reason to upgrade if 2.8.13 is working ok for you. - This is only a bug-fix release. Most development effort is - going into version 3.0.0 which is due out soon. -} - -newsitem {2004-May-31} {CVS Access Temporarily Disabled} { - Anonymous access to the CVS repository will be suspended - for 2 weeks beginning on 2004-June-04. Everyone will still - be able to download - prepackaged source bundles, create or modify trouble tickets, or view - change logs during the CVS service interruption. Full open access to the - CVS repository will be restored on 2004-June-18. -} - -newsitem {2004-Apr-23} {Work Begins On SQLite Version 3} { - Work has begun on version 3 of SQLite. Version 3 is a major - changes to both the C-language API and the underlying file format - that will enable SQLite to better support internationalization. - The first beta is schedule for release on 2004-July-01. - - Plans are to continue to support SQLite version 2.8 with - bug fixes. But all new development will occur in version 3.0. 
-} -footer {$Id: oldnews.tcl,v 1.24 2007/11/05 18:11:18 drh Exp $} DELETED omitted.tcl Index: omitted.tcl ================================================================== --- omitted.tcl +++ /dev/null @@ -1,85 +0,0 @@ -# -# Run this script to generated a omitted.html output file -# -set rcsid {$Id: omitted.tcl,v 1.10 2005/11/03 00:41:18 drh Exp $} -source common.tcl -header {SQL Features That SQLite Does Not Implement} -puts { -

    SQL Features That SQLite Does Not Implement

    - -

    -Rather than try to list all the features of SQL92 that SQLite does -support, it is much easier to list those that it does not. -Unsupported features of SQL92 are shown below.

    - -

    -The order of this list gives some hint as to when a feature might -be added to SQLite. Those features near the top of the list are -likely to be added in the near future. There are no immediate -plans to add features near the bottom of the list. -

    - - -} - -proc feature {name desc} { - puts "" - puts "" -} - -feature {FOREIGN KEY constraints} { - FOREIGN KEY constraints are parsed but are not enforced. -} - -feature {Complete trigger support} { - There is some support for triggers but it is not complete. Missing - subfeatures include FOR EACH STATEMENT triggers (currently all triggers - must be FOR EACH ROW), INSTEAD OF triggers on tables (currently - INSTEAD OF triggers are only allowed on views), and recursive - triggers - triggers that trigger themselves. -} - -feature {Complete ALTER TABLE support} { - Only the RENAME TABLE and ADD COLUMN variants of the - ALTER TABLE command are supported. Other kinds of ALTER TABLE operations - such as - DROP COLUMN, ALTER COLUMN, ADD CONSTRAINT, and so forth are omitted. -} - -feature {Nested transactions} { - The current implementation only allows a single active transaction. -} - -feature {RIGHT and FULL OUTER JOIN} { - LEFT OUTER JOIN is implemented, but not RIGHT OUTER JOIN or - FULL OUTER JOIN. -} - -feature {Writing to VIEWs} { - VIEWs in SQLite are read-only. You may not execute a DELETE, INSERT, or - UPDATE statement on a view. But you can create a trigger - that fires on an attempt to DELETE, INSERT, or UPDATE a view and do - what you need in the body of the trigger. -} - -feature {GRANT and REVOKE} { - Since SQLite reads and writes an ordinary disk file, the - only access permissions that can be applied are the normal - file access permissions of the underlying operating system. - The GRANT and REVOKE commands commonly found on client/server - RDBMSes are not implemented because they would be meaningless - for an embedded database engine. -} - -puts { -
    $name " - puts "$desc
    - -

    -If you find other SQL92 features that SQLite does not support, please -add them to the Wiki page at - -http://www.sqlite.org/cvstrac/wiki?p=Unsupported -

    -} -footer $rcsid DELETED opcode.tcl Index: opcode.tcl ================================================================== --- opcode.tcl +++ /dev/null @@ -1,243 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: opcode.tcl,v 1.15 2005/03/09 12:26:51 danielk1977 Exp $} -source common.tcl -header {SQLite Virtual Machine Opcodes} -puts { -

    SQLite Virtual Machine Opcodes

    -} - -set fd [open [lindex $argv 0] r] -set file [read $fd [file size [lindex $argv 0]]] -close $fd -set current_op {} -foreach line [split $file \n] { - set line [string trim $line] - if {[string index $line 1]!="*"} { - set current_op {} - continue - } - if {[regexp {^/\* Opcode: } $line]} { - set current_op [lindex $line 2] - set txt [lrange $line 3 end] - regsub -all {>} $txt {\>} txt - regsub -all {<} $txt {\<} txt - set Opcode($current_op:args) $txt - lappend OpcodeList $current_op - continue - } - if {$current_op==""} continue - if {[regexp {^\*/} $line]} { - set current_op {} - continue - } - set line [string trim [string range $line 3 end]] - if {$line==""} { - append Opcode($current_op:text) \n

    - } else { - regsub -all {>} $line {\>} line - regsub -all {<} $line {\<} line - append Opcode($current_op:text) \n$line - } -} -unset file - -puts { -

    Introduction

    - -

    In order to execute an SQL statement, the SQLite library first parses -the SQL, analyzes the statement, then generates a short program to execute -the statement. The program is generated for a "virtual machine" implemented -by the SQLite library. This document describes the operation of that -virtual machine.

    - -

    This document is intended as a reference, not a tutorial. -A separate Virtual Machine Tutorial is -available. If you are looking for a narrative description -of how the virtual machine works, you should read the tutorial -and not this document. Once you have a basic idea of what the -virtual machine does, you can refer back to this document for -the details on a particular opcode. -Unfortunately, the virtual machine tutorial was written for -SQLite version 1.0. There are substantial changes in the virtual -machine for version 2.0 and the document has not been updated. -

    - -

    The source code to the virtual machine is in the vdbe.c source -file. All of the opcode definitions further down in this document are -contained in comments in the source file. In fact, the opcode table -in this document -was generated by scanning the vdbe.c source file -and extracting the necessary information from comments. So the -source code comments are really the canonical source of information -about the virtual machine. When in doubt, refer to the source code.

    - -

    Each instruction in the virtual machine consists of an opcode and -up to three operands named P1, P2 and P3. P1 may be an arbitrary -integer. P2 must be a non-negative integer. P2 is always the -jump destination in any operation that might cause a jump. -P3 is a null-terminated -string or NULL. Some operators use all three operands. Some use -one or two. Some operators use none of the operands.

    - -

    The virtual machine begins execution on instruction number 0. -Execution continues until (1) a Halt instruction is seen, or -(2) the program counter becomes one greater than the address of -last instruction, or (3) there is an execution error. -When the virtual machine halts, all memory -that it allocated is released and all database cursors it may -have had open are closed. If the execution stopped due to an -error, any pending transactions are terminated and changes made -to the database are rolled back.

    - -

    The virtual machine also contains an operand stack of unlimited -depth. Many of the opcodes use operands from the stack. See the -individual opcode descriptions for details.

    - -

    The virtual machine can have zero or more cursors. Each cursor -is a pointer into a single table or index within the database. -There can be multiple cursors pointing at the same index or table. -All cursors operate independently, even cursors pointing to the same -indices or tables. -The only way for the virtual machine to interact with a database -file is through a cursor. -Instructions in the virtual -machine can create a new cursor (Open), read data from a cursor -(Column), advance the cursor to the next entry in the table -(Next) or index (NextIdx), and many other operations. -All cursors are automatically -closed when the virtual machine terminates.

    - -

    The virtual machine contains an arbitrary number of fixed memory -locations with addresses beginning at zero and growing upward. -Each memory location can hold an arbitrary string. The memory -cells are typically used to hold the result of a scalar SELECT -that is part of a larger expression.

    - -

    The virtual machine contains a single sorter. -The sorter is able to accumulate records, sort those records, -then play the records back in sorted order. The sorter is used -to implement the ORDER BY clause of a SELECT statement.

    - -

    The virtual machine contains a single "List". -The list stores a list of integers. The list is used to hold the -rowids for records of a database table that needs to be modified. -The WHERE clause of an UPDATE or DELETE statement scans through -the table and writes the rowid of every record to be modified -into the list. Then the list is played back and the table is modified -in a separate step.

    - -

    The virtual machine can contain an arbitrary number of "Sets". -Each set holds an arbitrary number of strings. Sets are used to -implement the IN operator with a constant right-hand side.

    - -

    The virtual machine can open a single external file for reading. -This external read file is used to implement the COPY command.

    - -

    Finally, the virtual machine can have a single set of aggregators. -An aggregator is a device used to implement the GROUP BY clause -of a SELECT. An aggregator has one or more slots that can hold -values being extracted by the select. The number of slots is the -same for all aggregators and is defined by the AggReset operation. -At any point in time a single aggregator is current or "has focus". -There are operations to read or write to memory slots of the aggregator -in focus. There are also operations to change the focus aggregator -and to scan through all aggregators.

    - -

    Viewing Programs Generated By SQLite

    - -

    Every SQL statement that SQLite interprets results in a program -for the virtual machine. But if you precede the SQL statement with -the keyword "EXPLAIN" the virtual machine will not execute the -program. Instead, the instructions of the program will be returned -like a query result. This feature is useful for debugging and -for learning how the virtual machine operates.

    - -

    You can use the sqlite command-line tool to see the -instructions generated by an SQL statement. The following is -an example:

    } - -proc Code {body} { - puts {
    } - regsub -all {&} [string trim $body] {\&} body - regsub -all {>} $body {\>} body - regsub -all {<} $body {\<} body - regsub -all {\(\(\(} $body {} body - regsub -all {\)\)\)} $body {} body - regsub -all { } $body {\ } body - regsub -all \n $body
    \n body - puts $body - puts {
    } -} - -Code { -$ (((sqlite ex1))) -sqlite> (((.explain))) -sqlite> (((explain delete from tbl1 where two<20;))) -addr opcode p1 p2 p3 ----- ------------ ----- ----- ---------------------------------------- -0 Transaction 0 0 -1 VerifyCookie 219 0 -2 ListOpen 0 0 -3 Open 0 3 tbl1 -4 Rewind 0 0 -5 Next 0 12 -6 Column 0 1 -7 Integer 20 0 -8 Ge 0 5 -9 Recno 0 0 -10 ListWrite 0 0 -11 Goto 0 5 -12 Close 0 0 -13 ListRewind 0 0 -14 OpenWrite 0 3 -15 ListRead 0 19 -16 MoveTo 0 0 -17 Delete 0 0 -18 Goto 0 15 -19 ListClose 0 0 -20 Commit 0 0 -} - -puts { -

    All you have to do is add the "EXPLAIN" keyword to the front of the -SQL statement. But if you use the ".explain" command to sqlite -first, it will set up the output mode to make the program more easily -viewable.

    - -

    If sqlite has been compiled without the "-DNDEBUG=1" option -(that is, with the NDEBUG preprocessor macro not defined) then you -can put the SQLite virtual machine in a mode where it will trace its -execution by writing messages to standard output. The non-standard -SQL "PRAGMA" comments can be used to turn tracing on and off. To -turn tracing on, enter: -

    - -
    -PRAGMA vdbe_trace=on;
    -
    - -

    -You can turn tracing back off by entering a similar statement but -changing the value "on" to "off".

    - -

    The Opcodes

    -} - -puts "

    There are currently [llength $OpcodeList] opcodes defined by -the virtual machine." -puts {All currently defined opcodes are described in the table below. -This table was generated automatically by scanning the source code -from the file vdbe.c.

    } - -puts { -

    -} -foreach op [lsort -dictionary $OpcodeList] { - puts {" -} -puts {
    Opcode NameDescription
    } - puts "$op" - puts "[string trim $Opcode($op:text)]

    } -footer $rcsid DELETED optimizer.tcl Index: optimizer.tcl ================================================================== --- optimizer.tcl +++ /dev/null @@ -1,265 +0,0 @@ -# -# Run this TCL script to generate HTML for the goals.html file. -# -set rcsid {$Id: optimizer.tcl,v 1.1 2005/08/30 22:44:06 drh Exp $} -source common.tcl -header {The SQLite Query Optimizer} - -proc CODE {text} { - puts "
    "
    -  puts $text
    -  puts "
    " -} -proc IMAGE {name {caption {}}} { - puts "
    " - if {$caption!=""} { - puts "
    $caption" - } - puts "
    " -} -proc PARAGRAPH {text} { - puts "

    $text

    \n" -} -proc HEADING {level name} { - puts "$name" -} - -HEADING 1 {The SQLite Query Optimizer} - -PARAGRAPH { - This article describes how the SQLite query optimizer works. - This is not something you have to know in order to use SQLite - many - programmers use SQLite successfully without the slightest hint of what - goes on in the inside. - But a basic understanding of what SQLite is doing - behind the scenes will help you to write more efficient SQL. And the - knowledge gained by studying the SQLite query optimizer has broad - application since most other relational database engines operate - similarly. - A solid understanding of how the query optimizer works is also - required before making meaningful changes or additions to the SQLite, so - this article should be read closely by anyone aspiring - to hack the source code. -} - -HEADING 2 Background - -PARAGRAPH { - It is important to understand that SQL is a programming language. - SQL is a perculiar programming language in that it - describes what the programmer wants to compute not how - to compute it as most other programming languages do. - But perculiar or not, SQL is still just a programming language. -} - -PARAGRAPH { - It is very helpful to think of each SQL statement as a separate - program. - An important job of the SQL database engine is to translate each - SQL statement from its descriptive form that specifies what the - information is desired (the what) - into a procedural form that specifies how to go - about acquiring the desired information (the how). - The task of translating the what into a - how is assigned to the query optimizer. -} - -PARAGRAPH { - The beauty of SQL comes from the fact that the optimizer frees the programmer - from having to worry over the details of how. The programmer - only has to specify the what and then leave the optimizer - to deal with all of the minutae of implementing the - how. 
Thus the programmer is able to think and work at a - much higher level and leave the optimizer to stress over the low-level - work. -} - -HEADING 2 {Database Layout} - -PARAGRAPH { - An SQLite database consists of one or more "b-trees". - Each b-tree contains zero or more "rows". - A single row contains a "key" and some "data". - In general, both the key and the data are arbitrary binary - data of any length. - The keys must all be unique within a single b-tree. - Rows are stored in order of increasing key values - each - b-tree has a comparision functions for keys that determines - this order. -} - -PARAGRAPH { - In SQLite, each SQL table is stored as a b-tree where the - key is a 64-bit integer and the data is the content of the - table row. The 64-bit integer key is the ROWID. And, of course, - if the table has an INTEGER PRIMARY KEY, then that integer is just - an alias for the ROWID. -} - -PARAGRAPH { - Consider the following block of SQL code: -} - -CODE { - CREATE TABLE ex1( - id INTEGER PRIMARY KEY, - x VARCHAR(30), - y INTEGER - ); - INSERT INTO ex1 VALUES(NULL,'abc',12345); - INSERT INTO ex1 VALUES(NULL,456,'def'); - INSERT INTO ex1 VALUES(100,'hello','world'); - INSERT INTO ex1 VALUES(-5,'abc','xyz'); - INSERT INTO ex1 VALUES(54321,NULL,987); -} - -PARAGRAPH { - This code generates a new b-tree (named "ex1") containing 5 rows. - This table can be visualized as follows: -} -IMAGE table-ex1b2.gif - -PARAGRAPH { - Note that the key for each row if the b-tree is the INTEGER PRIMARY KEY - for that row. (Remember that the INTEGER PRIMARY KEY is just an alias - for the ROWID.) The other fields of the table form the data for each - entry in the b-tree. Note also that the b-tree entries are in ROWID order - which is different from the order that they were originally inserted. 
-} - -PARAGRAPH { - Now consider the following SQL query: -} -CODE { - SELECT y FROM ex1 WHERE x=456; -} - -PARAGRAPH { - When the SQLite parser and query optimizer are handed this query, they - have to translate it into a procedure that will find the desired result. - In this case, they do what is call a "full table scan". They start - at the beginning of the b-tree that contains the table and visit each - row. Within each row, the value of the "x" column is tested and when it - is found to match 456, the value of the "y" column is output. - We can represent this procedure graphically as follows: -} -IMAGE fullscanb.gif - -PARAGRAPH { - A full table scan is the access method of last resort. It will always - work. But if the table contains millions of rows and you are only looking - a single one, it might take a very long time to find the particular row - you are interested in. - In particular, the time needed to access a single row of the table is - proportional to the total number of rows in the table. - So a big part of the job of the optimizer is to try to find ways to - satisfy the query without doing a full table scan. -} -PARAGRAPH { - The usual way to avoid doing a full table scan is use a binary search - to find the particular row or rows of interest in the table. - Consider the next query which searches on rowid instead of x: -} -CODE { - SELECT y FROM ex1 WHERE rowid=2; -} - -PARAGRAPH { - In the previous query, we could not use a binary search for x because - the values of x were not ordered. But the rowid values are ordered. - So instead of having to visit every row of the b-tree looking for one - that has a rowid value of 2, we can do a binary search for that particular - row and output its corresponding y value. We show this graphically - as follows: -} -IMAGE direct1b.gif - -PARAGRAPH { - When doing a binary search, we only have to look at a number of - rows with is proportional to the logorithm of the number of entries - in the table. 
For a table with just 5 entires as in the example above, - the difference between a full table scan and a binary search is - negligible. In fact, the full table scan might be faster. But in - a database that has 5 million rows, a binary search will be able to - find the desired row in only about 23 tries, whereas the full table - scan will need to look at all 5 million rows. So the binary search - is about 200,000 times faster in that case. -} -PARAGRAPH { - A 200,000-fold speed improvement is huge. So we always want to do - a binary search rather than a full table scan when we can. -} -PARAGRAPH { - The problem with a binary search is that the it only works if the - fields you are search for are in sorted order. So we can do a binary - search when looking up the rowid because the rows of the table are - sorted by rowid. But we cannot use a binary search when looking up - x because the values in the x column are in no particular order. -} -PARAGRAPH { - The way to work around this problem and to permit binary searching on - fields like x is to provide an index. - An index is another b-tree. - But in the index b-tree the key is not the rowid but rather the field - or fields being indexed followed by the rowid. - The data in an index b-tree is empty - it is not needed or used. - The following diagram shows an index on the x field of our example table: -} -IMAGE index-ex1-x-b.gif - -PARAGRAPH { - An important point to note in the index are that they keys of the - b-tree are in sorted order. (Recall that NULL values in SQLite sort - first, followed by numeric values in numerical order, then strings, and - finally BLOBs.) This is the property that will allow use to do a - binary search for the field x. The rowid is also included in every - key for two reasons. First, by including the rowid we guarantee that - every key will be unique. And second, the rowid will be used to look - up the actual table entry after doing the binary search. 
Finally, note - that the data portion of the index b-tree serves no purpose and is thus - kept empty to save space in the disk file. -} -PARAGRAPH { - Remember what the original query example looked like: -} -CODE { - SELECT y FROM ex1 WHERE x=456; -} - -PARAGRAPH { - The first time this query was encountered we had to do a full table - scan. But now that we have an index on x, we can do a binary search - on that index for the entry where x==456. Then from that entry we - can find the rowid value and use the rowid to look up the corresponding - entry in the original table. From the entry in the original table, - we can find the value y and return it as our result. The following - diagram shows this process graphically: -} -IMAGE indirect1b1.gif - -PARAGRAPH { - With the index, we are able to look up an entry based on the value of - x after visiting only a logorithmic number of b-tree entries. Unlike - the case where we were searching using rowid, we have to do two binary - searches for each output row. But for a 5-million row table, that is - still only 46 searches instead of 5 million for a 100,000-fold speedup. 
-} - -HEADING 3 {Parsing The WHERE Clause} - - - -# parsing the where clause -# rowid lookup -# index lookup -# index lookup without the table -# how an index is chosen -# joins -# join reordering -# order by using an index -# group by using an index -# OR -> IN optimization -# Bitmap indices -# LIKE and GLOB optimization -# subquery flattening -# MIN and MAX optimizations DELETED optimizing.tcl Index: optimizing.tcl ================================================================== --- optimizing.tcl +++ /dev/null @@ -1,15 +0,0 @@ -set rcsid {$Id: optimizing.tcl,v 1.1 2005/01/17 03:42:52 drh Exp $} -source common.tcl -header {Hints For Optimizing Queries In SQLite} -proc section {level tag name} { - incr level - if {$level>6} {set level 6} - puts "\n"" - puts "$name\n" -} -section 1 recompile {Recompile the library for optimal performance} -section 2 avoidtrans {Minimize the number of transactions} -section 3 usebind {Use sqlite3_bind to insert large chunks of data} -section 4 useindices {Use appropriate indices} -section 5 recordjoin {Reorder the tables in a join} -footer $rcsid DELETED optoverview.tcl Index: optoverview.tcl ================================================================== --- optoverview.tcl +++ /dev/null @@ -1,516 +0,0 @@ -# -# Run this TCL script to generate HTML for the goals.html file. -# -set rcsid {$Id: optoverview.tcl,v 1.5 2005/11/24 13:15:34 drh Exp $} -source common.tcl -header {The SQLite Query Optimizer Overview} - -proc CODE {text} { - puts "
    "
    -  puts $text
    -  puts "
    " -} -proc SYNTAX {text} { - puts "
    " - } - global level - incr level($n) - for {set i [expr {$n+1}]} {$i<10} {incr i} { - set level($i) 0 - } - if {$n==0} { - set num {} - } elseif {$n==1} { - set num $level(1).0 - } else { - set num $level(1) - for {set i 2} {$i<=$n} {incr i} { - append num .$level($i) - } - } - incr n 1 - puts "$num $name" -} - -HEADING 0 {The SQLite Query Optimizer Overview} - -PARAGRAPH { - This document provides a terse overview of how the query optimizer - for SQLite works. This is not a tutorial. The reader is likely to - need some prior knowledge of how database engines operate - in order to fully understand this text. -} - -HEADING 1 {WHERE clause analysis} where_clause - -PARAGRAPH { - The WHERE clause on a query is broken up into "terms" where each term - is separated from the others by an AND operator. -} -PARAGRAPH { - All terms of the WHERE clause are analyzed to see if they can be - satisfied using indices. - Terms that cannot be satisfied through the use of indices become - tests that are evaluated against each row of the relevant input - tables. No tests are done for terms that are completely satisfied by - indices. Sometimes - one or more terms will provide hints to indices but still must be - evaluated against each row of the input tables. -} - -PARAGRAPH { - The analysis of a term might cause new "virtual" terms to - be added to the WHERE clause. Virtual terms can be used with - indices to restrict a search. But virtual terms never generate code - that is tested against input rows. 
-} - -PARAGRAPH { - To be usable by an index a term must be of one of the following - forms: -} -SYNTAX { - /column/ = /expression/ - /column/ > /expression/ - /column/ >= /expression/ - /column/ < /expression/ - /column/ <= /expression/ - /expression/ = /column/ - /expression/ > /column/ - /expression/ >= /column/ - /expression/ < /column/ - /expression/ <= /column/ - /column/ IN (/expression-list/) - /column/ IN (/subquery/) -} -PARAGRAPH { - If an index is created using a statement like this: -} -CODE { - CREATE INDEX idx_ex1 ON ex1(a,b,c,d,e,...,y,z); -} -PARAGRAPH { - Then the index might be used if the initial columns of the index - (columns a, b, and so forth) appear in WHERE clause terms. - All index columns must be used with - the *=* or *IN* operators except for - the right-most column which can use inequalities. For the right-most - column of an index that is used, there can be up to two inequalities - that must sandwich the allowed values of the column between two extremes. -} -PARAGRAPH { - It is not necessary for every column of an index to appear in a - WHERE clause term in order for that index to be used. - But there can not be gaps in the columns of the index that are used. - Thus for the example index above, if there is no WHERE clause term - that constraints column c, then terms that constraint columns a and b can - be used with the index but not terms that constraint columns d through z. - Similarly, no index column will be used (for indexing purposes) - that is to the right of a - column that is constrained only by inequalities. - For the index above and WHERE clause like this: -} -CODE { - ... WHERE a=5 AND b IN (1,2,3) AND c>12 AND d='hello' -} -PARAGRAPH { - Only columns a, b, and c of the index would be usable. The d column - would not be usable because it occurs to the right of c and c is - constrained only by inequalities. 
-} - -HEADING 1 {The BETWEEN optimization} between_opt - -PARAGRAPH { - If a term of the WHERE clause is of the following form: -} -SYNTAX { - /expr1/ BETWEEN /expr2/ AND /expr3/ -} -PARAGRAPH { - Then two virtual terms are added as follows: -} -SYNTAX { - /expr1/ >= /expr2/ AND /expr1/ <= /expr3/ -} -PARAGRAPH { - If both virtual terms end up being used as constraints on an index, - then the original BETWEEN term is omitted and the corresponding test - is not performed on input rows. - Thus if the BETWEEN term ends up being used as an index constraint - no tests are ever performed on that term. - On the other hand, the - virtual terms themselves never causes tests to be performed on - input rows. - Thus if the BETWEEN term is not used as an index constraint and - instead must be used to test input rows, the expr1 expression is - only evaluated once. -} - -HEADING 1 {The OR optimization} or_opt - -PARAGRAPH { - If a term consists of multiple subterms containing a common column - name and separated by OR, like this: -} -SYNTAX { - /column/ = /expr1/ OR /column/ = /expr2/ OR /column/ = /expr3/ OR ... -} -PARAGRAPH { - Then the term is rewritten as follows: -} -SYNTAX { - /column/ IN (/expr1/,/expr2/,/expr3/,/expr4/,...) -} -PARAGRAPH { - The rewritten term then might go on to constraint an index using the - normal rules for *IN* operators. - Note that column must be the same column in every OR-connected subterm, - although the column can occur on either the left or the right side of - the *=* operator. -} - -HEADING 1 {The LIKE optimization} like_opt - -PARAGRAPH { - Terms that are composed of the LIKE or GLOB operator - can sometimes be used to constrain indices. - There are many conditions on this use: -} -PARAGRAPH { -
      -
    1. The left-hand side of the LIKE or GLOB operator must be the name - of an indexed column.
    2. -
    3. The right-hand side of the LIKE or GLOB must be a string literal - that does not begin with a wildcard character.
    4. -
    5. The ESCAPE clause cannot appear on the LIKE operator.
    6. -
    7. The build-in functions used to implement LIKE and GLOB must not - have been overloaded using the sqlite3_create_function() API.
    8. -
    9. For the GLOB operator, the column must use the default BINARY - collating sequence.
    10. -
    11. For the LIKE operator, if case_sensitive_like mode is enabled then - the column must use the default BINARY collating sequence, or if - case_sensitive_like mode is disabled then the column must use the - built-in NOCASE collating sequence.
    12. -
    -} -PARAGRAPH { - The LIKE operator has two modes that can be set by a pragma. The - default mode is for LIKE comparisons to be insensitive to differences - of case for latin1 characters. Thus, by default, the following - expression is true: -} -CODE { - 'a' LIKE 'A' -} -PARAGRAPH { - By turned on the case_sensitive_like pragma as follows: -} -CODE { - PRAGMA case_sensitive_like=ON; -} -PARAGRAPH { - Then the LIKE operator pays attention to case and the example above would - evaluate to false. Note that case insensitivity only applies to - latin1 characters - basically the upper and lower case letters of English - in the lower 127 byte codes of ASCII. International character sets - are case sensitive in SQLite unless a user-supplied collating - sequence is used. But if you employ a user-supplied collating sequence, - the LIKE optimization describe here will never be taken. -} -PARAGRAPH { - The LIKE operator is case insensitive by default because this is what - the SQL standard requires. You can change the default behavior at - compile time by using the -DSQLITE_CASE_SENSITIVE_LIKE command-line option - to the compiler. -} -PARAGRAPH { - The LIKE optimization might occur if the column named on the left of the - operator uses the BINARY collating sequence (which is the default) and - case_sensitive_like is turned on. Or the optimization might occur if - the column uses the built-in NOCASE collating sequence and the - case_sensitive_like mode is off. These are the only two combinations - under which LIKE operators will be optimized. If the column on the - right-hand side of the LIKE operator uses any collating sequence other - than the built-in BINARY and NOCASE collating sequences, then no optimizations - will ever be attempted on the LIKE operator. -} -PARAGRAPH { - The GLOB operator is always case sensitive. 
The column on the left side - of the GLOB operator must always use the built-in BINARY collating sequence - or no attempt will be made to optimize that operator with indices. -} -PARAGRAPH { - The right-hand side of the GLOB or LIKE operator must be a literal string - value that does not begin with a wildcard. If the right-hand side is a - parameter that is bound to a string, then no optimization is attempted. - If the right-hand side begins with a wildcard character then no - optimization is attempted. -} -PARAGRAPH { - Suppose the initial sequence of non-wildcard characters on the right-hand - side of the LIKE or GLOB operator is x. We are using a single - character to denote this non-wildcard prefix but the reader should - understand that the prefix can consist of more than 1 character. - Let y the smallest string that is the same length as /x/ but which - compares greater than x. For example, if x is *hello* then - y would be *hellp*. - The LIKE and GLOB optimizations consist of adding two virtual terms - like this: -} -SYNTAX { - /column/ >= /x/ AND /column/ < /y/ -} -PARAGRAPH { - Under most circumstances, the original LIKE or GLOB operator is still - tested against each input row even if the virtual terms are used to - constrain an index. This is because we do not know what additional - constraints may be imposed by characters to the right - of the x prefix. However, if there is only a single global wildcard - to the right of x, then the original LIKE or GLOB test is disabled. - In other words, if the pattern is like this: -} -SYNTAX { - /column/ LIKE /x/% - /column/ GLOB /x/* -} -PARAGRAPH { - Then the original LIKE or GLOB tests are disabled when the virtual - terms constrain an index because in that case we know that all of the - rows selected by the index will pass the LIKE or GLOB test. -} - -HEADING 1 {Joins} joins - -PARAGRAPH { - The current implementation of - SQLite uses only loop joins. That is to say, joins are implemented as - nested loops. 
-} -PARAGRAPH { - The default order of the nested loops in a join is for the left-most - table in the FROM clause to form the outer loop and the right-most - table to form the inner loop. - However, SQLite will nest the loops in a different order if doing so - will help it to select better indices. -} -PARAGRAPH { - Inner joins can be freely reordered. However a left outer join is - neither commutative nor associative and hence will not be reordered. - Inner joins to the left and right of the outer join might be reordered - if the optimizer thinks that is advantageous but the outer joins are - always evaluated in the order in which they occur. -} -PARAGRAPH { - When selecting the order of tables in a join, SQLite uses a greedy - algorithm that runs in polynomial time. -} -PARAGRAPH { - The ON and USING clauses of a join are converted into additional - terms of the WHERE clause prior to WHERE clause analysis described - above in paragraph 1.0. Thus - with SQLite, there is no advantage to use the newer SQL92 join syntax - over the older SQL89 comma-join syntax. They both end up accomplishing - exactly the same thing. -} -PARAGRAPH { - Join reordering is automatic and usually works well enough that - programmer do not have to think about it. But occasionally some - hints from the programmer are needed. For a description of when - hints might be necessary and how to provide those hints, see the -
    QueryPlans - page in the Wiki. -} - -HEADING 1 {Choosing between multiple indices} multi_index - -PARAGRAPH { - Each table in the FROM clause of a query can use at most one index, - and SQLite strives to use at least one index on each table. Sometimes, - two or more indices might be candidates for use on a single table. - For example: -} -CODE { - CREATE TABLE ex2(x,y,z); - CREATE INDEX ex2i1 ON ex2(x); - CREATE INDEX ex2i2 ON ex2(y); - SELECT z FROM ex2 WHERE x=5 AND y=6; -} -PARAGRAPH { - For the SELECT statement above, the optimizer can use the ex2i1 index - to lookup rows of ex2 that contain x=5 and then test each row against - the y=6 term. Or it can use the ex2i2 index to lookup rows - of ex2 that contain y=6 then test each of those rows against the - x=5 term. -} -PARAGRAPH { - When faced with a choice of two or more indices, SQLite tries to estimate - the total amount of work needed to perform the query using each option. - It then selects the option that gives the least estimated work. -} -PARAGRAPH { - To help the optimizer get a more accurate estimate of the work involved - in using various indices, the user may optional run the ANALYZE command. - The ANALYZE command scans all indices of database where there might - be a choice between two or more indices and gathers statistics on the - selectiveness of those indices. The results of this scan are stored - in the sqlite_stat1 table. - The contents of the sqlite_stat1 table are not updated as the database - changes so after making significant changes it might be prudent to - rerun ANALYZE. - The results of an ANALYZE command are only available to database connections - that are opened after the ANALYZE command completes. -} -PARAGRAPH { - Once created, the sqlite_stat1 table cannot be dropped. But its - content can be viewed, modified, or erased. Erasing the entire content - of the sqlite_stat1 table has the effect of undoing the ANALYZE command. 
- Changing the content of the sqlite_stat1 table can get the optimizer - deeply confused and cause it to make silly index choices. Making - updates to the sqlite_stat1 table (except by running ANALYZE) is - not recommended. -} -PARAGRAPH { - Terms of the WHERE clause can be manually disqualified for use with - indices by prepending a unary *+* operator to the column name. The - unary *+* is a no-op and will not slow down the evaluation of the test - specified by the term. - But it will prevent the term from constraining an index. - So, in the example above, if the query were rewritten as: -} -CODE { - SELECT z FROM ex2 WHERE +x=5 AND y=6; -} -PARAGRAPH { - The *+* operator on the *x* column would prevent that term from - constraining an index. This would force the use of the ex2i2 index. -} - -HEADING 1 {Avoidance of table lookups} index_only - -PARAGRAPH { - When doing an indexed lookup of a row, the usual procedure is to - do a binary search on the index to find the index entry, then extract - the rowid from the index and use that rowid to do a binary search on - the original table. Thus a typical indexed lookup involves two - binary searches. - If, however, all columns that were to be fetched from the table are - already available in the index itself, SQLite will use the values - contained in the index and will never look up the original table - row. This saves one binary search for each row and can make many - queries run twice as fast. -} - -HEADING 1 {ORDER BY optimizations} order_by - -PARAGRAPH { - SQLite attempts to use an index to satisfy the ORDER BY clause of a - query when possible. - When faced with the choice of using an index to satisfy WHERE clause - constraints or satisfying an ORDER BY clause, SQLite does the same - work analysis described in section 6.0 - and chooses the index that it believes will result in the fastest answer. 
- -} - -HEADING 1 {Subquery flattening} flattening - -PARAGRAPH { - When a subquery occurs in the FROM clause of a SELECT, the default - behavior is to evaluate the subquery into a transient table, then run - the outer SELECT against the transient table. - This is problematic since the transient table will not have any indices - and the outer query (which is likely a join) will be forced to do a - full table scan on the transient table. -} -PARAGRAPH { - To overcome this problem, SQLite attempts to flatten subqueries in - the FROM clause of a SELECT. - This involves inserting the FROM clause of the subquery into the - FROM clause of the outer query and rewriting expressions in - the outer query that refer to the result set of the subquery. - For example: -} -CODE { - SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5 -} -PARAGRAPH { - Would be rewritten using query flattening as: -} -CODE { - SELECT x+y AS a FROM t1 WHERE z<100 AND a>5 -} -PARAGRAPH { - There is a long list of conditions that must all be met in order for - query flattening to occur. -} -PARAGRAPH { -
      -
    1. The subquery and the outer query do not both use aggregates.
    2. -
    3. The subquery is not an aggregate or the outer query is not a join.
    4. -
    5. The subquery is not the right operand of a left outer join, or - the subquery is not itself a join.
    6. -
    7. The subquery is not DISTINCT or the outer query is not a join.
    8. -
    9. The subquery is not DISTINCT or the outer query does not use - aggregates.
    10. -
    11. The subquery does not use aggregates or the outer query is not - DISTINCT.
    12. -
    13. The subquery has a FROM clause.
    14. -
    15. The subquery does not use LIMIT or the outer query is not a join.
    16. -
    17. The subquery does not use LIMIT or the outer query does not use - aggregates.
    18. -
    19. The subquery does not use aggregates or the outer query does not - use LIMIT.
    20. -
    21. The subquery and the outer query do not both have ORDER BY clauses.
    22. -
    23. The subquery is not the right term of a LEFT OUTER JOIN or the - subquery has no WHERE clause.
    24. -
    -} -PARAGRAPH { - The proof that query flattening may safely occur if all of the the - above conditions are met is left as an exercise to the reader. -} -PARAGRAPH { - Query flattening is an important optimization when views are used as - each use of a view is translated into a subquery. -} - -HEADING 1 {The MIN/MAX optimization} minmax - -PARAGRAPH { - Queries of the following forms will be optimized to run in logarithmic - time assuming appropriate indices exist: -} -CODE { - SELECT MIN(x) FROM table; - SELECT MAX(x) FROM table; -} -PARAGRAPH { - In order for these optimizations to occur, they must appear in exactly - the form shown above - changing only the name of the table and column. - It is not permissible to add a WHERE clause or do any arithmetic on the - result. The result set must contain a single column. - The column in the MIN or MAX function must be an indexed column. -} ADDED pages/34to35.in Index: pages/34to35.in ================================================================== --- /dev/null +++ pages/34to35.in @@ -0,0 +1,1002 @@ +SQLite Changes From Version 3.4.2 To 3.5.0 + +proc CODE {text} { + puts "
    "
    +  puts $text
    +  puts "
    " +} +proc SYNTAX {text} { + puts "
    "
    +  set t2 [string map {& & < < > >} $text]
    +  regsub -all "/(\[^\n/\]+)/" $t2 {\1} t3
    +  puts "$t3"
    +  puts "
    " +} +proc IMAGE {name {caption {}}} { + puts "
    " + if {$caption!=""} { + puts "
    $caption" + } + puts "
    " +} +proc PARAGRAPH {text} { + # regsub -all "/(\[a-zA-Z0-9\]+)/" $text {\1} t2 + #regsub -all "\\*(\[^\n*\]+)\\*" $text {\1} t3 + regsub -all {\[([^]\n]+)\]} $text {[resolve_link \1]} t3 + puts "

    [subst -novar -noback $t3]

    \n" +} +proc resolve_link {args} { + set a2 [split $args |] + set id [string trim [lindex $a2 0]] + if {[lindex $a2 1]==""} { + set display [string trim [lindex $a2 0]] + } else { + set display [string trim [lrange $a2 1 end]] + } + regsub -all {[^a-zA-Z0-9_]} $id {} id + return "$display" +} +set level(0) 0 +set level(1) 0 +proc HEADING {n name {tag {}}} { + if {$tag!=""} { + puts "" + } + global level + incr level($n) + for {set i [expr {$n+1}]} {$i<10} {incr i} { + set level($i) 0 + } + if {$n==0} { + set num {} + } elseif {$n==1} { + set num $level(1).0 + } else { + set num $level(1) + for {set i 2} {$i<=$n} {incr i} { + append num .$level($i) + } + } + incr n 1 + puts "$num $name" +} + +HEADING 0 {Moving From SQLite 3.4.2 to 3.5.0} + +PARAGRAPH { + SQLite version 3.5.0 introduces a new OS interface layer that + is incompatible with all prior versions of SQLite. In addition, + a few existing interfaces have been generalized to work across all + database connections within a process rather than just all + connections within a thread. The purpose of this article + is to describe the changes to 3.5.0 in detail so that users + of prior versions of SQLite can judge what, if any, effort will + be required to upgrade to newer versions. +} + +HEADING 1 {Overview Of Changes} + +PARAGRAPH { + A quick enumeration of the changes in SQLite version 3.5.0 + is provide here. Subsequent sections will describe these + changes in more detail. +} +PARAGRAPH { +
      +
    1. The OS interface layer has been completely reworked: +
        +
      1. The undocumented sqlite3_os_switch() interface has + been removed.
      2. +
      3. The SQLITE_ENABLE_REDEF_IO compile-time flag no longer functions. + I/O procedures are now always redefinable.
      4. +
      5. Three new objects are defined for specifying I/O procedures: + [sqlite3_vfs], [sqlite3_file], and [sqlite3_io_methods].
      6. +
      7. Three new interfaces are used to create alternative OS interfaces: + [sqlite3_vfs_register()], [sqlite3_vfs_unregister()], and + [sqlite3_vfs_find()].
      8. +
      9. A new interface has been added to provided additional control over + the creation of new database connections: [sqlite3_open_v2()]. + The legacy interfaces of [sqlite3_open()] and + [sqlite3_open16()] continue to be fully supported.
      10. +
    2. +
    3. The optional shared cache and memory management features that + were introduced in version 3.3.0 can now be used across multiple + threads within the same process. Formerly, these extensions only + applied to database connections operating within a single thread. +
        +
      1. The [sqlite3_enable_shared_cache()] interface now applies to all + threads within a process, not to just the one thread in which it + was run.
      2. +
      3. The [sqlite3_soft_heap_limit()] interface now applies to all threads + within a process, not to just the one thread in which it was run.
      4. +
      5. The [sqlite3_release_memory()] interface will now attempt to reduce + the memory usages across all database connections in all threads, not + just connections in the thread where the interface is called.
      6. +
      7. The [sqlite3_thread_cleanup()] interface has become a no-op.
      8. +
    4. +
    5. Restrictions on the use of the same database connection by multiple + threads have been dropped. It is now safe for + multiple threads to use the same database connection at the same + time.
    6. +
    7. There is now a compile-time option that allows an application to + define alternative malloc()/free() implementations without having + to modify any core SQLite code.
    8. +
    9. There is now a compile-time option that allows an application to + define alternative mutex implementations without having + to modify any core SQLite code.
    10. +
    +} +PARAGRAPH { + Of these changes, only 1a and 2a through 2c are incompatibilities + in any formal sense. + But users who have previously made custom modifications to the + SQLite source (for example to add a custom OS layer for embedded + hardware) might find that these changes have a larger impact. + On the other hand, an important goal of these changes is to make + it much easier to customize SQLite for use on different operating + systems. +} + +HEADING 1 {The OS Interface Layer} + +PARAGRAPH { + If your system defines a custom OS interface for SQLite or if you + were using the undocumented sqlite3_os_switch() + interface, then you will need to make modifications in order to + upgrade to SQLite version 3.5.0. This may seem painful at first + glance. But as you look more closely, you will probably discover + that your changes are made smaller and easier to understand and manage + by the new SQLite interface. It is likely that your changes will + now also work seamlessly with the SQLite amalgamation. You will + no longer need to make any changes to the code SQLite source code. + All of your changes can be effected by application code and you can + link against a standard, unmodified version of the SQLite amalgamation. + Furthermore, the OS interface layer, which was formerly undocumented, + is now an officially support interface for SQLite. So you have + some assurance that this will be a one-time change and that your + new backend will continue to work in future versions of SQLite. +} + +HEADING 2 {The Virtual File System Object} + +PARAGRAPH { + The new OS interface for SQLite is built around an object named + [sqlite3_vfs]. The "vfs" standard for "Virtual File System". + The sqlite3_vfs object is basically a structure containing pointers + to functions that implement the primitive disk I/O operations that + SQLite needs to perform in order to read and write databases. + In this article, we will often refer a sqlite3_vfs objects as a "VFS". 
+} + +PARAGRAPH { + SQLite is able to use multiple VFSes at the same time. Each + individual database connection is associated with just one VFS. + But if you have multiple database connections, each connection + can be associated with a different VFS. +} + +PARAGRAPH { + There is always a default VFS. + The legacy interfaces [sqlite3_open()] and [sqlite3_open16()] always + use the default VFS. + The new interface for creating database connections, + [sqlite3_open_v2()], allows you to specify which VFS you want to + use by name. +} + +HEADING 3 {Registering New VFS Objects} + +PARAGRAPH { + Standard builds of SQLite for unix or windows come with a single + VFS named "unix" or "win32", as appropriate. This one VFS is also + the default. So if you are using the legacy open functions, everything + will continue to operate as it has before. The change is that an application + now has the flexibility of adding new VFS modules to implement a + customized OS layer. The [sqlite3_vfs_register()] API can be used + to tell SQLite about one or more application-defined VFS modules: +} + +CODE { +int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); +} + +PARAGRAPH { + Applications can call sqlite3_vfs_register at any time, though of course + a VFS needs to be registered before it can be used. The first argument + is a pointer to a customized VFS object that the application has prepared. + The second argument is true to make the new VFS the default VFS so that + it will be used by the legacy [sqlite3_open()] and [sqlite3_open16()] APIs. + If the new VFS is not the default, then you will probably have to use + the new [sqlite3_open_v2()] API to use it. Note, however, that if + a new VFS is the only VFS known to SQLite (if SQLite was compiled without + its usual default VFS or if the pre-compiled default VFS was removed + using [sqlite3_vfs_unregister()]) then the new VFS automatic becomes the + default VFS regardless of the makeDflt argument to [sqlite3_vfs_register()]. 
+} + +PARAGRAPH { + Standard builds include the default "unix" or "win32" VFSes. + But if you use the -DOS_OTHER=1 compile-time option, then SQLite is + built without a default VFS. In that case, the application must + register at least one VFS prior to calling [sqlite3_open()]. + This is the approach that embedded applications should use. + Rather than modifying the SQLite source to to insert an alternative + OS layer as was done in prior releases of SQLite, instead compile + an unmodified SQLite source file (preferably the amalgamation) + with the -DOS_OTHER=1 option, then invoke [sqlite3_vfs_register()] + to define the interface to the underlying filesystem prior to + creating any database connections. +} + +HEADING 3 {Additional Control Over VFS Objects} + +PARAGRAPH { + The [sqlite3_vfs_unregister()] API is used to remove an existing + VFS from the system. +} + +CODE { +int sqlite3_vfs_unregister(sqlite3_vfs*); +} + +PARAGRAPH { + The [sqlite3_vfs_find()] API is used to locate a particular VFS + by name. Its prototype is as follows: +} + +CODE { +sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); +} + +PARAGRAPH { + The argument is the symbolic name for the desired VFS. If the + argument is a NULL pointer, then the default VFS is returned. + The function returns a pointer to the [sqlite3_vfs] object that + implements the VFS. Or it returns a NULL pointer if no object + could be found that matched the search criteria. +} + +HEADING 3 {Modifications Of Existing VFSes} + +PARAGRAPH { + Once a VFS has been registered, it should never be modified. If + a change in behavior is required, a new VFS should be registered. + The application could, perhaps, use [sqlite3_vfs_find()] to locate + the old VFS, make a copy of the old VFS into a new [sqlite3_vfs] + object, make the desired modifications to the new VFS, unregister + the old VFS, the register the new VFS in its place. 
Existing + database connections would continue to use the old VFS even after + it is unregistered, but new database connections would use the + new VFS. +} + +HEADING 3 {The VFS Object} + +PARAGRAPH { + A VFS object is an instance of the following structure: +} + +CODE { +typedef struct sqlite3_vfs sqlite3_vfs; +struct sqlite3_vfs { + int iVersion; /* Structure version number */ + int szOsFile; /* Size of subclassed sqlite3_file */ + int mxPathname; /* Maximum file pathname length */ + sqlite3_vfs *pNext; /* Next registered VFS */ + const char *zName; /* Name of this virtual file system */ + void *pAppData; /* Pointer to application-specific data */ + int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, + int flags, int *pOutFlags); + int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); + int (*xAccess)(sqlite3_vfs*, const char *zName, int flags); + int (*xGetTempName)(sqlite3_vfs*, char *zOut); + int (*xFullPathname)(sqlite3_vfs*, const char *zName, char *zOut); + void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); + void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); + void *(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol); + void (*xDlClose)(sqlite3_vfs*, void*); + int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); + int (*xSleep)(sqlite3_vfs*, int microseconds); + int (*xCurrentTime)(sqlite3_vfs*, double*); + /* New fields may be appended in figure versions. The iVersion + ** value will increment whenever this happens. */ +}; +} + +PARAGRAPH { + To create a new VFS, an application fills in an instance of this + structure with appropriate values and then calls [sqlite3_vfs_register()]. +} + +PARAGRAPH { + The iVersion field of [sqlite3_vfs] should be 1 for SQLite version 3.5.0. + This number may increase in future versions of SQLite if we have to + modify the VFS object in some way. We hope that this never happens, + but the provision is made in case it does. 
+} + +PARAGRAPH { + The szOsFile field is the size in bytes of the structure that defines + an open file: the [sqlite3_file] object. This object will be described + more fully below. The point here is that each VFS implementation can + define its own [sqlite3_file] object containing whatever information + the VFS implementation needs to store about an open file. SQLite needs + to know how big this object is, however, in order to preallocate enough + space to hold it. +} + +PARAGRAPH { + The mxPathname field is the maximum length of a file pathname that + this VFS can use. SQLite sometimes has to preallocate buffers of + this size, so it should be as small as reasonably possible. Some + filesystems permit huge pathnames, but in practice pathnames rarely + extend beyond 100 bytes or so. You do not have to put the longest + pathname that the underlying filesystem can handle here. You only + have to put the longest pathname that you want SQLite to be able to + handle. A few hundred is a good value in most cases. +} + +PARAGRAPH { + The pNext field is used internally by SQLite. Specifically, SQLite + uses this field to form a linked list of registered VFSes. +} + +PARAGRAPH { + The zName field is the symbolic name of the VFS. This is the name + that the [sqlite3_vfs_find()] compares against when it is looking for + a VFS. +} + +PARAGRAPH { + The pAppData pointer is unused by the SQLite core. The pointer is + available to store auxiliary information that a VFS information might + want to carry around. +} + +PARAGRAPH { + The remaining fields of the [sqlite3_vfs] object all store pointer + to functions that implement primitive operations. We call these + "methods". The first methods, xOpen, is used to open files on + the underlying storage media. The result is an [sqlite3_file] + object. There are additional methods, defined by the [sqlite3_file] + object itself that are used to read and write and close the file. + The additional methods are detailed below. 
The filename is in UTF-8. + SQLite will guarantee that the zFilename string passed to + xOpen() is a full pathname as generated by xFullPathname() and + that the string will be valid and unchanged until xClose() is + called. So the [sqlite3_file] can store a pointer to the + filename if it needs to remember the filename for some reason. + The flags argument to xOpen() is a copy of the flags argument + to sqlite3_open_v2(). If sqlite3_open() or sqlite3_open16() + is used, then flags is [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. + If xOpen() opens a file read-only then it sets *pOutFlags to + include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be + set. + SQLite will also add one of the following flags to the xOpen() + call, depending on the object being opened: +
      +
    • [SQLITE_OPEN_MAIN_DB] +
    • [SQLITE_OPEN_MAIN_JOURNAL] +
    • [SQLITE_OPEN_TEMP_DB] +
    • [SQLITE_OPEN_TEMP_JOURNAL] +
    • [SQLITE_OPEN_TRANSIENT_DB] +
    • [SQLITE_OPEN_SUBJOURNAL] +
    • [SQLITE_OPEN_MASTER_JOURNAL] +
    + The file I/O implementation can use the object type flags to + changes the way it deals with files. For example, an application + that does not care about crash recovery or rollback, might make + the open of a journal file a no-op. Writes to this journal are + also a no-op. Any attempt to read the journal returns [SQLITE_IOERR]. + Or the implementation might recognize the a database file will + be doing page-aligned sector reads and writes in a random order + and set up its I/O subsystem accordingly. + SQLite might also add one of the following flags to the xOpen + method: +
      +
    • [SQLITE_OPEN_DELETEONCLOSE] +
    • [SQLITE_OPEN_EXCLUSIVE] +
    + The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be + deleted when it is closed. This will always be set for TEMP + databases and journals and for subjournals. The + [SQLITE_OPEN_EXCLUSIVE] flag means the file should be opened + for exclusive access. This flag is set for all files except + for the main database file. + The [sqlite3_file] structure passed as the third argument to + xOpen is allocated by the caller. xOpen just fills it in. The + caller allocates a minimum of szOsFile bytes for the [sqlite3_file] + structure. +} + +PARAGRAPH { + The differences between an [SQLITE_OPEN_TEMP_DB] database and an + [SQLITE_OPEN_TRANSIENT_DB] database is this: The [SQLITE_OPEN_TEMP_DB] + is used for explicitly declared and named TEMP tables (using the + CREATE TEMP TABLE syntax) or for named tables in a temporary database + that is created by opening a database with a filename that is an empty + string. An [SQLITE_OPEN_TRANSIENT_DB] holds an database table that + SQLite creates automatically in order to evaluate a subquery or + ORDER BY or GROUP BY clause. Both TEMP_DB and TRANSIENT_DB databases + are private and are deleted automatically. TEMP_DB databases last + for the duration of the database connection. TRANSIENT_DB databases + last only for the duration of a single SQL statement. +} + +PARAGRAPH { + The xDelete method is used delete a file. The name of the file is + given in the second parameter. The filename will be in UTF-8. + The VFS must convert the filename into whatever character representation + the underlying operating system expects. If the syncDir parameter is + true, then the xDelete method should not return until the change + to the directory contents for the directory containing the + deleted file have been synced to disk in order to insure that the + file does not "reappear" if a power failure occurs soon after. +} + +PARAGRAPH { + The xAccess method is used to check for access permissions on a file. + The filename will be UTF-8 encoded. 
The flags argument will be + [SQLITE_ACCESS_EXISTS] to check for the existence of the file, + [SQLITE_ACCESS_READWRITE] to check to see if the file is both readable + and writable, or [SQLITE_ACCESS_READ] to check to see if the file is + at least readable. The "file" named by the second parameter might + be a directory or folder name. +} + +PARAGRAPH { + The xGetTempName method computes the name of a temporary file that + SQLite can use. The name should be written into the buffer given + by the second parameter. SQLite will size that buffer to hold + at least mxPathname bytes. The generated filename should be in UTF-8. + To avoid security problems, the generated temporary filename should + contain enough randomness to prevent an attacker from guessing the + temporary filename in advance. +} + +PARAGRAPH { + The xFullPathname method is used to convert a relative pathname + into a full pathname. The resulting full pathname is written into + the buffer provided by the third parameter. SQLite will size the + output buffer to at least mxPathname bytes. Both the input and + output names should be in UTF-8. +} + +PARAGRAPH { + The xDlOpen, xDlError, xDlSym, and xDlClose methods are all used for + accessing shared libraries at run-time. These methods may be omitted + (and their pointers set to zero) if the library is compiled with + SQLITE_OMIT_LOAD_EXTENSION or if the [sqlite3_enable_load_extension()] + interface is never used to enable dynamic extension loading. The + xDlOpen method opens a shared library or DLL and returns a pointer to + a handle. NULL is returned if the open fails. If the open fails, + the xDlError method can be used to obtain a text error message. + The message is written into the zErrMsg buffer of the third parameter + which is at least nByte bytes in length. The xDlSym returns a pointer + to a symbol in the shared library. The name of the symbol is given + by the second parameter. UTF-8 encoding is assumed. 
If the symbol + is not found a NULL pointer is returned. The xDlClose routine closes + the shared library. +} + +PARAGRAPH { + The xRandomness method is used exactly once to initialize the + pseudo-random number generator (PRNG) inside of SQLite. Only + the xRandomness method on the default VFS is used. The xRandomness + methods on other VFSes are never accessed by SQLite. + The xRandomness routine requests that nByte bytes of randomness + be written into zOut. The routine returns the actual number of + bytes of randomness obtained. The quality of the randomness so obtained + will determine the quality of the randomness generated by built-in + SQLite functions such as random() and randomblob(). SQLite also + uses its PRNG to generate temporary file names.. On some platforms + (ex: windows) SQLite assumes that temporary file names are unique + without actually testing for collisions, so it is important to have + good-quality randomness even if the random() and randomblob() + functions are never used. +} + +PARAGRAPH { + The xSleep method is used to suspend the calling thread for at + least the number of microseconds given. This method is used to + implement the [sqlite3_sleep()] and [sqlite3_busy_timeout()] APIs. + In the case of [sqlite3_sleep()] the xSleep method of the default + VFS is always used. If the underlying system does not have a + microsecond resolution sleep capability, then the sleep time should + be rounded up. xSleep returns this rounded-up value. +} + +PARAGRAPH { + The xCurrentTime method finds the current time and date and writes + the result as double-precision floating point value into pointer + provided by the second parameter. The time and date is in + coordinated universal time (UTC) and is a fractional julian day number. +} + +HEADING 3 {The Open File Object} + +PARAGRAPH { + The result of opening a file is an instance of an [sqlite3_file] object. 
+ The [sqlite3_file] object is an abstract base class defined as follows: +} + +CODE { +typedef struct sqlite3_file sqlite3_file; +struct sqlite3_file { + const struct sqlite3_io_methods *pMethods; +}; +} + +PARAGRAPH { + Each VFS implementation will subclass the [sqlite3_file] by adding + additional fields at the end to hold whatever information the VFS + needs to know about an open file. It does not matter what information + is stored as long as the total size of the structure does not exceed + the szOsFile value recorded in the [sqlite3_vfs] object. +} + +PARAGRAPH { + The [sqlite3_io_methods] object is a structure that contains pointers + to methods for reading, writing, and otherwise dealing with files. + This object is defined as follows: +} + +CODE { +typedef struct sqlite3_io_methods sqlite3_io_methods; +struct sqlite3_io_methods { + int iVersion; + int (*xClose)(sqlite3_file*); + int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); + int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); + int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); + int (*xSync)(sqlite3_file*, int flags); + int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); + int (*xLock)(sqlite3_file*, int); + int (*xUnlock)(sqlite3_file*, int); + int (*xCheckReservedLock)(sqlite3_file*); + int (*xFileControl)(sqlite3_file*, int op, void *pArg); + int (*xSectorSize)(sqlite3_file*); + int (*xDeviceCharacteristics)(sqlite3_file*); + /* Additional methods may be added in future releases */ +}; +} + +PARAGRAPH { + The iVersion field of [sqlite3_io_methods] is provided as insurance + against future enhancements. The iVersion value should always be + 1 for SQLite version 3.5. +} + +PARAGRAPH { + The xClose method closes the file. The space for the [sqlite3_file] + structure is deallocated by the caller. But if the [sqlite3_file] + contains pointers to other allocated memory or resources, those + allocations should be released by the xClose method. 
+} + +PARAGRAPH { + The xRead method reads iAmt bytes from the file beginning at a byte + offset to iOfst. The data read is stored in the pointer of the + second parameter. xRead returns the [SQLITE_OK] on success, + [SQLITE_IOERR_SHORT_READ] if it was not able to read the full number + of bytes because it reached end-of-file, or [SQLITE_IOERR_READ] for + any other error. +} + +PARAGRAPH { + The xWrite method writes iAmt bytes of data from the second parameter + into the file beginning at an offset of iOfst bytes. If the size of + the file is less than iOfst bytes prior to the write, then xWrite should + ensure that the file is extended with zeros up to iOfst bytes prior + to beginning its write. xWrite continues to extends the file as + necessary so that the size of the file is at least iAmt+iOfst bytes + at the conclusion of the xWrite call. The xWrite method returns + [SQLITE_OK] on success. If the write cannot complete because the + underlying storage medium is full, then [SQLITE_FULL] is returned. + [SQLITE_IOERR_WRITE] should be returned for any other error. +} + +PARAGRAPH { + The xTruncate method truncates a file to be nByte bytes in length. + If the file is already nByte bytes or less in length then this + method is a no-op. The xTruncate method returns [SQLITE_OK] on + success and [SQLITE_IOERR_TRUNCATE] if anything goes wrong. +} + +PARAGRAPH { + The xSync method is used to force previously written data out of + operating system cache and into non-volatile memory. The second + parameter is usually [SQLITE_SYNC_NORMAL]. If the second parameter + is [SQLITE_SYNC_FULL] then the xSync method should make sure that + data has also been flushed through the disk controllers cache. + The [SQLITE_SYNC_FULL] parameter is the equivalent of the F_FULLSYNC + ioctl() on Mac OS X. The xSync method returns + [SQLITE_OK] on success and [SQLITE_IOERR_FSYNC] if anything goes wrong. 
+} + +PARAGRAPH { + The xFileSize() method determines the current size of the file + in bytes and writes that value into *pSize. It returns [SQLITE_OK] + on success and [SQLITE_IOERR_FSTAT] if something goes wrong. +} + +PARAGRAPH { + The xLock and xUnlock methods are used to set and clear file locks. + SQLite supports five levels of file locks, in order: +
      +
    • [SQLITE_LOCK_NONE] +
    • [SQLITE_LOCK_SHARED] +
    • [SQLITE_LOCK_RESERVED] +
    • [SQLITE_LOCK_PENDING] +
    • [SQLITE_LOCK_EXCLUSIVE] +
    + The underlying implementation can support some subset of these locking + levels as long as it meets the other requirements of this paragraph. + The locking level is specified as the second argument to both xLock + and xUnlock. The xLock method increases the locking level to the + specified locking level or higher. The xUnlock method decreases the + locking level to no lower than the level specified. + [SQLITE_LOCK_NONE] means that the file is unlocked. [SQLITE_LOCK_SHARED] + gives permission to read the file. Multiple database connections can + hold [SQLITE_LOCK_SHARED] at the same time. + [SQLITE_LOCK_RESERVED] is like [SQLITE_LOCK_SHARED] in that its is permission + to read the file. But only a single connection can hold a reserved lock + at any point in time. The [SQLITE_LOCK_PENDING] is also permission to + read the file. Other connections can continue to read the file as well, + but no other connection is allowed to escalate a lock from none to shared. + [SQLITE_LOCK_EXCLUSIVE] is permission to write on the file. Only a single + connection can hold an exclusive lock and no other connection can hold + any lock (other than "none") while one connection is hold an exclusive + lock. The xLock returns [SQLITE_OK] on success, [SQLITE_BUSY] if it + is unable to obtain the lock, or [SQLITE_IOERR_RDLOCK] if something else + goes wrong. The xUnlock method returns [SQLITE_OK] on success and + [SQLITE_IOERR_UNLOCK] for problems. +} + +PARAGRAPH { + The xCheckReservedLock method checks to see if another connection or + another process is currently holding a reserved, pending, or exclusive + lock on the file. It returns true or false. +} + +PARAGRAPH { + The xFileControl() method is a generic interface that allows custom + VFS implementations to directly control an open file using the + (new and experimental) + [sqlite3_file_control()] interface. The second "op" argument + is an integer opcode. 
The third + argument is a generic pointer which is intended to be a pointer + to a structure that may contain arguments or space in which to + write return values. Potential uses for xFileControl() might be + functions to enable blocking locks with timeouts, to change the + locking strategy (for example to use dot-file locks), to inquire + about the status of a lock, or to break stale locks. The SQLite + core reserves opcodes less than 100 for its own use. + A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available. + Applications that define a custom xFileControl method should use opcodes + greater than 100 to avoid conflicts. +} + +PARAGRAPH { + The xSectorSize returns the "sector size" of the underlying + non-volatile media. A "sector" is defined as the smallest unit of + storage that can be written without disturbing adjacent storage. + On a disk drive the "sector size" has until recently been 512 bytes, + though there is a push to increase this value to 4KiB. SQLite needs + to know the sector size so that it can write a full sector at a + time, and thus avoid corrupting adjacent storage space if a power + lose occurs in the middle of a write. +} + +PARAGRAPH { + The xDeviceCharacteristics method returns an integer bit vector that + defines any special properties that the underlying storage medium might + have that SQLite can use to increase performance. The allowed return + is the bit-wise OR of the following values: +
      +
    • [SQLITE_IOCAP_ATOMIC] +
    • [SQLITE_IOCAP_ATOMIC512] +
    • [SQLITE_IOCAP_ATOMIC1K] +
    • [SQLITE_IOCAP_ATOMIC2K] +
    • [SQLITE_IOCAP_ATOMIC4K] +
    • [SQLITE_IOCAP_ATOMIC8K] +
    • [SQLITE_IOCAP_ATOMIC16K] +
    • [SQLITE_IOCAP_ATOMIC32K] +
    • [SQLITE_IOCAP_ATOMIC64K] +
    • [SQLITE_IOCAP_SAFE_APPEND] +
    • [SQLITE_IOCAP_SEQUENTIAL] +
    + The [SQLITE_IOCAP_ATOMIC] bit means that all writes to this device are + atomic in the sense that either the entire write occurs or none of it + occurs. The other + [SQLITE_IOCAP_ATOMIC | SQLITE_IOCAP_ATOMICnnn] values indicate that + writes of aligned blocks of the indicated size are atomic. + [SQLITE_IOCAP_SAFE_APPEND] means that when extending a file with new + data, the new data is written first and then the file size is updated. + So if a power failure occurs, there is no chance that the file might have + been extended with randomness. The [SQLITE_IOCAP_SEQUENTIAL] bit means + that all writes occur in the order that they are issued and are not + reordered by the underlying file system. +} + +HEADING 3 {Checklist For Constructing A New VFS} + +PARAGRAPH { + The preceding paragraphs contain a lot of information. + To ease the task of constructing + a new VFS for SQLite we offer the following implementation checklist: +} + +PARAGRAPH { +
      +
    1. Define an appropriate subclass of the [sqlite3_file] object. +
    2. Implement the methods required by the [sqlite3_io_methods] object. +
    3. Create a static and + constant [sqlite3_io_methods] object containing pointers + to the methods from the previous step. +
    4. Implement the xOpen method that opens a file and populates an + [sqlite3_file] object, including setting pMethods to + point to the [sqlite3_io_methods] object from the previous step. +
    5. Implement the other methods required by [sqlite3_vfs]. +
    6. Define a static (but not constant) [sqlite3_vfs] structure that + contains pointers to the xOpen method and the other methods and + which contains the appropriate values for iVersion, szOsFile, + mxPathname, zName, and pAppData. +
    7. Implement a procedure that calls [sqlite3_vfs_register()] and + passes it a pointer to the [sqlite3_vfs] structure from the previous + step. This procedure is probably the only exported symbol in the + source file that implements your VFS. +
    +} + +PARAGRAPH { + Within your application, call the procedure implemented in the last + step above as part of your initialization process before any + database connections are opened. +} + +HEADING 1 {The Memory Allocation Subsystem} + +PARAGRAPH { + Beginning with version 3.5, SQLite obtains all of the heap memory it + needs using the routines [sqlite3_malloc()], [sqlite3_free()], and + [sqlite3_realloc()]. These routines have existed in prior versions + of SQLite, but SQLite has previously bypassed these routines and used + its own memory allocator. This all changes in version 3.5.0. +} + +PARAGRAPH { + The SQLite source tree actually contains multiple versions of the + memory allocator. The default high-speed version found in the + "mem1.c" source file is used for most builds. But if the SQLITE_MEMDEBUG + flag is enabled, a separate memory allocator the "mem2.c" source file + is used instead. The mem2.c allocator implements lots of hooks to + do error checking and to simulate memory allocation failures for testing + purposes. Both of these allocators use the malloc()/free() implementation + in the standard C library. +} + +PARAGRAPH { + Applications are not required to use either of these standard memory + allocators. If SQLite is compiled with SQLITE_OMIT_MEMORY_ALLOCATION + then no implementation for the [sqlite3_malloc()], [sqlite3_realloc()], + and [sqlite3_free()] functions is provided. Instead, the application + that links against SQLite must provide its own implementation of these + functions. The application provided memory allocator is not required + to use the malloc()/free() implementation in the standard C library. + An embedded application might provide an alternative memory allocator + that uses memory for a fixed memory pool set aside for the exclusive + use of SQLite, for example. 
+} + +PARAGRAPH { + Applications that implement their own memory allocator must provide + implementation for the usual three allocation functions + [sqlite3_malloc()], [sqlite3_realloc()], and [sqlite3_free()]. + And they must also implement a fourth function: +} + +CODE { +int sqlite3_memory_alarm( + void(*xCallback)(void *pArg, sqlite3_int64 used, int N), + void *pArg, + sqlite3_int64 iThreshold +); +} + +PARAGRAPH { + The [sqlite3_memory_alarm] routine is used to register + a callback on memory allocation events. + This routine registers or clears a callbacks that fires when + the amount of memory allocated exceeds iThreshold. Only + a single callback can be registered at a time. Each call + to [sqlite3_memory_alarm()] overwrites the previous callback. + The callback is disabled by setting xCallback to a NULL + pointer. +} + +PARAGRAPH { + The parameters to the callback are the pArg value, the + amount of memory currently in use, and the size of the + allocation that provoked the callback. The callback will + presumably invoke [sqlite3_free()] to free up memory space. + The callback may invoke [sqlite3_malloc()] or [sqlite3_realloc()] + but if it does, no additional callbacks will be invoked by + the recursive calls. +} + +PARAGRAPH { + The [sqlite3_soft_heap_limit()] interface works by registering + a memory alarm at the soft heap limit and invoking + [sqlite3_release_memory()] in the alarm callback. Application + programs should not attempt to use the [sqlite3_memory_alarm()] + interface because doing so will interfere with the + [sqlite3_soft_heap_limit()] module. This interface is exposed + only so that applications can provide their own + alternative implementation when the SQLite core is + compiled with SQLITE_OMIT_MEMORY_ALLOCATION. 
+} + +PARAGRAPH { + The built-in memory allocators in SQLite also provide the following + additional interfaces: +} + +CODE { +sqlite3_int64 sqlite3_memory_used(void); +sqlite3_int64 sqlite3_memory_highwater(int resetFlag); +} + +PARAGRAPH { + These interfaces can be used by an application to monitor how + much memory SQLite is using. The [sqlite3_memory_used()] routine + returns the number of bytes of memory currently in use and the + [sqlite3_memory_highwater()] returns the maximum instantaneous + memory usage. Neither routine includes the overhead associated + with the memory allocator. These routines are provided for use + by the application. SQLite never invokes them itself. So if + the application is providing its own memory allocation subsystem, + it can omit these interfaces if desired. +} + +HEADING 1 {The Mutex Subsystem} + +PARAGRAPH { + SQLite has always been threadsafe in the sense that it is safe to + use different SQLite database connections in different threads at the + same time. The constraint was that the same database connection + could not be used in two separate threads at once. SQLite version 3.5.0 + relaxes this constraint. +} + +PARAGRAPH { + In order to allow multiple threads to use the same database connection + at the same time, SQLite must make extensive use of mutexes. And for + this reason a new mutex subsystem as been added. The mutex subsystem + as the following interface: +} + +CODE { +sqlite3_mutex *sqlite3_mutex_alloc(int); +void sqlite3_mutex_free(sqlite3_mutex*); +void sqlite3_mutex_enter(sqlite3_mutex*); +int sqlite3_mutex_try(sqlite3_mutex*); +void sqlite3_mutex_leave(sqlite3_mutex*); +} + +PARAGRAPH { + Though these routines exist for the use of the SQLite core, + application code is free to use these routines as well, if desired. + A mutex is an [sqlite3_mutex] object. The [sqlite3_mutex_alloc()] + routine allocates a new mutex object and returns a pointer to it. 
+ The argument to [sqlite3_mutex_alloc()] should be + [SQLITE_MUTEX_FAST] or [SQLITE_MUTEX_RECURSIVE] for non-recursive + and recursive mutexes, respectively. If the underlying system does + not provide non-recursive mutexes, then a recursive mutex can be + substituted in that case. The argument to [sqlite3_mutex_alloc()] + can also be a constant designating one of several static mutexes: +
      +
    • [SQLITE_MUTEX_STATIC_MASTER] +
    • [SQLITE_MUTEX_STATIC_MEM] +
    • [SQLITE_MUTEX_STATIC_MEM2] +
    • [SQLITE_MUTEX_STATIC_PRNG] +
    • [SQLITE_MUTEX_STATIC_LRU] +
    + These static mutexes are reserved for use internally by SQLite + and should not be used by the application. The static mutexes + are all non-recursive. +} + +PARAGRAPH { + The [sqlite3_mutex_free()] routine should be used to deallocate + a non-static mutex. If a static mutex is passed to this routine + then the behavior is undefined. +} + +PARAGRAPH { + The [sqlite3_mutex_enter()] attempts to enter the mutex and blocks + if another threads is already there. [sqlite3_mutex_try()] attempts + to enter and returns [SQLITE_OK] on success or [SQLITE_BUSY] if another + thread is already there. [sqlite3_mutex_leave()] exits a mutex. + The mutex is held until the number of exits matches the number of + entrances. If [sqlite3_mutex_leave()] is called on a mutex that + the thread is not currently holding, then the behavior is undefined. + If any routine is called for a deallocated mutex, then the behavior + is undefined. +} + +PARAGRAPH { + The SQLite source code provides multiple implementations of these + APIs, suitable for varying environments. If SQLite is compiled with + the SQLITE_THREADSAFE=0 flag then a no-op mutex implementation that + is fast but does no real mutual exclusion is provided. That + implementation is suitable for use in single-threaded applications + or applications that only use SQLite in a single thread. Other + real mutex implementations are provided based on the underlying + operating system. +} + +PARAGRAPH { + Embedded applications may wish to provide their own mutex implementation. + If SQLite is compiled with the -DSQLITE_MUTEX_APPDEF=1 compile-time flag + then the SQLite core provides no mutex subsystem and a mutex subsystem + that matches the interface described above must be provided by the + application that links against SQLite. +} + +HEADING 1 {Other Interface Changes} + +PARAGRAPH { + Version 3.5.0 of SQLite changes the behavior of a few APIs in ways + that are technically incompatible. 
However, these APIs are seldom + used and even when they are used it is difficult to imagine a + scenario where the change might break something. The changes + actually makes these interface much more useful and powerful. +} + +PARAGRAPH { + Prior to version 3.5.0, the [sqlite3_enable_shared_cache()] API + would enable and disable the shared cache feature for all connections + within a single thread - the same thread from which the + sqlite3_enable_shared_cache() routine was called. Database connections + that used the shared cache were restricted to running in the same + thread in which they were opened. Beginning with version 3.5.0, + the sqlite3_enable_shared_cache() applies to all database connections + in all threads within the process. Now database connections running + in separate threads can share a cache. And database connections that + use shared cache can migrate from one thread to another. +} + +PARAGRAPH { + Prior to version 3.5.0 the [sqlite3_soft_heap_limit()] set an upper + bound on heap memory usage for all database connections within a + single thread. Each thread could have its own heap limit. Beginning + in version 3.5.0, there is a single heap limit for the entire process. + This seems more restrictive (one limit as opposed to many) but in + practice it is what most users want. +} + +PARAGRAPH { + Prior to version 3.5.0 the [sqlite3_release_memory()] function would + try to reclaim memory from all database connections in the same thread + as the sqlite3_release_memory() call. Beginning with version 3.5.0, + the sqlite3_release_memory() function will attempt to reclaim memory + from all database connections in all threads. +} + +HEADING 1 {Summary} + +PARAGRAPH { + The transition from SQLite version 3.4.2 to 3.5.0 is a major change. + Every source code file in the SQLite core had to be modified, some + extensively. And the change introduced some minor incompatibilities + in the C interface. 
But we feel that the benefits of the transition + from 3.4.2 to 3.5.0 far outweigh the pain of porting. The new + VFS layer is now well-defined and stable and should simplify future + customizations. The VFS layer, and the separable memory allocator + and mutex subsystems allow a standard SQLite source code amalgamation + to be used in an embedded project without change, greatly simplifying + configuration management. And the resulting system is much more + tolerant of highly threaded designs. +} + ADDED pages/about.in Index: pages/about.in ================================================================== --- /dev/null +++ pages/about.in @@ -0,0 +1,80 @@ +

    About SQLite

    +

    +SQLite is a small C library that implements a +self-contained, +serverless, +zero-configuration, +transactional +SQL database engine. +

    + +

    Features:

    + +

      +
    • Transactions + are atomic, consistent, isolated, and durable (ACID) + even after system crashes and power failures. +
    • Zero-configuration + - no setup or administration needed.
    • +
    • Implements most of SQL92. + (Features not supported)
    • +
    • A complete database is stored in a single disk file.
    • +
    • Database files can be freely shared between machines with + different byte orders.
    • +
    • Supports terabyte-sized databases and gigabyte-sized strings + and blobs. (See limits.html.) +
    • Small code footprint: + + less than 250KiB fully configured or less + than 150KiB with optional features omitted.
    • +
    • Faster than popular client/server database + engines for most common operations.
    • +
    • Simple, easy to use API.
    • +
    • TCL bindings included. + Bindings for many other languages + + available separately.
    • +
    • Well-commented source code with over 98% test coverage.
    • +
    • Available as a + + single ANSI-C source-code file that you can easily drop into + another project. +
    • Self-contained: + no external dependencies.
    • +
    • Cross-platform: Linux (unix), MacOSX, OS/2, Win32 and WinCE are supported + out of the box. Easy to port to other systems. +
    • Sources are in the public domain. + Use for any purpose.
    • +
    • Comes with a standalone command-line interface (CLI) client that + can be used to administer SQLite databases.
    • +
    +

    + +

    Suggested Uses:

    + +

      +
• Application File Format. +Rather than using fopen() to write XML or some proprietary format into +disk files used by your application, use an SQLite database instead. +You'll avoid having to write and troubleshoot a parser, your data +will be more easily accessible and cross-platform, your updates +will be transactional.

    • + +
• Database For Gadgets. +SQLite is a popular choice for the database engine in cellphones, +PDAs, MP3 players, set-top boxes, and other electronic gadgets. +SQLite has a small code footprint, makes efficient use of memory, +disk space, and disk bandwidth, is highly reliable, and requires +no maintenance from a Database Administrator.

    • + +
• Website Database. +Because it requires no configuration and stores information in ordinary +disk files, SQLite is a popular choice as the database to back small +to medium-sized websites.

    • + +
    • Stand-in For An Enterprise RDBMS. +SQLite is often used as a surrogate for an enterprise RDBMS for +demonstration purposes or for testing. SQLite is fast and requires +no setup, which takes a lot of the hassle out of testing and which +makes demos perky and easy to launch.

    • +
    ADDED pages/arch.in Index: pages/arch.in ================================================================== --- /dev/null +++ pages/arch.in @@ -0,0 +1,212 @@ +Architecture of SQLite +

    The Architecture Of SQLite

    + +

    Introduction

    + + + + +
    Block Diagram Of SQLite
    +

    This document describes the architecture of the SQLite library. +The information here is useful to those who want to understand or +modify the inner workings of SQLite. +

    + +

    +A block diagram showing the main components of SQLite +and how they interrelate is shown at the right. The text that +follows will provide a quick overview of each of these components. +

    + + +

    +This document describes SQLite version 3.0. Version 2.8 and +earlier are similar but the details differ. +

    + +

    Interface

    + +

    Much of the public interface to the SQLite library is implemented by +functions found in the main.c, legacy.c, and +vdbeapi.c source files +though some routines are +scattered about in other files where they can have access to data +structures with file scope. The +sqlite3_get_table() routine is implemented in table.c. +sqlite3_mprintf() is found in printf.c. +sqlite3_complete() is in tokenize.c. +The Tcl interface is implemented by tclsqlite.c. More +information on the C interface to SQLite is +available separately.

    + +

    To avoid name collisions with other software, all external +symbols in the SQLite library begin with the prefix sqlite3. +Those symbols that are intended for external use (in other words, +those symbols which form the API for SQLite) begin +with sqlite3_.

    + +

    Tokenizer

    + +

    When a string containing SQL statements is to be executed, the +interface passes that string to the tokenizer. The job of the tokenizer +is to break the original string up into tokens and pass those tokens +one by one to the parser. The tokenizer is hand-coded in C in +the file tokenize.c. + +

    Note that in this design, the tokenizer calls the parser. People +who are familiar with YACC and BISON may be used to doing things the +other way around -- having the parser call the tokenizer. The author +of SQLite +has done it both ways and finds things generally work out nicer for +the tokenizer to call the parser. YACC has it backwards.

    + +

    Parser

    + +

    The parser is the piece that assigns meaning to tokens based on +their context. The parser for SQLite is generated using the +Lemon LALR(1) parser +generator. Lemon does the same job as YACC/BISON, but it uses +a different input syntax which is less error-prone. +Lemon also generates a parser which is reentrant and thread-safe. +And lemon defines the concept of a non-terminal destructor so +that it does not leak memory when syntax errors are encountered. +The source file that drives Lemon is found in parse.y.

    + +

    Because +lemon is a program not normally found on development machines, the +complete source code to lemon (just one C file) is included in the +SQLite distribution in the "tool" subdirectory. Documentation on +lemon is found in the "doc" subdirectory of the distribution. +

    + +

    Code Generator

    + +

    After the parser assembles tokens into complete SQL statements, +it calls the code generator to produce virtual machine code that +will do the work that the SQL statements request. There are many +files in the code generator: +attach.c, +auth.c, +build.c, +delete.c, +expr.c, +insert.c, +pragma.c, +select.c, +trigger.c, +update.c, +vacuum.c +and where.c. +In these files is where most of the serious magic happens. +expr.c handles code generation for expressions. +where.c handles code generation for WHERE clauses on +SELECT, UPDATE and DELETE statements. The files attach.c, +delete.c, insert.c, select.c, trigger.c +update.c, and vacuum.c handle the code generation +for SQL statements with the same names. (Each of these files calls routines +in expr.c and where.c as necessary.) All other +SQL statements are coded out of build.c. +The auth.c file implements the functionality of +sqlite3_set_authorizer().

    + +

    Virtual Machine

    + +

    The program generated by the code generator is executed by +the virtual machine. Additional information about the virtual +machine is available separately. +To summarize, the virtual machine implements an abstract computing +engine specifically designed to manipulate database files. The +machine has a stack which is used for intermediate storage. +Each instruction contains an opcode and +up to three additional operands.

    + +

The virtual machine itself is entirely contained in a single +source file vdbe.c. The virtual machine also has +its own header files: vdbe.h that defines an interface +between the virtual machine and the rest of the SQLite library and +vdbeInt.h which defines structures private to the virtual machine. +The vdbeaux.c file contains utilities used by the virtual +machine and interface modules used by the rest of the library to +construct VM programs. The vdbeapi.c file contains external +interfaces to the virtual machine such as the +sqlite3_bind_... family of functions. Individual values +(strings, integer, floating point numbers, and BLOBs) are stored +in an internal object named "Mem" which is implemented by +vdbemem.c.

    + +

    +SQLite implements SQL functions using callbacks to C-language routines. +Even the built-in SQL functions are implemented this way. Most of +the built-in SQL functions (ex: coalesce(), count(), +substr(), and so forth) can be found in func.c. +Date and time conversion functions are found in date.c. +

    + +

    B-Tree

    + +

    An SQLite database is maintained on disk using a B-tree implementation +found in the btree.c source file. A separate B-tree is used for +each table and index in the database. All B-trees are stored in the +same disk file. Details of the file format are recorded in a large +comment at the beginning of btree.c.

    + +

    The interface to the B-tree subsystem is defined by the header file +btree.h. +

    + +

    Page Cache

    + +

    The B-tree module requests information from the disk in fixed-size +chunks. The default chunk size is 1024 bytes but can vary between 512 +and 65536 bytes. +The page cache is responsible for reading, writing, and +caching these chunks. +The page cache also provides the rollback and atomic commit abstraction +and takes care of locking of the database file. The +B-tree driver requests particular pages from the page cache and notifies +the page cache when it wants to modify pages or commit or rollback +changes and the page cache handles all the messy details of making sure +the requests are handled quickly, safely, and efficiently.

    + +

    The code to implement the page cache is contained in the single C +source file pager.c. The interface to the page cache subsystem +is defined by the header file pager.h. +

    + +

    OS Interface

    + +

+In order to provide portability between POSIX and Win32 operating systems, +SQLite uses an abstraction layer to interface with the operating system. +The interface to the OS abstraction layer is defined in +os.h. Each supported operating system has its own implementation: +os_unix.c for Unix, os_win.c for windows, and so forth. +Each of these operating-system-specific implementations typically has its own +header file: os_unix.h, os_win.h, etc. +

    + +

    Utilities

    + +

    +Memory allocation and caseless string comparison routines are located +in util.c. +Symbol tables used by the parser are maintained by hash tables found +in hash.c. The utf.c source file contains Unicode +conversion subroutines. +SQLite has its own private implementation of printf() (with +some extensions) in printf.c and its own random number generator +in random.c. +

    + +

    Test Code

    + +

+If you count regression test scripts, +more than half the total code base of SQLite is devoted to testing. +There are many assert() statements in the main code files. +In addition, the source files test1.c through test5.c +together with md5.c implement extensions used for testing +purposes only. The os_test.c backend interface is used to +simulate power failures to verify the crash-recovery mechanism in +the pager. +

    ADDED pages/audit.in Index: pages/audit.in ================================================================== --- /dev/null +++ pages/audit.in @@ -0,0 +1,190 @@ +SQLite Security Audit Procedure + +

    +A security audit for SQLite consists of two components. First, there is +a check for common errors that often lead to security problems. Second, +an attempt is made to construct a proof that SQLite has certain desirable +security properties. +

    + +

    Part I: Things to check

    + +

    +Scan all source code and check for the following common errors: +

    + +
      +
    1. +Verify that the destination buffer is large enough to hold its result +in every call to the following routines: +

        +
      • strcpy()
      • +
      • strncpy()
      • +
      • strcat()
      • +
      • memcpy()
      • +
      • memset()
      • +
      • memmove()
      • +
      • bcopy()
      • +
      • sprintf()
      • +
      • scanf()
      • +
      +

    2. +
    3. +Verify that pointers returned by subroutines are not NULL before using +the pointers. In particular, make sure the return values for the following +routines are checked before they are used: +

        +
      • malloc()
      • +
      • realloc()
      • +
      • sqliteMalloc()
      • +
      • sqliteRealloc()
      • +
      • sqliteStrDup()
      • +
      • sqliteStrNDup()
      • +
      • sqliteExpr()
      • +
      • sqliteExprFunction()
      • +
      • sqliteExprListAppend()
      • +
      • sqliteResultSetOfSelect()
      • +
      • sqliteIdListAppend()
      • +
      • sqliteSrcListAppend()
      • +
      • sqliteSelectNew()
      • +
      • sqliteTableNameToTable()
      • +
      • sqliteTableTokenToSrcList()
      • +
      • sqliteWhereBegin()
      • +
      • sqliteFindTable()
      • +
      • sqliteFindIndex()
      • +
      • sqliteTableNameFromToken()
      • +
      • sqliteGetVdbe()
      • +
      • sqlite_mprintf()
      • +
      • sqliteExprDup()
      • +
      • sqliteExprListDup()
      • +
      • sqliteSrcListDup()
      • +
      • sqliteIdListDup()
      • +
      • sqliteSelectDup()
      • +
      • sqliteFindFunction()
      • +
      • sqliteTriggerSelectStep()
      • +
      • sqliteTriggerInsertStep()
      • +
      • sqliteTriggerUpdateStep()
      • +
      • sqliteTriggerDeleteStep()
      • +
      +

    4. +
    5. +On all functions and procedures, verify that pointer parameters are not NULL +before dereferencing those parameters. +

    6. +
    7. +Check to make sure that temporary files are opened safely: that the process +will not overwrite an existing file when opening the temp file and that +another process is unable to substitute a file for the temp file being +opened. +

    8. +
    + + + +

    Part II: Things to prove

    + +

    +Prove that SQLite exhibits the characteristics outlined below: +

    + +
      +
    1. +The following are preconditions:

      +

        +
      • Z is an arbitrary-length NUL-terminated string.
      • +
      • An existing SQLite database has been opened. The return value + from the call to sqlite_open() is stored in the variable + db.
      • +
      • The database contains at least one table of the form: +
        +CREATE TABLE t1(a CLOB);
        +
      • +
• There are no user-defined functions other than the standard + built-in functions.
      • +

      +

      The following statement of C code is executed:

      +
      +sqlite_exec_printf(
      +   db,
      +   "INSERT INTO t1(a) VALUES('%q');", 
      +   0, 0, 0, Z
      +);
      +
      +

      Prove the following are true for all possible values of string Z:

      +
        +
      1. +The call to sqlite_exec_printf() will +return in a length of time that is a polynomial in strlen(Z). +It might return an error code but it will not crash. +

      2. +
      3. +At most one new row will be inserted into table t1. +

      4. +
      5. +No preexisting rows of t1 will be deleted or modified. +

      6. +
      7. +No tables other than t1 will be altered in any way. +

      8. +
9. +No preexisting files on the host computer's filesystem, other than +the database file itself, will be deleted or modified. +

      10. +
      11. +For some constants K1 and K2, +if at least K1*strlen(Z) + K2 bytes of contiguous memory are +available to malloc(), then the call to sqlite_exec_printf() +will not return SQLITE_NOMEM. +

      12. +
      +

    2. + + +
    3. +The following are preconditions: +

        +
      • Z is an arbitrary-length NUL-terminated string.
      • +
      • An existing SQLite database has been opened. The return value + from the call to sqlite_open() is stored in the variable + db.
      • +
      • There exists a callback function cb() that appends all + information passed in through its parameters into a single + data buffer called Y.
      • +
• There are no user-defined functions other than the standard + built-in functions.
      • +

      +

      The following statement of C code is executed:

      +
      +sqlite_exec(db, Z, cb, 0, 0);
      +
      +

      Prove the following are true for all possible values of string Z:

      +
        +
      1. +The call to sqlite_exec() will +return in a length of time which is a polynomial in strlen(Z). +It might return an error code but it will not crash. +

      2. +
3. +After sqlite_exec() returns, the buffer Y will not contain +any content from any preexisting file on the host computer's file system, +except for the database file. +

      4. +
      5. +After the call to sqlite_exec() returns, the database file will +still be well-formed. It might not contain the same data, but it will +still be a properly constructed SQLite database file. +

      6. +
7. +No preexisting files on the host computer's filesystem, other than +the database file itself, will be deleted or modified. +

      8. +
      9. +For some constants K1 and K2, +if at least K1*strlen(Z) + K2 bytes of contiguous memory are +available to malloc(), then the call to sqlite_exec() +will not return SQLITE_NOMEM. +

      10. +
      +

    4. + +
    ADDED pages/autoinc.in Index: pages/autoinc.in ================================================================== --- /dev/null +++ pages/autoinc.in @@ -0,0 +1,94 @@ +SQLite Autoincrement +

    SQLite Autoincrement

    + +

    +In SQLite, every row of every table has an integer ROWID. +The ROWID for each row is unique among all rows in the same table. +In SQLite version 2.8 the ROWID is a 32-bit signed integer. +Version 3.0 of SQLite expanded the ROWID to be a 64-bit signed integer. +

    + +

+You can access the ROWID of an SQLite table using one of the special column +names ROWID, _ROWID_, or OID. +Except if you declare an ordinary table column to use one of those special +names, then the use of that name will refer to the declared column not +to the internal ROWID. +

    + +

    +If a table contains a column of type INTEGER PRIMARY KEY, then that +column becomes an alias for the ROWID. You can then access the ROWID +using any of four different names, the original three names described above +or the name given to the INTEGER PRIMARY KEY column. All these names are +aliases for one another and work equally well in any context. +

    + +

    +When a new row is inserted into an SQLite table, the ROWID can either +be specified as part of the INSERT statement or it can be assigned +automatically by the database engine. To specify a ROWID manually, +just include it in the list of values to be inserted. For example: +

    + +
    +CREATE TABLE test1(a INT, b TEXT);
    +INSERT INTO test1(rowid, a, b) VALUES(123, 5, 'hello');
    +
    + +

    +If no ROWID is specified on the insert, an appropriate ROWID is created +automatically. The usual algorithm is to give the newly created row +a ROWID that is one larger than the largest ROWID in the table prior +to the insert. If the table is initially empty, then a ROWID of 1 is +used. If the largest ROWID is equal to the largest possible integer +(9223372036854775807 in SQLite version 3.0 and later) then the database +engine starts picking candidate ROWIDs at random until it finds one +that is not previously used. +

    + +

+The normal ROWID selection algorithm described above +will generate monotonically increasing +unique ROWIDs as long as you never use the maximum ROWID value and you never +delete the entry in the table with the largest ROWID. +If you ever delete rows or if you ever create a row with the maximum possible +ROWID, then ROWIDs from previously deleted rows might be reused when creating +new rows and newly created ROWIDs might not be in strictly ascending order. +

    + + +

    The AUTOINCREMENT Keyword

    + +

    +If a column has the type INTEGER PRIMARY KEY AUTOINCREMENT then a slightly +different ROWID selection algorithm is used. +The ROWID chosen for the new row is one larger than the largest ROWID +that has ever before existed in that same table. If the table has never +before contained any data, then a ROWID of 1 is used. If the table +has previously held a row with the largest possible ROWID, then new INSERTs +are not allowed and any attempt to insert a new row will fail with an +SQLITE_FULL error. +

    + +

    +SQLite keeps track of the largest ROWID that a table has ever held using +the special SQLITE_SEQUENCE table. The SQLITE_SEQUENCE table is created +and initialized automatically whenever a normal table that contains an +AUTOINCREMENT column is created. The content of the SQLITE_SEQUENCE table +can be modified using ordinary UPDATE, INSERT, and DELETE statements. +But making modifications to this table will likely perturb the AUTOINCREMENT +key generation algorithm. Make sure you know what you are doing before +you undertake such changes. +

    + +

    +The behavior implemented by the AUTOINCREMENT keyword is subtly different +from the default behavior. With AUTOINCREMENT, rows with automatically +selected ROWIDs are guaranteed to have ROWIDs that have never been used +before by the same table in the same database. And the automatically generated +ROWIDs are guaranteed to be monotonically increasing. These are important +properties in certain applications. But if your application does not +need these properties, you should probably stay with the default behavior +since the use of AUTOINCREMENT requires additional work to be done +as each row is inserted and thus causes INSERTs to run a little slower. ADDED pages/c_interface.in Index: pages/c_interface.in ================================================================== --- /dev/null +++ pages/c_interface.in @@ -0,0 +1,1109 @@ +The C language interface to SQLite Version 2 + +

    The C language interface to SQLite Version 2

    + +

    The SQLite library is designed to be very easy to use from +a C or C++ program. This document gives an overview of the C/C++ +programming interface.

    + +

    1.0 The Core API

    + +

    The interface to the SQLite library consists of three core functions, +one opaque data structure, and some constants used as return values. +The core interface is as follows:

    + +
    +typedef struct sqlite sqlite;
    +#define SQLITE_OK           0   /* Successful result */
    +
    +sqlite *sqlite_open(const char *dbname, int mode, char **errmsg);
    +
    +void sqlite_close(sqlite *db);
    +
    +int sqlite_exec(
    +  sqlite *db,
    +  char *sql,
    +  int (*xCallback)(void*,int,char**,char**),
    +  void *pArg,
    +  char **errmsg
    +);
    +
    + +

    +The above is all you really need to know in order to use SQLite +in your C or C++ programs. There are other interface functions +available (and described below) but we will begin by describing +the core functions shown above. +

    + + +

    1.1 Opening a database

    + +

    Use the sqlite_open function to open an existing SQLite +database or to create a new SQLite database. The first argument +is the database name. The second argument is intended to signal +whether the database is going to be used for reading and writing +or just for reading. But in the current implementation, the +second argument to sqlite_open is ignored. +The third argument is a pointer to a string pointer. +If the third argument is not NULL and an error occurs +while trying to open the database, then an error message will be +written to memory obtained from malloc() and *errmsg will be made +to point to this error message. The calling function is responsible +for freeing the memory when it has finished with it.

    + +

    The name of an SQLite database is the name of a file that will +contain the database. If the file does not exist, SQLite attempts +to create and initialize it. If the file is read-only (due to +permission bits or because it is located on read-only media like +a CD-ROM) then SQLite opens the database for reading only. The +entire SQL database is stored in a single file on the disk. But +additional temporary files may be created during the execution of +an SQL command in order to store the database rollback journal or +temporary and intermediate results of a query.

    + +

    The return value of the sqlite_open function is a +pointer to an opaque sqlite structure. This pointer will +be the first argument to all subsequent SQLite function calls that +deal with the same database. NULL is returned if the open fails +for any reason.

    + + +

    1.2 Closing the database

    + +

    To close an SQLite database, call the sqlite_close +function passing it the sqlite structure pointer that was obtained +from a prior call to sqlite_open. +If a transaction is active when the database is closed, the transaction +is rolled back.

    + + +

    1.3 Executing SQL statements

    + +

    The sqlite_exec function is used to process SQL statements +and queries. This function requires 5 parameters as follows:

    + +
      +
    1. A pointer to the sqlite structure obtained from a prior call + to sqlite_open.

    2. +
    3. A null-terminated string containing the text of one or more + SQL statements and/or queries to be processed.

    4. +
    5. A pointer to a callback function which is invoked once for each + row in the result of a query. This argument may be NULL, in which + case no callbacks will ever be invoked.

    6. +
    7. A pointer that is forwarded to become the first argument + to the callback function.

    8. +
    9. A pointer to an error string. Error messages are written to space + obtained from malloc() and the error string is made to point to + the malloced space. The calling function is responsible for freeing + this space when it has finished with it. + This argument may be NULL, in which case error messages are not + reported back to the calling function.

    10. +
    + +

    +The callback function is used to receive the results of a query. A +prototype for the callback function is as follows:

    + +
    +int Callback(void *pArg, int argc, char **argv, char **columnNames){
    +  return 0;
    +}
    +
    + + +

    The first argument to the callback is just a copy of the fourth argument +to sqlite_exec This parameter can be used to pass arbitrary +information through to the callback function from client code. +The second argument is the number of columns in the query result. +The third argument is an array of pointers to strings where each string +is a single column of the result for that record. Note that the +callback function reports a NULL value in the database as a NULL pointer, +which is very different from an empty string. If the i-th parameter +is an empty string, we will get:

    +
    +argv[i][0] == 0
    +
    +

    But if the i-th parameter is NULL we will get:

    +
    +argv[i] == 0
    +
    + +

The names of the columns are contained in the first argc +entries of the fourth argument. +If the SHOW_DATATYPES pragma +is on (it is off by default) then +the second argc entries in the 4th argument are the datatypes +for the corresponding columns. +

    + +

    If the +EMPTY_RESULT_CALLBACKS pragma is set to ON and the result of +a query is an empty set, then the callback is invoked once with the +third parameter (argv) set to 0. In other words +

    +argv == 0
    +
    +The second parameter (argc) +and the fourth parameter (columnNames) are still valid +and can be used to determine the number and names of the result +columns if there had been a result. +The default behavior is not to invoke the callback at all if the +result set is empty.

    + + +

    The callback function should normally return 0. If the callback +function returns non-zero, the query is immediately aborted and +sqlite_exec will return SQLITE_ABORT.

    + +

    1.4 Error Codes

    + +

    +The sqlite_exec function normally returns SQLITE_OK. But +if something goes wrong it can return a different value to indicate +the type of error. Here is a complete list of the return codes: +

    + +
    +#define SQLITE_OK           0   /* Successful result */
    +#define SQLITE_ERROR        1   /* SQL error or missing database */
    +#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
    +#define SQLITE_PERM         3   /* Access permission denied */
    +#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    +#define SQLITE_BUSY         5   /* The database file is locked */
    +#define SQLITE_LOCKED       6   /* A table in the database is locked */
    +#define SQLITE_NOMEM        7   /* A malloc() failed */
    +#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    +#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
    +#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    +#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    +#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
    +#define SQLITE_FULL        13   /* Insertion failed because database is full */
    +#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    +#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
    +#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
    +#define SQLITE_SCHEMA      17   /* The database schema changed */
    +#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
+#define SQLITE_CONSTRAINT  19   /* Abort due to constraint violation */
    +#define SQLITE_MISMATCH    20   /* Data type mismatch */
    +#define SQLITE_MISUSE      21   /* Library used incorrectly */
    +#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    +#define SQLITE_AUTH        23   /* Authorization denied */
    +#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
    +#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
    +
    + +

    +The meanings of these various return values are as follows: +

    + +
    +
    +
    SQLITE_OK
    +

    This value is returned if everything worked and there were no errors. +

    +
    SQLITE_INTERNAL
    +

    This value indicates that an internal consistency check within +the SQLite library failed. This can only happen if there is a bug in +the SQLite library. If you ever get an SQLITE_INTERNAL reply from +an sqlite_exec call, please report the problem on the SQLite +mailing list. +

    +
    SQLITE_ERROR
    +

    This return value indicates that there was an error in the SQL +that was passed into the sqlite_exec. +

    +
    SQLITE_PERM
    +

    This return value says that the access permissions on the database +file are such that the file cannot be opened. +

    +
    SQLITE_ABORT
    +

    This value is returned if the callback function returns non-zero. +

    +
    SQLITE_BUSY
    +

    This return code indicates that another program or thread has +the database locked. SQLite allows two or more threads to read the +database at the same time, but only one thread can have the database +open for writing at the same time. Locking in SQLite is on the +entire database.

    +

    +
    SQLITE_LOCKED
    +

    This return code is similar to SQLITE_BUSY in that it indicates +that the database is locked. But the source of the lock is a recursive +call to sqlite_exec. This return can only occur if you attempt +to invoke sqlite_exec from within a callback routine of a query +from a prior invocation of sqlite_exec. Recursive calls to +sqlite_exec are allowed as long as they do +not attempt to write the same table. +

    +
    SQLITE_NOMEM
    +

    This value is returned if a call to malloc fails. +

    +
    SQLITE_READONLY
    +

    This return code indicates that an attempt was made to write to +a database file that is opened for reading only. +

    +
    SQLITE_INTERRUPT
    +

    This value is returned if a call to sqlite_interrupt +interrupts a database operation in progress. +

    +
    SQLITE_IOERR
    +

    This value is returned if the operating system informs SQLite +that it is unable to perform some disk I/O operation. This could mean +that there is no more space left on the disk. +

    +
    SQLITE_CORRUPT
    +

This value is returned if SQLite detects that the database it is +working on has become corrupted. Corruption might occur due to a rogue +process writing to the database file or it might happen due to a +previously undetected logic error in SQLite. This value is also +returned if a disk I/O error occurs in such a way that SQLite is forced +to leave the database file in a corrupted state. The latter should only +happen due to a hardware or operating system malfunction. +

    +
    SQLITE_FULL
    +

    This value is returned if an insertion failed because there is +no space left on the disk, or the database is too big to hold any +more information. The latter case should only occur for databases +that are larger than 2GB in size. +

    +
    SQLITE_CANTOPEN
    +

    This value is returned if the database file could not be opened +for some reason. +

    +
    SQLITE_PROTOCOL
    +

    This value is returned if some other process is messing with +file locks and has violated the file locking protocol that SQLite uses +on its rollback journal files. +

    +
    SQLITE_SCHEMA
    +

When the database is first opened, SQLite reads the database schema +into memory and uses that schema to parse new SQL statements. If another +process changes the schema, the command currently being processed will +abort because the virtual machine code generated assumed the old +schema. This is the return code for such cases. Retrying the +command usually will clear the problem. +

    +
    SQLITE_TOOBIG
    +

    SQLite will not store more than about 1 megabyte of data in a single +row of a single table. If you attempt to store more than 1 megabyte +in a single row, this is the return code you get. +

    +
    SQLITE_CONSTRAINT
    +

    This constant is returned if the SQL statement would have violated +a database constraint. +

    +
    SQLITE_MISMATCH
    +

    This error occurs when there is an attempt to insert non-integer +data into a column labeled INTEGER PRIMARY KEY. For most columns, SQLite +ignores the data type and allows any kind of data to be stored. But +an INTEGER PRIMARY KEY column is only allowed to store integer data. +

    +
    SQLITE_MISUSE
    +

    This error might occur if one or more of the SQLite API routines +is used incorrectly. Examples of incorrect usage include calling +sqlite_exec after the database has been closed using +sqlite_close or +calling sqlite_exec with the same +database pointer simultaneously from two separate threads. +

    +
    SQLITE_NOLFS
    +

This error means that you have attempted to create or access a +database file that is larger than 2GB on a legacy Unix machine that +lacks large file support. +

    +
    SQLITE_AUTH
    +

    This error indicates that the authorizer callback +has disallowed the SQL you are attempting to execute. +

    +
    SQLITE_ROW
    +

    This is one of the return codes from the +sqlite_step routine which is part of the non-callback API. +It indicates that another row of result data is available. +

    +
    SQLITE_DONE
    +

    This is one of the return codes from the +sqlite_step routine which is part of the non-callback API. +It indicates that the SQL statement has been completely executed and +the sqlite_finalize routine is ready to be called. +

    +
    +
    + +

    2.0 Accessing Data Without Using A Callback Function

    + +

    +The sqlite_exec routine described above used to be the only +way to retrieve data from an SQLite database. But many programmers found +it inconvenient to use a callback function to obtain results. So beginning +with SQLite version 2.7.7, a second access interface is available that +does not use callbacks. +

    + +

    +The new interface uses three separate functions to replace the single +sqlite_exec function. +

    + +
    +typedef struct sqlite_vm sqlite_vm;
    +
    +int sqlite_compile(
    +  sqlite *db,              /* The open database */
    +  const char *zSql,        /* SQL statement to be compiled */
    +  const char **pzTail,     /* OUT: uncompiled tail of zSql */
    +  sqlite_vm **ppVm,        /* OUT: the virtual machine to execute zSql */
    +  char **pzErrmsg          /* OUT: Error message. */
    +);
    +
    +int sqlite_step(
    +  sqlite_vm *pVm,          /* The virtual machine to execute */
    +  int *pN,                 /* OUT: Number of columns in result */
    +  const char ***pazValue,  /* OUT: Column data */
    +  const char ***pazColName /* OUT: Column names and datatypes */
    +);
    +
    +int sqlite_finalize(
    +  sqlite_vm *pVm,          /* The virtual machine to be finalized */
    +  char **pzErrMsg          /* OUT: Error message */
    +);
    +
    + +

    +The strategy is to compile a single SQL statement using +sqlite_compile then invoke sqlite_step multiple times, +once for each row of output, and finally call sqlite_finalize +to clean up after the SQL has finished execution. +

    + +

    2.1 Compiling An SQL Statement Into A Virtual Machine

    + +

+The sqlite_compile "compiles" a single SQL statement (specified +by the second parameter) and generates a virtual machine that is able +to execute that statement. +As with most interface routines, the first parameter must be a pointer +to an sqlite structure that was obtained from a prior call to +sqlite_open. + +

    +A pointer to the virtual machine is stored in a pointer which is passed +in as the 4th parameter. +Space to hold the virtual machine is dynamically allocated. To avoid +a memory leak, the calling function must invoke +sqlite_finalize on the virtual machine after it has finished +with it. +The 4th parameter may be set to NULL if an error is encountered during +compilation. +

    + +

    +If any errors are encountered during compilation, an error message is +written into memory obtained from malloc and the 5th parameter +is made to point to that memory. If the 5th parameter is NULL, then +no error message is generated. If the 5th parameter is not NULL, then +the calling function should dispose of the memory containing the error +message by calling sqlite_freemem. +

    + +

    +If the 2nd parameter actually contains two or more statements of SQL, +only the first statement is compiled. (This is different from the +behavior of sqlite_exec which executes all SQL statements +in its input string.) The 3rd parameter to sqlite_compile +is made to point to the first character beyond the end of the first +statement of SQL in the input. If the 2nd parameter contains only +a single SQL statement, then the 3rd parameter will be made to point +to the '\000' terminator at the end of the 2nd parameter. +

    + +

+On success, sqlite_compile returns SQLITE_OK. +Otherwise an error code is returned. +

    + +

    2.2 Step-By-Step Execution Of An SQL Statement

    + +

+After a virtual machine has been generated using sqlite_compile +it is executed by one or more calls to sqlite_step. Each +invocation of sqlite_step, except the last one, +returns a single row of the result. +The number of columns in the result is stored in the integer that +the 2nd parameter points to. +The pointer specified by the 3rd parameter is made to point +to an array of pointers to column values. +The pointer in the 4th parameter is made to point to an array +of pointers to column names and datatypes. +The 2nd through 4th parameters to sqlite_step convey the +same information as the 2nd through 4th parameters of the +callback routine when using +the sqlite_exec interface. Except, with sqlite_step +the column datatype information is always included in the +4th parameter regardless of whether or not the +SHOW_DATATYPES pragma +is on or off. +

    + +

    +Each invocation of sqlite_step returns an integer code that +indicates what happened during that step. This code may be +SQLITE_BUSY, SQLITE_ROW, SQLITE_DONE, SQLITE_ERROR, or +SQLITE_MISUSE. +

    + +

    +If the virtual machine is unable to open the database file because +it is locked by another thread or process, sqlite_step +will return SQLITE_BUSY. The calling function should do some other +activity, or sleep, for a short amount of time to give the lock a +chance to clear, then invoke sqlite_step again. This can +be repeated as many times as desired. +

    + +

    +Whenever another row of result data is available, +sqlite_step will return SQLITE_ROW. The row data is +stored in an array of pointers to strings and the 2nd parameter +is made to point to this array. +

    + +

    +When all processing is complete, sqlite_step will return +either SQLITE_DONE or SQLITE_ERROR. SQLITE_DONE indicates that the +statement completed successfully and SQLITE_ERROR indicates that there +was a run-time error. (The details of the error are obtained from +sqlite_finalize.) It is a misuse of the library to attempt +to call sqlite_step again after it has returned SQLITE_DONE +or SQLITE_ERROR. +

    + +

    +When sqlite_step returns SQLITE_DONE or SQLITE_ERROR, +the *pN and *pazColName values are set to the number of columns +in the result set and to the names of the columns, just as they +are for an SQLITE_ROW return. This allows the calling code to +find the number of result columns and the column names and datatypes +even if the result set is empty. The *pazValue parameter is always +set to NULL when the return codes is SQLITE_DONE or SQLITE_ERROR. +If the SQL being executed is a statement that does not +return a result (such as an INSERT or an UPDATE) then *pN will +be set to zero and *pazColName will be set to NULL. +

    + +

+If you abuse the library by trying to call sqlite_step +inappropriately it will attempt to return SQLITE_MISUSE. +This can happen if you call sqlite_step() on the same virtual machine +at the same +time from two or more threads or if you call sqlite_step() +again after it returned SQLITE_DONE or SQLITE_ERROR or if you +pass in an invalid virtual machine pointer to sqlite_step(). +You should not depend on the SQLITE_MISUSE return code to indicate +an error. It is possible that a misuse of the interface will go +undetected and result in a program crash. The SQLITE_MISUSE is +intended as a debugging aid only - to help you detect incorrect +usage prior to a mishap. The misuse detection logic is not guaranteed +to work in every case. +

    + +

    2.3 Deleting A Virtual Machine

    + +

    +Every virtual machine that sqlite_compile creates should +eventually be handed to sqlite_finalize. The sqlite_finalize() +procedure deallocates the memory and other resources that the virtual +machine uses. Failure to call sqlite_finalize() will result in +resource leaks in your program. +

    + +

    +The sqlite_finalize routine also returns the result code +that indicates success or failure of the SQL operation that the +virtual machine carried out. +The value returned by sqlite_finalize() will be the same as would +have been returned had the same SQL been executed by sqlite_exec. +The error message returned will also be the same. +

    + +

    +It is acceptable to call sqlite_finalize on a virtual machine +before sqlite_step has returned SQLITE_DONE. Doing so has +the effect of interrupting the operation in progress. Partially completed +changes will be rolled back and the database will be restored to its +original state (unless an alternative recovery algorithm is selected using +an ON CONFLICT clause in the SQL being executed.) The effect is the +same as if a callback function of sqlite_exec had returned +non-zero. +

    + +

    +It is also acceptable to call sqlite_finalize on a virtual machine +that has never been passed to sqlite_step even once. +

    + +

    3.0 The Extended API

    + +

    Only the three core routines described in section 1.0 are required to use +SQLite. But there are many other functions that provide +useful interfaces. These extended routines are as follows: +

    + +
    +int sqlite_last_insert_rowid(sqlite*);
    +
    +int sqlite_changes(sqlite*);
    +
    +int sqlite_get_table(
    +  sqlite*,
    +  char *sql,
    +  char ***result,
    +  int *nrow,
    +  int *ncolumn,
    +  char **errmsg
    +);
    +
    +void sqlite_free_table(char**);
    +
    +void sqlite_interrupt(sqlite*);
    +
    +int sqlite_complete(const char *sql);
    +
    +void sqlite_busy_handler(sqlite*, int (*)(void*,const char*,int), void*);
    +
    +void sqlite_busy_timeout(sqlite*, int ms);
    +
    +const char sqlite_version[];
    +
    +const char sqlite_encoding[];
    +
    +int sqlite_exec_printf(
    +  sqlite*,
    +  char *sql,
    +  int (*)(void*,int,char**,char**),
    +  void*,
    +  char **errmsg,
    +  ...
    +);
    +
    +int sqlite_exec_vprintf(
    +  sqlite*,
    +  char *sql,
    +  int (*)(void*,int,char**,char**),
    +  void*,
    +  char **errmsg,
    +  va_list
    +);
    +
    +int sqlite_get_table_printf(
    +  sqlite*,
    +  char *sql,
    +  char ***result,
    +  int *nrow,
    +  int *ncolumn,
    +  char **errmsg,
    +  ...
    +);
    +
    +int sqlite_get_table_vprintf(
    +  sqlite*,
    +  char *sql,
    +  char ***result,
    +  int *nrow,
    +  int *ncolumn,
    +  char **errmsg,
    +  va_list
    +);
    +
    +char *sqlite_mprintf(const char *zFormat, ...);
    +
    +char *sqlite_vmprintf(const char *zFormat, va_list);
    +
    +void sqlite_freemem(char*);
    +
    +void sqlite_progress_handler(sqlite*, int, int (*)(void*), void*);
    +
    +
    + +

    All of the above definitions are included in the "sqlite.h" +header file that comes in the source tree.

    + +

    3.1 The ROWID of the most recent insert

    + +

    Every row of an SQLite table has a unique integer key. If the +table has a column labeled INTEGER PRIMARY KEY, then that column +serves as the key. If there is no INTEGER PRIMARY KEY column then +the key is a unique integer. The key for a row can be accessed in +a SELECT statement or used in a WHERE or ORDER BY clause using any +of the names "ROWID", "OID", or "_ROWID_".

    + +

    When you do an insert into a table that does not have an INTEGER PRIMARY +KEY column, or if the table does have an INTEGER PRIMARY KEY but the value +for that column is not specified in the VALUES clause of the insert, then +the key is automatically generated. You can find the value of the key +for the most recent INSERT statement using the +sqlite_last_insert_rowid API function.

    + +

    3.2 The number of rows that changed

    + +

The sqlite_changes API function returns the number of rows +that have been inserted, deleted, or modified since the database was +last quiescent. A "quiescent" database is one in which there are +no outstanding calls to sqlite_exec and no VMs created by +sqlite_compile that have not been finalized by sqlite_finalize. +In common usage, sqlite_changes returns the number +of rows inserted, deleted, or modified by the most recent sqlite_exec +call or since the most recent sqlite_compile. But if you have +nested calls to sqlite_exec (that is, if the callback routine +of one sqlite_exec invokes another sqlite_exec) or if +you invoke sqlite_compile to create a new VM while there is +still another VM in existence, then +the meaning of the number returned by sqlite_changes is more +complex. +The number reported includes any changes +that were later undone by a ROLLBACK or ABORT. But rows that are +deleted because of a DROP TABLE are not counted.

    + +

    SQLite implements the command "DELETE FROM table" (without +a WHERE clause) by dropping the table then recreating it. +This is much faster than deleting the elements of the table individually. +But it also means that the value returned from sqlite_changes +will be zero regardless of the number of elements that were originally +in the table. If an accurate count of the number of elements deleted +is necessary, use "DELETE FROM table WHERE 1" instead.

    + +

    3.3 Querying into memory obtained from malloc()

    + +

    The sqlite_get_table function is a wrapper around +sqlite_exec that collects all the information from successive +callbacks and writes it into memory obtained from malloc(). This +is a convenience function that allows the application to get the +entire result of a database query with a single function call.

    + +

    The main result from sqlite_get_table is an array of pointers +to strings. There is one element in this array for each column of +each row in the result. NULL results are represented by a NULL +pointer. In addition to the regular data, there is an added row at the +beginning of the array that contains the name of each column of the +result.

    + +

    As an example, consider the following query:

    + +
    +SELECT employee_name, login, host FROM users WHERE login LIKE 'd%'; +
    + +

    This query will return the name, login and host computer name +for every employee whose login begins with the letter "d". If this +query is submitted to sqlite_get_table the result might +look like this:

    + +
    +nrow = 2
    +ncolumn = 3
    +result[0] = "employee_name"
    +result[1] = "login"
    +result[2] = "host"
    +result[3] = "dummy"
    +result[4] = "No such user"
    +result[5] = 0
    +result[6] = "D. Richard Hipp"
    +result[7] = "drh"
    +result[8] = "zadok" +
    + +

    Notice that the "host" value for the "dummy" record is NULL so +the result[] array contains a NULL pointer at that slot.

    + +

If the result set of a query is empty, then by default +sqlite_get_table will set nrow to 0 and leave its +result parameter set to NULL. But if the EMPTY_RESULT_CALLBACKS +pragma is ON then the result parameter is initialized to the names +of the columns only. For example, consider this query which has +an empty result set:

    + +
    +SELECT employee_name, login, host FROM users WHERE employee_name IS NULL; +
    + +

+The default behavior gives this result: +

    + +
    +nrow = 0
    +ncolumn = 0
    +result = 0
    +
    + +

    +But if the EMPTY_RESULT_CALLBACKS pragma is ON, then the following +is returned: +

    + +
    +nrow = 0
    +ncolumn = 3
    +result[0] = "employee_name"
    +result[1] = "login"
    +result[2] = "host"
    +
    + +

    Memory to hold the information returned by sqlite_get_table +is obtained from malloc(). But the calling function should not try +to free this information directly. Instead, pass the complete table +to sqlite_free_table when the table is no longer needed. +It is safe to call sqlite_free_table with a NULL pointer such +as would be returned if the result set is empty.

    + +

    The sqlite_get_table routine returns the same integer +result code as sqlite_exec.

    + +

    3.4 Interrupting an SQLite operation

    + +

    The sqlite_interrupt function can be called from a +different thread or from a signal handler to cause the current database +operation to exit at its first opportunity. When this happens, +the sqlite_exec routine (or the equivalent) that started +the database operation will return SQLITE_INTERRUPT.

    + +

    3.5 Testing for a complete SQL statement

    + +

    The next interface routine to SQLite is a convenience function used +to test whether or not a string forms a complete SQL statement. +If the sqlite_complete function returns true when its input +is a string, then the argument forms a complete SQL statement. +There are no guarantees that the syntax of that statement is correct, +but we at least know the statement is complete. If sqlite_complete +returns false, then more text is required to complete the SQL statement.

    + +

    For the purpose of the sqlite_complete function, an SQL +statement is complete if it ends in a semicolon.

    + +

    The sqlite command-line utility uses the sqlite_complete +function to know when it needs to call sqlite_exec. After each +line of input is received, sqlite calls sqlite_complete +on all input in its buffer. If sqlite_complete returns true, +then sqlite_exec is called and the input buffer is reset. If +sqlite_complete returns false, then the prompt is changed to +the continuation prompt and another line of text is read and added to +the input buffer.

    + +

    3.6 Library version string

    + +

    The SQLite library exports the string constant named +sqlite_version which contains the version number of the +library. The header file contains a macro SQLITE_VERSION +with the same information. If desired, a program can compare +the SQLITE_VERSION macro against the sqlite_version +string constant to verify that the version number of the +header file and the library match.

    + +

    3.7 Library character encoding

    + +

    By default, SQLite assumes that all data uses a fixed-size +8-bit character (iso8859). But if you give the --enable-utf8 option +to the configure script, then the library assumes UTF-8 variable +sized characters. This makes a difference for the LIKE and GLOB +operators and the LENGTH() and SUBSTR() functions. The static +string sqlite_encoding will be set to either "UTF-8" or +"iso8859" to indicate how the library was compiled. In addition, +the sqlite.h header file will define one of the +macros SQLITE_UTF8 or SQLITE_ISO8859, as appropriate.

    + +

    Note that the character encoding mechanism used by SQLite cannot +be changed at run-time. This is a compile-time option only. The +sqlite_encoding character string just tells you how the library +was compiled.

    + +

    3.8 Changing the library's response to locked files

    + +

    The sqlite_busy_handler procedure can be used to register +a busy callback with an open SQLite database. The busy callback will +be invoked whenever SQLite tries to access a database that is locked. +The callback will typically do some other useful work, or perhaps sleep, +in order to give the lock a chance to clear. If the callback returns +non-zero, then SQLite tries again to access the database and the cycle +repeats. If the callback returns zero, then SQLite aborts the current +operation and returns SQLITE_BUSY.

    + +

    The arguments to sqlite_busy_handler are the opaque +structure returned from sqlite_open, a pointer to the busy +callback function, and a generic pointer that will be passed as +the first argument to the busy callback. When SQLite invokes the +busy callback, it sends it three arguments: the generic pointer +that was passed in as the third argument to sqlite_busy_handler, +the name of the database table or index that the library is trying +to access, and the number of times that the library has attempted to +access the database table or index.

    + +

    For the common case where we want the busy callback to sleep, +the SQLite library provides a convenience routine sqlite_busy_timeout. +The first argument to sqlite_busy_timeout is a pointer to +an open SQLite database and the second argument is a number of milliseconds. +After sqlite_busy_timeout has been executed, the SQLite library +will wait for the lock to clear for at least the number of milliseconds +specified before it returns SQLITE_BUSY. Specifying zero milliseconds for +the timeout restores the default behavior.

    + +

    3.9 Using the _printf() wrapper functions

    + +

    The four utility functions

    + +

    +

      +
    • sqlite_exec_printf()
    • +
    • sqlite_exec_vprintf()
    • +
    • sqlite_get_table_printf()
    • +
    • sqlite_get_table_vprintf()
    • +
    +

    + +

    implement the same query functionality as sqlite_exec +and sqlite_get_table. But instead of taking a complete +SQL statement as their second argument, the four _printf +routines take a printf-style format string. The SQL statement to +be executed is generated from this format string and from whatever +additional arguments are attached to the end of the function call.

    + +

    There are two advantages to using the SQLite printf +functions instead of sprintf. First of all, with the +SQLite printf routines, there is never a danger of overflowing a +static buffer as there is with sprintf. The SQLite +printf routines automatically allocate (and later free) +as much memory as is +necessary to hold the SQL statements generated.

    + +

    The second advantage the SQLite printf routines have over +sprintf are two new formatting options specifically designed +to support string literals in SQL. Within the format string, +the %q formatting option works very much like %s in that it +reads a null-terminated string from the argument list and inserts +it into the result. But %q translates the inserted string by +making two copies of every single-quote (') character in the +substituted string. This has the effect of escaping the end-of-string +meaning of single-quote within a string literal. The %Q formatting +option works similarly; it translates the single-quotes like %q and +additionally encloses the resulting string in single-quotes. +If the argument for the %Q formatting option is a NULL pointer, +the resulting string is NULL without single quotes. +

    + +

    Consider an example. Suppose you are trying to insert a string +value into a database table where the string value was obtained from +user input. Suppose the string to be inserted is stored in a variable +named zString. The code to do the insertion might look like this:

    + +
    +sqlite_exec_printf(db,
    +  "INSERT INTO table1 VALUES('%s')",
    +  0, 0, 0, zString);
    +
    + +

    If the zString variable holds text like "Hello", then this statement +will work just fine. But suppose the user enters a string like +"Hi y'all!". The SQL statement generated reads as follows: + +

    +INSERT INTO table1 VALUES('Hi y'all')
    +
    + +

    This is not valid SQL because of the apostrophe in the word "y'all". +But if the %q formatting option is used instead of %s, like this:

    + +
    +sqlite_exec_printf(db,
    +  "INSERT INTO table1 VALUES('%q')",
    +  0, 0, 0, zString);
    +
    + +

    Then the generated SQL will look like the following:

    + +
    +INSERT INTO table1 VALUES('Hi y''all')
    +
    + +

    Here the apostrophe has been escaped and the SQL statement is well-formed. +When generating SQL on-the-fly from data that might contain a +single-quote character ('), it is always a good idea to use the +SQLite printf routines and the %q formatting option instead of sprintf. +

    + +

    If the %Q formatting option is used instead of %q, like this:

    + +
    +sqlite_exec_printf(db,
    +  "INSERT INTO table1 VALUES(%Q)",
    +  0, 0, 0, zString);
    +
    + +

    Then the generated SQL will look like the following:

    + +
    +INSERT INTO table1 VALUES('Hi y''all')
    +
    + +

    If the value of the zString variable is NULL, the generated SQL +will look like the following:

    + +
    +INSERT INTO table1 VALUES(NULL)
    +
    + +

    All of the _printf() routines above are built around the following +two functions:

    + +
    +char *sqlite_mprintf(const char *zFormat, ...);
    +char *sqlite_vmprintf(const char *zFormat, va_list);
    +
    + +

    The sqlite_mprintf() routine works like the standard library +sprintf() except that it writes its results into memory obtained +from malloc() and returns a pointer to the malloced buffer. +sqlite_mprintf() also understands the %q and %Q extensions described +above. The sqlite_vmprintf() is a varargs version of the same +routine. The string pointer that these routines return should be freed +by passing it to sqlite_freemem(). +

    + +

    3.10 Performing background jobs during large queries

    + +

    The sqlite_progress_handler() routine can be used to register a +callback routine with an SQLite database to be invoked periodically during long +running calls to sqlite_exec(), sqlite_step() and the various +wrapper functions. +

    + +

    The callback is invoked every N virtual machine operations, where N is +supplied as the second argument to sqlite_progress_handler(). The third +and fourth arguments to sqlite_progress_handler() are a pointer to the +routine to be invoked and a void pointer to be passed as the first argument to +it. +

    + +

    The time taken to execute each virtual machine operation can vary based on +many factors. A typical value for a 1 GHz PC is between half and three million +per second but may be much higher or lower, depending on the query. As such it +is difficult to schedule background operations based on virtual machine +operations. Instead, it is recommended that a callback be scheduled relatively +frequently (say every 1000 instructions) and external timer routines used to +determine whether or not background jobs need to be run. +

    + + +

    4.0 Adding New SQL Functions

    + +

    Beginning with version 2.4.0, SQLite allows the SQL language to be +extended with new functions implemented as C code. The following interface +is used: +

    + +
    +typedef struct sqlite_func sqlite_func;
    +
    +int sqlite_create_function(
    +  sqlite *db,
    +  const char *zName,
    +  int nArg,
    +  void (*xFunc)(sqlite_func*,int,const char**),
    +  void *pUserData
    +);
    +int sqlite_create_aggregate(
    +  sqlite *db,
    +  const char *zName,
    +  int nArg,
    +  void (*xStep)(sqlite_func*,int,const char**),
    +  void (*xFinalize)(sqlite_func*),
    +  void *pUserData
    +);
    +
    +char *sqlite_set_result_string(sqlite_func*,const char*,int);
    +void sqlite_set_result_int(sqlite_func*,int);
    +void sqlite_set_result_double(sqlite_func*,double);
    +void sqlite_set_result_error(sqlite_func*,const char*,int);
    +
    +void *sqlite_user_data(sqlite_func*);
    +void *sqlite_aggregate_context(sqlite_func*, int nBytes);
    +int sqlite_aggregate_count(sqlite_func*);
    +
    + +

    +The sqlite_create_function() interface is used to create +regular functions and sqlite_create_aggregate() is used to +create new aggregate functions. In both cases, the db +parameter is an open SQLite database on which the functions should +be registered, zName is the name of the new function, +nArg is the number of arguments, and pUserData is +a pointer which is passed through unchanged to the C implementation +of the function. Both routines return 0 on success and non-zero +if there are any errors. +

    + +

    +The length of a function name may not exceed 255 characters. +Any attempt to create a function whose name exceeds 255 characters +in length will result in an error. +

    + +

    +For regular functions, the xFunc callback is invoked once +for each function call. The implementation of xFunc should call +one of the sqlite_set_result_... interfaces to return its +result. The sqlite_user_data() routine can be used to +retrieve the pUserData pointer that was passed in when the +function was registered. +

    + +

    +For aggregate functions, the xStep callback is invoked once +for each row in the result and then xFinalize is invoked at the +end to compute a final answer. The xStep routine can use the +sqlite_aggregate_context() interface to allocate memory that +will be unique to that particular instance of the SQL function. +This memory will be automatically deleted after xFinalize is called. +The sqlite_aggregate_count() routine can be used to find out +how many rows of data were passed to the aggregate. The xFinalize +callback should invoke one of the sqlite_set_result_... +interfaces to set the final result of the aggregate. +

    + +

    +SQLite now implements all of its built-in functions using this +interface. For additional information and examples on how to create +new SQL functions, review the SQLite source code in the file +func.c. +

    + +

    5.0 Multi-Threading And SQLite

    + +

    +If SQLite is compiled with the THREADSAFE preprocessor macro set to 1, +then it is safe to use SQLite from two or more threads of the same process +at the same time. But each thread should have its own sqlite* +pointer returned from sqlite_open. It is never safe for two +or more threads to access the same sqlite* pointer at the same time. +

    + +

    +In precompiled SQLite libraries available on the website, the Unix +versions are compiled with THREADSAFE turned off but the Windows +versions are compiled with THREADSAFE turned on. If you need something +different than this you will have to recompile. +

    + +

    +Under Unix, an sqlite* pointer should not be carried across a +fork() system call into the child process. The child process +should open its own copy of the database after the fork(). +

    + +

    6.0 Usage Examples

    + +

    For examples of how the SQLite C/C++ interface can be used, +refer to the source code for the sqlite program in the +file src/shell.c of the source tree. +Additional information about sqlite is available at +sqlite.html. +See also the sources to the Tcl interface for SQLite in +the source file src/tclsqlite.c.

    ADDED pages/capi3.in Index: pages/capi3.in ================================================================== --- /dev/null +++ pages/capi3.in @@ -0,0 +1,515 @@ +C/C++ Interface For SQLite Version 3 + + +proc AddHyperlinks {txt} { + regsub -all {([^:alnum:>])(sqlite3_\w+)(\([^\)]*\))} $txt \ + {\1\2\3} t2 + puts $t2 +} + +AddHyperlinks { +

    C/C++ Interface For SQLite Version 3

    + +

    1.0 Overview

    + +

    +SQLite version 3.0 is a new version of SQLite, derived from +the SQLite 2.8.13 code base, but with an incompatible file format +and API. +SQLite version 3.0 was created to answer demand for the following features: +

    + +
      +
    • Support for UTF-16.
    • +
    • User-definable text collating sequences.
    • +
    • The ability to store BLOBs in indexed columns.
    • +
    + +

    +It was necessary to move to version 3.0 to implement these features because +each requires incompatible changes to the database file format. Other +incompatible changes, such as a cleanup of the API, were introduced at the +same time under the theory that it is best to get your incompatible changes +out of the way all at once. +

    + +

    +The API for version 3.0 is similar to the version 2.X API, +but with some important changes. Most noticeably, the "sqlite_" +prefix that occurs on the beginning of all API functions and data +structures are changed to "sqlite3_". +This avoids confusion between the two APIs and allows linking against both +SQLite 2.X and SQLite 3.0 at the same time. +

    + +

    +There is no agreement on what the C datatype for a UTF-16 +string should be. Therefore, SQLite uses a generic type of void* +to refer to UTF-16 strings. Client software can cast the void* +to whatever datatype is appropriate for their system. +

    + +

    2.0 C/C++ Interface

    + +

    +The API for SQLite 3.0 includes 83 separate functions in addition +to several data structures and #defines. (A complete +API reference is provided as a separate document.) +Fortunately, the interface is not nearly as complex as its size implies. +Simple programs can still make do with only 3 functions: +sqlite3_open(), +sqlite3_exec(), and +sqlite3_close(). +More control over the execution of the database engine is provided +using +sqlite3_prepare() +to compile an SQLite statement into byte code and +sqlite3_step() +to execute that bytecode. +A family of routines with names beginning with +sqlite3_column_ +is used to extract information about the result set of a query. +Many interface functions come in pairs, with both a UTF-8 and +UTF-16 version. And there is a collection of routines +used to implement user-defined SQL functions and user-defined +text collating sequences. +

    + + +

    2.1 Opening and closing a database

    + +
    +   typedef struct sqlite3 sqlite3;
    +   int sqlite3_open(const char*, sqlite3**);
    +   int sqlite3_open16(const void*, sqlite3**);
    +   int sqlite3_close(sqlite3*);
    +   const char *sqlite3_errmsg(sqlite3*);
    +   const void *sqlite3_errmsg16(sqlite3*);
    +   int sqlite3_errcode(sqlite3*);
    +
    + +

    +The sqlite3_open() routine returns an integer error code rather than +a pointer to the sqlite3 structure as the version 2 interface did. +The difference between sqlite3_open() +and sqlite3_open16() is that sqlite3_open16() takes UTF-16 (in host native +byte order) for the name of the database file. If a new database file +needs to be created, then sqlite3_open16() sets the internal text +representation to UTF-16 whereas sqlite3_open() sets the text +representation to UTF-8. +

    + +

    +The opening and/or creating of the database file is deferred until the +file is actually needed. This allows options and parameters, such +as the native text representation and default page size, to be +set using PRAGMA statements. +

    + +

    +The sqlite3_errcode() routine returns a result code for the most +recent major API call. sqlite3_errmsg() returns an English-language +text error message for the most recent error. The error message is +represented in UTF-8 and will be ephemeral - it could disappear on +the next call to any SQLite API function. sqlite3_errmsg16() works like +sqlite3_errmsg() except that it returns the error message represented +as UTF-16 in host native byte order. +

    + +

    +The error codes for SQLite version 3 are unchanged from version 2. +They are as follows: +

    + +
    +#define SQLITE_OK           0   /* Successful result */
    +#define SQLITE_ERROR        1   /* SQL error or missing database */
    +#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
    +#define SQLITE_PERM         3   /* Access permission denied */
    +#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    +#define SQLITE_BUSY         5   /* The database file is locked */
    +#define SQLITE_LOCKED       6   /* A table in the database is locked */
    +#define SQLITE_NOMEM        7   /* A malloc() failed */
    +#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    +#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
    +#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    +#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    +#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
    +#define SQLITE_FULL        13   /* Insertion failed because database is full */
    +#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    +#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
    +#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
    +#define SQLITE_SCHEMA      17   /* The database schema changed */
    +#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
    +#define SQLITE_CONSTRAINT  19   /* Abort due to constraint violation */
    +#define SQLITE_MISMATCH    20   /* Data type mismatch */
    +#define SQLITE_MISUSE      21   /* Library used incorrectly */
    +#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    +#define SQLITE_AUTH        23   /* Authorization denied */
    +#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
    +#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
    +
    + +

    2.2 Executing SQL statements

    + +
    +   typedef int (*sqlite_callback)(void*,int,char**, char**);
    +   int sqlite3_exec(sqlite3*, const char *sql, sqlite_callback, void*, char**);
    +
    + +

    +The sqlite3_exec function works much as it did in SQLite version 2. +Zero or more SQL statements specified in the second parameter are compiled +and executed. Query results are returned to a callback routine. +See the API reference for additional +information. +

    + +

    +In SQLite version 3, the sqlite3_exec routine is just a wrapper around +calls to the prepared statement interface. +

    + +
    +   typedef struct sqlite3_stmt sqlite3_stmt;
    +   int sqlite3_prepare(sqlite3*, const char*, int, sqlite3_stmt**, const char**);
    +   int sqlite3_prepare16(sqlite3*, const void*, int, sqlite3_stmt**, const void**);
    +   int sqlite3_finalize(sqlite3_stmt*);
    +   int sqlite3_reset(sqlite3_stmt*);
    +
    + +

    +The sqlite3_prepare interface compiles a single SQL statement into byte code +for later execution. This interface is now the preferred way of accessing +the database. +

    + +

    +The SQL statement is a UTF-8 string for sqlite3_prepare(). +The sqlite3_prepare16() works the same way except +that it expects a UTF-16 string as SQL input. +Only the first SQL statement in the input string is compiled. +The fourth parameter is filled in with a pointer to the next (uncompiled) +SQLite statement in the input string, if any. +The sqlite3_finalize() routine deallocates a prepared SQL statement. +All prepared statements must be finalized before the database can be +closed. +The sqlite3_reset() routine resets a prepared SQL statement so that it +can be executed again. +

    + +

    +The SQL statement may contain tokens of the form "?" or "?nnn" or ":aaa" +where "nnn" is an integer and "aaa" is an identifier. +Such tokens represent unspecified literal values (or "wildcards") +to be filled in later by the +sqlite3_bind interface. +Each wildcard has an associated number which is its sequence in the +statement or the "nnn" in the case of a "?nnn" form. +It is allowed for the same wildcard +to occur more than once in the same SQL statement, in which case +all instances of that wildcard will be filled in with the same value. +Unbound wildcards have a value of NULL. +

    + +
    +   int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    +   int sqlite3_bind_double(sqlite3_stmt*, int, double);
    +   int sqlite3_bind_int(sqlite3_stmt*, int, int);
    +   int sqlite3_bind_int64(sqlite3_stmt*, int, long long int);
    +   int sqlite3_bind_null(sqlite3_stmt*, int);
    +   int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
    +   int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    +   int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
    +
    + +

    +There is an assortment of sqlite3_bind routines used to assign values +to wildcards in a prepared SQL statement. Unbound wildcards +are interpreted as NULLs. Bindings are not reset by sqlite3_reset(). +But wildcards can be rebound to new values after an sqlite3_reset(). +

    + +

    +After an SQL statement has been prepared (and optionally bound), it +is executed using: +

    + +
    +   int sqlite3_step(sqlite3_stmt*);
    +
    + +

    +The sqlite3_step() routine returns SQLITE_ROW if it is returning a single +row of the result set, or SQLITE_DONE if execution has completed, either +normally or due to an error. It might also return SQLITE_BUSY if it is +unable to open the database file. If the return value is SQLITE_ROW, then +the following routines can be used to extract information about that row +of the result set: +

    + +
    +   const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_count(sqlite3_stmt*);
    +   const char *sqlite3_column_decltype(sqlite3_stmt *, int iCol);
    +   const void *sqlite3_column_decltype16(sqlite3_stmt *, int iCol);
    +   double sqlite3_column_double(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_int(sqlite3_stmt*, int iCol);
    +   long long int sqlite3_column_int64(sqlite3_stmt*, int iCol);
    +   const char *sqlite3_column_name(sqlite3_stmt*, int iCol);
    +   const void *sqlite3_column_name16(sqlite3_stmt*, int iCol);
    +   const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
    +   const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_type(sqlite3_stmt*, int iCol);
    +
    + +

    +The +sqlite3_column_count() +function returns the number of columns in +the results set. sqlite3_column_count() can be called at any time after +sqlite3_prepare(). +sqlite3_data_count() +works similarly to +sqlite3_column_count() except that it only works following sqlite3_step(). +If the previous call to sqlite3_step() returned SQLITE_DONE or an error code, +then sqlite3_data_count() will return 0 whereas sqlite3_column_count() will +continue to return the number of columns in the result set. +

    + +

    Returned data is examined using the other sqlite3_column_***() functions, +all of which take a column number as their second parameter. Columns are +zero-indexed from left to right. Note that this is different to parameters, +which are indexed starting at one. +

    + +

    +The sqlite3_column_type() function returns the +datatype for the value in the Nth column. The return value is one +of these: +

    + +
    +   #define SQLITE_INTEGER  1
    +   #define SQLITE_FLOAT    2
    +   #define SQLITE_TEXT     3
    +   #define SQLITE_BLOB     4
    +   #define SQLITE_NULL     5
    +
    + +

    +The sqlite3_column_decltype() routine returns text which is the +declared type of the column in the CREATE TABLE statement. For an +expression, the return type is an empty string. sqlite3_column_name() +returns the name of the Nth column. sqlite3_column_bytes() returns +the number of bytes in a column that has type BLOB or the number of bytes +in a TEXT string with UTF-8 encoding. sqlite3_column_bytes16() returns +the same value for BLOBs but for TEXT strings returns the number of bytes +in a UTF-16 encoding. +sqlite3_column_blob() returns BLOB data. +sqlite3_column_text() returns TEXT data as UTF-8. +sqlite3_column_text16() returns TEXT data as UTF-16. +sqlite3_column_int() returns INTEGER data in the host machine's native +integer format. +sqlite3_column_int64() returns 64-bit INTEGER data. +Finally, sqlite3_column_double() returns floating point data. +

    + +

    +It is not necessary to retrieve data in the format specified by +sqlite3_column_type(). If a different format is requested, the data +is converted automatically. +

    + +

    +Data format conversions can invalidate the pointer returned by +prior calls to sqlite3_column_blob(), sqlite3_column_text(), and/or +sqlite3_column_text16(). Pointers might be invalidated in the following +cases: +

    +
      +
    • +The initial content is a BLOB and sqlite3_column_text() +or sqlite3_column_text16() +is called. A zero-terminator might need to be added to the string. +

    • +
    • +The initial content is UTF-8 text and sqlite3_column_bytes16() or +sqlite3_column_text16() is called. The content must be converted to UTF-16. +

    • +
    • +The initial content is UTF-16 text and sqlite3_column_bytes() or +sqlite3_column_text() is called. The content must be converted to UTF-8. +

    • +
    +

    +Note that conversions between UTF-16be and UTF-16le +are always done in place and do +not invalidate a prior pointer, though of course the content of the buffer +that the prior pointer points to will have been modified. Other kinds +of conversion are done in place when it is possible, but sometimes it is +not possible and in those cases prior pointers are invalidated. +

    + +

    +The safest and easiest to remember policy is this: assume that any +result from +

      +
    • sqlite3_column_blob(),
    • +
    • sqlite3_column_text(), or
    • +
    • sqlite3_column_text16()
    • +
    +is invalidated by subsequent calls to +
      +
    • sqlite3_column_bytes(),
    • +
    • sqlite3_column_bytes16(),
    • +
    • sqlite3_column_text(), or
    • +
    • sqlite3_column_text16().
    • +
    +This means that you should always call sqlite3_column_bytes() or +sqlite3_column_bytes16() before calling sqlite3_column_blob(), +sqlite3_column_text(), or sqlite3_column_text16(). +

    + +

    2.3 User-defined functions

    + +

    +User defined functions can be created using the following routine: +

    + +
    +   typedef struct sqlite3_value sqlite3_value;
    +   int sqlite3_create_function(
    +     sqlite3 *,
    +     const char *zFunctionName,
    +     int nArg,
    +     int eTextRep,
    +     void*,
    +     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xFinal)(sqlite3_context*)
    +   );
    +   int sqlite3_create_function16(
    +     sqlite3*,
    +     const void *zFunctionName,
    +     int nArg,
    +     int eTextRep,
    +     void*,
    +     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xFinal)(sqlite3_context*)
    +   );
    +   #define SQLITE_UTF8     1
    +   #define SQLITE_UTF16    2
    +   #define SQLITE_UTF16BE  3
    +   #define SQLITE_UTF16LE  4
    +   #define SQLITE_ANY      5
    +
    + +

    +The nArg parameter specifies the number of arguments to the function. +A value of 0 indicates that any number of arguments is allowed. The +eTextRep parameter specifies what representation text values are expected +to be in for arguments to this function. The value of this parameter should +be one of the values defined above. SQLite version 3 allows multiple +implementations of the same function using different text representations. +The database engine chooses the function that minimizes the number +of text conversions required. +

    + +

    +Normal functions specify only xFunc and leave xStep and xFinal set to NULL. +Aggregate functions specify xStep and xFinal and leave xFunc set to NULL. +There is no separate sqlite3_create_aggregate() API. +

    + +

    +The function name is specified in UTF-8. A separate sqlite3_create_function16() +API works the same as sqlite3_create_function() +except that the function name is specified in UTF-16 host byte order. +

    + +

    +Notice that the parameters to functions are now pointers to sqlite3_value +structures instead of pointers to strings as in SQLite version 2.X. +The following routines are used to extract useful information from these +"values": +

    + +
    +   const void *sqlite3_value_blob(sqlite3_value*);
    +   int sqlite3_value_bytes(sqlite3_value*);
    +   int sqlite3_value_bytes16(sqlite3_value*);
    +   double sqlite3_value_double(sqlite3_value*);
    +   int sqlite3_value_int(sqlite3_value*);
    +   long long int sqlite3_value_int64(sqlite3_value*);
    +   const unsigned char *sqlite3_value_text(sqlite3_value*);
    +   const void *sqlite3_value_text16(sqlite3_value*);
    +   int sqlite3_value_type(sqlite3_value*);
    +
    + +

    +Function implementations use the following APIs to acquire context and +to report results: +

    + +
    +   void *sqlite3_aggregate_context(sqlite3_context*, int nbyte);
    +   void *sqlite3_user_data(sqlite3_context*);
    +   void sqlite3_result_blob(sqlite3_context*, const void*, int n, void(*)(void*));
    +   void sqlite3_result_double(sqlite3_context*, double);
    +   void sqlite3_result_error(sqlite3_context*, const char*, int);
    +   void sqlite3_result_error16(sqlite3_context*, const void*, int);
    +   void sqlite3_result_int(sqlite3_context*, int);
    +   void sqlite3_result_int64(sqlite3_context*, long long int);
    +   void sqlite3_result_null(sqlite3_context*);
    +   void sqlite3_result_text(sqlite3_context*, const char*, int n, void(*)(void*));
    +   void sqlite3_result_text16(sqlite3_context*, const void*, int n, void(*)(void*));
    +   void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
    +   void *sqlite3_get_auxdata(sqlite3_context*, int);
    +   void sqlite3_set_auxdata(sqlite3_context*, int, void*, void (*)(void*));
    +
    + +

    2.4 User-defined collating sequences

    + +

    +The following routines are used to implement user-defined +collating sequences: +

    + +
    +   sqlite3_create_collation(sqlite3*, const char *zName, int eTextRep, void*,
    +      int(*xCompare)(void*,int,const void*,int,const void*));
    +   sqlite3_create_collation16(sqlite3*, const void *zName, int eTextRep, void*,
    +      int(*xCompare)(void*,int,const void*,int,const void*));
    +   sqlite3_collation_needed(sqlite3*, void*, 
    +      void(*)(void*,sqlite3*,int eTextRep,const char*));
    +   sqlite3_collation_needed16(sqlite3*, void*,
    +      void(*)(void*,sqlite3*,int eTextRep,const void*));
    +
    + +

    +The sqlite3_create_collation() function specifies a collating sequence name +and a comparison function to implement that collating sequence. The +comparison function is only used for comparing text values. The eTextRep +parameter is one of SQLITE_UTF8, SQLITE_UTF16LE, SQLITE_UTF16BE, or +SQLITE_ANY to specify which text representation the comparison function works +with. Separate comparison functions can exist for the same collating +sequence for each of the UTF-8, UTF-16LE and UTF-16BE text representations. +The sqlite3_create_collation16() works like sqlite3_create_collation() except +that the collation name is specified in UTF-16 host byte order instead of +in UTF-8. +

    + +

    +The sqlite3_collation_needed() routine registers a callback which the +database engine will invoke if it encounters an unknown collating sequence. +The callback can look up an appropriate comparison function and invoke +sqlite3_create_collation() as needed. The fourth parameter to the callback +is the name of the collating sequence in UTF-8. For sqlite3_collation_needed16() +the callback sends the collating sequence name in UTF-16 host byte order. +

    +} +
    ADDED pages/capi3ref.in Index: pages/capi3ref.in ================================================================== --- /dev/null +++ pages/capi3ref.in @@ -0,0 +1,168 @@ +C/C++ Interface For SQLite Version 3 + +

    C/C++ Interface For SQLite Version 3

    + + +set in [open sqlite3.h] +set title {} +set type {} +set body {} +set code {} +set phase 0 +set content {} +while {![eof $in]} { + set line [gets $in] + if {$phase==0} { + # Looking for the CAPI3REF: keyword + if {[regexp {^\*\* CAPI3REF: +(.*)} $line all tx]} { + set title $tx + set phase 1 + } + } elseif {$phase==1} { + if {[string range $line 0 1]=="**"} { + set lx [string trim [string range $line 3 end]] + if {[regexp {^CATEGORY: +([a-z]*)} $lx all cx]} { + set type $cx + } elseif {[regexp {^KEYWORDS: +(.*)} $lx all kx]} { + foreach k $kx { + set keyword($k) 1 + } + } else { + append body $lx\n + } + } elseif {[string range $line 0 1]=="*/"} { + set phase 2 + } + } elseif {$phase==2} { + if {$line==""} { + set kwlist [lsort [array names keyword]] + unset -nocomplain keyword + set key $type:$kwlist + lappend content [list $key $title $type $kwlist $body $code] + set title {} + set keywords {} + set type {} + set body {} + set code {} + set phase 0 + } else { + if {[regexp {^#define (SQLITE_[A-Z0-9_]+)} $line all kx]} { + set type constant + set keyword($kx) 1 + } elseif {[regexp {^typedef .* (sqlite[0-9a-z_]+);} $line all kx]} { + set type datatype + set keyword($kx) 1 + } elseif {[regexp {^[a-z].*[ *](sqlite3_[a-z0-9_]+)\(} $line all kx]} { + set type function + set keyword($kx) 1 + } + append code $line\n + } + } +} + +# Output HTML that displays the given list in N columns +# +proc output_list {N lx} { + puts {} + set len [llength $lx] + set n [expr {($len + $N - 1)/$N}] + for {set i 0} {$i<$N} {incr i} { + set start [expr {$i*$n}] + set end [expr {($i+1)*$n}] + puts {} + } + puts {
      } + for {set j $start} {$j<$end} {incr j} { + set entry [lindex $lx $j] + if {$entry!=""} { + foreach {link label} $entry break + puts "
    • $label
    • " + } + } + puts {
    } +} + +# Do a table of contents for objects +# +set objlist {} +foreach c $content { + foreach {key title type keywords body code} $c break + if {$type!="datatype"} continue + set keywords [lsort $keywords] + set k [lindex $keywords 0] + foreach kw $keywords { + lappend objlist [list $k $kw] + } +} +puts {

    Datatypes:

    } +output_list 3 $objlist +puts {
    } + +# Do a table of contents for constants +# +set clist {} +foreach c $content { + foreach {key title type keywords body code} $c break + if {$type!="constant"} continue + set keywords [lsort $keywords] + set k [lindex $keywords 0] + foreach kw $keywords { + lappend clist [list $k $kw] + } +} +puts {

    Constants:

    } +set clist [lsort -index 1 $clist] +output_list 3 $clist +puts {
    } + + +# Do a table of contents for functions +# +set funclist {} +foreach c $content { + foreach {key title type keywords body code} $c break + if {$type!="function"} continue + set keywords [lsort $keywords] + set k [lindex $keywords 0] + foreach kw $keywords { + lappend funclist [list $k $kw] + } +} +puts {

    Functions:

    } +set funclist [lsort -index 1 $funclist] +output_list 3 $funclist +puts {
    } + +# Resolve links +# +proc resolve_links {args} { + set tag [lindex $args 0] + regsub -all {[^a-zA-Z0-9_]} $tag {} tag + set x "" + if {[llength $args]>2} { + append x [lrange $args 2 end] + } else { + append x [lindex $args 0] + } + return $x +} + +# Output all the records +# +foreach c [lsort $content] { + foreach {key title type keywords body code} $c break + foreach k $keywords { + puts "" + } + puts "

    $title

    " + puts "
    "
    +  puts "$code"
    +  puts "
    " + regsub -all "\n\n+" $body {

    \1

    } body + regsub -all {\[}

    $body

    {[resolve_links } body + set body [subst -novar -noback $body] + puts "$body" + puts "
    " +} +
    ADDED pages/changes.in Index: pages/changes.in ================================================================== --- /dev/null +++ pages/changes.in @@ -0,0 +1,1873 @@ +SQLite changes + +

    +This page provides a high-level summary of changes to SQLite. +For more detail, refer the the checkin logs generated by +CVS at + +http://www.sqlite.org/cvstrac/timeline. +

    + + +proc chng {date desc} { + if {[regexp {\(([0-9.]+)\)} $date all vers]} { + set label [string map {. _} $vers] + puts "" + } + puts "
    $date
    " + regsub -all {[Tt]icket #(\d+)} $desc \ + {
    \0} desc + puts "

      $desc

    " + puts "
    " +} + +chng {2007 Nov 05 (3.5.2)} { +
  • Dropped support for the SQLITE_OMIT_MEMORY_ALLOCATION compile-time +option. +
  • Always open files using FILE_FLAG_RANDOM_ACCESS under windows. +
  • The 3rd parameter of the built-in SUBSTR() function is now optional. +
  • Bug fix: do not invoke the authorizer when reparsing the schema after +a schema change. +
  • Added the experimental malloc-free memory allocator in mem3.c. +
  • Virtual machine stores 64-bit integer and floating point constants +in binary instead of text for a performance boost. +
  • Fix a race condition in test_async.c. +
  • Added the ".timer" command to the CLI +} + +chng {2007 Oct 04 (3.5.1)} { +
  • Nota Bene: We are not using terms "alpha" or "beta" on this + release because the code is stable and because if we use those terms, + nobody will upgrade. However, we still reserve the right to make + incompatible changes to the new VFS interface in future releases.
  • + +
  • Fix a bug in the handling of SQLITE_FULL errors that could lead + to database corruption. Ticket #2686. +
  • The test_async.c drive now does full file locking and works correctly + when used simultaneously by multiple processes on the same database. +
  • The CLI ignores whitespace (including comments) at the end of lines +
  • Make sure the query optimizer checks dependences on all terms of + a compound SELECT statement. Ticket #2640. +
  • Add demonstration code showing how to build a VFS for a raw + mass storage without a filesystem. +
  • Added an output buffer size parameter to the xGetTempname() method + of the VFS layer. +
  • Sticky SQLITE_FULL or SQLITE_IOERR errors in the pager are reset + when a new transaction is started. +} + + +chng {2007 Sep 04 (3.5.0) alpha} { +
  • Redesign the OS interface layer. See + 34to35.html for details. + *** Potentially incompatible change *** +
  • The + sqlite3_release_memory(), + + sqlite3_soft_heap_limit(), + and + sqlite3_enable_shared_cache() interfaces now work cross all + threads in the process, not just the single thread in which they + are invoked. + *** Potentially incompatible change *** +
  • Added the + sqlite3_open_v2() + interface. +
  • Reimplemented the memory allocation subsystem and made it + replacable at compile-time. +
  • Created a new mutex subsystem and made it replacable at + compile-time. +
  • The same database connection may now be used simultaneously by + separate threads. +} + + +chng {2007 August 13 (3.4.2)} { +
  • Fix a database corruption bug that might occur if a ROLLBACK command +is executed in auto-vacuum mode +and a very small +soft_heap_limit is set. +Ticket #2565. + +
  • Add the ability to run a full regression test with a small +soft_heap_limit. + +
  • Fix other minor problems with using small soft heap limits. + +
  • Work-around for +GCC bug 32575. + +
  • Improved error detection of misused aggregate functions. + +
  • Improvements to the amalgamation generator script so that all symbols +are prefixed with either SQLITE_PRIVATE or SQLITE_API. +} + +chng {2007 July 20 (3.4.1)} { +
  • Fix a bug in VACUUM that can lead to + + database corruption if two + processes are connected to the database at the same time and one + VACUUMs then the other then modifies the database.
  • +
  • The expression "+column" is now considered the same as "column" + when computing the collating sequence to use on the expression.
  • +
  • In the TCL language interface, + "@variable" instead of "$variable" always binds as a blob.
  • +
  • Added PRAGMA freelist_count + for determining the current size of the freelist.
  • +
  • The + PRAGMA auto_vacuum=incremental setting is now persistent.
  • +
  • Add FD_CLOEXEC to all open files under unix.
  • +
  • Fix a bug in the + min()/max() optimization when applied to + descending indices.
  • +
  • Make sure the TCL language interface works correctly with 64-bit + integers on 64-bit machines.
  • +
  • Allow the value -9223372036854775808 as an integer literal in SQL + statements.
  • +
  • Add the capability of "hidden" columns in virtual tables.
  • +
  • Use the macro SQLITE_PRIVATE (defaulting to "static") on all + internal functions in the amalgamation.
  • +
  • Add pluggable tokenizers and ICU + tokenization support to FTS2
  • +
  • Other minor bug fixes and documentation enhancements
  • +} + +chng {2007 June 18 (3.4.0)} { +
  • Fix a bug that can lead to database corruption if an SQLITE_BUSY error + occurs in the middle of an explicit transaction and that transaction + is later committed. + Ticket #2409. + See the + + CorruptionFollowingBusyError wiki page for details. +
  • Fix a bug that can lead to database corruption if autovacuum mode is + on and a malloc() failure follows a CREATE TABLE or CREATE INDEX statement + which itself follows a cache overflow inside a transaction. See + ticket #2418. +
  • +
  • Added explicit upper bounds on the sizes and + quantities of things SQLite can process. This change might cause + compatibility problems for + applications that use SQLite in the extreme, which is why the current + release is 3.4.0 instead of 3.3.18.
  • +
  • Added support for + Incremental BLOB I/O.
  • +
  • Added the zeroblob API + and the zeroblob() SQL function.
  • +
  • Added support for + Incremental Vacuum.
  • +
  • Added the SQLITE_MIXED_ENDIAN_64BIT_FLOAT compile-time option to suppport + ARM7 processors with goofy endianness.
  • +
  • Removed all instances of sprintf() and strcpy() from the core library.
  • +
  • Added support for + International Components for Unicode (ICU) to the full-text search + extensions. +

    +

      +
    • In the windows OS driver, reacquire a SHARED lock if an attempt to + acquire an EXCLUSIVE lock fails. Ticket #2354
    • +
    • Fix the REPLACE() function so that it returns NULL if the second argument + is an empty string. Ticket #2324.
    • +
    • Document the hazards of type coversions in + sqlite3_column_blob() + and related APIs. Fix unnecessary type conversions. Ticket #2321.
    • +
    • Internationalization of the TRIM() function. Ticket #2323
    • +
    • Use memmove() instead of memcpy() when moving between memory regions + that might overlap. Ticket #2334
    • +
    • Fix an optimizer bug involving subqueries in a compound SELECT that has + both an ORDER BY and a LIMIT clause. Ticket #2339.
    • +
    • Make sure the sqlite3_snprintf() + interface does not zero-terminate the buffer if the buffer size is + less than 1. Ticket #2341
    • +
    • Fix the built-in printf logic so that it prints "NaN" not "Inf" for + floating-point NaNs. Ticket #2345
    • +
    • When converting BLOB to TEXT, use the text encoding of the main database. + Ticket #2349
    • +
    • Keep the full precision of integers (if possible) when casting to + NUMERIC. Ticket #2364
    • +
    • Fix a bug in the handling of UTF16 codepoint 0xE000
    • +
    • Consider explicit collate clauses when matching WHERE constraints + to indices in the query optimizer. Ticket #2391
    • +
    • Fix the query optimizer to correctly handle constant expressions in + the ON clause of a LEFT JOIN. Ticket #2403
    • +
    • Fix the query optimizer to handle rowid comparisions to NULL + correctly. Ticket #2404
    • +
    • Fix many potental segfaults that could be caused by malicious SQL + statements.
    • +} + +chng {2007 April 25 (3.3.17)} { +
    • When the "write_version" value of the database header is larger than + what the library understands, make the database read-only instead of + unreadable.
    • +
    • Other minor bug fixes
    • +} + +chng {2007 April 18 (3.3.16)} { +
    • Fix a bug that caused VACUUM to fail if NULLs appeared in a + UNIQUE column.
    • +
    • Reinstate performance improvements that were added in 3.3.14 + but regressed in 3.3.15.
    • +
    • Fix problems with the handling of ORDER BY expressions on + compound SELECT statements in subqueries.
    • +
    • Fix a potential segfault when destroying locks on WinCE in + a multi-threaded environment.
    • +
    • Documentation updates.
    • +} + +chng {2007 April 9 (3.3.15)} { +
    • Fix a bug introduced in 3.3.14 that caused a rollback of + CREATE TEMP TABLE to leave the database connection wedged.
    • +
    • Fix a bug that caused an extra NULL row to be returned when + a descending query was interrupted by a change to the database.
    • +
    • The FOR EACH STATEMENT clause on a trigger now causes a syntax + error. It used to be silently ignored.
    • +
    • Fix an obscure and relatively harmless problem that might have caused + a resource leak following an I/O error.
    • +
    • Many improvements to the test suite. Test coverage now exceeded 98%
    • +} + +chng {2007 April 2 (3.3.14)} { +
    • Fix a bug + in 3.3.13 that could cause a segfault when the IN operator + is used one one term of a two-column index and the right-hand side of + the IN operator contains a NULL.
    • +
    • Added a new OS interface method for determining the sector size + of underlying media: sqlite3OsSectorSize().
    • +
    • A new algorithm for statements of the form + INSERT INTO table1 SELECT * FROM table2 + is faster and reduces fragmentation. VACUUM uses statements of + this form and thus runs faster and defragments better.
    • +
    • Performance enhancements through reductions in disk I/O: +
        +
      • Do not read the last page of an overflow chain when + deleting the row - just add that page to the freelist.
      • +
      • Do not store pages being deleted in the + rollback journal.
      • +
      • Do not read in the (meaningless) content of + pages extracted from the freelist.
      • +
      • Do not flush the page cache (and thus avoiding + a cache refill) unless another process changes the underlying + database file.
      • +
      • Truncate rather than delete the rollback journal when committing + a transaction in exclusive access mode, or when committing the TEMP + database.
      • +
    • +
    • Added support for exclusive access mode using + + "PRAGMA locking_mode=EXCLUSIVE"
    • +
    • Use heap space instead of stack space for large buffers in the + pager - useful on embedded platforms with stack-space + limitations.
    • +
    • Add a makefile target "sqlite3.c" that builds an amalgamation containing + the core SQLite library C code in a single file.
    • +
    • Get the library working correctly when compiled + with GCC option "-fstrict-aliasing".
    • +
    • Removed the vestigal SQLITE_PROTOCOL error.
    • +
    • Improvements to test coverage, other minor bugs fixed, + memory leaks plugged, + code refactored and/or recommented in places for easier reading.
    • +} + +chng {2007 February 13 (3.3.13)} { +
    • Add a "fragmentation" measurement in the output of sqlite3_analyzer.
    • +
    • Add the COLLATE operator used to explicitly set the collating sequence +used by an expression. This feature is considered experimental pending +additional testing.
    • +
    • Allow up to 64 tables in a join - the old limit was 32.
    • +
    • Added two new experimental functions: +randomBlob() and +hex(). +Their intended use is to facilitate generating +UUIDs. +
    • +
    • Fix a problem where +PRAGMA count_changes was +causing incorrect results for updates on tables with triggers
    • +
    • Fix a bug in the ORDER BY clause optimizer for joins where the +left-most table in the join is constrained by a UNIQUE index.
    • +
    • Fixed a bug in the "copy" method of the TCL interface.
    • +
    • Bug fixes in fts1 and fts2 modules.
    • +} + +chng {2007 January 27 (3.3.12)} { +
    • Fix another bug in the IS NULL optimization that was added in +version 3.3.9.
    • +
    • Fix a assertion fault that occurred on deeply nested views.
    • +
    • Limit the amount of output that +PRAGMA integrity_check +generates.
    • +
    • Minor syntactic changes to support a wider variety of compilers.
    • +} + +chng {2007 January 22 (3.3.11)} { +
    • Fix another bug in the implementation of the new +sqlite3_prepare_v2() API. +We'll get it right eventually...
    • +
    • Fix a bug in the IS NULL optimization that was added in version 3.3.9 - +the bug was causing incorrect results on certain LEFT JOINs that included +in the WHERE clause an IS NULL constraint for the right table of the +LEFT JOIN.
    • +
    • Make AreFileApisANSI() a no-op macro in winCE since winCE does not +support this function.
    • +} + +chng {2007 January 9 (3.3.10)} { +
    • Fix bugs in the implementation of the new +sqlite3_prepare_v2() API +that can lead to segfaults.
    • +
    • Fix 1-second round-off errors in the + +strftime() function
    • +
    • Enhance the windows OS layer to provide detailed error codes
    • +
    • Work around a win2k problem so that SQLite can use single-character +database file names
    • +
    • The +user_version and +schema_version pragmas +correctly set their column names in the result set
    • +
    • Documentation updates
    • +} + +chng {2007 January 4 (3.3.9)} { +
    • Fix bugs in pager.c that could lead to database corruption if two +processes both try to recover a hot journal at the same instant
    • +
    • Added the sqlite3_prepare_v2() +API.
    • +
    • Fixed the ".dump" command in the command-line shell to show +indices, triggers and views again.
    • +
    • Change the table_info pragma so that it returns NULL for the default +value if there is no default value
    • +
    • Support for non-ASCII characters in win95 filenames
    • +
    • Query optimizer enhancements: +
        +
      • Optimizer does a better job of using indices to satisfy ORDER BY +clauses that sort on the integer primary key
      • +
      • Use an index to satisfy an IS NULL operator in the WHERE clause
      • +
      • Fix a bug that was causing the optimizer to miss an OR optimization +opportunity
      • +
      • The optimizer has more freedom to reorder tables in the FROM clause +even in there are LEFT joins.
      • +
      +
    • Extension loading supported added to winCE
    • +
    • Allow constraint names on the DEFAULT clause in a table definition
    • +
    • Added the ".bail" command to the command-line shell
    • +
    • Make CSV (comma separate value) output from the command-line shell +more closely aligned to accepted practice
    • +
    • Experimental FTS2 module added
    • +
    • Use sqlite3_mprintf() instead of strdup() to avoid libc dependencies
    • +
    • VACUUM uses a temporary file in the official TEMP folder, not in the +same directory as the original database
    • +
    • The prefix on temporary filenames on windows is changed from "sqlite" +to "etilqs".
    • +} + +chng {2006 October 9 (3.3.8)} { +
    • Support for full text search using the +FTS1 module +(beta)
    • +
    • Added OS-X locking patches (beta - disabled by default)
    • +
    • Introduce extended error codes and add error codes for various +kinds of I/O errors.
    • +
    • Added support for IF EXISTS on CREATE/DROP TRIGGER/VIEW
    • +
    • Fix the regression test suite so that it works with Tcl8.5
    • +
    • Enhance sqlite3_set_authorizer() to provide notification of calls to + SQL functions.
    • +
    • Added experimental API: sqlite3_auto_extension()
    • +
    • Various minor bug fixes
    • +} + +chng {2006 August 12 (3.3.7)} { +
    • Added support for +virtual tables +(beta)
    • +
    • Added support for + +dynamically loaded extensions (beta)
    • +
    • The +sqlite3_interrupt() +routine can be called for a different thread
    • +
    • Added the MATCH operator.
    • +
    • The default file format is now 1. +} + +chng {2006 June 6 (3.3.6)} { +
    • Plays better with virus scanners on windows
    • +
    • Faster :memory: databases
    • +
    • Fix an obscure segfault in UTF-8 to UTF-16 conversions
    • +
    • Added driver for OS/2
    • +
    • Correct column meta-information returned for aggregate queries
    • +
    • Enhanced output from EXPLAIN QUERY PLAN
    • +
    • LIMIT 0 now works on subqueries
    • +
    • Bug fixes and performance enhancements in the query optimizer
    • +
    • Correctly handle NULL filenames in ATTACH and DETACH
    • +
    • Inproved syntax error messages in the parser
    • +
    • Fix type coercion rules for the IN operator
    • +} + +chng {2006 April 5 (3.3.5)} { +
    • CHECK constraints use conflict resolution algorithms correctly.
    • +
    • The SUM() function throws an error on integer overflow.
    • +
    • Choose the column names in a compound query from the left-most SELECT + instead of the right-most.
    • +
    • The sqlite3_create_collation() function + honors the SQLITE_UTF16_ALIGNED flag.
    • +
    • SQLITE_SECURE_DELETE compile-time option causes deletes to overwrite + old data with zeros.
    • +
    • Detect integer overflow in abs().
    • +
    • The random() function provides 64 bits of randomness instead of + only 32 bits.
    • +
    • Parser detects and reports automaton stack overflow.
    • +
    • Change the round() function to return REAL instead of TEXT.
    • +
    • Allow WHERE clause terms on the left table of a LEFT OUTER JOIN to + contain aggregate subqueries.
    • +
    • Skip over leading spaces in text to numeric conversions.
    • +
    • Various minor bug and documentation typo fixes and + performance enhancements.
    • +} + +chng {2006 February 11 (3.3.4)} { +
    • Fix a blunder in the Unix mutex implementation that can lead to +deadlock on multithreaded systems.
    • +
    • Fix an alignment problem on 64-bit machines
    • +
    • Added the fullfsync pragma.
    • +
    • Fix an optimizer bug that could have caused some unusual LEFT OUTER JOINs +to give incorrect results.
    • +
    • The SUM function detects integer overflow and converts to accumulating +an approximate result using floating point numbers
    • +
    • Host parameter names can begin with '@' for compatibility with SQL Server. +
    • +
    • Other miscellaneous bug fixes
    • +} + +chng {2006 January 31 (3.3.3)} { +
    • Removed support for an ON CONFLICT clause on CREATE INDEX - it never +worked correctly so this should not present any backward compatibility +problems.
    • +
    • Authorizer callback now notified of ALTER TABLE ADD COLUMN commands
    • +
    • After any changes to the TEMP database schema, all prepared statements +are invalidated and must be recreated using a new call to +sqlite3_prepare()
    • +
    • Other minor bug fixes in preparation for the first stable release +of version 3.3
    • +} + +chng {2006 January 24 (3.3.2 beta)} { +
    • Bug fixes and speed improvements. Improved test coverage.
    • +
    • Changes to the OS-layer interface: mutexes must now be recursive.
    • +
    • Discontinue the use of thread-specific data for out-of-memory +exception handling
    • +} + +chng {2006 January 16 (3.3.1 alpha)} { +
    • Countless bug fixes
    • +
    • Speed improvements
    • +
    • Database connections can now be used by multiple threads, not just +the thread in which they were created.
    • +} + +chng {2006 January 10 (3.3.0 alpha)} { +
    • CHECK constraints
    • +
    • IF EXISTS and IF NOT EXISTS clauses on CREATE/DROP TABLE/INDEX.
    • +
    • DESC indices
    • +
    • More efficient encoding of boolean values resulting in smaller database +files
    • +
    • More aggressive SQLITE_OMIT_FLOATING_POINT
    • +
    • Separate INTEGER and REAL affinity
    • +
    • Added a virtual function layer for the OS interface
    • +
    • "exists" method added to the TCL interface
    • +
    • Improved response to out-of-memory errors
    • +
    • Database cache can be optionally shared between connections +in the same thread
    • +
    • Optional READ UNCOMMITTED isolation (instead of the default +isolation level of SERIALIZABLE) and table level locking when +database connections share a common cache.
    • +} + +chng {2005 December 19 (3.2.8)} { +
    • Fix an obscure bug that can cause database corruption under the +following unusual circumstances: A large INSERT or UPDATE statement which +is part of an even larger transaction fails due to a uniqueness contraint +but the containing transaction commits.
    • +} + +chng {2005 December 19 (2.8.17)} { +
    • Fix an obscure bug that can cause database corruption under the +following unusual circumstances: A large INSERT or UPDATE statement which +is part of an even larger transaction fails due to a uniqueness contraint +but the containing transaction commits.
    • +} + +chng {2005 September 24 (3.2.7)} { +
    • GROUP BY now considers NULLs to be equal again, as it should +
    • +
    • Now compiles on Solaris and OpenBSD and other Unix variants +that lack the fdatasync() function
    • +
    • Now compiles on MSVC++6 again
    • +
    • Fix uninitialized variables causing malfunctions for various obscure +queries
    • +
    • Correctly compute a LEFT OUTER JOINs that is constrained on the +left table only
    • +} + +chng {2005 September 17 (3.2.6)} { +
    • Fix a bug that can cause database corruption if a VACUUM (or + autovacuum) fails and is rolled back on a database that is + larger than 1GiB
    • +
    • LIKE optiization now works for columns with COLLATE NOCASE
    • +
    • ORDER BY and GROUP BY now use bounded memory
    • +
    • Added support for COUNT(DISTINCT expr)
    • +
    • Change the way SUM() handles NULL values in order to comply with + the SQL standard
    • +
    • Use fdatasync() instead of fsync() where possible in order to speed + up commits slightly
    • +
    • Use of the CROSS keyword in a join turns off the table reordering + optimization
    • +
    • Added the experimental and undocumented EXPLAIN QUERY PLAN capability
    • +
    • Use the unicode API in windows
    • +} + +chng {2005 August 27 (3.2.5)} { +
    • Fix a bug effecting DELETE and UPDATE statements that changed +more than 40960 rows.
    • +
    • Change the makefile so that it no longer requires GNUmake extensions
    • +
    • Fix the --enable-threadsafe option on the configure script
    • +
    • Fix a code generator bug that occurs when the left-hand side of an IN +operator is constant and the right-hand side is a SELECT statement
    • +
    • The PRAGMA synchronous=off statement now disables syncing of the +master journal file in addition to the normal rollback journals
    • +} + +chng {2005 August 24 (3.2.4)} { +
    • Fix a bug introduced in the previous release +that can cause a segfault while generating code +for complex WHERE clauses.
    • +
    • Allow floating point literals to begin or end with a decimal point.
    • +} + +chng {2005 August 21 (3.2.3)} { +
    • Added support for the CAST operator
    • +
    • Tcl interface allows BLOB values to be transferred to user-defined +functions
    • +
    • Added the "transaction" method to the Tcl interface
    • +
    • Allow the DEFAULT value of a column to call functions that have constant +operands
    • +
    • Added the ANALYZE command for gathering statistics on indices and +using those statistics when picking an index in the optimizer
    • +
    • Remove the limit (formerly 100) on the number of terms in the +WHERE clause
    • +
    • The right-hand side of the IN operator can now be a list of expressions +instead of just a list of constants
    • +
    • Rework the optimizer so that it is able to make better use of indices
    • +
    • The order of tables in a join is adjusted automatically to make +better use of indices
    • +
    • The IN operator is now a candidate for optimization even if the left-hand +side is not the left-most term of the index. Multiple IN operators can be +used with the same index.
    • +
    • WHERE clause expressions using BETWEEN and OR are now candidates +for optimization
    • +
    • Added the "case_sensitive_like" pragma and the SQLITE_CASE_SENSITIVE_LIKE +compile-time option to set its default value to "on".
    • +
    • Use indices to help with GLOB expressions and LIKE expressions too +when the case_sensitive_like pragma is enabled
    • +
    • Added support for grave-accent quoting for compatibility with MySQL
    • +
    • Improved test coverage
    • +
    • Dozens of minor bug fixes
    • +} + +chng {2005 June 13 (3.2.2)} { +
    • Added the sqlite3_db_handle() API
    • +
    • Added the sqlite3_get_autocommit() API
    • +
    • Added a REGEXP operator to the parser. There is no function to back +up this operator in the standard build but users can add their own using +sqlite3_create_function()
    • +
    • Speed improvements and library footprint reductions.
    • +
    • Fix byte alignment problems on 64-bit architectures.
    • +
    • Many, many minor bug fixes and documentation updates.
    • +} + +chng {2005 March 29 (3.2.1)} { +
    • Fix a memory allocation error in the new ADD COLUMN comment.
    • +
    • Documentation updates
    • +} + +chng {2005 March 21 (3.2.0)} { +
    • Added support for ALTER TABLE ADD COLUMN.
    • +
    • Added support for the "T" separator in ISO-8601 date/time strings.
    • +
    • Improved support for Cygwin.
    • +
    • Numerous bug fixes and documentation updates.
    • +} + +chng {2005 March 16 (3.1.6)} { +
    • Fix a bug that could cause database corruption when inserting + record into tables with around 125 columns.
    • +
    • sqlite3_step() is now much more likely to invoke the busy handler + and less likely to return SQLITE_BUSY.
    • +
    • Fix memory leaks that used to occur after a malloc() failure.
    • +} + +chng {2005 March 11 (3.1.5)} { +
    • The ioctl on OS-X to control syncing to disk is F_FULLFSYNC, + not F_FULLSYNC. The previous release had it wrong.
    • +} + +chng {2005 March 10 (3.1.4)} { +
    • Fix a bug in autovacuum that could cause database corruption if +a CREATE UNIQUE INDEX fails because of a constraint violation. +This problem only occurs if the new autovacuum feature introduced in +version 3.1 is turned on.
    • +
    • The F_FULLSYNC ioctl (currently only supported on OS-X) is disabled +if the synchronous pragma is set to something other than "full".
    • +
    • Add additional forward compatibility to the future version 3.2 database +file format.
    • +
    • Fix a bug in WHERE clauses of the form (rowid<'2')
    • +
    • New SQLITE_OMIT_... compile-time options added
    • +
    • Updates to the man page
    • +
    • Remove the use of strcasecmp() from the shell
    • +
    • Windows DLL exports symbols Tclsqlite_Init and Sqlite_Init
    • +} + +chng {2005 February 19 (3.1.3)} { +
    • Fix a problem with VACUUM on databases from which tables containing +AUTOINCREMENT have been dropped.
    • +
    • Add forward compatibility to the future version 3.2 database file +format.
    • +
    • Documentation updates
    • +} + +chng {2005 February 15 (3.1.2)} { +
    • Fix a bug that can lead to database corruption if there are two +open connections to the same database and one connection does a VACUUM +and the second makes some change to the database.
    • +
    • Allow "?" parameters in the LIMIT clause.
    • +
    • Fix VACUUM so that it works with AUTOINCREMENT.
    • +
    • Fix a race condition in AUTOVACUUM that can lead to corrupt databases
    • +
    • Add a numeric version number to the sqlite3.h include file.
    • +
    • Other minor bug fixes and performance enhancements.
    • +} + +chng {2005 February 15 (2.8.16)} { +
    • Fix a bug that can lead to database corruption if there are two +open connections to the same database and one connection does a VACUUM +and the second makes some change to the database.
    • +
    • Correctly handle quoted names in CREATE INDEX statements.
    • +
    • Fix a naming conflict between sqlite.h and sqlite3.h.
    • +
    • Avoid excess heap usage when copying expressions.
    • +
    • Other minor bug fixes.
    • +} + +chng {2005 February 1 (3.1.1 BETA)} { +
    • Automatic caching of prepared statements in the TCL interface
    • +
    • ATTACH and DETACH as well as some other operations cause existing + prepared statements to expire.
    • +
    • Numerious minor bug fixes
    • +} + +chng {2005 January 21 (3.1.0 ALPHA)} { +
    • Autovacuum support added
    • +
    • CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP added
    • +
    • Support for the EXISTS clause added.
    • +
    • Support for correlated subqueries added.
    • +
    • Added the ESCAPE clause on the LIKE operator.
    • +
    • Support for ALTER TABLE ... RENAME TABLE ... added
    • +
    • AUTOINCREMENT keyword supported on INTEGER PRIMARY KEY
    • +
    • Many SQLITE_OMIT_ macros inserts to omit features at compile-time + and reduce the library footprint.
    • +
    • The REINDEX command was added.
    • +
    • The engine no longer consults the main table if it can get + all the information it needs from an index.
    • +
    • Many nuisance bugs fixed.
    • +} + +chng {2004 October 11 (3.0.8)} { +
    • Add support for DEFERRED, IMMEDIATE, and EXCLUSIVE transactions.
    • +
    • Allow new user-defined functions to be created when there are +already one or more precompiled SQL statements.
    • +
    • Fix portability problems for Mingw/MSYS.
    • +
    • Fix a byte alignment problem on 64-bit Sparc machines.
    • +
    • Fix the ".import" command of the shell so that it ignores \r +characters at the end of lines.
    • +
    • The "csv" mode option in the shell puts strings inside double-quotes.
    • +
    • Fix typos in documentation.
    • +
    • Convert array constants in the code to have type "const".
    • +
    • Numerous code optimizations, specially optimizations designed to +make the code footprint smaller.
    • +} + +chng {2004 September 18 (3.0.7)} { +
    • The BTree module allocates large buffers using malloc() instead of + off of the stack, in order to play better on machines with limited + stack space.
    • +
    • Fixed naming conflicts so that versions 2.8 and 3.0 can be + linked and used together in the same ANSI-C source file.
    • +
    • New interface: sqlite3_bind_parameter_index()
    • +
    • Add support for wildcard parameters of the form: "?nnn"
    • +
    • Fix problems found on 64-bit systems.
    • +
    • Removed encode.c file (containing unused routines) from the + version 3.0 source tree.
    • +
    • The sqlite3_trace() callbacks occur before each statement + is executed, not when the statement is compiled.
    • +
    • Makefile updates and miscellaneous bug fixes.
    • +} + +chng {2004 September 02 (3.0.6 beta)} { +
    • Better detection and handling of corrupt database files.
    • +
    • The sqlite3_step() interface returns SQLITE_BUSY if it is unable + to commit a change because of a lock
    • +
    • Combine the implementations of LIKE and GLOB into a single + pattern-matching subroutine.
    • +
    • Miscellaneous code size optimizations and bug fixes
    • +} + +chng {2004 August 29 (3.0.5 beta)} { +
    • Support for ":AAA" style bind parameter names.
    • +
    • Added the new sqlite3_bind_parameter_name() interface.
    • +
    • Support for TCL variable names embedded in SQL statements in the + TCL bindings.
    • +
    • The TCL bindings transfer data without necessarily doing a conversion + to a string.
    • +
    • The database for TEMP tables is not created until it is needed.
    • +
    • Add the ability to specify an alternative temporary file directory + using the "sqlite_temp_directory" global variable.
    • +
    • A compile-time option (SQLITE_BUSY_RESERVED_LOCK) causes the busy + handler to be called when there is contention for a RESERVED lock.
    • +
    • Various bug fixes and optimizations
    • +} + +chng {2004 August 8 (3.0.4 beta)} { +
    • CREATE TABLE and DROP TABLE now work correctly as prepared statements.
    • +
    • Fix a bug in VACUUM and UNIQUE indices.
    • +
    • Add the ".import" command to the command-line shell.
    • +
    • Fix a bug that could cause index corruption when an attempt to + delete rows of a table is blocked by a pending query.
    • +
    • Library size optimizations.
    • +
    • Other minor bug fixes.
    • +} + +chng {2004 July 22 (2.8.15)} { +
    • This is a maintenance release only. Various minor bugs have been +fixed and some portability enhancements are added.
    • +} + +chng {2004 July 22 (3.0.3 beta)} { +
    • The second beta release for SQLite 3.0.
    • +
    • Add support for "PRAGMA page_size" to adjust the page size of +the database.
    • +
    • Various bug fixes and documentation updates.
    • +} + +chng {2004 June 30 (3.0.2 beta)} { +
    • The first beta release for SQLite 3.0.
    • +} + +chng {2004 June 22 (3.0.1 alpha)} { +
    • + *** Alpha Release - Research And Testing Use Only *** +
    • Lots of bug fixes.
    • +} + +chng {2004 June 18 (3.0.0 alpha)} { +
    • + *** Alpha Release - Research And Testing Use Only *** +
    • Support for internationalization including UTF-8, UTF-16, and + user defined collating sequences.
    • +
    • New file format that is 25% to 35% smaller for typical use.
    • +
    • Improved concurrency.
    • +
    • Atomic commits for ATTACHed databases.
    • +
    • Remove cruft from the APIs.
    • +
    • BLOB support.
    • +
    • 64-bit rowids.
    • +
    • More information. +} + +chng {2004 June 9 (2.8.14)} { +
    • Fix the min() and max() optimizer so that it works when the FROM + clause consists of a subquery.
    • +
    • Ignore extra whitespace at the end of of "." commands in the shell.
    • +
    • Bundle sqlite_encode_binary() and sqlite_decode_binary() with the + library.
    • +
    • The TEMP_STORE and DEFAULT_TEMP_STORE pragmas now work.
    • +
    • Code changes to compile cleanly using OpenWatcom.
    • +
    • Fix VDBE stack overflow problems with INSTEAD OF triggers and + NULLs in IN operators.
    • +
    • Add the global variable sqlite_temp_directory which if set defines the + directory in which temporary files are stored.
    • +
    • sqlite_interrupt() plays well with VACUUM.
    • +
    • Other minor bug fixes.
    • +} + +chng {2004 March 8 (2.8.13)} { +
    • Refactor parts of the code in order to make the code footprint + smaller. The code is now also a little bit faster.
    • +
    • sqlite_exec() is now implemented as a wrapper around sqlite_compile() + and sqlite_step().
    • +
    • The built-in min() and max() functions now honor the difference between + NUMERIC and TEXT datatypes. Formerly, min() and max() always assumed + their arguments were of type NUMERIC.
    • +
    • New HH:MM:SS modifier to the built-in date/time functions.
    • +
    • Experimental sqlite_last_statement_changes() API added. Fixed the + the last_insert_rowid() function so that it works correctly with + triggers.
    • +
    • Add functions prototypes for the database encryption API.
    • +
    • Fix several nuisance bugs.
    • +} + +chng {2004 February 8 (2.8.12)} { +
    • Fix a bug that will might corrupt the rollback journal if a power failure + or external program halt occurs in the middle of a COMMIT. The corrupt + journal can lead to database corruption when it is rolled back.
    • +
    • Reduce the size and increase the speed of various modules, especially + the virtual machine.
    • +
    • Allow "<expr> IN <table>" as a shorthand for + "<expr> IN (SELECT * FROM <table>".
    • +
    • Optimizations to the sqlite_mprintf() routine.
    • +
    • Make sure the MIN() and MAX() optimizations work within subqueries.
    • +} + +chng {2004 January 14 (2.8.11)} { +
    • Fix a bug in how the IN operator handles NULLs in subqueries. The bug + was introduced by the previous release.
    • +} + +chng {2004 January 13 (2.8.10)} { +
    • Fix a potential database corruption problem on Unix caused by the fact + that all posix advisory locks are cleared whenever you close() a file. + The work around it to embargo all close() calls while locks are + outstanding.
    • +
    • Performance enhancements on some corner cases of COUNT(*).
    • +
    • Make sure the in-memory backend response sanely if malloc() fails.
    • +
    • Allow sqlite_exec() to be called from within user-defined SQL + functions.
    • +
    • Improved accuracy of floating-point conversions using "long double".
    • +
    • Bug fixes in the experimental date/time functions.
    • +} + +chng {2004 January 5 (2.8.9)} { +
    • Fix a 32-bit integer overflow problem that could result in corrupt + indices in a database if large negative numbers (less than -2147483648) + were inserted into a indexed numeric column.
    • +
    • Fix a locking problem on multi-threaded Linux implementations.
    • +
    • Always use "." instead of "," as the decimal point even if the locale + requests ",".
    • +
    • Added UTC to localtime conversions to the experimental date/time + functions.
    • +
    • Bug fixes to date/time functions.
    • +} + +chng {2003 December 17 (2.8.8)} { +
    • Fix a critical bug introduced into 2.8.0 which could cause + database corruption.
    • +
    • Fix a problem with 3-way joins that do not use indices
    • +
    • The VACUUM command now works with the non-callback API
    • +
    • Improvements to the "PRAGMA integrity_check" command
    • +} + +chng {2003 December 4 (2.8.7)} { +
    • Added experimental sqlite_bind() and sqlite_reset() APIs.
    • +
    • If the name of the database is an empty string, open a new database + in a temporary file that is automatically deleted when the database + is closed.
    • +
    • Performance enhancements in the lemon-generated parser
    • +
    • Experimental date/time functions revised.
    • +
    • Disallow temporary indices on permanent tables.
    • +
    • Documentation updates and typo fixes
    • +
    • Added experimental sqlite_progress_handler() callback API
    • +
    • Removed support for the Oracle8 outer join syntax.
    • +
    • Allow GLOB and LIKE operators to work as functions.
    • +
    • Other minor documentation and makefile changes and bug fixes.
    • +} + +chng {2003 August 21 (2.8.6)} { +
    • Moved the CVS repository to www.sqlite.org
    • +
    • Update the NULL-handling documentation.
    • +
    • Experimental date/time functions added.
    • +
    • Bug fix: correctly evaluate a view of a view without segfaulting.
    • +
    • Bug fix: prevent database corruption if you dropped a + trigger that had the same name as a table.
    • +
    • Bug fix: allow a VACUUM (without segfaulting) on an empty + database after setting the EMPTY_RESULT_CALLBACKS pragma.
    • +
    • Bug fix: if an integer value will not fit in a 32-bit int, store it in + a double instead.
    • +
    • Bug fix: Make sure the journal file directory entry is committed to disk + before writing the database file.
    • +} + +chng {2003 July 22 (2.8.5)} { +
    • Make LIMIT work on a compound SELECT statement.
    • +
    • LIMIT 0 now shows no rows. Use LIMIT -1 to see all rows.
    • +
    • Correctly handle comparisons between an INTEGER PRIMARY KEY and + a floating point number.
    • +
    • Fix several important bugs in the new ATTACH and DETACH commands.
    • +
    • Updated the NULL-handling document.
    • +
    • Allow NULL arguments in sqlite_compile() and sqlite_step().
    • +
    • Many minor bug fixes
    • +} + +chng {2003 June 29 (2.8.4)} { +
    • Enhanced the "PRAGMA integrity_check" command to verify indices.
    • +
    • Added authorization hooks for the new ATTACH and DETACH commands.
    • +
    • Many documentation updates
    • +
    • Many minor bug fixes
    • +} + +chng {2003 June 4 (2.8.3)} { +
    • Fix a problem that will corrupt the indices on a table if you + do an INSERT OR REPLACE or an UPDATE OR REPLACE on a table that + contains an INTEGER PRIMARY KEY plus one or more indices.
    • +
    • Fix a bug in windows locking code so that locks work correctly + when simultaneously accessed by Win95 and WinNT systems.
    • +
    • Add the ability for INSERT and UPDATE statements to refer to the + "rowid" (or "_rowid_" or "oid") columns.
    • +
    • Other important bug fixes
    • +} + +chng {2003 May 17 (2.8.2)} { +
    • Fix a problem that will corrupt the database file if you drop a + table from the main database that has a TEMP index.
    • +} + +chng {2003 May 16 (2.8.1)} { +
    • Reactivated the VACUUM command that reclaims unused disk space in + a database file.
    • +
    • Added the ATTACH and DETACH commands to allow interacting with multiple + database files at the same time.
    • +
    • Added support for TEMP triggers and indices.
    • +
    • Added support for in-memory databases.
    • +
    • Removed the experimental sqlite_open_aux_file(). Its function is + subsumed in the new ATTACH command.
    • +
    • The precedence order for ON CONFLICT clauses was changed so that + ON CONFLICT clauses on BEGIN statements have a higher precedence than + ON CONFLICT clauses on constraints. +
    • Many, many bug fixes and compatibility enhancements.
    • +} + +chng {2003 Feb 16 (2.8.0)} { +
    • Modified the journal file format to make it more resistant to corruption + that can occur after an OS crash or power failure.
    • +
    • Added a new C/C++ API that does not use callback for returning data.
    • +} + +chng {2003 Jan 25 (2.7.6)} { +
    • Performance improvements. The library is now much faster.
    • +
    • Added the sqlite_set_authorizer() API. Formal documentation has + not been written - see the source code comments for instructions on + how to use this function.
    • +
    • Fix a bug in the GLOB operator that was preventing it from working + with upper-case letters.
    • +
    • Various minor bug fixes.
    • +} + +chng {2002 Dec 27 (2.7.5)} { +
    • Fix an uninitialized variable in pager.c which could (with a probability + of about 1 in 4 billion) result in a corrupted database.
    • +} + +chng {2002 Dec 17 (2.7.4)} { +
    • Database files can now grow to be up to 2^41 bytes. The old limit + was 2^31 bytes.
    • +
    • The optimizer will now scan tables in the reverse if doing so will + satisfy an ORDER BY ... DESC clause.
    • +
    • The full pathname of the database file is now remembered even if + a relative path is passed into sqlite_open(). This allows + the library to continue operating correctly after a chdir().
    • +
    • Speed improvements in the VDBE.
    • +
    • Lots of little bug fixes.
    • +} + +chng {2002 Oct 30 (2.7.3)} { +
    • Various compiler compatibility fixes.
    • +
    • Fix a bug in the "expr IN ()" operator.
    • +
    • Accept column names in parentheses.
    • +
    • Fix a problem with string memory management in the VDBE
    • +
    • Fix a bug in the "table_info" pragma"
    • +
    • Export the sqlite_function_type() API function in the Windows DLL
    • +
    • Fix locking behavior under windows
    • +
    • Fix a bug in LEFT OUTER JOIN
    • +} + +chng {2002 Sep 25 (2.7.2)} { +
    • Prevent journal file overflows on huge transactions.
    • +
    • Fix a memory leak that occurred when sqlite_open() failed.
    • +
    • Honor the ORDER BY and LIMIT clause of a SELECT even if the + result set is used for an INSERT.
    • +
    • Do not put write locks on the file used to hold TEMP tables.
    • +
    • Added documentation on SELECT DISTINCT and on how SQLite handles NULLs.
    • +
    • Fix a problem that was causing poor performance when many thousands + of SQL statements were executed by a single sqlite_exec() call.
    • +} + +chng {2002 Aug 31 (2.7.1)} { +
    • Fix a bug in the ORDER BY logic that was introduced in version 2.7.0
    • +
    • C-style comments are now accepted by the tokenizer.
    • +
    • INSERT runs a little faster when the source is a SELECT statement.
    • +} + +chng {2002 Aug 25 (2.7.0)} { +
    • Make a distinction between numeric and text values when sorting. + Text values sort according to memcmp(). Numeric values sort in + numeric order.
    • +
    • Allow multiple simultaneous readers under windows by simulating + the reader/writers locks that are missing from Win95/98/ME.
    • +
    • An error is now returned when trying to start a transaction if + another transaction is already active.
    • +} + +chng {2002 Aug 12 (2.6.3)} { +
    • Add the ability to read both little-endian and big-endian databases. + So database created under SunOS or MacOSX can be read and written + under Linux or Windows and vice versa.
    • +
    • Convert to the new website: http://www.sqlite.org/
    • +
    • Allow transactions to span Linux Threads
    • +
    • Bug fix in the processing of the ORDER BY clause for GROUP BY queries
    • +} + +chng {2002 Jly 30 (2.6.2)} { +
    • Text files read by the COPY command can now have line terminators + of LF, CRLF, or CR.
    • +
    • SQLITE_BUSY is handled correctly if encountered during database + initialization.
    • +
    • Fix to UPDATE triggers on TEMP tables.
    • +
    • Documentation updates.
    • +} + +chng {2002 Jly 19 (2.6.1)} { +
    • Include a static string in the library that responds to the RCS + "ident" command and which contains the library version number.
    • +
    • Fix an assertion failure that occurred when deleting all rows of + a table with the "count_changes" pragma turned on.
    • +
    • Better error reporting when problems occur during the automatic + 2.5.6 to 2.6.0 database format upgrade.
    • +} + +chng {2002 Jly 17 (2.6.0)} { +
    • Change the format of indices to correct a design flaw the originated + with version 2.1.0. *** This is an incompatible + file format change *** When version 2.6.0 or later of the + library attempts to open a database file created by version 2.5.6 or + earlier, it will automatically and irreversibly convert the file format. + Make backup copies of older database files before opening them with + version 2.6.0 of the library. +
    • +} + +chng {2002 Jly 7 (2.5.6)} { +
    • Fix more problems with rollback. Enhance the test suite to exercise + the rollback logic extensively in order to prevent any future problems. +
    • +} + +chng {2002 Jly 6 (2.5.5)} { +
    • Fix a bug which could cause database corruption during a rollback. + This bugs was introduced in version 2.4.0 by the freelist + optimization of checking [410].
    • +
    • Fix a bug in aggregate functions for VIEWs.
    • +
    • Other minor changes and enhancements.
    • +} + +chng {2002 Jly 1 (2.5.4)} { +
    • Make the "AS" keyword optional again.
    • +
    • The datatype of columns now appear in the 4th argument to the + callback.
    • +
    • Added the sqlite_open_aux_file() API, though it is still + mostly undocumented and untested.
    • +
    • Added additional test cases and fixed a few bugs that those + test cases found.
    • +} + +chng {2002 Jun 24 (2.5.3)} { +
    • Bug fix: Database corruption can occur due to the optimization + that was introduced in version 2.4.0 (check-in [410]). The problem + should now be fixed. The use of versions 2.4.0 through 2.5.2 is + not recommended.
    • +} + +chng {2002 Jun 24 (2.5.2)} { +
    • Added the new SQLITE_TEMP_MASTER table which records the schema + for temporary tables in the same way that SQLITE_MASTER does for + persistent tables.
    • +
    • Added an optimization to UNION ALL
    • +
    • Fixed a bug in the processing of LEFT OUTER JOIN
    • +
    • The LIMIT clause now works on subselects
    • +
    • ORDER BY works on subselects
    • +
    • There is a new TypeOf() function used to determine if an expression + is numeric or text.
    • +
    • Autoincrement now works for INSERT from a SELECT.
    • +} + +chng {2002 Jun 19 (2.5.1)} { +
    • The query optimizer now attempts to implement the ORDER BY clause + using an index. Sorting is still used if not suitable index is + available.
    • +} + +chng {2002 Jun 17 (2.5.0)} { +
    • Added support for row triggers.
    • +
    • Added SQL-92 compliant handling of NULLs.
    • +
    • Add support for the full SQL-92 join syntax and LEFT OUTER JOINs.
    • +
    • Double-quoted strings interpreted as column names not text literals.
    • +
    • Parse (but do not implement) foreign keys.
    • +
    • Performance improvements in the parser, pager, and WHERE clause code + generator.
    • +
    • Make the LIMIT clause work on subqueries. (ORDER BY still does not + work, though.)
    • +
    • Added the "%Q" expansion to sqlite_*_printf().
    • +
    • Bug fixes too numerous to mention (see the change log).
    • +} + +chng {2002 May 09 (2.4.12)} { +
    • Added logic to detect when the library API routines are called out + of sequence.
    • +} + +chng {2002 May 08 (2.4.11)} { +
    • Bug fix: Column names in the result set were not being generated + correctly for some (rather complex) VIEWs. This could cause a + segfault under certain circumstances.
    • +} + +chng {2002 May 02 (2.4.10)} { +
    • Bug fix: Generate correct column headers when a compound SELECT is used + as a subquery.
    • +
    • Added the sqlite_encode_binary() and sqlite_decode_binary() functions to + the source tree. But they are not yet linked into the library.
    • +
    • Documentation updates.
    • +
    • Export the sqlite_changes() function from windows DLLs.
    • +
    • Bug fix: Do not attempt the subquery flattening optimization on queries + that lack a FROM clause. To do so causes a segfault.
    • +} + +chng {2002 Apr 21 (2.4.9)} { +
    • Fix a bug that was causing the precompiled binary of SQLITE.EXE to + report "out of memory" under Windows 98.
    • +} + +chng {2002 Apr 20 (2.4.8)} { +
    • Make sure VIEWs are created after their corresponding TABLEs in the + output of the .dump command in the shell.
    • +
    • Speed improvements: Do not do synchronous updates on TEMP tables.
    • +
    • Many improvements and enhancements to the shell.
    • +
    • Make the GLOB and LIKE operators functions that can be overridden + by a programmer. This allows, for example, the LIKE operator to + be changed to be case sensitive.
    • +} + +chng {2002 Apr 06 (2.4.7)} { +
    • Add the ability to put TABLE.* in the column list of a + SELECT statement.
    • +
    • Permit SELECT statements without a FROM clause.
    • +
    • Added the last_insert_rowid() SQL function.
    • +
    • Do not count rows where the IGNORE conflict resolution occurs in + the row count.
    • +
    • Make sure functions expressions in the VALUES clause of an INSERT + are correct.
    • +
    • Added the sqlite_changes() API function to return the number + of row that changed in the most recent operation.
    • +} + +chng {2002 Apr 02 (2.4.6)} { +
    • Bug fix: Correctly handle terms in the WHERE clause of a join that + do not contain a comparison operator.
    • +} + +chng {2002 Apr 01 (2.4.5)} { +
    • Bug fix: Correctly handle functions that appear in the WHERE clause + of a join.
    • +
    • When the PRAGMA vdbe_trace=ON is set, correctly print the P3 operand + value when it is a pointer to a structure rather than a pointer to + a string.
    • +
    • When inserting an explicit NULL into an INTEGER PRIMARY KEY, convert + the NULL value into a unique key automatically.
    • +} + +chng {2002 Mar 24 (2.4.4)} { +
    • Allow "VIEW" to be a column name
    • +
    • Added support for CASE expressions (patch from Dan Kennedy)
    • +
    • Added RPMS to the delivery (patches from Doug Henry)
    • +
    • Fix typos in the documentation
    • +
    • Cut over configuration management to a new CVS repository with + its own CVSTrac bug tracking system.
    • +} + +chng {2002 Mar 22 (2.4.3)} { +
    • Fix a bug in SELECT that occurs when a compound SELECT is used as a + subquery in the FROM of a SELECT.
    • +
    • The sqlite_get_table() function now returns an error if you + give it two or more SELECTs that return different numbers of columns.
    • +} + +chng {2002 Mar 14 (2.4.2)} { +
    • Bug fix: Fix an assertion failure that occurred when ROWID was a column + in a SELECT statement on a view.
    • +
    • Bug fix: Fix an uninitialized variable in the VDBE that would could an + assert failure.
    • +
    • Make the os.h header file more robust in detecting when the compile is + for windows and when it is for unix.
    • +} + +chng {2002 Mar 13 (2.4.1)} { +
    • Using an unnamed subquery in a FROM clause would cause a segfault.
    • +
    • The parser now insists on seeing a semicolon or the end of input before + executing a statement. This avoids an accidental disaster if the + WHERE keyword is misspelled in an UPDATE or DELETE statement.
    • +} + + +chng {2002 Mar 10 (2.4.0)} { +
    • Change the name of the sanity_check PRAGMA to integrity_check + and make it available in all compiles.
    • +
    • SELECT min() or max() of an indexed column with no WHERE or GROUP BY + clause is handled as a special case which avoids a complete table scan.
    • +
    • Automatically generated ROWIDs are now sequential.
    • +
    • Do not allow dot-commands of the command-line shell to occur in the + middle of a real SQL command.
    • +
    • Modifications to the "lemon" parser generator so that the parser tables + are 4 times smaller.
    • +
    • Added support for user-defined functions implemented in C.
    • +
    • Added support for new functions: coalesce(), lower(), + upper(), and random() +
    • Added support for VIEWs.
    • +
    • Added the subquery flattening optimizer.
    • +
    • Modified the B-Tree and Pager modules so that disk pages that do not + contain real data (free pages) are not journaled and are not + written from memory back to the disk when they change. This does not + impact database integrity, since the + pages contain no real data, but it does make large INSERT operations + about 2.5 times faster and large DELETEs about 5 times faster.
    • +
    • Made the CACHE_SIZE pragma persistent
    • +
    • Added the SYNCHRONOUS pragma
    • +
    • Fixed a bug that was causing updates to fail inside of transactions when + the database contained a temporary table.
    • +} + +chng {2002 Feb 18 (2.3.3)} { +
    • Allow identifiers to be quoted in square brackets, for compatibility + with MS-Access.
    • +
    • Added support for sub-queries in the FROM clause of a SELECT.
    • +
    • More efficient implementation of sqliteFileExists() under Windows. + (by Joel Luscy)
    • +
    • The VALUES clause of an INSERT can now contain expressions, including + scalar SELECT clauses.
    • +
    • Added support for CREATE TABLE AS SELECT
    • +
    • Bug fix: Creating and dropping a table all within a single + transaction was not working.
    • +} + +chng {2002 Feb 14 (2.3.2)} { +
    • Bug fix: There was an incorrect assert() in pager.c. The real code was + all correct (as far as is known) so everything should work OK if you + compile with -DNDEBUG=1. When asserts are not disabled, there + could be a fault.
    • +} + +chng {2002 Feb 13 (2.3.1)} { +
    • Bug fix: An assertion was failing if "PRAGMA full_column_names=ON;" was + set and you did a query that used a rowid, like this: + "SELECT rowid, * FROM ...".
    • +} + +chng {2002 Jan 30 (2.3.0)} { +
    • Fix a serious bug in the INSERT command which was causing data to go + into the wrong columns if the data source was a SELECT and the INSERT + clauses specified its columns in some order other than the default.
    • +
    • Added the ability to resolve constraint conflicts is ways other than + an abort and rollback. See the documentation on the "ON CONFLICT" + clause for details.
    • +
    • Temporary files are now automatically deleted by the operating system + when closed. There are no more dangling temporary files on a program + crash. (If the OS crashes, fsck will delete the file after reboot + under Unix. I do not know what happens under Windows.)
    • +
    • NOT NULL constraints are honored.
    • +
    • The COPY command puts NULLs in columns whose data is '\N'.
    • +
    • In the COPY command, backslash can now be used to escape a newline.
    • +
    • Added the SANITY_CHECK pragma.
    • +} + +chng {2002 Jan 28 (2.2.5)} { +
    • Important bug fix: the IN operator was not working if either the + left-hand or right-hand side was derived from an INTEGER PRIMARY KEY.
    • +
    • Do not escape the backslash '\' character in the output of the + sqlite command-line access program.
    • +} + +chng {2002 Jan 22 (2.2.4)} { +
    • The label to the right of an AS in the column list of a SELECT can now + be used as part of an expression in the WHERE, ORDER BY, GROUP BY, and/or + HAVING clauses.
    • +
    • Fix a bug in the -separator command-line option to the sqlite + command.
    • +
    • Fix a problem with the sort order when comparing upper-case strings against + characters greater than 'Z' but less than 'a'.
    • +
    • Report an error if an ORDER BY or GROUP BY expression is constant.
    • +} + +chng {2002 Jan 16 (2.2.3)} { +
    • Fix warning messages in VC++ 7.0. (Patches from nicolas352001)
    • +
    • Make the library thread-safe. (The code is there and appears to work + but has not been stressed.)
    • +
    • Added the new sqlite_last_insert_rowid() API function.
    • +} + +chng {2002 Jan 13 (2.2.2)} { +
    • Bug fix: An assertion was failing when a temporary table with an index + had the same name as a permanent table created by a separate process.
    • +
    • Bug fix: Updates to tables containing an INTEGER PRIMARY KEY and an + index could fail.
    • +} + +chng {2002 Jan 9 (2.2.1)} { +
    • Bug fix: An attempt to delete a single row of a table with a WHERE + clause of "ROWID=x" when no such rowid exists was causing an error.
    • +
    • Bug fix: Passing in a NULL as the 3rd parameter to sqlite_open() + would sometimes cause a coredump.
    • +
    • Bug fix: DROP TABLE followed by a CREATE TABLE with the same name all + within a single transaction was causing a coredump.
    • +
    • Makefile updates from A. Rottmann
    • +} + +chng {2001 Dec 22 (2.2.0)} { +
    • Columns of type INTEGER PRIMARY KEY are actually used as the primary + key in underlying B-Tree representation of the table.
    • +
    • Several obscure, unrelated bugs were found and fixed while + implemented the integer primary key change of the previous bullet.
    • +
    • Added the ability to specify "*" as part of a larger column list in + the result section of a SELECT statement. For example: + "SELECT rowid, * FROM table1;".
    • +
    • Updates to comments and documentation.
    • +} + +chng {2001 Dec 14 (2.1.7)} { +
    • Fix a bug in CREATE TEMPORARY TABLE which was causing the + table to be initially allocated in the main database file instead + of in the separate temporary file. This bug could cause the library + to suffer an assertion failure and it could cause "page leaks" in the + main database file. +
    • Fix a bug in the b-tree subsystem that could sometimes cause the first + row of a table to be repeated during a database scan.
    • +} + +chng {2001 Dec 14 (2.1.6)} { +
    • Fix the locking mechanism yet again to prevent + sqlite_exec() from returning SQLITE_PROTOCOL + unnecessarily. This time the bug was a race condition in + the locking code. This change effects both POSIX and Windows users.
    • +} + +chng {2001 Dec 6 (2.1.5)} { +
    • Fix for another problem (unrelated to the one fixed in 2.1.4) + that sometimes causes sqlite_exec() to return SQLITE_PROTOCOL + unnecessarily. This time the bug was + in the POSIX locking code and should not effect windows users.
    • +} + +chng {2001 Dec 4 (2.1.4)} { +
    • Sometimes sqlite_exec() would return SQLITE_PROTOCOL when it + should have returned SQLITE_BUSY.
    • +
    • The fix to the previous bug uncovered a deadlock which was also + fixed.
    • +
    • Add the ability to put a single .command in the second argument + of the sqlite shell
    • +
    • Updates to the FAQ
    • +} + +chng {2001 Nov 23 (2.1.3)} { +
    • Fix the behavior of comparison operators + (ex: "<", "==", etc.) + so that they are consistent with the order of entries in an index.
    • +
    • Correct handling of integers in SQL expressions that are larger than + what can be represented by the machine integer.
    • +} + +chng {2001 Nov 22 (2.1.2)} { +
    • Changes to support 64-bit architectures.
    • +
    • Fix a bug in the locking protocol.
    • +
    • Fix a bug that could (rarely) cause the database to become + unreadable after a DROP TABLE due to corruption to the SQLITE_MASTER + table.
    • +
    • Change the code so that version 2.1.1 databases that were rendered + unreadable by the above bug can be read by this version of + the library even though the SQLITE_MASTER table is (slightly) + corrupted.
    • +} + +chng {2001 Nov 13 (2.1.1)} { +
    • Bug fix: Sometimes arbitrary strings were passed to the callback + function when the actual value of a column was NULL.
    • +} + +chng {2001 Nov 12 (2.1.0)} { +
    • Change the format of data records so that records up to 16MB in size + can be stored.
    • +
    • Change the format of indices to allow for better query optimization.
    • +
    • Implement the "LIMIT ... OFFSET ..." clause on SELECT statements.
    • +} + +chng {2001 Nov 3 (2.0.8)} { +
    • Made selected parameters in API functions const. This should + be fully backwards compatible.
    • +
    • Documentation updates
    • +
    • Simplify the design of the VDBE by restricting the number of sorters + and lists to 1. + In practice, no more than one sorter and one list was ever used anyhow. +
    • +} + +chng {2001 Oct 21 (2.0.7)} { +
    • Any UTF-8 character or ISO8859 character can be used as part of + an identifier.
    • +
    • Patches from Christian Werner to improve ODBC compatibility and to + fix a bug in the round() function.
    • +
    • Plug some memory leaks that used to occur if malloc() failed. + We have been and continue to be memory leak free as long as + malloc() works.
    • +
    • Changes to some test scripts so that they work on Windows in + addition to Unix.
    • +} + +chng {2001 Oct 19 (2.0.6)} { +
    • Added the EMPTY_RESULT_CALLBACKS pragma
    • +
    • Support for UTF-8 and ISO8859 characters in column and table names.
    • +
    • Bug fix: Compute correct table names when the FULL_COLUMN_NAMES pragma + is turned on.
    • +} + +chng {2001 Oct 14 (2.0.5)} { +
    • Added the COUNT_CHANGES pragma.
    • +
    • Changes to the FULL_COLUMN_NAMES pragma to help out the ODBC driver.
    • +
    • Bug fix: "SELECT count(*)" was returning NULL for empty tables. + Now it returns 0.
    • +} + +chng {2001 Oct 13 (2.0.4)} { +
    • Bug fix: an obscure and relatively harmless bug was causing one of + the tests to fail when gcc optimizations are turned on. This release + fixes the problem.
    • +} + +chng {2001 Oct 13 (2.0.3)} { +
    • Bug fix: the sqlite_busy_timeout() function was delaying 1000 + times too long before failing.
    • +
    • Bug fix: an assertion was failing if the disk holding the database + file became full or stopped accepting writes for some other reason. + New tests were added to detect similar problems in the future.
    • +
    • Added new operators: & (bitwise-and) + | (bitwise-or), ~ (ones-complement), + << (shift left), >> (shift right).
    • +
    • Added new functions: round() and abs().
    • +} + +chng {2001 Oct 9 (2.0.2)} { +
    • Fix two bugs in the locking protocol. (One was masking the other.)
    • +
    • Removed some unused "#include " that were causing problems + for VC++.
    • +
    • Fixed sqlite.h so that it is usable from C++
    • +
    • Added the FULL_COLUMN_NAMES pragma. When set to "ON", the names of + columns are reported back as TABLE.COLUMN instead of just COLUMN.
    • +
    • Added the TABLE_INFO() and INDEX_INFO() pragmas to help support the + ODBC interface.
    • +
    • Added support for TEMPORARY tables and indices.
    • +} + +chng {2001 Oct 2 (2.0.1)} { +
    • Remove some C++ style comments from btree.c so that it will compile + using compilers other than gcc.
    • +
    • The ".dump" output from the shell does not work if there are embedded + newlines anywhere in the data. This is an old bug that was carried + forward from version 1.0. To fix it, the ".dump" output no longer + uses the COPY command. It instead generates INSERT statements.
    • +
    • Extend the expression syntax to support "expr NOT NULL" (with a + space between the "NOT" and the "NULL") in addition to "expr NOTNULL" + (with no space).
    • +} + +chng {2001 Sep 28 (2.0.0)} { +
    • Automatically build binaries for Linux and Windows and put them on + the website.
    • +} + +chng {2001 Sep 28 (2.0-alpha-4)} { +
    • Incorporate makefile patches from A. Rottmann to use LIBTOOL
    • +} + +chng {2001 Sep 27 (2.0-alpha-3)} { +
    • SQLite now honors the UNIQUE keyword in CREATE UNIQUE INDEX. Primary + keys are required to be unique.
    • +
    • File format changed back to what it was for alpha-1
    • +
    • Fixes to the rollback and locking behavior
    • +} + +chng {2001 Sep 20 (2.0-alpha-2)} { +
    • Initial release of version 2.0. The idea of renaming the library + to "SQLus" was abandoned in favor of keeping the "SQLite" name and + bumping the major version number.
    • +
    • The pager and btree subsystems added back. They are now the only + available backend.
    • +
    • The Dbbe abstraction and the GDBM and memory drivers were removed.
    • +
    • Copyright on all code was disclaimed. The library is now in the + public domain.
    • +} + +chng {2001 Jul 23 (1.0.32)} { +
    • Pager and btree subsystems removed. These will be used in a follow-on + SQL server library named "SQLus".
    • +
    • Add the ability to use quoted strings as table and column names in + expressions.
    • +} + +chng {2001 Apr 14 (1.0.31)} { +
    • Pager subsystem added but not yet used.
    • +
    • More robust handling of out-of-memory errors.
    • +
    • New tests added to the test suite.
    • +} + +chng {2001 Apr 6 (1.0.30)} { +
    • Remove the sqlite_encoding TCL variable that was introduced + in the previous version.
    • +
    • Add options -encoding and -tcl-uses-utf to the + sqlite TCL command.
    • +
    • Add tests to make sure that tclsqlite was compiled using Tcl header + files and libraries that match.
    • +} + +chng {2001 Apr 5 (1.0.29)} { +
    • The library now assumes data is stored as UTF-8 if the --enable-utf8 + option is given to configure. The default behavior is to assume + iso8859-x, as it has always done. This only makes a difference for + LIKE and GLOB operators and the LENGTH and SUBSTR functions.
    • +
    • If the library is not configured for UTF-8 and the Tcl library + is one of the newer ones that uses UTF-8 internally, + then a conversion from UTF-8 to iso8859 and + back again is done inside the TCL interface.
    • +} + +chng {2001 Apr 4 (1.0.28)} { +
    • Added limited support for transactions. At this point, transactions + will do table locking on the GDBM backend. There is no support (yet) + for rollback or atomic commit.
    • +
    • Added special column names ROWID, OID, and _ROWID_ that refer to the + unique random integer key associated with every row of every table.
    • +
    • Additional tests added to the regression suite to cover the new ROWID + feature and the TCL interface bugs mentioned below.
    • +
    • Changes to the "lemon" parser generator to help it work better when + compiled using MSVC.
    • +
    • Bug fixes in the TCL interface identified by Oleg Oleinick.
    • +} + +chng {2001 Mar 20 (1.0.27)} { +
    • When doing DELETE and UPDATE, the library used to write the record + numbers of records to be deleted or updated into a temporary file. + This is changed so that the record numbers are held in memory.
    • +
    • The DELETE command without a WHERE clause just removes the database + files from the disk, rather than going through and deleting record + by record.
    • +} + +chng {2001 Mar 20 (1.0.26)} { +
    • A serious bug fixed on Windows. Windows users should upgrade. + No impact to Unix.
    • +} + +chng {2001 Mar 15 (1.0.25)} { +
    • Modify the test scripts to identify tests that depend on system + load and processor speed and + to warn the user that a failure of one of those (rare) tests does + not necessarily mean the library is malfunctioning. No changes to + code. +
    • +} + +chng {2001 Mar 14 (1.0.24)} { +
    • Fix a bug which was causing + the UPDATE command to fail on systems where "malloc(0)" returns + NULL. The problem does not appear Windows, Linux, or HPUX but does + cause the library to fail on QNX. +
    • +} + +chng {2001 Feb 19 (1.0.23)} { +
    • An unrelated (and minor) bug from Mark Muranwski fixed. The algorithm + for figuring out where to put temporary files for a "memory:" database + was not working quite right. +
    • +} + +chng {2001 Feb 19 (1.0.22)} { +
    • The previous fix was not quite right. This one seems to work better. +
    • +} + +chng {2001 Feb 19 (1.0.21)} { +
    • The UPDATE statement was not working when the WHERE clause contained + some terms that could be satisfied using indices and other terms that + could not. Fixed.
    • +} + +chng {2001 Feb 11 (1.0.20)} { +
    • Merge development changes into the main trunk. Future work toward + using a BTree file structure will use a separate CVS source tree. This + CVS tree will continue to support the GDBM version of SQLite only.
    • +} + +chng {2001 Feb 6 (1.0.19)} { +
    • Fix a strange (but valid) C declaration that was causing problems + for QNX. No logical changes.
    • +} + +chng {2001 Jan 4 (1.0.18)} { +
    • Print the offending SQL statement when an error occurs.
    • +
    • Do not require commas between constraints in CREATE TABLE statements.
    • +
    • Added the "-echo" option to the shell.
    • +
    • Changes to comments.
    • +} + +chng {2000 Dec 10 (1.0.17)} { +
    • Rewrote sqlite_complete() to make it faster.
    • +
    • Minor tweaks to other code to make it run a little faster.
    • +
    • Added new tests for sqlite_complete() and for memory leaks.
    • +} + +chng {2000 Dec 4 (1.0.16)} { +
    • Documentation updates. Mostly fixing of typos and spelling errors.
    • +} + +chng {2000 Oct 23 (1.0.15)} { +
    • Documentation updates
    • +
    • Some sanity checking code was removed from the inner loop of vdbe.c + to help the library to run a little faster. The code is only + removed if you compile with -DNDEBUG.
    • +} + +chng {2000 Oct 19 (1.0.14)} { +
    • Added a "memory:" backend driver that stores its database in an + in-memory hash table.
    • +} + +chng {2000 Oct 18 (1.0.13)} { +
    • Break out the GDBM driver into a separate file in anticipation + of adding new drivers.
    • +
    • Allow the name of a database to be prefixed by the driver type. + For now, the only driver type is "gdbm:".
    • +} + +chng {2000 Oct 16 (1.0.12)} { +
    • Fixed an off-by-one error that was causing a coredump in + the '%q' format directive of the new + sqlite_..._printf() routines.
    • +
    • Added the sqlite_interrupt() interface.
    • +
    • In the shell, sqlite_interrupt() is invoked when the + user presses Control-C
    • +
    • Fixed some instances where sqlite_exec() was + returning the wrong error code.
    • +} + +chng {2000 Oct 11 (1.0.10)} { +
    • Added notes on how to compile for Windows95/98.
    • +
    • Removed a few variables that were not being used. Etc.
    • +} + +chng {2000 Oct 8 (1.0.9)} { +
    • Added the sqlite_..._printf() interface routines.
    • +
    • Modified the sqlite shell program to use the new interface + routines.
    • +
    • Modified the sqlite shell program to print the schema for + the built-in SQLITE_MASTER table, if explicitly requested.
    • +} + +chng {2000 Sep 30 (1.0.8)} { +
    • Begin writing documentation on the TCL interface.
    • +} + +chng {2000 Sep 29 (Not Released)} { +
    • Added the sqlite_get_table() API
    • +
    • Updated the documentation due to the above change.
    • +
    • Modified the sqlite shell to make use of the new + sqlite_get_table() API in order to print a list of tables + in multiple columns, similar to the way "ls" prints filenames.
    • +
    • Modified the sqlite shell to print a semicolon at the + end of each CREATE statement in the output of the ".schema" command.
    • +} + +chng {2000 Sep 21 (Not Released)} { +
    • Change the tclsqlite "eval" method to return a list of results if + no callback script is specified.
    • +
    • Change tclsqlite.c to use the Tcl_Obj interface
    • +
    • Add tclsqlite.c to the libsqlite.a library
    • +} + +chng {2000 Sep 13 (Version 1.0.5)} { +
    • Changed the print format for floating point values from "%g" to "%.15g". +
    • +
    • Changed the comparison function so that numbers in exponential notation + (ex: 1.234e+05) sort in numerical order.
    • +} + +chng {2000 Aug 28 (Version 1.0.4)} { +
    • Added functions length() and substr().
    • +
    • Fix a bug in the sqlite shell program that was causing + a coredump when the output mode was "column" and the first row + of data contained a NULL.
    • +} + +chng {2000 Aug 22 (Version 1.0.3)} { +
    • In the sqlite shell, print the "Database opened READ ONLY" message + to stderr instead of stdout.
    • +
    • In the sqlite shell, now print the version number on initial startup.
    • +
    • Add the sqlite_version[] string constant to the library
    • +
    • Makefile updates
    • +
    • Bug fix: incorrect VDBE code was being generated for the following + circumstance: a query on an indexed table containing a WHERE clause with + an IN operator that had a subquery on its right-hand side.
    • +} + +chng {2000 Aug 18 (Version 1.0.1)} { +
    • Fix a bug in the configure script.
    • +
    • Minor revisions to the website.
    • +} + +chng {2000 Aug 17 (Version 1.0)} { +
    • Change the sqlite program so that it can read + databases for which it lacks write permission. (It used to + refuse all access if it could not write.)
    • +} + +chng {2000 Aug 9} { +
    • Treat carriage returns as white space.
    • +} + +chng {2000 Aug 8} { +
    • Added pattern matching to the ".table" command in the "sqlite" +command shell.
    • +} + +chng {2000 Aug 4} { +
    • Documentation updates
    • +
    • Added "busy" and "timeout" methods to the Tcl interface
    • +} + +chng {2000 Aug 3} { +
    • File format version number was being stored in sqlite_master.tcl + multiple times. This was harmless, but unnecessary. It is now fixed.
    • +} + +chng {2000 Aug 2} { +
    • The file format for indices was changed slightly in order to work + around an inefficiency that can sometimes come up with GDBM when + there are large indices having many entries with the same key. + ** Incompatible Change **
    • +} + +chng {2000 Aug 1} { +
    • The parser's stack was overflowing on a very long UPDATE statement. + This is now fixed.
    • +} + +chng {2000 July 31} { +
    • Finish the VDBE tutorial.
    • +
    • Added documentation on compiling to WindowsNT.
    • +
    • Fix a configuration program for WindowsNT.
    • +
    • Fix a configuration problem for HPUX.
    • +} + +chng {2000 July 29} { +
    • Better labels on column names of the result.
    • +} + +chng {2000 July 28} { +
    • Added the sqlite_busy_handler() + and sqlite_busy_timeout() interface.
    • +} + +chng {2000 June 23} { +
    • Begin writing the VDBE tutorial.
    • +} + +chng {2000 June 21} { +
    • Clean up comments and variable names. Changes to documentation. + No functional changes to the code.
    • +} + +chng {2000 June 19} { +
    • Column names in UPDATE statements were case sensitive. + This mistake has now been fixed.
    • +} + +chng {2000 June 16} { +
    • Added the concatenate string operator (||)
    • +} + +chng {2000 June 12} { +
    • Added the fcnt() function to the SQL interpreter. The fcnt() function + returns the number of database "Fetch" operations that have occurred. + This function is designed for use in test scripts to verify that + queries are efficient and appropriately optimized. Fcnt() has no other + useful purpose, as far as I know.
    • +
    • Added a bunch more tests that take advantage of the new fcnt() function. + The new tests did not uncover any new problems.
    • +} + +chng {2000 June 8} { +
    • Added lots of new test cases
    • +
    • Fix a few bugs discovered while adding test cases
    • +
    • Begin adding lots of new documentation
    • +} + +chng {2000 June 6} { +
    • Added compound select operators: UNION, UNION ALL, +INTERSECT, and EXCEPT
    • +
    • Added support for using (SELECT ...) within expressions
    • +
    • Added support for IN and BETWEEN operators
    • +
    • Added support for GROUP BY and HAVING
    • +
    • NULL values are now reported to the callback as a NULL pointer + rather than an empty string.
    • +} + +chng {2000 June 3} { +
    • Added support for default values on columns of a table.
    • +
    • Improved test coverage. Fixed a few obscure bugs found by the +improved tests.
    • +} + +chng {2000 June 2} { +
    • All database files to be modified by an UPDATE, INSERT or DELETE are +now locked before any changes are made to any files. +This makes it safe (I think) to access +the same database simultaneously from multiple processes.
    • +
    • The code appears stable so we are now calling it "beta".
    • +} + +chng {2000 June 1} { +
    • Better support for file locking so that two or more processes +(or threads) +can access the same database simultaneously. More work needed in +this area, though.
    • +} + +chng {2000 May 31} { +
    • Added support for aggregate functions (Ex: COUNT(*), MIN(...)) +to the SELECT statement.
    • +
    • Added support for SELECT DISTINCT ...
    • +} + +chng {2000 May 30} { +
    • Added the LIKE operator.
    • +
    • Added a GLOB operator: similar to LIKE +but it uses Unix shell globbing wildcards instead of the '%' +and '_' wildcards of SQL.
    • +
    • Added the COPY command patterned after +PostgreSQL so that SQLite +can now read the output of the pg_dump database dump utility +of PostgreSQL.
    • +
    • Added a VACUUM command that calls the +gdbm_reorganize() function on the underlying database +files.
    • +
    • And many, many bug fixes...
    • +} + +chng {2000 May 29} { +
    • Initial Public Release of Alpha code
    • +} + +puts { + +} + ADDED pages/companiesusing.in Index: pages/companiesusing.in ================================================================== --- /dev/null +++ pages/companiesusing.in @@ -0,0 +1,113 @@ +Well-Known Users Of SQLite + +

      Well-Known Users of SQLite:

      + +

      +There are many companies using SQLite in their products. Exactly how +many is unknown. SQLite is in the public domain and so many companies use +it in their products without ever telling us. But here are a few +examples of well-known companies and/or software projects which we +do know are using SQLite: +

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Adobe uses SQLite as the application file format for their Lightroom product. +This is publicly acknowledged on the Lightroom/About_Lightroom menu popup. +Adobe has also announced that their AIR project will include SQLite. It +is reported that Acrobat Reader also uses SQLite. +
+Apple uses SQLite for many functions within Mac OS X, including +Apple Mail, Safari, and Aperture. +
      +The Firefox Web Browser from Mozilla has recently been updated +to use SQLite to store all kinds of history information. +SQLite is replacing Mork. +
+We believe that General Electric uses SQLite in some product or +another because they twice wrote to the SQLite developers requesting +the US Export Control Number for SQLite. So presumably GE is using +SQLite in something that they are exporting. But nobody +(outside of GE) seems to know what that might be. +
+Google uses SQLite in their Desktop for Mac and in Google Gears. +The Google Gears project allows web applications to work offline, +and uses SQLite for the offline data storage. +
      +McAfee appears to use SQLite in its antivirus programs. +Mentioned here and implied here +
+It can be inferred from traffic on the SQLite mailing list that at least one group within Microsoft is using SQLite in the development of a game program. No word yet if this game has actually been released or if they are still using SQLite. +
      +The Monotone configuration management system stores an entire project history in an SQLite database. Each file is a separate BLOB. +
      +It is reported that Philips MP3 Players use SQLite to store metadata about the music they hold. Apparently, if you plug a Philips MP3 player into your USB port, you can see the SQLite database file there in plain sight. +
      +The popular PHP programming language comes with both SQLite2 and SQLite3 built in. +
+SQLite comes bundled with the Python programming language since Python 2.5. +
      +The REALbasic programming environment comes bundled with an enhanced version of SQLite that supports AES encryption. +
      +There are multiple sightings of SQLite in the Skype client for MacOSX and Windows. +
+Solaris 10 uses SQLite as the storage format for its Service Management Facility. Thus, Sun has essentially replaced the traditional unix /etc/inittab file with an SQLite database. +
      +SQLite is an integral part of Symbian's operating system commonly found on high-end cellphones. +
      +A representative of Toshiba wrote to the SQLite developers and requested the US Export Control Number for SQLite. We infer from this that Toshiba is exporting something from the US that uses SQLite, but we do not know what that something is. +
      ADDED pages/compile.in Index: pages/compile.in ================================================================== --- /dev/null +++ pages/compile.in @@ -0,0 +1,270 @@ +Compilation Options For SQLite + +

      Compilation Options For SQLite

      + +

      +For most purposes, SQLite can be built just fine using the default +compilation options. However, if required, the compile-time options +documented below can be used to +omit SQLite features (resulting in +a smaller compiled library size) or to change the +default values of some parameters. +

      +

      +Every effort has been made to ensure that the various combinations +of compilation options work harmoniously and produce a working library. +Nevertheless, it is strongly recommended that the SQLite test-suite +be executed to check for errors before using an SQLite library built +with non-standard compilation options. +

      + +

      Options To Set Default Parameter Values

      + +

      SQLITE_DEFAULT_AUTOVACUUM=<1 or 0>
      +This macro determines if SQLite creates databases with the +auto-vacuum +flag set by default. The default value is 0 (do not create auto-vacuum +databases). In any case the compile-time default may be overridden by the +"PRAGMA auto_vacuum" command. +

      + +

      SQLITE_DEFAULT_CACHE_SIZE=<pages>
+This macro sets the default size of the page-cache for each attached +database, in pages. This can be overridden by the "PRAGMA cache_size" +command. The default value is 2000. +

      + +

      SQLITE_DEFAULT_PAGE_SIZE=<bytes>
      +This macro is used to set the default page-size used when a +database is created. The value assigned must be a power of 2. The +default value is 1024. The compile-time default may be overridden at +runtime by the "PRAGMA page_size" command. +

      + +

      SQLITE_DEFAULT_TEMP_CACHE_SIZE=<pages>
      +This macro sets the default size of the page-cache for temporary files +created by SQLite to store intermediate results, in pages. It does +not affect the page-cache for the temp database, where tables created +using "CREATE TEMP TABLE" are stored. The default value is 500. +

      + +

      SQLITE_MAX_PAGE_SIZE=<bytes>
      +This is used to set the maximum allowable page-size that can +be specified by the "PRAGMA page_size" command. The default value +is 8192. +

      + + +

      Options To Omit Features

      + +

The following options are used to reduce the size of the compiled +library by omitting optional features. This is probably only useful +in embedded systems where space is especially tight, as even with all +features included the SQLite library is relatively small. Don't forget +to tell your compiler to optimize for binary size! (the -Os option if +using GCC).

      + +

      The macros in this section do not require values. The following +compilation switches all have the same effect:
      +-DSQLITE_OMIT_ALTERTABLE
      +-DSQLITE_OMIT_ALTERTABLE=1
      +-DSQLITE_OMIT_ALTERTABLE=0 +

      + +

If any of these options are defined, then the same set of SQLITE_OMIT_XXX +options must also be defined when using the 'lemon' tool to generate a parse.c +file. Because of this, these options may only be used when the library is built +from source, not from the collection of pre-packaged C files provided for +non-UNIX like platforms on the website. +

      + +

      SQLITE_OMIT_ALTERTABLE
      +When this option is defined, the +ALTER TABLE command is not included in the +library. Executing an ALTER TABLE statement causes a parse error. +

      + +

      SQLITE_OMIT_AUTHORIZATION
      +Defining this option omits the authorization callback feature from the +library. The +sqlite3_set_authorizer() API function is not present in the library. +

      + +

      SQLITE_OMIT_AUTOVACUUM
      +If this option is defined, the library cannot create or write to +databases that support +auto-vacuum. Executing a +"PRAGMA auto_vacuum" statement is not an error, but does not return a value +or modify the auto-vacuum flag in the database file. If a database that +supports auto-vacuum is opened by a library compiled with this option, it +is automatically opened in read-only mode. +

      + +

      SQLITE_OMIT_AUTOINCREMENT
+This option is used to omit the AUTOINCREMENT functionality. When this +macro is defined, columns declared as "INTEGER PRIMARY KEY AUTOINCREMENT" +behave in the same way as columns declared as "INTEGER PRIMARY KEY" when a +NULL is inserted. The sqlite_sequence system table is neither created, nor +respected if it already exists. +

      +

      TODO: Need a link here - AUTOINCREMENT is not yet documented

      + +

      SQLITE_OMIT_BLOB_LITERAL
      +When this option is defined, it is not possible to specify a blob in +an SQL statement using the X'ABCD' syntax.

      +} +#

      WARNING: The VACUUM command depends on this syntax for vacuuming databases +#that contain blobs, so disabling this functionality may render a database +#unvacuumable. +#

      +#

      TODO: Need a link here - is that syntax documented anywhere?

      +puts { + +

      SQLITE_OMIT_COMPLETE
      +This option causes the +sqlite3_complete API to be omitted. +

      + +

      SQLITE_OMIT_COMPOUND_SELECT
      +This option is used to omit the compound SELECT functionality. +SELECT statements that use the +UNION, UNION ALL, INTERSECT or EXCEPT compound SELECT operators will +cause a parse error. +

      + +

      SQLITE_OMIT_CONFLICT_CLAUSE
      +In the future, this option will be used to omit the +ON CONFLICT clause from the library. +

      + +

      SQLITE_OMIT_DATETIME_FUNCS
      +If this option is defined, SQLite's built-in date and time manipulation +functions are omitted. Specifically, the SQL functions julianday(), date(), +time(), datetime() and strftime() are not available. The default column +values CURRENT_TIME, CURRENT_DATE and CURRENT_DATETIME are still available. +

      + +

      SQLITE_OMIT_EXPLAIN
      +Defining this option causes the EXPLAIN command to be omitted from the +library. Attempting to execute an EXPLAIN statement will cause a parse +error. +

      + +

      SQLITE_OMIT_FLOATING_POINT
      +This option is used to omit floating-point number support from the SQLite +library. When specified, specifying a floating point number as a literal +(i.e. "1.01") results in a parse error. +

      +

      In the future, this option may also disable other floating point +functionality, for example the sqlite3_result_double(), +sqlite3_bind_double(), sqlite3_value_double() and sqlite3_column_double() +API functions. +

      + +

      SQLITE_OMIT_FOREIGN_KEY
      +If this option is defined, FOREIGN KEY clauses in column declarations are +ignored. +

      + +

      SQLITE_OMIT_INTEGRITY_CHECK
      +This option may be used to omit the +"PRAGMA integrity_check" +command from the compiled library. +

      + +

      SQLITE_OMIT_MEMORYDB
      +When this is defined, the library does not respect the special database +name ":memory:" (normally used to create an in-memory database). If +":memory:" is passed to sqlite3_open(), a file with this name will be +opened or created. +

      + +

      SQLITE_OMIT_PAGER_PRAGMAS
      +Defining this option omits pragmas related to the pager subsystem from +the build. Currently, the +default_cache_size and +cache_size pragmas are omitted. +

      + +

      SQLITE_OMIT_PRAGMA
      +This option is used to omit the PRAGMA command +from the library. Note that it is useful to define the macros that omit +specific pragmas in addition to this, as they may also remove supporting code +in other sub-systems. This macro removes the PRAGMA command only. +

      + +

      SQLITE_OMIT_PROGRESS_CALLBACK
      +This option may be defined to omit the capability to issue "progress" +callbacks during long-running SQL statements. The +sqlite3_progress_handler() +API function is not present in the library. + +

      SQLITE_OMIT_REINDEX
      +When this option is defined, the REINDEX +command is not included in the library. Executing a REINDEX statement causes +a parse error. +

      + +

      SQLITE_OMIT_SCHEMA_PRAGMAS
      +Defining this option omits pragmas for querying the database schema from +the build. Currently, the +table_info, +index_info, +index_list and +database_list +pragmas are omitted. +

      + +

      SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
      +Defining this option omits pragmas for querying and modifying the +database schema version and user version from the build. Specifically, the +schema_version and +user_version +pragmas are omitted. + +

      SQLITE_OMIT_SUBQUERY
      +

      If defined, support for sub-selects and the IN() operator are omitted. +

      + +

      SQLITE_OMIT_TCL_VARIABLE
      +

      If this macro is defined, then the special "$" syntax +used to automatically bind SQL variables to TCL variables is omitted. +

      + +

      SQLITE_OMIT_TRIGGER
+Defining this option omits support for TRIGGER objects. Neither the +CREATE TRIGGER nor +DROP TRIGGER +commands are available in this case, attempting to execute either will result +in a parse error. +

      +

      +WARNING: If this macro is defined, it will not be possible to open a database +for which the schema contains TRIGGER objects. +

      + +

      SQLITE_OMIT_UTF16
      +This macro is used to omit support for UTF16 text encoding. When this is +defined all API functions that return or accept UTF16 encoded text are +unavailable. These functions can be identified by the fact that they end +with '16', for example sqlite3_prepare16(), sqlite3_column_text16() and +sqlite3_bind_text16(). +

      + +

      SQLITE_OMIT_VACUUM
      +When this option is defined, the VACUUM +command is not included in the library. Executing a VACUUM statement causes +a parse error. +

      + +

      SQLITE_OMIT_VIEW
      +Defining this option omits support for VIEW objects. Neither the +CREATE VIEW or +DROP VIEW +commands are available in this case, attempting to execute either will result +in a parse error. +

      +

      +WARNING: If this macro is defined, it will not be possible to open a database +for which the schema contains VIEW objects. +

      ADDED pages/conflict.in Index: pages/conflict.in ================================================================== --- /dev/null +++ pages/conflict.in @@ -0,0 +1,83 @@ +Constraint Conflict Resolution in SQLite + +

      Constraint Conflict Resolution in SQLite

      + +

      +In most SQL databases, if you have a UNIQUE constraint on +a table and you try to do an UPDATE or INSERT that violates +the constraint, the database will abort the operation in +progress, back out any prior changes associated with +UPDATE or INSERT command, and return an error. +This is the default behavior of SQLite. +Beginning with version 2.3.0, though, SQLite allows you to +define alternative ways for dealing with constraint violations. +This article describes those alternatives and how to use them. +

      + +

      Conflict Resolution Algorithms

      + +

      +SQLite defines five constraint conflict resolution algorithms +as follows: +

      + +
      +
      ROLLBACK
      +

      When a constraint violation occurs, an immediate ROLLBACK +occurs, thus ending the current transaction, and the command aborts +with a return code of SQLITE_CONSTRAINT. If no transaction is +active (other than the implied transaction that is created on every +command) then this algorithm works the same as ABORT.

      + +
      ABORT
      +

      When a constraint violation occurs, the command backs out +any prior changes it might have made and aborts with a return code +of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes +from prior commands within the same transaction +are preserved. This is the default behavior for SQLite.

      + +
      FAIL
      +

When a constraint violation occurs, the command aborts with a +return code SQLITE_CONSTRAINT. But any changes to the database that +the command made prior to encountering the constraint violation +are preserved and are not backed out. For example, if an UPDATE +statement encountered a constraint violation on the 100th row that +it attempts to update, then the first 99 row changes are preserved +but changes to rows 100 and beyond never occur.

      + +
      IGNORE
      +

      When a constraint violation occurs, the one row that contains +the constraint violation is not inserted or changed. But the command +continues executing normally. Other rows before and after the row that +contained the constraint violation continue to be inserted or updated +normally. No error is returned.

      + +
      REPLACE
      +

      When a UNIQUE constraint violation occurs, the pre-existing row +that caused the constraint violation is removed prior to inserting +or updating the current row. Thus the insert or update always occurs. +The command continues executing normally. No error is returned.

      +
      + +

      Why So Many Choices?

      + +

      SQLite provides multiple conflict resolution algorithms for a +couple of reasons. First, SQLite tries to be roughly compatible with as +many other SQL databases as possible, but different SQL database +engines exhibit different conflict resolution strategies. For +example, PostgreSQL always uses ROLLBACK, Oracle always uses ABORT, and +MySQL usually uses FAIL but can be instructed to use IGNORE or REPLACE. +By supporting all five alternatives, SQLite provides maximum +portability.

      + +

      Another reason for supporting multiple algorithms is that sometimes +it is useful to use an algorithm other than the default. +Suppose, for example, you are +inserting 1000 records into a database, all within a single +transaction, but one of those records is malformed and causes +a constraint error. Under PostgreSQL or Oracle, none of the +1000 records would get inserted. In MySQL, some subset of the +records that appeared before the malformed record would be inserted +but the rest would not. Neither behavior is especially helpful. +What you really want is to use the IGNORE algorithm to insert +all but the malformed record.

      ADDED pages/copyright.in Index: pages/copyright.in ================================================================== --- /dev/null +++ pages/copyright.in @@ -0,0 +1,122 @@ +SQLite Copyright + +

      SQLite Copyright

      + + + +
      +
      +SQLite is in the
      +Public Domain +
      + +

      +All of the deliverable code in SQLite has been dedicated to the +public domain +by the authors. +All code authors, and representatives of the companies they work for, +have signed affidavits dedicating their contributions to +the public domain and originals of +those signed affidavits are stored in a firesafe at the main offices +of Hwaci. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute +the original SQLite code, either in source code form or as a compiled binary, +for any purpose, commercial or non-commercial, and by any means. +

      + +

      +The previous paragraph applies to the deliverable code in SQLite - +those parts of the SQLite library that you actually bundle and +ship with a larger application. Portions of the documentation and +some code used as part of the build process might fall under +other licenses. The details here are unclear. We do not worry +about the licensing of the documentation and build code so much +because none of these things are part of the core deliverable +SQLite library. +

      + +

      +All of the deliverable code in SQLite has been written from scratch. +No code has been taken from other projects or from the open +internet. Every line of code can be traced back to its original +author, and all of those authors have public domain dedications +on file. So the SQLite code base is clean and is +uncontaminated with licensed code from other projects. +

      + +

      Obtaining An Explicit License To Use SQLite

      + +

      +Even though SQLite is in the public domain and does not require +a license, some users want to obtain a license anyway. Some reasons +for obtaining a license include: +

      + +
        +
      • You are using SQLite in a jurisdiction that does not recognize + the public domain.
      • +
      • You are using SQLite in a jurisdiction that does not recognize + the right of an author to dedicate their work to the public + domain.
      • +
      • You want to hold a tangible legal document + as evidence that you have the legal right to use and distribute + SQLite.
      • +
      • Your legal department tells you that you have to purchase a license. +
      • +
      + +

      +If you feel like you really have to purchase a license for SQLite, +Hwaci, the company that employs +the architect and principal developers of SQLite, will sell you +one. +Please contact: +

      + +
      +D. Richard Hipp
      +Hwaci - Applied Software Research
      +704.948.4565
      +drh@hwaci.com +
      + +

      Contributed Code

      + +

      +In order to keep SQLite completely free and unencumbered by copyright, +all new contributors to the SQLite code base are asked to dedicate +their contributions to the public domain. +If you want to send a patch or enhancement for possible inclusion in the +SQLite source tree, please accompany the patch with the following statement: +

      + +
      +The author or authors of this code dedicate any and all copyright interest +in this code to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and successors. +We intend this dedication to be an overt act of relinquishment in +perpetuity of all present and future rights to this code under copyright law. +
      + +

      +We are not able to accept patches or changes to +SQLite that are not accompanied by a statement such as the above. +In addition, if you make +changes or enhancements as an employee, then a simple statement such as the +above is insufficient. You must also send by surface mail a copyright release +signed by a company officer. +A signed original of the copyright release should be mailed to:

      + +
      +Hwaci
      +6200 Maple Cove Lane
      +Charlotte, NC 28269
      +USA +
      + +

      +A template copyright release is available +in PDF or +HTML. +You can use this release to make future changes. +

      ADDED pages/datatype3.in Index: pages/datatype3.in ================================================================== --- /dev/null +++ pages/datatype3.in @@ -0,0 +1,435 @@ +Datatypes In SQLite Version 3 + +

      Datatypes In SQLite Version 3

      + +

      1. Storage Classes

      + +

      Version 2 of SQLite stores all column values as ASCII text. +Version 3 enhances this by providing the ability to store integer and +real numbers in a more compact format and the capability to store +BLOB data.

      + +

      Each value stored in an SQLite database (or manipulated by the +database engine) has one of the following storage classes:

      +
        +
      • NULL. The value is a NULL value.

        +
      • INTEGER. The value is a signed integer, stored in 1, + 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.

        +
      • REAL. The value is a floating point value, stored as + an 8-byte IEEE floating point number.

        +
• TEXT. The value is a text string, stored using the + database encoding (UTF-8, UTF-16BE or UTF-16LE).

        +
      • BLOB. The value is a blob of data, stored exactly as + it was input.

        +
      + +

      As in SQLite version 2, any column in a version 3 database except an INTEGER +PRIMARY KEY may be used to store any type of value. The exception to +this rule is described below under 'Strict Affinity Mode'.

      + +

      All values supplied to SQLite, whether as literals embedded in SQL +statements or values bound to pre-compiled SQL statements +are assigned a storage class before the SQL statement is executed. +Under circumstances described below, the +database engine may convert values between numeric storage classes +(INTEGER and REAL) and TEXT during query execution. +

      + +

      Storage classes are initially assigned as follows:

      +
        +
      • Values specified as literals as part of SQL statements are + assigned storage class TEXT if they are enclosed by single or double + quotes, INTEGER if the literal is specified as an unquoted number + with no decimal point or exponent, REAL if the literal is an + unquoted number with a decimal point or exponent and NULL if the + value is a NULL. Literals with storage class BLOB are specified + using the X'ABCD' notation.

        +
      • Values supplied using the sqlite3_bind_* APIs are assigned + the storage class that most closely matches the native type bound + (i.e. sqlite3_bind_blob() binds a value with storage class BLOB).

        +
      +

      The storage class of a value that is the result of an SQL scalar +operator depends on the outermost operator of the expression. +User-defined functions may return values with any storage class. It +is not generally possible to determine the storage class of the +result of an expression at compile time.

      + + +

      2. Column Affinity

      + +

+In SQLite version 3, the type of a value is associated with the value +itself, not with the column or variable in which the value is stored. +(This is sometimes called + +manifest typing.) +All other SQL database engines that we are aware of use the more +restrictive system of static typing where the type is associated with +the container, not the value. +

      + +

+In order to maximize compatibility between SQLite and other database +engines, SQLite supports the concept of "type affinity" on columns. +The type affinity of a column is the recommended type for data stored +in that column. The key here is that the type is recommended, not +required. Any column can still store any type of data, in theory. +It is just that some columns, given the choice, will prefer to use +one storage class over another. The preferred storage class for +a column is called its "affinity". +

      + +

      Each column in an SQLite 3 database is assigned one of the +following type affinities:

      +
        +
      • TEXT
      • +
      • NUMERIC
      • +
      • INTEGER
      • +
      • REAL
      • +
      • NONE
      • +
      + +

      A column with TEXT affinity stores all data using storage classes +NULL, TEXT or BLOB. If numerical data is inserted into a column with +TEXT affinity it is converted to text form before being stored.

      + +

      A column with NUMERIC affinity may contain values using all five +storage classes. When text data is inserted into a NUMERIC column, an +attempt is made to convert it to an integer or real number before it +is stored. If the conversion is successful, then the value is stored +using the INTEGER or REAL storage class. If the conversion cannot be +performed the value is stored using the TEXT storage class. No +attempt is made to convert NULL or blob values.

      + +

      A column that uses INTEGER affinity behaves in the same way as a +column with NUMERIC affinity, except that if a real value with no +floating point component (or text value that converts to such) is +inserted it is converted to an integer and stored using the INTEGER +storage class.

      + +

      A column with REAL affinity behaves like a column with NUMERIC +affinity except that it forces integer values into floating point +representation. (As an optimization, integer values are stored on +disk as integers in order to take up less space and are only converted +to floating point as the value is read out of the table.)

      + +

      A column with affinity NONE does not prefer one storage class over +another. It makes no attempt to coerce data before +it is inserted.

      + +

      2.1 Determination Of Column Affinity

      + +

      The type affinity of a column is determined by the declared type +of the column, according to the following rules:

      +
        +
      1. If the datatype contains the string "INT" then it + is assigned INTEGER affinity.

        + +
      2. If the datatype of the column contains any of the strings + "CHAR", "CLOB", or "TEXT" then that + column has TEXT affinity. Notice that the type VARCHAR contains the + string "CHAR" and is thus assigned TEXT affinity.

        + +
      3. If the datatype for a column + contains the string "BLOB" or if + no datatype is specified then the column has affinity NONE.

        + +
      4. If the datatype for a column + contains any of the strings "REAL", "FLOA", + or "DOUB" then the column has REAL affinity

        + +
      5. Otherwise, the affinity is NUMERIC.

        +
      + +

      If a table is created using a "CREATE TABLE <table> AS +SELECT..." statement, then all columns have no datatype specified +and they are given no affinity.

      + +

      2.2 Column Affinity Example

      + +
      +
      CREATE TABLE t1(
      +    t  TEXT,
      +    nu NUMERIC, 
      +    i  INTEGER,
      +    no BLOB
      +);
      +
      +-- Storage classes for the following row:
      +-- TEXT, REAL, INTEGER, TEXT
      +INSERT INTO t1 VALUES('500.0', '500.0', '500.0', '500.0');
      +
      +-- Storage classes for the following row:
      +-- TEXT, REAL, INTEGER, REAL
      +INSERT INTO t1 VALUES(500.0, 500.0, 500.0, 500.0);
      +
      +
      + + +

      3. Comparison Expressions

      + +

Like SQLite version 2, version 3 +features the binary comparison operators '=', +'<', '>', '<=', '>=' and '!=', an operation to test for set +membership, 'IN', and the ternary comparison operator 'BETWEEN'.

      +

      The results of a comparison depend on the storage classes of the +two values being compared, according to the following rules:

      +
        +
      • A value with storage class NULL is considered less than any + other value (including another value with storage class NULL).

        + +
      • An INTEGER or REAL value is less than any TEXT or BLOB value. + When an INTEGER or REAL is compared to another INTEGER or REAL, a + numerical comparison is performed.

        + +
      • A TEXT value is less than a BLOB value. When two TEXT values + are compared, the C library function memcmp() is usually used to + determine the result. However this can be overridden, as described + under 'User-defined collation Sequences' below.

        + +
      • When two BLOB values are compared, the result is always + determined using memcmp().

        +
      + +

SQLite may attempt to convert values between the numeric storage +classes (INTEGER and REAL) and TEXT before performing a comparison. +For binary comparisons, this is done in the cases enumerated below. +The term "expression" used in the bullet points below means any +SQL scalar expression or literal other than a column value. Note that +if X and Y.Z are column names, then +X and +Y.Z are considered +expressions.

      +
        +
      • When a column value is compared to the result of an + expression, the affinity of the column is applied to the result of + the expression before the comparison takes place.

        + +
      • When two column values are compared, if one column has + INTEGER or REAL or NUMERIC affinity and the other does not, + then NUMERIC affinity is applied to any values with storage + class TEXT extracted from the non-NUMERIC column.

        + +
      • When the results of two expressions are compared, no + conversions occur. The results are compared as is. If a string + is compared to a number, the number will always be less than the + string.

        +
      + +

      +In SQLite, the expression "a BETWEEN b AND c" is equivalent to "a >= b +AND a <= c", even if this means that different affinities are applied to +'a' in each of the comparisons required to evaluate the expression. +

      + +

      Expressions of the type "a IN (SELECT b ....)" are handled by the three +rules enumerated above for binary comparisons (e.g. in a +similar manner to "a = b"). For example if 'b' is a column value +and 'a' is an expression, then the affinity of 'b' is applied to 'a' +before any comparisons take place.

      + +

SQLite treats the expression "a IN (x, y, z)" as equivalent to "a = +x OR +a = +y OR a = +z". The values to the right of the IN operator (the "x", "y", +and "z" values in this example) are considered to be expressions, even if they +happen to be column values. If the value on the left of the IN operator is +a column, then the affinity of that column is used. If the value is an +expression then no conversions occur. +

      + +

      3.1 Comparison Example

      + +
      +
      +CREATE TABLE t1(
      +    a TEXT,
      +    b NUMERIC,
      +    c BLOB
      +);
      +
      +-- Storage classes for the following row:
      +-- TEXT, REAL, TEXT
      +INSERT INTO t1 VALUES('500', '500', '500');
      +
      +-- 60 and 40 are converted to '60' and '40' and values are compared as TEXT.
      +SELECT a < 60, a < 40 FROM t1;
      +1|0
      +
      +-- Comparisons are numeric. No conversions are required.
      +SELECT b < 60, b < 600 FROM t1;
      +0|1
      +
      +-- Both 60 and 600 (storage class NUMERIC) are less than '500'
      +-- (storage class TEXT).
      +SELECT c < 60, c < 600 FROM t1;
      +0|0
      +
      +
      +

      4. Operators

      + +

      All mathematical operators (which is to say, all operators other +than the concatenation operator "||") apply NUMERIC +affinity to all operands prior to being carried out. If one or both +operands cannot be converted to NUMERIC then the result of the +operation is NULL.

      + +

      For the concatenation operator, TEXT affinity is applied to both +operands. If either operand cannot be converted to TEXT (because it +is NULL or a BLOB) then the result of the concatenation is NULL.

      + +

      5. Sorting, Grouping and Compound SELECTs

      + +

      When values are sorted by an ORDER by clause, values with storage +class NULL come first, followed by INTEGER and REAL values +interspersed in numeric order, followed by TEXT values usually in +memcmp() order, and finally BLOB values in memcmp() order. No storage +class conversions occur before the sort.

      + +

      When grouping values with the GROUP BY clause values with +different storage classes are considered distinct, except for INTEGER +and REAL values which are considered equal if they are numerically +equal. No affinities are applied to any values as the result of a +GROUP by clause.

      + +

      The compound SELECT operators UNION, +INTERSECT and EXCEPT perform implicit comparisons between values. +Before these comparisons are performed an affinity may be applied to +each value. The same affinity, if any, is applied to all values that +may be returned in a single column of the compound SELECT result set. +The affinity applied is the affinity of the column returned by the +left most component SELECTs that has a column value (and not some +other kind of expression) in that position. If for a given compound +SELECT column none of the component SELECTs return a column value, no +affinity is applied to the values from that column before they are +compared.

      + +

      6. Other Affinity Modes

      + +

      The above sections describe the operation of the database engine +in 'normal' affinity mode. SQLite version 3 will feature two other affinity +modes, as follows:

      +
        +
      • Strict affinity mode. In this mode if a conversion + between storage classes is ever required, the database engine + returns an error and the current statement is rolled back.

        + +
      • No affinity mode. In this mode no conversions between + storage classes are ever performed. Comparisons between values of + different storage classes (except for INTEGER and REAL) are always + false.

        +
      + + +

      7. User-defined Collation Sequences

      + +

      +By default, when SQLite compares two text values, the result of the +comparison is determined using memcmp(), regardless of the encoding of the +string. SQLite v3 provides the ability for users to supply arbitrary +comparison functions, known as user-defined collation sequences, to be used +instead of memcmp(). +

      +

+Aside from the default collation sequence BINARY, implemented using +memcmp(), SQLite features one extra built-in collation sequence +intended for testing purposes, the NOCASE collation: +

      +
        +
      • BINARY - Compares string data using memcmp(), regardless + of text encoding.
      • +
      • NOCASE - The same as binary, except the 26 upper case + characters used by the English language are + folded to their lower case equivalents before + the comparison is performed.
      + + +

      7.1 Assigning Collation Sequences from SQL

      + +

      +Each column of each table has a default collation type. If a collation type +other than BINARY is required, a COLLATE clause is specified as part of the +column definition to define it. +

      + +

      +Whenever two text values are compared by SQLite, a collation sequence is +used to determine the results of the comparison according to the following +rules. Sections 3 and 5 of this document describe the circumstances under +which such a comparison takes place. +

      + +

      +For binary comparison operators (=, <, >, <= and >=) if either operand is a +column, then the default collation type of the column determines the +collation sequence to use for the comparison. If both operands are columns, +then the collation type for the left operand determines the collation +sequence used. If neither operand is a column, then the BINARY collation +sequence is used. For the purposes of this paragraph, a column name +preceded by one or more unary "+" operators is considered a column name. +

      + +

      +The expression "x BETWEEN y and z" is equivalent to "x >= y AND x <= +z". The expression "x IN (SELECT y ...)" is handled in the same way as the +expression "x = y" for the purposes of determining the collation sequence +to use. The collation sequence used for expressions of the form "x IN (y, z +...)" is the default collation type of x if x is a column, or BINARY +otherwise. +

      + +

      +An ORDER BY clause that is part of a SELECT +statement may be assigned a collation sequence to be used for the sort +operation explicitly. In this case the explicit collation sequence is +always used. Otherwise, if the expression sorted by an ORDER BY clause is +a column, then the default collation type of the column is used to +determine sort order. If the expression is not a column, then the BINARY +collation sequence is used. +

      + +

      7.2 Collation Sequences Example

      +

      +The examples below identify the collation sequences that would be used to +determine the results of text comparisons that may be performed by various +SQL statements. Note that a text comparison may not be required, and no +collation sequence used, in the case of numeric, blob or NULL values. +

      +
      +
      +CREATE TABLE t1(
      +    a,                 -- default collation type BINARY
      +    b COLLATE BINARY,  -- default collation type BINARY
      +    c COLLATE REVERSE, -- default collation type REVERSE
      +    d COLLATE NOCASE   -- default collation type NOCASE
      +);
      +
      +-- Text comparison is performed using the BINARY collation sequence.
      +SELECT (a = b) FROM t1;
      +
      +-- Text comparison is performed using the NOCASE collation sequence.
      +SELECT (d = a) FROM t1;
      +
      +-- Text comparison is performed using the BINARY collation sequence.
      +SELECT (a = d) FROM t1;
      +
      +-- Text comparison is performed using the REVERSE collation sequence.
      +SELECT ('abc' = c) FROM t1;
      +
      +-- Text comparison is performed using the REVERSE collation sequence.
      +SELECT (c = 'abc') FROM t1;
      +
      +-- Grouping is performed using the NOCASE collation sequence (i.e. values
      +-- 'abc' and 'ABC' are placed in the same group).
+SELECT count(*) FROM t1 GROUP BY d;
      +
      +-- Grouping is performed using the BINARY collation sequence.
+SELECT count(*) FROM t1 GROUP BY (d || '');
      +
      +-- Sorting is performed using the REVERSE collation sequence.
      +SELECT * FROM t1 ORDER BY c;
      +
      +-- Sorting is performed using the BINARY collation sequence.
      +SELECT * FROM t1 ORDER BY (c || '');
      +
      +-- Sorting is performed using the NOCASE collation sequence.
      +SELECT * FROM t1 ORDER BY c COLLATE NOCASE;
      +
      +
      +
      ADDED pages/datatypes.in Index: pages/datatypes.in ================================================================== --- /dev/null +++ pages/datatypes.in @@ -0,0 +1,236 @@ +Datatypes In SQLite version 2 + +

      Datatypes In SQLite Version 2

      + +

      1.0   Typelessness

      +

      +SQLite is "typeless". This means that you can store any +kind of data you want in any column of any table, regardless of the +declared datatype of that column. +(See the one exception to this rule in section 2.0 below.) +This behavior is a feature, not +a bug. A database is suppose to store and retrieve data and it +should not matter to the database what format that data is in. +The strong typing system found in most other SQL engines and +codified in the SQL language spec is a misfeature - +it is an example of the implementation showing through into the +interface. SQLite seeks to overcome this misfeature by allowing +you to store any kind of data into any kind of column and by +allowing flexibility in the specification of datatypes. +

      + +

      +A datatype to SQLite is any sequence of zero or more names +optionally followed by a parenthesized lists of one or two +signed integers. Notice in particular that a datatype may +be zero or more names. That means that an empty +string is a valid datatype as far as SQLite is concerned. +So you can declare tables where the datatype of each column +is left unspecified, like this: +

      + +
      +CREATE TABLE ex1(a,b,c);
      +
      + +

      +Even though SQLite allows the datatype to be omitted, it is +still a good idea to include it in your CREATE TABLE statements, +since the data type often serves as a good hint to other +programmers about what you intend to put in the column. And +if you ever port your code to another database engine, that +other engine will probably require a datatype of some kind. +SQLite accepts all the usual datatypes. For example: +

      + +
      +CREATE TABLE ex2(
      +  a VARCHAR(10),
      +  b NVARCHAR(15),
      +  c TEXT,
      +  d INTEGER,
      +  e FLOAT,
      +  f BOOLEAN,
      +  g CLOB,
      +  h BLOB,
      +  i TIMESTAMP,
+  j NUMERIC(10,5),
      +  k VARYING CHARACTER (24),
      +  l NATIONAL VARYING CHARACTER(16)
      +);
      +
      + +

      +And so forth. Basically any sequence of names optionally followed by +one or two signed integers in parentheses will do. +

      + +

      2.0   The INTEGER PRIMARY KEY

      + +

      +One exception to the typelessness of SQLite is a column whose type +is INTEGER PRIMARY KEY. (And you must use "INTEGER" not "INT". +A column of type INT PRIMARY KEY is typeless just like any other.) +INTEGER PRIMARY KEY columns must contain a 32-bit signed integer. Any +attempt to insert non-integer data will result in an error. +

      + +

+INTEGER PRIMARY KEY columns can be used to implement the equivalent +of AUTOINCREMENT. If you try to insert a NULL into an INTEGER PRIMARY +KEY column, the column will actually be filled with an integer that is +one greater than the largest key already in the table. Or if the +largest key is 2147483647, then the column will be filled with a +random integer. Either way, the INTEGER PRIMARY KEY column will be +assigned a unique integer. You can retrieve this integer using +the sqlite_last_insert_rowid() API function or using the +last_insert_rowid() SQL function in a subsequent SELECT statement. +

      + +

      3.0   Comparison and Sort Order

      + +

      +SQLite is typeless for the purpose of deciding what data is allowed +to be stored in a column. But some notion of type comes into play +when sorting and comparing data. For these purposes, a column or +an expression can be one of two types: numeric and text. +The sort or comparison may give different results depending on which +type of data is being sorted or compared. +

      + +

      +If data is of type text then the comparison is determined by +the standard C data comparison functions memcmp() or +strcmp(). The comparison looks at bytes from two inputs one +by one and returns the first non-zero difference. +Strings are '\000' terminated so shorter +strings sort before longer strings, as you would expect. +

      + +

+For numeric data, this situation is more complex. If both inputs +look like well-formed numbers, then they are converted +into floating point values using atof() and compared numerically. +If one input is not a well-formed number but the other is, then the +number is considered to be less than the non-number. If neither input +is a well-formed number, then strcmp() is used to do the +comparison. +

      + +

      +Do not be confused by the fact that a column might have a "numeric" +datatype. This does not mean that the column can contain only numbers. +It merely means that if the column does contain a number, that number +will sort in numerical order. +

      + +

      +For both text and numeric values, NULL sorts before any other value. +A comparison of any value against NULL using operators like "<" or +">=" is always false. +

      + +

      4.0   How SQLite Determines Datatypes

      + +

      +For SQLite version 2.6.3 and earlier, all values used the numeric datatype. +The text datatype appears in version 2.7.0 and later. In the sequel it +is assumed that you are using version 2.7.0 or later of SQLite. +

      + +

+For an expression, the datatype of the result is often determined by +the outermost operator. For example, arithmetic operators ("+", "*", "%") +always return a numeric result. The string concatenation operator +("||") returns a text result. And so forth. If you are ever in doubt +about the datatype of an expression you can use the special typeof() +SQL function to determine what the datatype is. For example: +

      + +
      +sqlite> SELECT typeof('abc'+123);
      +numeric
      +sqlite> SELECT typeof('abc'||123);
      +text
      +
      + +

      +For table columns, the datatype is determined by the type declaration +of the CREATE TABLE statement. The datatype is text if and only if +the type declaration contains one or more of the following strings: +

      + +
      +BLOB
      +CHAR
      +CLOB
      +TEXT +
      + +

      +The search for these strings in the type declaration is case insensitive, +of course. If any of the above strings occur anywhere in the type +declaration, then the datatype of the column is text. Notice that +the type "VARCHAR" contains "CHAR" as a substring so it is considered +text.

      + +

      If none of the strings above occur anywhere in the type declaration, +then the datatype is numeric. Note in particular that the datatype for columns +with an empty type declaration is numeric. +

      + +

      5.0   Examples

      + +

      +Consider the following two command sequences: +

      + +
      +CREATE TABLE t1(a INTEGER UNIQUE);        CREATE TABLE t2(b TEXT UNIQUE);
      +INSERT INTO t1 VALUES('0');               INSERT INTO t2 VALUES(0);
      +INSERT INTO t1 VALUES('0.0');             INSERT INTO t2 VALUES(0.0);
      +
      + +

      In the sequence on the left, the second insert will fail. In this case, +the strings '0' and '0.0' are treated as numbers since they are being +inserted into a numeric column but 0==0.0 which violates the uniqueness +constraint. However, the second insert in the right-hand sequence works. In +this case, the constants 0 and 0.0 are treated as strings which means that +they are distinct.

      + +

      SQLite always converts numbers into double-precision (64-bit) floats +for comparison purposes. This means that a long sequence of digits that +differ only in insignificant digits will compare equal if they +are in a numeric column but will compare unequal if they are in a text +column. We have:

      + +
      +INSERT INTO t1                            INSERT INTO t2
      +   VALUES('12345678901234567890');           VALUES(12345678901234567890);
      +INSERT INTO t1                            INSERT INTO t2
      +   VALUES('12345678901234567891');           VALUES(12345678901234567891);
      +
      + +

      As before, the second insert on the left will fail because the comparison +will convert both strings into floating-point number first and the only +difference in the strings is in the 20-th digit which exceeds the resolution +of a 64-bit float. In contrast, the second insert on the right will work +because in that case, the numbers being inserted are strings and are +compared using memcmp().

      + +

      +Numeric and text types make a difference for the DISTINCT keyword too: +

      + +
      +CREATE TABLE t3(a INTEGER);               CREATE TABLE t4(b TEXT);
      +INSERT INTO t3 VALUES('0');               INSERT INTO t4 VALUES(0);
      +INSERT INTO t3 VALUES('0.0');             INSERT INTO t4 VALUES(0.0);
      +SELECT DISTINCT * FROM t3;                SELECT DISTINCT * FROM t4;
      +
      + +

      +The SELECT statement on the left returns a single row since '0' and '0.0' +are treated as numbers and are therefore indistinct. But the SELECT +statement on the right returns two rows since 0 and 0.0 are treated +as strings which are different.

      ADDED pages/different.in Index: pages/different.in ================================================================== --- /dev/null +++ pages/different.in @@ -0,0 +1,222 @@ +Distinctive Features Of SQLite + +

      +This page highlights some of the characteristics of SQLite that are +unusual and which make SQLite different from many other SQL +database engines. +

      + + +proc feature {tag name text} { + puts "" + puts "

      $name

      \n" + puts "
      $text
      \n" +} + +feature zeroconfig {Zero-Configuration} { + SQLite does not need to be "installed" before it is used. + There is no "setup" procedure. There is no + server process that needs to be started, stopped, or configured. + There is + no need for an administrator to create a new database instance or assign + access permissions to users. + SQLite uses no configuration files. + Nothing needs to be done to tell the system that SQLite is running. + No actions are required to recover after a system crash or power failure. + There is nothing to troubleshoot. +

      + SQLite just works. +

      + Other more familiar database engines run great once you get them going. + But doing the initial installation and configuration can be + intimidatingly complex. +} + +feature serverless {Serverless} { + Most SQL database engines are implemented as a separate server + process. Programs that want to access the database communicate + with the server using some kind of interprocess communication + (typically TCP/IP) to send requests to the server and to receive + back results. SQLite does not work this way. With SQLite, the + process that wants to access the database reads and writes + directly from the database files on disk. There is no intermediary + server process. +

      + There are advantages and disadvantages to being serverless. The + main advantage is that there is no separate server process + to install, setup, configure, initialize, manage, and troubleshoot. + This is one reason why SQLite is a "zero-configuration" database + engine. Programs that use SQLite require no administrative support + for setting up the database engine before they are run. Any program + that is able to access the disk is able to use an SQLite database. +

      + On the other hand, a database engine that uses a server can provide + better protection from bugs in the client application - stray pointers + in a client cannot corrupt memory on the server. And because a server + is a single persistent process, it is able to control database access with + more precision, allowing for finer grain locking and better concurrency. +

      + Most SQL database engines are client/server based. Of those that are + serverless, SQLite is the only one that this author knows of that + allows multiple applications to access the same database at the same time. +} + +feature onefile {Single Database File} { + An SQLite database is a single ordinary disk file that can be located + anywhere in the directory hierarchy. If SQLite can read + the disk file then it can read anything in the database. If the disk + file and its directory are writable, then SQLite can change anything + in the database. Database files can easily be copied onto a USB + memory stick or emailed for sharing. +

      + Other SQL database engines tend to store data as a large collection of + files. Often these files are in a standard location that only the + database engine itself can access. This makes the data more secure, + but also makes it harder to access. Some SQL database engines provide + the option of writing directly to disk and bypassing the filesystem + all together. This provides added performance, but at the cost of + considerable setup and maintenance complexity. +} + +feature small {Compact} { + When optimized for size, the whole SQLite library with everything enabled + is less than 225KiB in size (as measured on an ix86 using the "size" + utility from the GNU compiler suite.) Unneeded features can be disabled + at compile-time to further reduce the size of the library to under + 170KiB if desired. +

      + Most other SQL database engines are much larger than this. IBM boasts + that its recently released CloudScape database engine is "only" a 2MiB + jar file - 10 times larger than SQLite even after it is compressed! + Firebird boasts that its client-side library is only 350KiB. That's + 50% larger than SQLite and does not even contain the database engine. + The Berkeley DB library from Sleepycat is 450KiB and it omits SQL + support, providing the programmer with only simple key/value pairs. +} + +feature typing {Manifest typing} { + Most SQL database engines use static typing. A datatype is associated + with each column in a table and only values of that particular datatype + are allowed to be stored in that column. SQLite relaxes this restriction + by using manifest typing. + In manifest typing, the datatype is a property of the value itself, not + of the column in which the value is stored. + SQLite thus allows the user to store + any value of any datatype into any column regardless of the declared type + of that column. (There are some exceptions to this rule: An INTEGER + PRIMARY KEY column may only store integers. And SQLite attempts to coerce + values into the declared datatype of the column when it can.) +

      + As far as we can tell, the SQL language specification allows the use + of manifest typing. Nevertheless, most other SQL database engines are + statically typed and so some people + feel that the use of manifest typing is a bug in SQLite. But the authors + of SQLite feel very strongly that this is a feature. The use of manifest + typing in SQLite is a deliberate design decision which has proven in practice + to make SQLite more reliable and easier to use, especially when used in + combination with dynamically typed programming languages such as Tcl and + Python. +} + +feature flex {Variable-length records} { + Most other SQL database engines allocate a fixed amount of disk space + for each row in most tables. They play special tricks for handling + BLOBs and CLOBs which can be of wildly varying length. But for most + tables, if you declare a column to be a VARCHAR(100) then the database + engine will allocate + 100 bytes of disk space regardless of how much information you actually + store in that column. +

      + SQLite, in contrast, uses only the amount of disk space actually + needed to store the information in a row. If you store a single + character in a VARCHAR(100) column, then only a single byte of disk + space is consumed. (Actually two bytes - there is some overhead at + the beginning of each column to record its datatype and length.) +

      + The use of variable-length records by SQLite has a number of advantages. + It results in smaller database files, obviously. It also makes the + database run faster, since there is less information to move to and from + disk. And, the use of variable-length records makes it possible for + SQLite to employ manifest typing instead of static typing. +} + +feature readable {Readable source code} { + The source code to SQLite is designed to be readable and accessible to + the average programmer. All procedures and data structures and many + automatic variables are carefully commented with useful information about + what they do. Boilerplate commenting is omitted. +} + +feature vdbe {SQL statements compile into virtual machine code} { + Every SQL database engine compiles each SQL statement into some kind of + internal data structure which is then used to carry out the work of the + statement. But in most SQL engines that internal data structure is a + complex web of interlinked structures and objects. In SQLite, the compiled + form of statements is a short program in a machine-language like + representation. Users of the database can view this + virtual machine language + by prepending the EXPLAIN keyword + to a query. +

      + The use of a virtual machine in SQLite has been a great benefit to + the library's development. The virtual machine provides a crisp, well-defined + junction between the front-end of SQLite (the part that parses SQL + statements and generates virtual machine code) and the back-end (the + part that executes the virtual machine code and computes a result.) + The virtual machine allows the developers to see clearly and in an + easily readable form what SQLite is trying to do with each statement + it compiles, which is a tremendous help in debugging. + Depending on how it is compiled, SQLite also has the capability of + tracing the execution of the virtual machine - printing each + virtual machine instruction and its result as it executes. +} + +#feature binding {Tight bindings to dynamic languages} { +# Because it is embedded, SQLite can have a much tighter and more natural +# binding to high-level dynamic languages such as Tcl, Perl, Python, +# PHP, and Ruby. +# For example, +#} + +feature license {Public domain} { + The source code for SQLite is in the public domain. No claim of copyright + is made on any part of the core source code. (The documentation and test + code is a different matter - some sections of documentation and test logic + are governed by open-source licenses.) All contributors to the + SQLite core software have signed affidavits specifically disavowing any + copyright interest in the code. This means that anybody is able to legally + do anything they want with the SQLite source code. +

      + There are other SQL database engines with liberal licenses that allow + the code to be broadly and freely used. But those other engines are + still governed by copyright law. SQLite is different in that copyright + law simply does not apply. +

      + The source code files for other SQL database engines typically begin + with a comment describing your license rights to view and copy that file. + The SQLite source code contains no license since it is not governed by + copyright. Instead of a license, the SQLite source code offers a blessing: +

      + May you do good and not evil
      + May you find forgiveness for yourself and forgive others
      + May you share freely, never taking more than you give.
      +
      +} + +feature extensions {SQL language extensions} { + SQLite provides a number of enhancements to the SQL language + not normally found in other database engines. + The EXPLAIN keyword and manifest typing have already been mentioned + above. SQLite also provides statements such as + REPLACE and the + ON CONFLICT clause that allow for + added control over the resolution of constraint conflicts. + SQLite supports ATTACH and + DETACH commands that allow multiple + independent databases to be used together in the same query. + And SQLite defines APIs that allows the user to add new + SQL functions + and collating sequences. +} + +
      ADDED pages/docs.in Index: pages/docs.in ================================================================== --- /dev/null +++ pages/docs.in @@ -0,0 +1,153 @@ +SQLite Documentation + +

      Available Documentation

      + + + +proc doc {name url desc} { + puts {" + puts {} + puts {} +} + +doc {Appropriate Uses For SQLite} {whentouse.html} { + This document describes situations where SQLite is an approriate + database engine to use versus situations where a client/server + database engine might be a better choice. +} + +doc {Distinctive Features} {different.html} { + This document enumerates and describes some of the features of + SQLite that make it different from other SQL database engines. +} + +doc {SQLite In 5 Minutes Or Less} {quickstart.html} { + A very quick introduction to programming with SQLite. +} + +doc {SQL Syntax} {lang.html} { + This document describes the SQL language that is understood by + SQLite. +} +doc {Version 3 C/C++ API
      Reference} {capi3ref.html} { + This document describes each API function separately. +} +doc {Sharing Cache Mode} {sharedcache.html} { + Version 3.3.0 and later supports the ability for two or more + database connections to share the same page and schema cache. + This feature is useful for certain specialized applications. +} +doc {Tcl API} {tclsqlite.html} { + A description of the TCL interface bindings for SQLite. +} + +doc {How SQLite Implements Atomic Commit} {ac/atomiccommit.html} { + A description of the logic within SQLite that implements + transactions with atomic commit, even in the face of power + failures. +} +doc {Moving From SQLite 3.4 to 3.5} {34to35.html} { + A document describing the differences between SQLite version 3.4.2 + and 3.5.0. +} + +doc {Pragma commands} {pragma.html} { + This document describes SQLite performance tuning options and other + special purpose database commands. +} +doc {SQLite Version 3} {version3.html} { + A summary of of the changes between SQLite version 2.8 and SQLite version 3.0. +} +doc {Version 3 C/C++ API} {capi3.html} { + A description of the C/C++ interface bindings for SQLite version 3.0.0 + and following. +} +doc {Version 3 DataTypes } {datatype3.html} { + SQLite version 3 introduces the concept of manifest typing, where the + type of a value is associated with the value itself, not the column that + it is stored in. + This page describes data typing for SQLite version 3 in further detail. +} + +doc {Locking And Concurrency
      In SQLite Version 3} {lockingv3.html} { + A description of how the new locking code in version 3 increases + concurrancy and decreases the problem of writer starvation. +} + +doc {Overview Of The Optimizer} {optoverview.html} { + A quick overview of the various query optimizations that are + attempted by the SQLite code generator. +} + + +doc {Null Handling} {nulls.html} { + Different SQL database engines handle NULLs in different ways. The + SQL standards are ambiguous. This document describes how SQLite handles + NULLs in comparison with other SQL database engines. +} + +doc {Copyright} {copyright.html} { + SQLite is in the public domain. This document describes what that means + and the implications for contributors. +} + +doc {Unsupported SQL} {omitted.html} { + This page describes features of SQL that SQLite does not support. +} + +doc {Version 2 C/C++ API} {c_interface.html} { + A description of the C/C++ interface bindings for SQLite through version + 2.8 +} + + +doc {Version 2 DataTypes } {datatypes.html} { + A description of how SQLite version 2 handles SQL datatypes. + Short summary: Everything is a string. +} + +doc {Release History} {changes.html} { + A chronology of SQLite releases going back to version 1.0.0 +} + + +doc {Speed Comparison} {speed.html} { + The speed of version 2.7.6 of SQLite is compared against PostgreSQL and + MySQL. +} + +doc {Architecture} {arch.html} { + An architectural overview of the SQLite library, useful for those who want + to hack the code. +} + +doc {VDBE Tutorial} {vdbe.html} { + The VDBE is the subsystem within SQLite that does the actual work of + executing SQL statements. This page describes the principles of operation + for the VDBE in SQLite version 2.7. This is essential reading for anyone + who want to modify the SQLite sources. +} + +doc {VDBE Opcodes} {opcode.html} { + This document is an automatically generated description of the various + opcodes that the VDBE understands. 
Programmers can use this document as + a reference to better understand the output of EXPLAIN listings from + SQLite. +} + +doc {Compilation Options} {compile.html} { + This document describes the compile time options that may be set to + modify the default behaviour of the library or omit optional features + in order to reduce binary size. +} + +doc {Backwards Compatibility} {formatchng.html} { + This document details all of the incompatible changes to the SQLite + file format that have occurred since version 1.0.0. +} + +
      } + regsub -all { +} $name {\ } name + puts "$name} + puts $desc + puts {
      ADDED pages/download.in Index: pages/download.in ================================================================== --- /dev/null +++ pages/download.in @@ -0,0 +1,223 @@ +SQLite Download Page + +

      SQLite Download Page

      + + + +proc Product {pattern desc} { + regsub {V[23]} $pattern {*} p3 + regsub V2 $pattern {(2[0-9a-z._]+)} pattern + regsub V3 $pattern {(3[0-9a-z._]+)} pattern + set p2 [string map {* .*} $pattern] + set flist [glob -nocomplain $p3] + foreach file [lsort -dict $flist] { + if {![regexp ^$p2\$ $file all version]} continue + regsub -all _ $version . version + set size [file size $file] + set units bytes + if {$size>1024*1024} { + set size [format %.2f [expr {$size/(1024.0*1024.0)}]] + set units MiB + } elseif {$size>1024} { + set size [format %.2f [expr {$size/(1024.0)}]] + set units KiB + } + puts "" + puts "" + puts "" + regsub -all VERSION $desc $version d2 + puts "" + } +} +cd $::DEST + +proc Heading {title} { + puts "" +} + +Heading {Precompiled Binaries for Linux} + +Product sqlite3-V3.bin.gz { + A command-line program for accessing and modifying + SQLite version 3.* databases. + See the documentation for additional information. +} + +Product sqlite-V3.bin.gz { + A command-line program for accessing and modifying + SQLite databases. + See the documentation for additional information. +} + +Product tclsqlite-V3.so.gz { + Bindings for Tcl/Tk. + You can import this shared library into either + tclsh or wish to get SQLite database access from Tcl/Tk. + See the documentation for details. +} + +Product sqlite-V3.so.gz { + A precompiled shared-library for Linux without the TCL bindings. +} + +Product fts1-V3.so.gz { + A precompiled + FTS1 Module + for Linux. +} + +Product fts2-V3.so.gz { + A precompiled + FTS2 Module + for Linux. +} + +Product sqlite-devel-V3.i386.rpm { + RPM containing documentation, header files, and static library for + SQLite version VERSION. +} +Product sqlite-V3-1.i386.rpm { + RPM containing shared libraries and the sqlite command-line + program for SQLite version VERSION. +} + +Product sqlite*_analyzer-V3.bin.gz { + An analysis program for database files compatible with SQLite + version VERSION and later. 
+} + +Heading {Precompiled Binaries For Windows} + +Product sqlite-V3.zip { + A command-line program for accessing and modifing SQLite databases. + See the documentation for additional information. +} +Product tclsqlite-V3.zip { + Bindings for Tcl/Tk. + You can import this shared library into either + tclsh or wish to get SQLite database access from Tcl/Tk. + See the documentation for details. +} +Product sqlitedll-V3.zip { + This is a DLL of the SQLite library without the TCL bindings. + The only external dependency is MSVCRT.DLL. +} + +Product fts1dll-V3.zip { + A precompiled + FTS1 Module + for win32. +} + +Product fts2dll-V3.zip { + A precompiled + FTS2 Module + for win32. +} + +Product sqlite*_analyzer-V3.zip { + An analysis program for database files compatible with SQLite version + VERSION and later. +} + + +Heading {Source Code} + +Product {sqlite-V3.tar.gz} { + A tarball of the complete source tree for SQLite version VERSION + including all of the documentation. +} + +Product {sqlite-source-V3.zip} { + This ZIP archive contains preprocessed C code for the SQLite library as + individual source files. + Unlike the tarballs below, all of the preprocessing and automatic + code generation has already been done on these C code files, so they + can be converted to object code directly with any ordinary C compiler. +} + +Product {sqlite-amalgamation-V3.zip} { + This ZIP archive contains all preprocessed C code combined into a + single source file (the + + amalgamation). +} + +Product {sqlite-V3-tea.tar.gz} { + A tarball of proprocessed source code together with a + Tcl Extension Architecture (TEA) + compatible configure script and makefile. +} + +Product {sqlite-V3.src.rpm} { + An RPM containing complete source code for SQLite version VERSION +} + +Heading {Cross-Platform Binaries} + +Product {sqlite-V3.kit} { + A starkit containing + precompiled SQLite binaries and Tcl bindings for Linux-x86, Windows, + and Mac OS-X ppc and x86. 
+} + +Heading {Historical Binaries And Source Code} + +Product sqlite-V2.bin.gz { + A command-line program for accessing and modifying + SQLite version 2.* databases on Linux-x86. +} +Product sqlite-V2.zip { + A command-line program for accessing and modifying + SQLite version 2.* databases on win32. +} + +Product sqlite*_analyzer-V2.bin.gz { + An analysis program for version 2.* database files on Linux-x86 +} +Product sqlite*_analyzer-V2.zip { + An analysis program for version 2.* database files on win32. +} +Product {sqlite-source-V2.zip} { + This ZIP archive contains C source code for the SQLite library + version VERSION. +} + + +
      " + puts "$file
      ($size $units)
      [string trim $d2]
      $title
      + + +

      Direct Access To The Sources Via Anonymous CVS

      + +

      +All SQLite source code is maintained in a +CVS repository that is +available for read-only access by anyone. You can +interactively view the +repository contents and download individual files +by visiting + +http://www.sqlite.org/cvstrac/dir?d=sqlite. +To access the repository directly, use the following +commands: +

      + +
      +cvs -d :pserver:anonymous@www.sqlite.org:/sqlite login
      +cvs -d :pserver:anonymous@www.sqlite.org:/sqlite checkout sqlite
      +
      + +

      +When the first command prompts you for a password, enter "anonymous". +

      + +

      +To access the SQLite version 2.8 sources, begin by getting the 3.0 +tree as described above. Then update to the "version_2" branch +as follows: +

      + +
      +cvs update -r version_2
      +
      ADDED pages/dynload.in Index: pages/dynload.in ================================================================== --- /dev/null +++ pages/dynload.in @@ -0,0 +1,61 @@ +How to build a dynamically loaded Tcl extension for SQLite + +

      +How To Build A Dynamically Loaded Tcl Extension +

      +

      +This note was contributed by +Bill Saunders. Thanks, Bill! +

      + +

      +To compile the SQLite Tcl extension into a dynamically loaded module +I did the following: +

      + +
        +
      1. Do a standard compile +(I had a dir called bld at the same level as sqlite ie + /root/bld + /root/sqlite +I followed the directions and did a standard build in the bld +directory)

      2. + +
      3. +Now do the following in the bld directory +

        +gcc -shared -I. -lgdbm ../sqlite/src/tclsqlite.c libsqlite.a -o sqlite.so
        +

      4. + +
      5. +This should produce the file sqlite.so in the bld directory

      6. + +
      7. +Create a pkgIndex.tcl file that contains this line + +

        +package ifneeded sqlite 1.0 [list load [file join $dir sqlite.so]]
        +

      8. + +
      9. +To use this put sqlite.so and pkgIndex.tcl in the same directory

      10. + +
      11. +From that directory start wish

      12. + +
      13. +Execute the following tcl command (tells tcl where to fine loadable +modules) +

        +lappend auto_path [exec pwd]
        +

      14. + +
      15. +Load the package +

        +package require sqlite
        +

      16. + +
      17. +Have fun....

      18. +
    ADDED pages/faq.in Index: pages/faq.in ================================================================== --- /dev/null +++ pages/faq.in @@ -0,0 +1,458 @@ +SQLite Frequently Asked Questions + + +set cnt 1 +proc faq {question answer} { + set ::faq($::cnt) [list [string trim $question] [string trim $answer]] + incr ::cnt +} + +############# +# Enter questions and answers here. + +faq { + How do I create an AUTOINCREMENT field. +} { +

    Short answer: A column declared INTEGER PRIMARY KEY will + autoincrement.

    + +

    Here is the long answer: + If you declare a column of a table to be INTEGER PRIMARY KEY, then + whenever you insert a NULL + into that column of the table, the NULL is automatically converted + into an integer which is one greater than the largest value of that + column over all other rows in the table, or 1 if the table is empty. + (If the largest possible integer key, 9223372036854775807, is already in use, then an + unused key value is chosen at random.) + For example, suppose you have a table like this: +

    +CREATE TABLE t1(
    +  a INTEGER PRIMARY KEY,
    +  b INTEGER
    +);
    +
    +

    With this table, the statement

    +
    +INSERT INTO t1 VALUES(NULL,123);
    +
    +

    is logically equivalent to saying:

    +
    +INSERT INTO t1 VALUES((SELECT max(a) FROM t1)+1,123);
    +
    + +

    There is a new API function named + + sqlite3_last_insert_rowid() which will return the integer key + for the most recent insert operation.

    + +

    Note that the integer key is one greater than the largest + key that was in the table just prior to the insert. The new key + will be unique over all keys currently in the table, but it might + overlap with keys that have been previously deleted from the + table. To create keys that are unique over the lifetime of the + table, add the AUTOINCREMENT keyword to the INTEGER PRIMARY KEY + declaration. Then the key chosen will be one more than the + largest key that has ever existed in that table. If the largest + possible key has previously existed in that table, then the INSERT + will fail with an SQLITE_FULL error code.

    +} + +faq { + What datatypes does SQLite support? +} { +

    See http://www.sqlite.org/datatype3.html.

    +} + +faq { + SQLite lets me insert a string into a database column of type integer! +} { +

    This is a feature, not a bug. SQLite does not enforce data type + constraints. Any data can be + inserted into any column. You can put arbitrary length strings into + integer columns, floating point numbers in boolean columns, or dates + in character columns. The datatype you assign to a column in the + CREATE TABLE command does not restrict what data can be put into + that column. Every column is able to hold + an arbitrary length string. (There is one exception: Columns of + type INTEGER PRIMARY KEY may only hold a 64-bit signed integer. + An error will result + if you try to put anything other than an integer into an + INTEGER PRIMARY KEY column.)

    + +

    But SQLite does use the declared type of a column as a hint + that you prefer values in that format. So, for example, if a + column is of type INTEGER and you try to insert a string into + that column, SQLite will attempt to convert the string into an + integer. If it can, it inserts the integer instead. If not, + it inserts the string. This feature is sometimes + called type or column affinity. +

    +} + +faq { + Why doesn't SQLite allow me to use '0' and '0.0' as the primary + key on two different rows of the same table? +} { +

    Your primary key must have a numeric type. Change the datatype of + your primary key to TEXT and it should work.

    + +

    Every row must have a unique primary key. For a column with a + numeric type, SQLite thinks that '0' and '0.0' are the + same value because they compare equal to one another numerically. + (See the previous question.) Hence the values are not unique.

    +} + + +faq { + Can multiple applications or multiple instances of the same + application access a single database file at the same time? +} { +

    Multiple processes can have the same database open at the same + time. Multiple processes can be doing a SELECT + at the same time. But only one process can be making changes to + the database at any moment in time, however.

    + +

    SQLite uses reader/writer locks to control access to the database. + (Under Win95/98/ME which lacks support for reader/writer locks, a + probabilistic simulation is used instead.) + But use caution: this locking mechanism might + not work correctly if the database file is kept on an NFS filesystem. + This is because fcntl() file locking is broken on many NFS implementations. + You should avoid putting SQLite database files on NFS if multiple + processes might try to access the file at the same time. On Windows, + Microsoft's documentation says that locking may not work under FAT + filesystems if you are not running the Share.exe daemon. People who + have a lot of experience with Windows tell me that file locking of + network files is very buggy and is not dependable. If what they + say is true, sharing an SQLite database between two or more Windows + machines might cause unexpected problems.

    + +

    We are aware of no other embedded SQL database engine that + supports as much concurrency as SQLite. SQLite allows multiple processes + to have the database file open at once, and for multiple processes to + read the database at once. When any process wants to write, it must + lock the entire database file for the duration of its update. But that + normally only takes a few milliseconds. Other processes just wait on + the writer to finish then continue about their business. Other embedded + SQL database engines typically only allow a single process to connect to + the database at once.

    + +

    However, client/server database engines (such as PostgreSQL, MySQL, + or Oracle) usually support a higher level of concurrency and allow + multiple processes to be writing to the same database at the same time. + This is possible in a client/server database because there is always a + single well-controlled server process available to coordinate access. + If your application has a need for a lot of concurrency, then you should + consider using a client/server database. But experience suggests that + most applications need much less concurrency than their designers imagine. +

    + +

    When SQLite tries to access a file that is locked by another + process, the default behavior is to return SQLITE_BUSY. You can + adjust this behavior from C code using the + sqlite3_busy_handler() or + sqlite3_busy_timeout() + API functions.

    +} + +faq { + Is SQLite threadsafe? +} { +

    Yes. Sometimes. In order to be thread-safe, SQLite must be compiled + with the SQLITE_THREADSAFE preprocessor macro set to 1. Both the windows + and linux precompiled binaries in the distribution are compiled this way. + If you are unsure if the SQLite library you are linking against is compiled + to be threadsafe you can call the + sqlite3_threadsafe() + interface to find out. +

    + +

    Prior to version 3.3.1, + an sqlite3 structure could only be used in the same thread + that called sqlite3_open + to create it. + You could not open a + database in one thread then pass the handle off to another thread for + it to use. This was due to limitations (bugs?) in many common threading + implementations such as on RedHat9. Specifically, an fcntl() lock + created by one thread cannot be removed or modified by a different + thread on the troublesome systems. And since SQLite uses fcntl() + locks heavily for concurrency control, serious problems arose if you + start moving database connections across threads.

    + +

    The restriction on moving database connections across threads + was relaxed somewhat in version 3.3.1. With that and subsequent + versions, it is safe to move a connection handle across threads + as long as the connection is not holding any fcntl() locks. You + can safely assume that no locks are being held if no + transaction is pending and all statements have been finalized.

    + +

    Under UNIX, you should not carry an open SQLite database across + a fork() system call into the child process. Problems will result + if you do.

    +} + +faq { + How do I list all tables/indices contained in an SQLite database +} { +

    If you are running the sqlite3 command-line access program + you can type ".tables" to get a list of all tables. Or you + can type ".schema" to see the complete database schema including + all tables and indices. Either of these commands can be followed by + a LIKE pattern that will restrict the tables that are displayed.

    + +

    From within a C/C++ program (or a script using Tcl/Ruby/Perl/Python + bindings) you can get access to table and index names by doing a SELECT + on a special table named "SQLITE_MASTER". Every SQLite database + has an SQLITE_MASTER table that defines the schema for the database. + The SQLITE_MASTER table looks like this:

    +
    +CREATE TABLE sqlite_master (
    +  type TEXT,
    +  name TEXT,
    +  tbl_name TEXT,
    +  rootpage INTEGER,
    +  sql TEXT
    +);
    +
    +

    For tables, the type field will always be 'table' and the + name field will be the name of the table. So to get a list of + all tables in the database, use the following SELECT command:

    +
    +SELECT name FROM sqlite_master
    +WHERE type='table'
    +ORDER BY name;
    +
    +

    For indices, type is equal to 'index', name is the + name of the index and tbl_name is the name of the table to which + the index belongs. For both tables and indices, the sql field is + the text of the original CREATE TABLE or CREATE INDEX statement that + created the table or index. For automatically created indices (used + to implement the PRIMARY KEY or UNIQUE constraints) the sql field + is NULL.

    + +

    The SQLITE_MASTER table is read-only. You cannot change this table + using UPDATE, INSERT, or DELETE. The table is automatically updated by + CREATE TABLE, CREATE INDEX, DROP TABLE, and DROP INDEX commands.

    + +

    Temporary tables do not appear in the SQLITE_MASTER table. Temporary + tables and their indices and triggers occur in another special table + named SQLITE_TEMP_MASTER. SQLITE_TEMP_MASTER works just like SQLITE_MASTER + except that it is only visible to the application that created the + temporary tables. To get a list of all tables, both permanent and + temporary, one can use a command similar to the following: +

    +SELECT name FROM 
    +   (SELECT * FROM sqlite_master UNION ALL
    +    SELECT * FROM sqlite_temp_master)
    +WHERE type='table'
    +ORDER BY name
    +
    +} + +faq { + Are there any known size limits to SQLite databases? +} { +

    See limits.html for a full discussion of + the limits of SQLite.

    +} + +faq { + What is the maximum size of a VARCHAR in SQLite? +} { +

    SQLite does not enforce the length of a VARCHAR. You can declare + a VARCHAR(10) and SQLite will be happy to let you put 500 characters + in it. And it will keep all 500 characters intact - it never truncates. +

    +} + +faq { + Does SQLite support a BLOB type? +} { +

    SQLite versions 3.0 and later allow you to store BLOB data in any + column, even columns that are declared to hold some other type.

    +} + +faq { + How do I add or delete columns from an existing table in SQLite. +} { +

    SQLite has limited + ALTER TABLE support that you can + use to add a column to the end of a table or to change the name of + a table. + If you what make more complex changes the structure of a table, + you will have to recreate the + table. You can save existing data to a temporary table, drop the + old table, create the new table, then copy the data back in from + the temporary table.

    + +

    For example, suppose you have a table named "t1" with columns + names "a", "b", and "c" and that you want to delete column "c" from + this table. The following steps illustrate how this could be done: +

    + +
    +BEGIN TRANSACTION;
    +CREATE TEMPORARY TABLE t1_backup(a,b);
    +INSERT INTO t1_backup SELECT a,b FROM t1;
    +DROP TABLE t1;
    +CREATE TABLE t1(a,b);
    +INSERT INTO t1 SELECT a,b FROM t1_backup;
    +DROP TABLE t1_backup;
    +COMMIT;
    +
    +} + +faq { + I deleted a lot of data but the database file did not get any + smaller. Is this a bug? +} { +

    No. When you delete information from an SQLite database, the + unused disk space is added to an internal "free-list" and is reused + the next time you insert data. The disk space is not lost. But + neither is it returned to the operating system.

    + +

    If you delete a lot of data and want to shrink the database file, + run the VACUUM command. + VACUUM will reconstruct + the database from scratch. This will leave the database with an empty + free-list and a file that is minimal in size. Note, however, that the + VACUUM can take some time to run (around a half second per megabyte + on the Linux box where SQLite is developed) and it can use up to twice + as much temporary disk space as the original file while it is running. +

    + +

    As of SQLite version 3.1, an alternative to using the VACUUM command + is auto-vacuum mode, enabled using the + auto_vacuum pragma.

    +} + +faq { + Can I use SQLite in my commercial product without paying royalties? +} { +

    Yes. SQLite is in the + public domain. No claim of ownership is made + to any part of the code. You can do anything you want with it.

    +} + +faq { + How do I use a string literal that contains an embedded single-quote (') + character? +} { +

    The SQL standard specifies that single-quotes in strings are escaped + by putting two single quotes in a row. SQL works like the Pascal programming + language in the regard. SQLite follows this standard. Example: +

    + +
    +    INSERT INTO xyz VALUES('5 O''clock');
    +  
    +} + +faq {What is an SQLITE_SCHEMA error, and why am I getting one?} { +

    An SQLITE_SCHEMA error is returned when a + prepared SQL statement is no longer valid and cannot be executed. + When this occurs, the statement must be recompiled from SQL using + the + sqlite3_prepare() API. + In SQLite version 3, an SQLITE_SCHEMA error can + only occur when using the + sqlite3_prepare()/sqlite3_step()/sqlite3_finalize() + API to execute SQL, not when using the + sqlite3_exec(). This was not + the case in version 2.

    + +

    The most common reason for a prepared statement to become invalid + is that the schema of the database was modified after the SQL was + prepared (possibly by another process). The other reasons this can + happen are:

    +
      +
    • A database was DETACHed. +
    • The database was VACUUMed +
    • A user-function definition was deleted or changed. +
    • A collation sequence definition was deleted or changed. +
    • The authorization function was changed. +
    + +

    In all cases, the solution is to recompile the statement from SQL + and attempt to execute it again. Because a prepared statement can be + invalidated by another process changing the database schema, all code + that uses the + sqlite3_prepare()/sqlite3_step()/sqlite3_finalize() + API should be prepared to handle SQLITE_SCHEMA errors. An example + of one approach to this follows:

    + +
    +
    +    int rc;
    +    sqlite3_stmt *pStmt;
    +    char zSql[] = "SELECT .....";
    +
    +    do {
    +      /* Compile the statement from SQL. Assume success. */
    +      sqlite3_prepare(pDb, zSql, -1, &pStmt, 0);
    +
    +      while( SQLITE_ROW==sqlite3_step(pStmt) ){
    +        /* Do something with the row of available data */
    +      }
    +
    +      /* Finalize the statement. If an SQLITE_SCHEMA error has
    +      ** occured, then the above call to sqlite3_step() will have
    +      ** returned SQLITE_ERROR. sqlite3_finalize() will return
    +      ** SQLITE_SCHEMA. In this case the loop will execute again.
    +      */
    +      rc = sqlite3_finalize(pStmt);
    +    } while( rc==SQLITE_SCHEMA );
    +    
    +  
    +} + +faq {Why does ROUND(9.95,1) return 9.9 instead of 10.0? + Shouldn't 9.95 round up?} { +

    SQLite uses binary arithmetic and in binary, there is no + way to write 9.95 in a finite number of bits. The closest to + you can get to 9.95 in a 64-bit IEEE float (which is what + SQLite uses) is 9.949999999999999289457264239899814128875732421875. + So when you type "9.95", SQLite really understands the number to be + the much longer value shown above. And that value rounds down.

    + +

    This kind of problem comes up all the time when dealing with + floating point binary numbers. The general rule to remember is + that most fractional numbers that have a finite representation in decimal + (a.k.a "base-10") + do not have a finite representation in binary (a.k.a "base-2"). + And so they are + approximated using the closest binary number available. That + approximation is usually very close, but it will be slightly off + and in some cases can cause your results to be a little different + from what you might expect.

    +} + +# End of questions and answers. +############# + +puts {

    Frequently Asked Questions

    } + +# puts {
    } +# for {set i 1} {$i<$cnt} {incr i} { +# puts "
    ($i)
    " +# puts "
    [lindex $faq($i) 0]
    " +# } +# puts {
    } +puts {
      } +for {set i 1} {$i<$cnt} {incr i} { + puts "
    1. [lindex $faq($i) 0]
    2. " +} +puts {
    } + +for {set i 1} {$i<$cnt} {incr i} { + puts "
    " + puts "

    ($i) [lindex $faq($i) 0]

    \n" + puts "
    [lindex $faq($i) 1]
  • \n" +} +puts {} +
    ADDED pages/fileformat.in Index: pages/fileformat.in ================================================================== --- /dev/null +++ pages/fileformat.in @@ -0,0 +1,778 @@ +SQLite Database File Format (Version 2) + +

    SQLite 2.X Database File Format

    + +

    +This document describes the disk file format for SQLite versions 2.1 +through 2.8. SQLite version 3.0 and following uses a very different +format which is described separately. +

    + +

    1.0   Layers

    + +

    +SQLite is implemented in layers. +(See the architecture description.) +The format of database files is determined by three different +layers in the architecture. +

    + +
      +
    • The schema layer implemented by the VDBE.
    • +
    • The b-tree layer implemented by btree.c
    • +
    • The pager layer implemented by pager.c
    • +
    + +

    +We will describe each layer beginning with the bottom (pager) +layer and working upwards. +

    + +

    2.0   The Pager Layer

    + +

    +An SQLite database consists of +"pages" of data. Each page is 1024 bytes in size. +Pages are numbered beginning with 1. +A page number of 0 is used to indicate "no such page" in the +B-Tree and Schema layers. +

    + +

    +The pager layer is responsible for implementing transactions +with atomic commit and rollback. It does this using a separate +journal file. Whenever a new transaction is started, a journal +file is created that records the original state of the database. +If the program terminates before completing the transaction, the next +process to open the database can use the journal file to restore +the database to its original state. +

    + +

    +The journal file is located in the same directory as the database +file and has the same name as the database file but with the +characters "-journal" appended. +

    + +

    +The pager layer does not impose any content restrictions on the +main database file. As far as the pager is concerned, each page +contains 1024 bytes of arbitrary data. But there is structure to +the journal file. +

    + +

    +A journal file begins with 8 bytes as follows: +0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd6. +Processes that are attempting to rollback a journal use these 8 bytes +as a sanity check to make sure the file they think is a journal really +is a valid journal. Prior version of SQLite used different journal +file formats. The magic numbers for these prior formats are different +so that if a new version of the library attempts to rollback a journal +created by an earlier version, it can detect that the journal uses +an obsolete format and make the necessary adjustments. This article +describes only the newest journal format - supported as of version +2.8.0. +

    + +

    +Following the 8 byte prefix is a three 4-byte integers that tell us +the number of pages that have been committed to the journal, +a magic number used for +sanity checking each page, and the +original size of the main database file before the transaction was +started. The number of committed pages is used to limit how far +into the journal to read. The use of the checksum magic number is +described below. +The original size of the database is used to restore the database +file back to its original size. +The size is expressed in pages (1024 bytes per page). +

    + +

    +All three integers in the journal header and all other multi-byte +numbers used in the journal file are big-endian. +That means that the most significant byte +occurs first. That way, a journal file that is +originally created on one machine can be rolled back by another +machine that uses a different byte order. So, for example, a +transaction that failed to complete on your big-endian SparcStation +can still be rolled back on your little-endian Linux box. +

    + +

    +After the 8-byte prefix and the three 4-byte integers, the +journal file consists of zero or more page records. Each page +record is a 4-byte (big-endian) page number followed by 1024 bytes +of data and a 4-byte checksum. +The data is the original content of the database page +before the transaction was started. So to roll back the transaction, +the data is simply written into the corresponding page of the +main database file. Pages can appear in the journal in any order, +but they are guaranteed to appear only once. All page numbers will be +between 1 and the maximum specified by the page size integer that +appeared at the beginning of the journal. +

    + +

    +The so-called checksum at the end of each record is not really a +checksum - it is the sum of the page number and the magic number which +was the second integer in the journal header. The purpose of this +value is to try to detect journal corruption that might have occurred +because of a power loss or OS crash that occurred which the journal +file was being written to disk. It could have been the case that the +meta-data for the journal file, specifically the size of the file, had +been written to the disk so that when the machine reboots it appears that +file is large enough to hold the current record. But even though the +file size has changed, the data for the file might not have made it to +the disk surface at the time of the OS crash or power loss. This means +that after reboot, the end of the journal file will contain quasi-random +garbage data. The checksum is an attempt to detect such corruption. If +the checksum does not match, that page of the journal is not rolled back. +

    + +

    +Here is a summary of the journal file format: +

    + +
      +
    • 8 byte prefix: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd6
    • +
    • 4 byte number of records in journal
    • +
    • 4 byte magic number used for page checksums
    • +
    • 4 byte initial database page count
    • +
    • Zero or more instances of the following: +
        +
      • 4 byte page number
      • +
      • 1024 bytes of original data for the page
      • +
      • 4 byte checksum
      • +
      +
    • +
    + +

    3.0   The B-Tree Layer

    + +

    +The B-Tree layer builds on top of the pager layer to implement +one or more separate b-trees all in the same disk file. The +algorithms used are taken from Knuth's The Art Of Computer +Programming.

    + +

    +Page 1 of a database contains a header string used for sanity +checking, a few 32-bit words of configuration data, and a pointer +to the beginning of a list of unused pages in the database. +All other pages in the +database are either pages of a b-tree, overflow pages, or unused +pages on the freelist. +

    + +

    +Each b-tree page contains zero or more database entries. +Each entry has an unique key of one or more bytes and data of +zero or more bytes. +Both the key and data are arbitrary byte sequences. The combination +of key and data are collectively known as "payload". The current +implementation limits the amount of payload in a single entry to +1048576 bytes. This limit can be raised to 16777216 by adjusting +a single #define in the source code and recompiling. But most entries +contain less than a hundred bytes of payload so a megabyte limit seems +more than enough. +

    + +

    +Up to 238 bytes of payload for an entry can be held directly on +a b-tree page. Any additional payload is contained on a linked list +of overflow pages. This limit on the amount of payload held directly +on b-tree pages guarantees that each b-tree page can hold at least +4 entries. In practice, most entries are smaller than 238 bytes and +thus most pages can hold more than 4 entries. +

    + +

    +A single database file can hold any number of separate, independent b-trees. +Each b-tree is identified by its root page, which never changes. +Child pages of the b-tree may change as entries are added and removed +and pages split and combine. But the root page always stays the same. +The b-tree itself does not record which pages are root pages and which +are not. That information is handled entirely at the schema layer. +

    + +

    3.1   B-Tree Page 1 Details

    + +

    +Page 1 begins with the following 48-byte string: +

    + +
    +** This file contains an SQLite 2.1 database **
    +
    + +

    +If you count the number of characters in the string above, you will +see that there are only 47. A '\000' terminator byte is added to +bring the total to 48. +

    + +

    +A frequent question is why the string says version 2.1 when (as +of this writing) we are up to version 2.7.0 of SQLite and any +change to the second digit of the version is suppose to represent +a database format change. The answer to this is that the B-tree +layer has not changed any since version 2.1. There have been +database format changes since version 2.1 but those changes have +all been in the schema layer. Because the format of the b-tree +layer is unchanged since version 2.1.0, the header string still +says version 2.1. +

    + +

    +After the format string is a 4-byte integer used to determine the +byte-order of the database. The integer has a value of +0xdae37528. If this number is expressed as 0xda, 0xe3, 0x75, 0x28, then +the database is in a big-endian format and all 16 and 32-bit integers +elsewhere in the b-tree layer are also big-endian. If the number is +expressed as 0x28, 0x75, 0xe3, and 0xda, then the database is in a +little-endian format and all other multi-byte numbers in the b-tree +layer are also little-endian. +Prior to version 2.6.3, the SQLite engine was only able to read databases +that used the same byte order as the processor they were running on. +But beginning with 2.6.3, SQLite can read or write databases in any +byte order. +

    + +

    +After the byte-order code are six 4-byte integers. Each integer is in the +byte order determined by the byte-order code. The first integer is the +page number for the first page of the freelist. If there are no unused +pages in the database, then this integer is 0. The second integer is +the number of unused pages in the database. The last 4 integers are +not used by the b-tree layer. These are the so-called "meta" values that +are passed up to the schema layer +and used there for configuration and format version information. +All bytes of page 1 past beyond the meta-value integers are unused +and are initialized to zero. +

    + +

    +Here is a summary of the information contained on page 1 in the b-tree layer: +

    + +
      +
    • 48 byte header string
    • +
    • 4 byte integer used to determine the byte-order
    • +
    • 4 byte integer which is the first page of the freelist
    • +
    • 4 byte integer which is the number of pages on the freelist
    • +
    • 36 bytes of meta-data arranged as nine 4-byte integers
    • +
    • 928 bytes of unused space
    • +
    + +

    3.2   Structure Of A Single B-Tree Page

    + +

    +Conceptually, a b-tree page contains N database entries and N+1 pointers +to other b-tree pages. +

    + +
    + + + + + + + + + + + +
    Ptr
    0
    Entry
    0
    Ptr
    1
    Entry
    1
    ...Ptr
    N-1
    Entry
    N-1
    Ptr
    N
    +
    + +

    +The entries are arranged in increasing order. That is, the key to +Entry 0 is less than the key to Entry 1, and the key to Entry 1 is +less than the key of Entry 2, and so forth. The pointers point to +pages containing additional entries that have keys in between the +entries on either side. So Ptr 0 points to another b-tree page that +contains entries that all have keys less than Key 0, and Ptr 1 +points to a b-tree pages where all entries have keys greater than Key 0 +but less than Key 1, and so forth. +

    + +

    +Each b-tree page in SQLite consists of a header, zero or more "cells" +each holding a single entry and pointer, and zero or more "free blocks" +that represent unused space on the page. +

    + +

    +The header on a b-tree page is the first 8 bytes of the page. +The header contains the value +of the right-most pointer (Ptr N) and the byte offset into the page +of the first cell and the first free block. The pointer is a 32-bit +value and the offsets are each 16-bit values. We have: +

    + +
    + + + + + + + + + + + + + + + + +
    01234567
    Ptr NCell 0Freeblock 0
    +
    + +

    +The 1016 bytes of a b-tree page that come after the header contain +cells and freeblocks. All 1016 bytes are covered by either a cell +or a freeblock. +

    + +

    +The cells are connected in a linked list. Cell 0 contains Ptr 0 and +Entry 0. Bytes 4 and 5 of the header point to Cell 0. Cell 0 then +points to Cell 1 which contains Ptr 1 and Entry 1. And so forth. +Cells vary in size. Every cell has a 12-byte header and at least 4 +bytes of payload space. Space is allocated to payload in increments +of 4 bytes. Thus the minimum size of a cell is 16 bytes and up to +63 cells can fit on a single page. The size of a cell is always a multiple +of 4 bytes. +A cell can have up to 238 bytes of payload space. If +the payload is more than 238 bytes, then an additional 4 byte page +number is appended to the cell which is the page number of the first +overflow page containing the additional payload. The maximum size +of a cell is thus 254 bytes, meaning that a least 4 cells can fit into +the 1016 bytes of space available on a b-tree page. +An average cell is usually around 52 to 100 bytes in size with about +10 or 20 cells to a page. +

    + +

    +The data layout of a cell looks like this: +

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    0123456789101112 ... 249250251252253
    PtrKeysize
    (low)
    NextKsz
    (hi)
    Dsz
    (hi)
    Datasize
    (low)
    PayloadOverflow
    Pointer
    +
    + +

    +The first four bytes are the pointer. The size of the key is a 24-bit +where the upper 8 bits are taken from byte 8 and the lower 16 bits are +taken from bytes 4 and 5 (or bytes 5 and 4 on little-endian machines.) +The size of the data is another 24-bit value where the upper 8 bits +are taken from byte 9 and the lower 16 bits are taken from bytes 10 and +11 or 11 and 10, depending on the byte order. Bytes 6 and 7 are the +offset to the next cell in the linked list of all cells on the current +page. This offset is 0 for the last cell on the page. +

    + +

    +The payload itself can be any number of bytes between 1 and 1048576. +But space to hold the payload is allocated in 4-byte chunks up to +238 bytes. If the entry contains more than 238 bytes of payload, then +additional payload data is stored on a linked list of overflow pages. +A 4 byte page number is appended to the cell that contains the first +page of this linked list. +

    + +

    +Each overflow page begins with a 4-byte value which is the +page number of the next overflow page in the list. This value is +0 for the last page in the list. The remaining +1020 bytes of the overflow page are available for storing payload. +Note that a full page is allocated regardless of the number of overflow +bytes stored. Thus, if the total payload for an entry is 239 bytes, +the first 238 are stored in the cell and the overflow page stores just +one byte. +

    + +

    +The structure of an overflow page looks like this: +

    + +
    + + + + + + + + + + + + +
    01234 ... 1023
    Next PageOverflow Data
    +
    + +

    +All space on a b-tree page which is not used by the header or by cells +is filled by freeblocks. Freeblocks, like cells, are variable in size. +The size of a freeblock is at least 4 bytes and is always a multiple of +4 bytes. +The first 4 bytes contain a header and the remaining bytes +are unused. The structure of the freeblock is as follows: +

    + +
    + + + + + + + + + + + + + +
    01234 ... 1015
    SizeNextUnused
    +
    + +

    +Freeblocks are stored in a linked list in increasing order. That is +to say, the first freeblock occurs at a lower index into the page than +the second free block, and so forth. The first 2 bytes of the header +are an integer which is the total number of bytes in the freeblock. +The second 2 bytes are the index into the page of the next freeblock +in the list. The last freeblock has a Next value of 0. +

    + +

    +When a new b-tree is created in a database, the root page of the b-tree +consist of a header and a single 1016 byte freeblock. As entries are +added, space is carved off of that freeblock and used to make cells. +When b-tree entries are deleted, the space used by their cells is converted +into freeblocks. Adjacent freeblocks are merged, but the page can still +become fragmented. The b-tree code will occasionally try to defragment +the page by moving all cells to the beginning and constructing a single +freeblock at the end to take up all remaining space. +

    + +

    3.3   The B-Tree Free Page List

    + +

    +When information is removed from an SQLite database such that one or +more pages are no longer needed, those pages are added to a list of +free pages so that they can be reused later when new information is +added. This subsection describes the structure of this freelist. +

    + +

    +The 32-bit integer beginning at byte-offset 52 in page 1 of the database +contains the address of the first page in a linked list of free pages. +If there are no free pages available, this integer has a value of 0. +The 32-bit integer at byte-offset 56 in page 1 contains the number of +free pages on the freelist. +

    + +

    +The freelist contains a trunk and many branches. The trunk of +the freelist is composed of overflow pages. That is to say, each page +contains a single 32-bit integer at byte offset 0 which +is the page number of the next page on the freelist trunk. +The payload area +of each trunk page is used to record pointers to branch pages. +The first 32-bit integer in the payload area of a trunk page +is the number of branch pages to follow (between 0 and 254) +and each subsequent 32-bit integer is a page number for a branch page. +The following diagram shows the structure of a trunk freelist page: +

    + +
    + + + + + + + + + + + + + + + + + +
    012345678 ... 1023
    Next trunk page# of branch pagesPage numbers for branch pages
    +
    + +

    +It is important to note that only the pages on the trunk of the freelist +contain pointers to other pages. The branch pages contain no +data whatsoever. The fact that the branch pages are completely +blank allows for an important optimization in the paging layer. When +a branch page is removed from the freelist to be reused, it is not +necessary to write the original content of that page into the rollback +journal. The branch page contained no data to begin with, so there is +no need to restore the page in the event of a rollback. Similarly, +when a page is not longer needed and is added to the freelist as a branch +page, it is not necessary to write the content of that page +into the database file. +Again, the page contains no real data so it is not necessary to record the +content of that page. By reducing the amount of disk I/O required, +these two optimizations allow some database operations +to go four to six times faster than they would otherwise. +

    + +

    4.0   The Schema Layer

    + +

    +The schema layer implements an SQL database on top of one or more +b-trees and keeps track of the root page numbers for all b-trees. +Where the b-tree layer provides only unformatted data storage with +a unique key, the schema layer allows each entry to contain multiple +columns. The schema layer also allows indices and non-unique key values. +

    + +

    +The schema layer implements two separate data storage abstractions: +tables and indices. Each table and each index uses its own b-tree +but they use the b-tree capabilities in different ways. For a table, +the b-tree key is a unique 4-byte integer and the b-tree data is the +content of the table row, encoded so that columns can be separately +extracted. For indices, the b-tree key varies in size depending on the +size of the fields being indexed and the b-tree data is empty. +

    + +

    4.1   SQL Table Implementation Details

    + +

    Each row of an SQL table is stored in a single b-tree entry. +The b-tree key is a 4-byte big-endian integer that is the ROWID +or INTEGER PRIMARY KEY for that table row. +The key is stored in a big-endian format so +that keys will sort in numerical order using memcmp() function.

    + +

    The content of a table row is stored in the data portion of +the corresponding b-tree table. The content is encoded to allow +individual columns of the row to be extracted as necessary. Assuming +that the table has N columns, the content is encoded as N+1 offsets +followed by N column values, as follows: +

    + +
    + + + + + + + + + + + + +
    offset 0offset 1...offset N-1offset Nvalue 0value 1...value N-1
    +
    + +

    +The offsets can be either 8-bit, 16-bit, or 24-bit integers depending +on how much data is to be stored. If the total size of the content +is less than 256 bytes then 8-bit offsets are used. If the total size +of the b-tree data is less than 65536 then 16-bit offsets are used. +24-bit offsets are used otherwise. Offsets are always little-endian, +which means that the least significant byte occurs first. +

    + +

+Data is stored as a nul-terminated string. An empty string consists +of just the nul terminator. A NULL value is an empty string with no +nul-terminator. Thus a NULL value occupies zero bytes and an empty string +occupies 1 byte. +

    + +

    +Column values are stored in the order that they appear in the CREATE TABLE +statement. The offsets at the beginning of the record contain the +byte index of the corresponding column value. Thus, Offset 0 contains +the byte index for Value 0, Offset 1 contains the byte offset +of Value 1, and so forth. The number of bytes in a column value can +always be found by subtracting offsets. This allows NULLs to be +recovered from the record unambiguously. +

    + +

    +Most columns are stored in the b-tree data as described above. +The one exception is column that has type INTEGER PRIMARY KEY. +INTEGER PRIMARY KEY columns correspond to the 4-byte b-tree key. +When an SQL statement attempts to read the INTEGER PRIMARY KEY, +the 4-byte b-tree key is read rather than information out of the +b-tree data. But there is still an Offset associated with the +INTEGER PRIMARY KEY, just like any other column. But the Value +associated with that offset is always NULL. +

    + +

    4.2   SQL Index Implementation Details

    + +

+SQL indices are implemented using a b-tree in which the key is used +but the data is always empty. The purpose of an index is to map +one or more column values into the ROWID for the table entry that +contains those column values. +

    + +

+Each b-tree key in an index consists of one or more column values followed +by a 4-byte ROWID. Each column value is nul-terminated (even NULL values) +and begins with a single character that indicates the datatype for that +column value. Only three datatypes are supported: NULL, Number, and +Text. NULL values are encoded as the character 'a' followed by the +nul terminator. Numbers are encoded as the character 'b' followed by +a string that has been crafted so that sorting the string using memcmp() +will sort the corresponding numbers in numerical order. (See the +sqliteRealToSortable() function in util.c of the SQLite sources for +additional information on this encoding.) Numbers are also nul-terminated. +Text values consist of the character 'c' followed by a copy of the +text string and a nul-terminator. These encoding rules result in +NULLs being sorted first, followed by numerical values in numerical +order, followed by text values in lexicographical order. +

    + +

4.3   SQL Schema Storage And Root B-Tree Page Numbers

    + +

+The database schema is stored in the database in a special table named +"sqlite_master" which always has a root b-tree page number of 2. +This table contains the original CREATE TABLE, +CREATE INDEX, CREATE VIEW, and CREATE TRIGGER statements used to define +the database to begin with. Whenever an SQLite database is opened, +the sqlite_master table is scanned from beginning to end and +all the original CREATE statements are played back through the parser +in order to reconstruct an in-memory representation of the database +schema for use in subsequent command parsing. For each CREATE TABLE +and CREATE INDEX statement, the root page number for the corresponding +b-tree is also recorded in the sqlite_master table so that SQLite will +know where to look for the appropriate b-tree. +

    + +

    +SQLite users can query the sqlite_master table just like any other table +in the database. But the sqlite_master table cannot be directly written. +The sqlite_master table is automatically updated in response to CREATE +and DROP statements but it cannot be changed using INSERT, UPDATE, or +DELETE statements as that would risk corrupting the database. +

    + +

+SQLite stores temporary tables and indices in a separate +file from the main database file. The temporary table database file +has the same structure as the main database file. The schema table +for the temporary tables is stored on page 2 just as in the main +database. But the schema table for the temporary database is named +"sqlite_temp_master" instead of "sqlite_master". Other than the +name change, it works exactly the same. +

    + +

    4.4   Schema Version Numbering And Other Meta-Information

    + +

    +The nine 32-bit integers that are stored beginning at byte offset +60 of Page 1 in the b-tree layer are passed up into the schema layer +and used for versioning and configuration information. The meaning +of the first four integers is shown below. The other five are currently +unused. +

    + +
      +
1. The schema version number
2. The format version number
3. The recommended pager cache size
4. The safety level
    + +

    +The first meta-value, the schema version number, is used to detect when +the schema of the database is changed by a CREATE or DROP statement. +Recall that when a database is first opened the sqlite_master table is +scanned and an internal representation of the tables, indices, views, +and triggers for the database is built in memory. This internal +representation is used for all subsequent SQL command parsing and +execution. But what if another process were to change the schema +by adding or removing a table, index, view, or trigger? If the original +process were to continue using the old schema, it could potentially +corrupt the database by writing to a table that no longer exists. +To avoid this problem, the schema version number is changed whenever +a CREATE or DROP statement is executed. Before each command is +executed, the current schema version number for the database file +is compared against the schema version number from when the sqlite_master +table was last read. If those numbers are different, the internal +schema representation is erased and the sqlite_master table is reread +to reconstruct the internal schema representation. +(Calls to sqlite_exec() generally return SQLITE_SCHEMA when this happens.) +

    + +

    +The second meta-value is the schema format version number. This +number tells what version of the schema layer should be used to +interpret the file. There have been changes to the schema layer +over time and this number is used to detect when an older database +file is being processed by a newer version of the library. +As of this writing (SQLite version 2.7.0) the current format version +is "4". +

    + +

+The third meta-value is the recommended pager cache size as set +by the DEFAULT_CACHE_SIZE pragma. If the value is positive it +means that synchronous behavior is enabled (via the DEFAULT_SYNCHRONOUS +pragma) and if negative it means that synchronous behavior is +disabled. +

    + +

+The fourth meta-value is the safety level, added in version 2.8.0. +A value of 1 corresponds to a SYNCHRONOUS setting of OFF. In other +words, SQLite does not pause to wait for journal data to reach the disk +surface before overwriting pages of the database. A value of 2 corresponds +to a SYNCHRONOUS setting of NORMAL. A value of 3 corresponds to a +SYNCHRONOUS setting of FULL. If the value is 0, that means it has not +been initialized so the default synchronous setting of NORMAL is used. +

    ADDED pages/formatchng.in Index: pages/formatchng.in ================================================================== --- /dev/null +++ pages/formatchng.in @@ -0,0 +1,278 @@ +File Format Changes in SQLite + +

    File Format Changes in SQLite

    + +

+Every effort is made to keep SQLite fully backwards compatible from +one release to the next. Rarely, however, some +enhancements or bug fixes may require a change to +the underlying file format. When this happens, you +must convert the contents of your +databases into a portable ASCII representation using the old version +of the library and then reload the data using the new version of the +library. +

    + +

    +You can tell if you should reload your databases by comparing the +version numbers of the old and new libraries. If the first digit +of the version number is different, then a reload of the database will +be required. If the second digit changes, newer versions of SQLite +will be able to read and write older database files, but older versions +of the library may have difficulty reading or writing newer database +files. +For example, upgrading from +version 2.8.14 to 3.0.0 requires a reload. Going from +version 3.0.8 to 3.1.0 is backwards compatible but not necessarily +forwards compatible. +

    + +

    +The following table summarizes the SQLite file format changes that have +occurred since version 1.0.0: +

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Version ChangeApprox. DateDescription Of File Format Change
1.0.32 to 2.0.02001-Sep-20Version 1.0.X of SQLite used the GDBM library as its backend + interface to the disk. Beginning in version 2.0.0, GDBM was replaced + by a custom B-Tree library written especially for SQLite. The new + B-Tree backend is twice as fast as GDBM, supports atomic commits and + rollback, and stores an entire database in a single disk file instead + of using a separate file for each table as GDBM does. The two + file formats are not even remotely similar.
    2.0.8 to 2.1.02001-Nov-12The same basic B-Tree format is used but the details of the + index keys were changed in order to provide better query + optimization opportunities. Some of the headers were also changed in order + to increase the maximum size of a row from 64KB to 24MB.

+ + This change is an exception to the version number rule described above + in that it is neither forwards nor backwards compatible. A complete + reload of the database is required. This is the only exception.

    2.1.7 to 2.2.02001-Dec-21Beginning with version 2.2.0, SQLite no longer builds an index for + an INTEGER PRIMARY KEY column. Instead, it uses that column as the actual + B-Tree key for the main table.

    Version 2.2.0 and later of the library + will automatically detect when it is reading a 2.1.x database and will + disable the new INTEGER PRIMARY KEY feature. In other words, version + 2.2.x is backwards compatible to version 2.1.x. But version 2.1.x is not + forward compatible with version 2.2.x. If you try to open + a 2.2.x database with an older 2.1.x library and that database contains + an INTEGER PRIMARY KEY, you will likely get a coredump. If the database + schema does not contain any INTEGER PRIMARY KEYs, then the version 2.1.x + and version 2.2.x database files will be identical and completely + interchangeable.

    +
    2.2.5 to 2.3.02002-Jan-30Beginning with version 2.3.0, SQLite supports some additional syntax + (the "ON CONFLICT" clause) in the CREATE TABLE and CREATE INDEX statements + that are stored in the SQLITE_MASTER table. If you create a database that + contains this new syntax, then try to read that database using version 2.2.5 + or earlier, the parser will not understand the new syntax and you will get + an error. Otherwise, databases for 2.2.x and 2.3.x are interchangeable.
    2.3.3 to 2.4.02002-Mar-10Beginning with version 2.4.0, SQLite added support for views. + Information about views is stored in the SQLITE_MASTER table. If an older + version of SQLite attempts to read a database that contains VIEW information + in the SQLITE_MASTER table, the parser will not understand the new syntax + and initialization will fail. Also, the + way SQLite keeps track of unused disk blocks in the database file + changed slightly. + If an older version of SQLite attempts to write a database that + was previously written by version 2.4.0 or later, then it may leak disk + blocks.
    2.4.12 to 2.5.02002-Jun-17Beginning with version 2.5.0, SQLite added support for triggers. + Information about triggers is stored in the SQLITE_MASTER table. If an older + version of SQLite attempts to read a database that contains a CREATE TRIGGER + in the SQLITE_MASTER table, the parser will not understand the new syntax + and initialization will fail. +
    2.5.6 to 2.6.02002-July-17A design flaw in the layout of indices required a file format change + to correct. This change appeared in version 2.6.0.

    + + If you use version 2.6.0 or later of the library to open a database file + that was originally created by version 2.5.6 or earlier, an attempt to + rebuild the database into the new format will occur automatically. + This can take some time for a large database. (Allow 1 or 2 seconds + per megabyte of database under Unix - longer under Windows.) This format + conversion is irreversible. It is strongly suggested + that you make a backup copy of older database files prior to opening them + with version 2.6.0 or later of the library, in case there are errors in + the format conversion logic.

    + + Version 2.6.0 or later of the library cannot open read-only database + files from version 2.5.6 or earlier, since read-only files cannot be + upgraded to the new format.

    +
    2.6.3 to 2.7.02002-Aug-13

    Beginning with version 2.7.0, SQLite understands two different + datatypes: text and numeric. Text data sorts in memcmp() order. + Numeric data sorts in numerical order if it looks like a number, + or in memcmp() order if it does not.

    + +

    When SQLite version 2.7.0 or later opens a 2.6.3 or earlier database, + it assumes all columns of all tables have type "numeric". For 2.7.0 + and later databases, columns have type "text" if their datatype + string contains the substrings "char" or "clob" or "blob" or "text". + Otherwise they are of type "numeric".

    + +

    Because "text" columns have a different sort order from numeric, + indices on "text" columns occur in a different order for version + 2.7.0 and later database. Hence version 2.6.3 and earlier of SQLite + will be unable to read a 2.7.0 or later database. But version 2.7.0 + and later of SQLite will read earlier databases.

    +
    2.7.6 to 2.8.02003-Feb-14

    Version 2.8.0 introduces a change to the format of the rollback + journal file. The main database file format is unchanged. Versions + 2.7.6 and earlier can read and write 2.8.0 databases and vice versa. + Version 2.8.0 can rollback a transaction that was started by version + 2.7.6 and earlier. But version 2.7.6 and earlier cannot rollback a + transaction started by version 2.8.0 or later.

    + +

    The only time this would ever be an issue is when you have a program + using version 2.8.0 or later that crashes with an incomplete + transaction, then you try to examine the database using version 2.7.6 or + earlier. The 2.7.6 code will not be able to read the journal file + and thus will not be able to rollback the incomplete transaction + to restore the database.

    +
    2.8.14 to 3.0.02004-Jun-18

    Version 3.0.0 is a major upgrade for SQLite that incorporates + support for UTF-16, BLOBs, and a more compact encoding that results + in database files that are typically 25% to 50% smaller. The new file + format is very different and is completely incompatible with the + version 2 file format.

    +
    3.0.8 to 3.1.02005-Jan-21

Version 3.1.0 adds support for + autovacuum mode. + Prior versions of SQLite will be able to read an autovacuumed + database but will not be able to write it. If autovacuum is disabled + (which is the default condition) + then databases are fully forwards and backwards compatible.

    +
    3.1.6 to 3.2.02005-Mar-19

    Version 3.2.0 adds support for the + ALTER TABLE ADD COLUMN + command. A database that has been modified by this command can + not be read by a version of SQLite prior to 3.1.4. Running + VACUUM + after the ALTER TABLE + restores the database to a format such that it can be read by earlier + SQLite versions.

    +
    3.2.8 to 3.3.02006-Jan-10

Version 3.3.0 adds support for descending indices and + uses a new encoding for boolean values that requires + less disk space. Version 3.3.0 can read and write database + files created by prior versions of SQLite. But prior versions + of SQLite will not be able to read or write databases created + by Version 3.3.0.

    +

If you need backwards and forwards compatibility, you can + compile with -DSQLITE_DEFAULT_FILE_FORMAT=1. Or at runtime + you can say "PRAGMA legacy_file_format=ON" prior to creating + a new database file.

    +

    Once a database file is created, its format is fixed. So + a database file created by SQLite 3.2.8 and merely modified + by version 3.3.0 or later will retain the old format. Except, + the VACUUM command recreates the database so running VACUUM + on 3.3.0 or later will change the file format to the latest + edition.

    +
    3.3.6 to 3.3.72006-Aug-12

The previous file format change has caused so much + grief that the default behavior has been changed back to + the original file format. This means that the DESC option on + indices is ignored by default and that the more efficient encoding + of boolean values is not used. In that way, older versions + of SQLite can read and write databases created by newer + versions. If the new features are desired, they can be + enabled using the pragma: "PRAGMA legacy_file_format=OFF".

    +

    To be clear: both old and new file formats continue to + be understood and continue to work. But the old file format + is used by default instead of the new. This might change + again in some future release - we may go back to generating + the new file format by default - but probably not until + all users have upgraded to a version of SQLite that will + understand the new file format. That might take several + years.

    3.4.2 to 3.5.02007-Sep-3

The design of the OS interface layer was changed for + release 3.5.0. Applications that implemented a custom OS + interface will need to be modified in order to upgrade. + There are also some subtly different semantics in a few obscure + APIs. An article is available which + describes the changes in detail.

    + +

    The on-disk file format is unchanged.

    +
    +
    + +

    +To perform a database reload, have ready versions of the +sqlite command-line utility for both the old and new +version of SQLite. Call these two executables "sqlite-old" +and "sqlite-new". Suppose the name of your old database +is "old.db" and you want to create a new database with +the same information named "new.db". The command to do +this is as follows: +

    + +
    + sqlite-old old.db .dump | sqlite-new new.db +
    ADDED pages/index.in Index: pages/index.in ================================================================== --- /dev/null +++ pages/index.in @@ -0,0 +1,41 @@ +SQLite Home Page + + + + + + + + + + +
+About SQLite + +
  • SQLite is a C-library that implements a high-efficiency, + transactional SQL database engine using databases stored in + ordinary disk files. + More info...
  • +
    +Current Status + +
Version 3.5.2 +of SQLite is stable and is recommended for all users. +There are no known issues affecting database integrity or correctness.
  • +
    +Recent News + +
  • 2007-Nov-05: Version 3.5.2 released.
  • +
  • 2007-Oct-04: Version 3.5.1 released.
  • +
  • 2007-Sep-04: Version 3.5.0 released.
  • +
    +Quick Links + +
  • More details about SQLite - what it is and what + it is not.
  • +
  • Frequently Asked Questions about SQLite.
  • +
  • Well-known + companies using SQLite in their products.
  • +
  • Timeline of recent + development activity.
  • +
    ADDED pages/lang.in Index: pages/lang.in ================================================================== --- /dev/null +++ pages/lang.in @@ -0,0 +1,2205 @@ +Query Language Understood by SQLite + +if 0 { +if {[llength $argv]>0} { + set outputdir [lindex $argv 0] +} else { + set outputdir "" +} + +puts { +

    SQL As Understood By SQLite

    + +

    The SQLite library understands most of the standard SQL +language. But it does omit some features +while at the same time +adding a few features of its own. This document attempts to +describe precisely what parts of the SQL language SQLite does +and does not support. A list of keywords is +also provided.

    + +

    In all of the syntax diagrams that follow, literal text is shown in +bold blue. Non-terminal symbols are shown in italic red. Operators +that are part of the syntactic markup itself are shown in black roman.

    + +

    This document is just an overview of the SQL syntax implemented +by SQLite. Many low-level productions are omitted. For detailed information +on the language that SQLite understands, refer to the source code and +the grammar file "parse.y".

    + +
    +

SQLite implements the following syntax:

    +

      +} + +proc slink {label} { + if {[string match *.html $label]} { + return $label + } + if {[string length $::outputdir]==0} { + return #$label + } else { + return lang_$label.html + } +} + +foreach {section} [lsort -index 0 -dictionary { + {{CREATE TABLE} createtable} + {{CREATE VIRTUAL TABLE} createvtab} + {{CREATE INDEX} createindex} + {VACUUM vacuum} + {{DROP TABLE} droptable} + {{DROP INDEX} dropindex} + {INSERT insert} + {REPLACE replace} + {DELETE delete} + {UPDATE update} + {SELECT select} + {comment comment} + {COPY copy} + {EXPLAIN explain} + {expression expr} + {{BEGIN TRANSACTION} transaction} + {{COMMIT TRANSACTION} transaction} + {{END TRANSACTION} transaction} + {{ROLLBACK TRANSACTION} transaction} + {PRAGMA pragma.html} + {{ON CONFLICT clause} conflict} + {{CREATE VIEW} createview} + {{DROP VIEW} dropview} + {{CREATE TRIGGER} createtrigger} + {{DROP TRIGGER} droptrigger} + {{ATTACH DATABASE} attach} + {{DETACH DATABASE} detach} + {REINDEX reindex} + {{ALTER TABLE} altertable} + {{ANALYZE} analyze} +}] { + foreach {s_title s_tag} $section {} + puts "
    • $s_title
    • " +} +puts {

    +
    + +

    Details on the implementation of each command are provided in +the sequel.

    +} + +proc Operator {name} { + return "$name" +} +proc Nonterminal {name} { + return "$name" +} +proc Keyword {name} { + return "$name" +} +proc Example {text} { + puts "
    $text
    " +} + +proc Section {name label} { + global outputdir + + if {[string length $outputdir]!=0} { + if {[llength [info commands puts_standard]]>0} { + footer $::rcsid + } + + if {[string length $label]>0} { + rename puts puts_standard + proc puts {str} { + regsub -all {href="#([a-z]+)"} $str {href="lang_\1.html"} str + puts_standard $::section_file $str + } + rename footer footer_standard + proc footer {id} { + footer_standard $id + rename footer "" + rename puts "" + rename puts_standard puts + rename footer_standard footer + } + set ::section_file [open [file join $outputdir lang_$label.html] w] + header "Query Language Understood by SQLite: $name" + puts "

    SQL As Understood By SQLite

    " + puts "\[Contents\]" + puts "

    $name

    " + return + } + } + puts "\n
    " + if {$label!=""} { + puts "" + } + puts "

    $name

    \n" +} + +Section {ALTER TABLE} altertable + +Syntax {sql-statement} { +ALTER TABLE [ .] +} {alteration} { +RENAME TO +} {alteration} { +ADD [COLUMN] +} + +puts { +

SQLite's version of the ALTER TABLE command allows the user to +rename a table or add a new column to an existing table. It is not possible +to remove a column from a table. +

    + +

    The RENAME TO syntax is used to rename the table identified by +[database-name.]table-name to new-table-name. This command +cannot be used to move a table between attached databases, only to rename +a table within the same database.

    + +

    If the table being renamed has triggers or indices, then these remain +attached to the table after it has been renamed. However, if there are +any view definitions, or statements executed by triggers that refer to +the table being renamed, these are not automatically modified to use the new +table name. If this is required, the triggers or view definitions must be +dropped and recreated to use the new table name by hand. +

    + +

The ADD [COLUMN] syntax is used to add a new column to an existing table. +The new column is always appended to the end of the list of existing columns. +Column-def may take any of the forms permissible in a CREATE TABLE +statement, with the following restrictions: +

      +
    • The column may not have a PRIMARY KEY or UNIQUE constraint.
    • +
    • The column may not have a default value of CURRENT_TIME, CURRENT_DATE + or CURRENT_TIMESTAMP.
    • +
    • If a NOT NULL constraint is specified, then the column must have a + default value other than NULL. +
    + +

    The execution time of the ALTER TABLE command is independent of +the amount of data in the table. The ALTER TABLE command runs as quickly +on a table with 10 million rows as it does on a table with 1 row. +

    + +

    After ADD COLUMN has been run on a database, that database will not +be readable by SQLite version 3.1.3 and earlier until the database +is VACUUMed.

    +} + +Section {ANALYZE} analyze + +Syntax {sql-statement} { + ANALYZE +} +Syntax {sql-statement} { + ANALYZE +} +Syntax {sql-statement} { + ANALYZE [ .] +} + +puts { +

The ANALYZE command gathers statistics about indices and stores them +in a special table in the database where the query optimizer can use +them to help make better index choices. +If no arguments are given, all indices in all attached databases are +analyzed. If a database name is given as the argument, all indices +in that one database are analyzed. If the argument is a table name, +then only indices associated with that one table are analyzed.

    + +

    The initial implementation stores all statistics in a single +table named sqlite_stat1. Future enhancements may create +additional tables with the same name pattern except with the "1" +changed to a different digit. The sqlite_stat1 table cannot +be DROPped, +but all the content can be DELETEd which has the +same effect.

    +} + +Section {ATTACH DATABASE} attach + +Syntax {sql-statement} { +ATTACH [DATABASE] AS +} + +puts { +

    The ATTACH DATABASE statement adds another database +file to the current database connection. If the filename contains +punctuation characters it must be quoted. The names 'main' and +'temp' refer to the main database and the database used for +temporary tables. These cannot be detached. Attached databases +are removed using the DETACH DATABASE +statement.

    + +

    You can read from and write to an attached database and you +can modify the schema of the attached database. This is a new +feature of SQLite version 3.0. In SQLite 2.8, schema changes +to attached databases were not allowed.

    + +

    You cannot create a new table with the same name as a table in +an attached database, but you can attach a database which contains +tables whose names are duplicates of tables in the main database. It is +also permissible to attach the same database file multiple times.

    + +

    Tables in an attached database can be referred to using the syntax +database-name.table-name. If an attached table doesn't have +a duplicate table name in the main database, it doesn't require a +database name prefix. When a database is attached, all of its +tables which don't have duplicate names become the default table +of that name. Any tables of that name attached afterwards require the table +prefix. If the default table of a given name is detached, then +the last table of that name attached becomes the new default.

    + +

    +Transactions involving multiple attached databases are atomic, +assuming that the main database is not ":memory:". If the main +database is ":memory:" then +transactions continue to be atomic within each individual +database file. But if the host computer crashes in the middle +of a COMMIT where two or more database files are updated, +some of those files might get the changes where others +might not. +Atomic commit of attached databases is a new feature of SQLite version 3.0. +In SQLite version 2.8, all commits to attached databases behaved as if +the main database were ":memory:". +

    + +

    There is a compile-time limit of 10 attached database files.

    +} + + +Section {BEGIN TRANSACTION} transaction + +Syntax {sql-statement} { +BEGIN [ DEFERRED | IMMEDIATE | EXCLUSIVE ] [TRANSACTION []] +} +Syntax {sql-statement} { +END [TRANSACTION []] +} +Syntax {sql-statement} { +COMMIT [TRANSACTION []] +} +Syntax {sql-statement} { +ROLLBACK [TRANSACTION []] +} + +puts { + +

    +No changes can be made to the database except within a transaction. +Any command that changes the database (basically, any SQL command +other than SELECT) will automatically start a transaction if +one is not already in effect. Automatically started transactions +are committed at the conclusion of the command. +

    + +

    +Transactions can be started manually using the BEGIN +command. Such transactions usually persist until the next +COMMIT or ROLLBACK command. But a transaction will also +ROLLBACK if the database is closed or if an error occurs +and the ROLLBACK conflict resolution algorithm is specified. +See the documentation on the ON CONFLICT +clause for additional information about the ROLLBACK +conflict resolution algorithm. +

    + +

    +END TRANSACTION is an alias for COMMIT. +

    + +

The optional transaction name is currently ignored. SQLite +does not recognize nested transactions at this time. +However, future versions of SQLite may be enhanced to support nested +transactions and the transaction name would then become significant. +Applications are advised not to use the transaction name in order +to avoid future compatibility problems.

    + +

    +Transactions can be deferred, immediate, or exclusive. +The default transaction behavior is deferred. +Deferred means that no locks are acquired +on the database until the database is first accessed. Thus with a +deferred transaction, the BEGIN statement itself does nothing. Locks +are not acquired until the first read or write operation. The first read +operation against a database creates a SHARED lock and the first +write operation creates a RESERVED lock. Because the acquisition of +locks is deferred until they are needed, it is possible that another +thread or process could create a separate transaction and write to +the database after the BEGIN on the current thread has executed. +If the transaction is immediate, then RESERVED locks +are acquired on all databases as soon as the BEGIN command is +executed, without waiting for the +database to be used. After a BEGIN IMMEDIATE, you are guaranteed that +no other thread or process will be able to write to the database or +do a BEGIN IMMEDIATE or BEGIN EXCLUSIVE. Other processes can continue +to read from the database, however. An exclusive transaction causes +EXCLUSIVE locks to be acquired on all databases. After a BEGIN +EXCLUSIVE, you are guaranteed that no other thread or process will +be able to read or write the database until the transaction is +complete. +

    + +

    +A description of the meaning of SHARED, RESERVED, and EXCLUSIVE locks +is available separately. +

    + +

    +The COMMIT command does not actually perform a commit until all +pending SQL commands finish. Thus if two or more SELECT statements +are in the middle of processing and a COMMIT is executed, the commit +will not actually occur until all SELECT statements finish. +

    + +

    +An attempt to execute COMMIT might result in an SQLITE_BUSY return code. +This indicates that another thread or process had a read lock on the database +that prevented the database from being updated. When COMMIT fails in this +way, the transaction remains active and the COMMIT can be retried later +after the reader has had a chance to clear. +

    + +

    Response To Errors Within A Transaction

    + +

    If certain kinds of errors occur within a transaction, the +transaction may or may not be rolled back automatically. The +errors that cause the behavior include:

    + +
      +
    • SQLITE_FULL: database or disk full +
    • SQLITE_IOERR: disk I/O error +
    • SQLITE_BUSY: database in use by another process +
    • SQLITE_NOMEM: out of memory +
    • SQLITE_INTERRUPT: processing interrupted by user request +
    + +

    +For all of these errors, SQLite attempts to undo just the one statement +it was working on and leave changes from prior statements within the +same transaction intact and continue with the transaction. However, +depending on the statement being evaluated and the point at which the +error occurs, it might be necessary for SQLite to rollback and +cancel the transaction. An application can tell which +course of action SQLite took by using the +sqlite3_get_autocommit() +C-language interface.

    + +

    It is recommended that applications respond to the errors +listed above by explicitly issuing a ROLLBACK command. If the +transaction has already been rolled back automatically +by the error response, then the ROLLBACK command will fail with an +error, but no harm is caused by this.

    + +

    Future versions of SQLite may extend the list of errors which +might cause automatic transaction rollback. Future versions of +SQLite might change the error response. In particular, we may +choose to simplify the interface in future versions of SQLite by +causing the errors above to force an unconditional rollback.

    +} + + +Section comment comment + +Syntax {comment} { | +} {SQL-comment} {-- +} {C-comment} {/STAR [STAR/] +} + +puts { +

    Comments aren't SQL commands, but can occur in SQL queries. They are +treated as whitespace by the parser. They can begin anywhere whitespace +can be found, including inside expressions that span multiple lines. +

    + +

    SQL comments only extend to the end of the current line.

    + +

    C comments can span any number of lines. If there is no terminating +delimiter, they extend to the end of the input. This is not treated as +an error. A new SQL statement can begin on a line after a multiline +comment ends. C comments can be embedded anywhere whitespace can occur, +including inside expressions, and in the middle of other SQL statements. +C comments do not nest. SQL comments inside a C comment will be ignored. +

    +} + + +Section COPY copy + +Syntax {sql-statement} { +COPY [ OR ] [ .] FROM +[ USING DELIMITERS ] +} + +puts { +

    The COPY command is available in SQLite version 2.8 and earlier. +The COPY command has been removed from SQLite version 3.0 due to +complications in trying to support it in a mixed UTF-8/16 environment. +In version 3.0, the command-line shell +contains a new command .import that can be used as a substitute +for COPY. +

    + +

    The COPY command is an extension used to load large amounts of +data into a table. It is modeled after a similar command found +in PostgreSQL. In fact, the SQLite COPY command is specifically +designed to be able to read the output of the PostgreSQL dump +utility pg_dump so that data can be easily transferred from +PostgreSQL into SQLite.

    + +

    The table-name is the name of an existing table which is to +be filled with data. The filename is a string or identifier that +names a file from which data will be read. The filename can be +the STDIN to read data from standard input.

    + +

    Each line of the input file is converted into a single record +in the table. Columns are separated by tabs. If a tab occurs as +data within a column, then that tab is preceded by a backslash "\" +character. A backslash in the data appears as two backslashes in +a row. The optional USING DELIMITERS clause can specify a delimiter +other than tab.

    + +

    If a column consists of the character "\N", that column is filled +with the value NULL.

    + +

    The optional conflict-clause allows the specification of an alternative +constraint conflict resolution algorithm to use for this one command. +See the section titled +ON CONFLICT for additional information.

    + +

    When the input data source is STDIN, the input can be terminated +by a line that contains only a backslash and a dot:} +puts "\"[Operator \\.]\".

    " + + +Section {CREATE INDEX} createindex + +Syntax {sql-statement} { +CREATE [UNIQUE] INDEX [IF NOT EXISTS] [ .] +ON ( [, ]* ) +} {column-name} { + [ COLLATE ] [ ASC | DESC ] +} + +puts { +

    The CREATE INDEX command consists of the keywords "CREATE INDEX" followed +by the name of the new index, the keyword "ON", the name of a previously +created table that is to be indexed, and a parenthesized list of names of +columns in the table that are used for the index key. +Each column name can be followed by one of the "ASC" or "DESC" keywords +to indicate sort order, but the sort order is ignored in the current +implementation. Sorting is always done in ascending order.

    + +

    The COLLATE clause following each column name defines a collating +sequence used for text entries in that column. The default collating +sequence is the collating sequence defined for that column in the +CREATE TABLE statement. Or if no collating sequence is otherwise defined, +the built-in BINARY collating sequence is used.

    + +

    There are no arbitrary limits on the number of indices that can be +attached to a single table, nor on the number of columns in an index.

    + +

    If the UNIQUE keyword appears between CREATE and INDEX then duplicate +index entries are not allowed. Any attempt to insert a duplicate entry +will result in an error.

    + +

    The exact text +of each CREATE INDEX statement is stored in the sqlite_master +or sqlite_temp_master table, depending on whether the table +being indexed is temporary. Every time the database is opened, +all CREATE INDEX statements +are read from the sqlite_master table and used to regenerate +SQLite's internal representation of the index layout.

    + +

    If the optional IF NOT EXISTS clause is present and another index +with the same name already exists, then this command becomes a no-op.

    + +

    Indexes are removed with the DROP INDEX +command.

    +} + + +Section {CREATE TABLE} {createtable} + +Syntax {sql-command} { +CREATE [TEMP | TEMPORARY] TABLE [IF NOT EXISTS] [ .] ( + [, ]* + [, ]* +) +} {sql-command} { +CREATE [TEMP | TEMPORARY] TABLE [.] AS +} {column-def} { + [] [[CONSTRAINT ] ]* +} {type} { + | + ( ) | + ( , ) +} {column-constraint} { +NOT NULL [ ] | +PRIMARY KEY [] [ ] [AUTOINCREMENT] | +UNIQUE [ ] | +CHECK ( ) | +DEFAULT | +COLLATE +} {constraint} { +PRIMARY KEY ( ) [ ] | +UNIQUE ( ) [ ] | +CHECK ( ) +} {conflict-clause} { +ON CONFLICT +} + +puts { +

    A CREATE TABLE statement is basically the keywords "CREATE TABLE" +followed by the name of a new table and a parenthesized list of column +definitions and constraints. The table name can be either an identifier +or a string. Tables names that begin with "sqlite_" are reserved +for use by the engine.

    + +

    Each column definition is the name of the column followed by the +datatype for that column, then one or more optional column constraints. +The datatype for the column does not restrict what data may be put +in that column. +See Datatypes In SQLite Version 3 for +additional information. +The UNIQUE constraint causes an index to be created on the specified +columns. This index must contain unique keys. +The COLLATE clause specifies what text +collating function to use when comparing text entries for the column. +The built-in BINARY collating function is used by default. +

    +The DEFAULT constraint specifies a default value to use when doing an INSERT. +The value may be NULL, a string constant or a number. Starting with version +3.1.0, the default value may also be one of the special case-independent +keywords CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP. If the value is +NULL, a string constant or number, it is literally inserted into the column +whenever an INSERT statement that does not specify a value for the column is +executed. If the value is CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP, then +the current UTC date and/or time is inserted into the columns. For +CURRENT_TIME, the format is HH:MM:SS. For CURRENT_DATE, YYYY-MM-DD. The format +for CURRENT_TIMESTAMP is "YYYY-MM-DD HH:MM:SS". +

    + +

    Specifying a PRIMARY KEY normally just creates a UNIQUE index +on the corresponding columns. However, if primary key is on a single column +that has datatype INTEGER, then that column is used internally +as the actual key of the B-Tree for the table. This means that the column +may only hold unique integer values. (Except for this one case, +SQLite ignores the datatype specification of columns and allows +any kind of data to be put in a column regardless of its declared +datatype.) If a table does not have an INTEGER PRIMARY KEY column, +then the B-Tree key will be an automatically generated integer. + The +B-Tree key for a row can always be accessed using one of the +special names "ROWID", "OID", or "_ROWID_". +This is true regardless of whether or not there is an INTEGER +PRIMARY KEY. An INTEGER PRIMARY KEY column can also include the +keyword AUTOINCREMENT. The AUTOINCREMENT keyword modifies the way +that B-Tree keys are automatically generated. Additional detail +on automatic B-Tree key generation is available +separately.

    + +

    According to the SQL standard, PRIMARY KEY should imply NOT NULL. +Unfortunately, due to a long-standing coding oversight, this is not +the case in SQLite. SQLite allows NULL values +in a PRIMARY KEY column. We could change SQLite to conform to the +standard (and we might do so in the future), but by the time the +oversight was discovered, SQLite was in such wide use that we feared +breaking legacy code if we fixed the problem. So for now we have +chosen to continue allowing NULLs in PRIMARY KEY columns. +Developers should be aware, however, that we may change SQLite to +conform to the SQL standard in future and should design new programs +accordingly.

    + +

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" +and "TABLE" then the table that is created is only visible +within that same database connection +and is automatically deleted when +the database connection is closed. Any indices created on a temporary table +are also temporary. Temporary tables and indices are stored in a +separate file distinct from the main database file.

    + +

    If a <database-name> is specified, then the table is created in +the named database. It is an error to specify both a <database-name> +and the TEMP keyword, unless the <database-name> is "temp". If no +database name is specified, and the TEMP keyword is not present, +the table is created in the main database.

    + +

    The optional conflict-clause following each constraint +allows the specification of an alternative default +constraint conflict resolution algorithm for that constraint. +The default is ABORT. Different constraints within the same +table may have different default conflict resolution algorithms. +If a COPY, INSERT, or UPDATE command specifies a different conflict +resolution algorithm, then that algorithm is used in place of the +default algorithm specified in the CREATE TABLE statement. +See the section titled +ON CONFLICT for additional information.

    + +

    CHECK constraints are supported as of version 3.3.0. Prior +to version 3.3.0, CHECK constraints were parsed but not enforced.

    + +

    There are no arbitrary limits on the number +of columns or on the number of constraints in a table. +The total amount of data in a single row is limited to about +1 megabytes in version 2.8. In version 3.0 there is no arbitrary +limit on the amount of data in a row.

    + + +

    The CREATE TABLE AS form defines the table to be +the result set of a query. The names of the table columns are +the names of the columns in the result.

    + +

    The exact text +of each CREATE TABLE statement is stored in the sqlite_master +table. Every time the database is opened, all CREATE TABLE statements +are read from the sqlite_master table and used to regenerate +SQLite's internal representation of the table layout. +If the original command was a CREATE TABLE AS then an equivalent +CREATE TABLE statement is synthesized and stored in sqlite_master +in place of the original command. +The text of CREATE TEMPORARY TABLE statements is stored in the +sqlite_temp_master table. +

    + +

    If the optional IF NOT EXISTS clause is present and another table +with the same name already exists, then this command becomes a no-op.

    + +

    Tables are removed using the DROP TABLE +statement.

    +} + + +Section {CREATE TRIGGER} createtrigger + +Syntax {sql-statement} { +CREATE [TEMP | TEMPORARY] TRIGGER [IF NOT EXISTS] [ BEFORE | AFTER ] + ON [ .] + +} + +Syntax {sql-statement} { +CREATE [TEMP | TEMPORARY] TRIGGER [IF NOT EXISTS] INSTEAD OF + ON [ .] + +} + +Syntax {database-event} { +DELETE | +INSERT | +UPDATE | +UPDATE OF +} + +Syntax {trigger-action} { +[ FOR EACH ROW ] [ WHEN ] +BEGIN + ; [ ; ]* +END +} + +Syntax {trigger-step} { + | | + | +} + +puts { +

    The CREATE TRIGGER statement is used to add triggers to the +database schema. Triggers are database operations (the trigger-action) +that are automatically performed when a specified database event (the +database-event) occurs.

    + +

    A trigger may be specified to fire whenever a DELETE, INSERT or UPDATE of a +particular database table occurs, or whenever an UPDATE of one or more +specified columns of a table are updated.

    + +

    At this time SQLite supports only FOR EACH ROW triggers, not FOR EACH +STATEMENT triggers. Hence explicitly specifying FOR EACH ROW is optional. FOR +EACH ROW implies that the SQL statements specified as trigger-steps +may be executed (depending on the WHEN clause) for each database row being +inserted, updated or deleted by the statement causing the trigger to fire.

    + +

    Both the WHEN clause and the trigger-steps may access elements of +the row being inserted, deleted or updated using references of the form +"NEW.column-name" and "OLD.column-name", where +column-name is the name of a column from the table that the trigger +is associated with. OLD and NEW references may only be used in triggers on +trigger-events for which they are relevant, as follows:

    + + + + + + + + + + + + + + +
    INSERTNEW references are valid
    UPDATENEW and OLD references are valid
    DELETEOLD references are valid
    +

    + +

    If a WHEN clause is supplied, the SQL statements specified as trigger-steps are only executed for rows for which the WHEN clause is true. If no WHEN clause is supplied, the SQL statements are executed for all rows.

    + +

    The specified trigger-time determines when the trigger-steps +will be executed relative to the insertion, modification or removal of the +associated row.

    + +

    An ON CONFLICT clause may be specified as part of an UPDATE or INSERT +trigger-step. However if an ON CONFLICT clause is specified as part of +the statement causing the trigger to fire, then this conflict handling +policy is used instead.

    + +

    Triggers are automatically dropped when the table that they are +associated with is dropped.

    + +

    Triggers may be created on views, as well as ordinary tables, by specifying +INSTEAD OF in the CREATE TRIGGER statement. If one or more ON INSERT, ON DELETE +or ON UPDATE triggers are defined on a view, then it is not an error to execute +an INSERT, DELETE or UPDATE statement on the view, respectively. Thereafter, +executing an INSERT, DELETE or UPDATE on the view causes the associated + triggers to fire. The real tables underlying the view are not modified + (except possibly explicitly, by a trigger program).

    + +

    Example:

    + +

    Assuming that customer records are stored in the "customers" table, and +that order records are stored in the "orders" table, the following trigger +ensures that all associated orders are redirected when a customer changes +his or her address:

    +} +Example { +CREATE TRIGGER update_customer_address UPDATE OF address ON customers + BEGIN + UPDATE orders SET address = new.address WHERE customer_name = old.name; + END; +} +puts { +

    With this trigger installed, executing the statement:

    +} + +Example { +UPDATE customers SET address = '1 Main St.' WHERE name = 'Jack Jones'; +} +puts { +

    causes the following to be automatically executed:

    +} +Example { +UPDATE orders SET address = '1 Main St.' WHERE customer_name = 'Jack Jones'; +} + +puts { +

    Note that currently, triggers may behave oddly when created on tables + with INTEGER PRIMARY KEY fields. If a BEFORE trigger program modifies the + INTEGER PRIMARY KEY field of a row that will be subsequently updated by the + statement that causes the trigger to fire, then the update may not occur. + The workaround is to declare the table with a PRIMARY KEY column instead + of an INTEGER PRIMARY KEY column.

    +} + +puts { +

    A special SQL function RAISE() may be used within a trigger-program, with the following syntax

    +} +Syntax {raise-function} { +RAISE ( ABORT, ) | +RAISE ( FAIL, ) | +RAISE ( ROLLBACK, ) | +RAISE ( IGNORE ) +} +puts { +

    When one of the first three forms is called during trigger-program execution, the specified ON CONFLICT processing is performed (either ABORT, FAIL or + ROLLBACK) and the current query terminates. An error code of SQLITE_CONSTRAINT is returned to the user, along with the specified error message.

    + +

    When RAISE(IGNORE) is called, the remainder of the current trigger program, +the statement that caused the trigger program to execute and any subsequent + trigger programs that would have been executed are abandoned. No database + changes are rolled back. If the statement that caused the trigger program + to execute is itself part of a trigger program, then that trigger program + resumes execution at the beginning of the next step. +

    + +

    Triggers are removed using the DROP TRIGGER +statement.

    +} + + +Section {CREATE VIEW} {createview} + +Syntax {sql-command} { +CREATE [TEMP | TEMPORARY] VIEW [IF NOT EXISTS] [.] AS +} + +puts { +

    The CREATE VIEW command assigns a name to a pre-packaged +SELECT +statement. Once the view is created, it can be used in the FROM clause +of another SELECT in place of a table name. +

    + +

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" +and "VIEW" then the view that is created is only visible to the +process that opened the database and is automatically deleted when +the database is closed.

    + +

    If a <database-name> is specified, then the view is created in +the named database. It is an error to specify both a <database-name> +and the TEMP keyword, unless the <database-name> is "temp". If no +database name is specified, and the TEMP keyword is not present, +the table is created in the main database.

    + +

    You cannot COPY, DELETE, INSERT or UPDATE a view. Views are read-only +in SQLite. However, in many cases you can use a +TRIGGER on the view to accomplish the same thing. Views are removed +with the DROP VIEW +command.

    +} + +Section {CREATE VIRTUAL TABLE} {createvtab} + +Syntax {sql-command} { +CREATE VIRTUAL TABLE [ .] USING [( )] +} + +puts { +

    A virtual table is an interface to an external storage or computation +engine that appears to be a table but does not actually store information +in the database file.

    + +

    In general, you can do anything with a virtual table that can be done +with an ordinary table, except that you cannot create triggers on a +virtual table. Some virtual table implementations might impose additional +restrictions. For example, many virtual tables are read-only.

    + +

    The <module-name> is the name of an object that implements +the virtual table. The <module-name> must be registered with +the SQLite database connection using +sqlite3_create_module +prior to issuing the CREATE VIRTUAL TABLE statement. +The module takes zero or more comma-separated arguments. +The arguments can be just about any text as long as it has balanced +parentheses. The argument syntax is sufficiently general that the +arguments can be made to appear as column definitions in a traditional +CREATE TABLE statement. +SQLite passes the module arguments directly +to the module without any interpretation. It is the responsibility +of the module implementation to parse and interpret its own arguments.

    + +

    A virtual table is destroyed using the ordinary +DROP TABLE statement. There is no +DROP VIRTUAL TABLE statement.

    +} + +Section DELETE delete + +Syntax {sql-statement} { +DELETE FROM [ .] [WHERE ] +} + +puts { +

    The DELETE command is used to remove records from a table. +The command consists of the "DELETE FROM" keywords followed by +the name of the table from which records are to be removed. +

    + +

    Without a WHERE clause, all rows of the table are removed. +If a WHERE clause is supplied, then only those rows that match +the expression are removed.

    +} + + +Section {DETACH DATABASE} detach + +Syntax {sql-command} { +DETACH [DATABASE] +} + +puts { +

    This statement detaches an additional database connection previously +attached using the ATTACH DATABASE statement. It +is possible to have the same database file attached multiple times using +different names, and detaching one connection to a file will leave the +others intact.

    + +

    This statement will fail if SQLite is in the middle of a transaction.

    +} + + +Section {DROP INDEX} dropindex + +Syntax {sql-command} { +DROP INDEX [IF EXISTS] [ .] +} + +puts { +

    The DROP INDEX statement removes an index added +with the +CREATE INDEX statement. The index named is completely removed from +the disk. The only way to recover the index is to reenter the +appropriate CREATE INDEX command.

    + +

    The DROP INDEX statement does not reduce the size of the database +file in the default mode. +Empty space in the database is retained for later INSERTs. To +remove free space in the database, use the VACUUM +command. If AUTOVACUUM mode is enabled for a database then space +will be freed automatically by DROP INDEX.

    +} + + +Section {DROP TABLE} droptable + +Syntax {sql-command} { +DROP TABLE [IF EXISTS] [.] +} + +puts { +

    The DROP TABLE statement removes a table added with the CREATE TABLE statement. The name specified is the +table name. It is completely removed from the database schema and the +disk file. The table can not be recovered. All indices associated +with the table are also deleted.

    + +

    The DROP TABLE statement does not reduce the size of the database +file in the default mode. Empty space in the database is retained for +later INSERTs. To +remove free space in the database, use the VACUUM +command. If AUTOVACUUM mode is enabled for a database then space +will be freed automatically by DROP TABLE.

    + +

    The optional IF EXISTS clause suppresses the error that would normally +result if the table does not exist.

    +} + + +Section {DROP TRIGGER} droptrigger +Syntax {sql-statement} { +DROP TRIGGER [IF EXISTS] [ .] +} +puts { +

    The DROP TRIGGER statement removes a trigger created by the +CREATE TRIGGER statement. The trigger is +deleted from the database schema. Note that triggers are automatically +dropped when the associated table is dropped.

    +} + + +Section {DROP VIEW} dropview + +Syntax {sql-command} { +DROP VIEW [IF EXISTS] +} + +puts { +

    The DROP VIEW statement removes a view created by the CREATE VIEW statement. The name specified is the +view name. It is removed from the database schema, but no actual data +in the underlying base tables is modified.

    +} + + +Section EXPLAIN explain + +Syntax {sql-statement} { +EXPLAIN +} + +puts { +

    The EXPLAIN command modifier is a non-standard extension. The +idea comes from a similar command found in PostgreSQL, but the operation +is completely different.

    + +

    If the EXPLAIN keyword appears before any other SQLite SQL command +then instead of actually executing the command, the SQLite library will +report back the sequence of virtual machine instructions it would have +used to execute the command had the EXPLAIN keyword not been present. +For additional information about virtual machine instructions see +the architecture description or the documentation +on available opcodes for the virtual machine.

    +} + + +Section expression expr + +Syntax {expr} { + | + [NOT] [ESCAPE ] | + | +( ) | + | + . | + . . | + | + | + ( | STAR ) | + ISNULL | + NOTNULL | + [NOT] BETWEEN AND | + [NOT] IN ( ) | + [NOT] IN ( ) | + [NOT] IN [ .] | +[EXISTS] ( ) | +CASE [] LP WHEN THEN RPPLUS [ELSE ] END | +CAST ( AS ) | + COLLATE +} {like-op} { +LIKE | GLOB | REGEXP | MATCH +} + +puts { +

    This section is different from the others. Most other sections of +this document talks about a particular SQL command. This section does +not talk about a standalone command but about "expressions" which are +subcomponents of most other commands.

    + +

    SQLite understands the following binary operators, in order from +highest to lowest precedence:

    + +
    +||
    +*    /    %
    ++    -
    +<<   >>   &    |
    +<    <=   >    >=
    +=    ==   !=   <>   IN
    +AND   
    +OR
    +
    + +

    Supported unary prefix operators are these:

    + +
    +-    +    !    ~    NOT
    +
    + +

    The COLLATE operator can be thought of as a unary postfix +operator. The COLLATE operator has the highest precedence. +It always binds more tightly than any prefix unary operator or +any binary operator.

    + +

    The unary operator [Operator +] is a no-op. It can be applied +to strings, numbers, or blobs and it always gives as its result the +value of the operand.

    + +

    Note that there are two variations of the equals and not equals +operators. Equals can be either} +puts "[Operator =] or [Operator ==]. +The non-equals operator can be either +[Operator !=] or [Operator {<>}]. +The [Operator ||] operator is \"concatenate\" - it joins together +the two strings of its operands. +The operator [Operator %] outputs the remainder of its left +operand modulo its right operand.

    + +

    The result of any binary operator is a numeric value, except +for the [Operator ||] concatenation operator which gives a string +result.

    " + +puts { + + +

    +A literal value is an integer number or a floating point number. +Scientific notation is supported. The "." character is always used +as the decimal point even if the locale setting specifies "," for +this role - the use of "," for the decimal point would result in +syntactic ambiguity. A string constant is formed by enclosing the +string in single quotes ('). A single quote within the string can +be encoded by putting two single quotes in a row - as in Pascal. +C-style escapes using the backslash character are not supported because +they are not standard SQL. +BLOB literals are string literals containing hexadecimal data and +preceded by a single "x" or "X" character. For example:

    + +
    +X'53514C697465'
    +
    + +

    +A literal value can also be the token "NULL". +

    + +

    +A parameter specifies a placeholder in the expression for a literal +value that is filled in at runtime using the +sqlite3_bind API. +Parameters can take several forms: +

    + + + + + + + + + + + + + + + + + + + + + +
    ?NNNA question mark followed by a number NNN holds a spot for the +NNN-th parameter. NNN must be between 1 and 999.
    ?A question mark that is not followed by a number holds a spot for +the next unused parameter.
    :AAAAA colon followed by an identifier name holds a spot for a named +parameter with the name AAAA. Named parameters are also numbered. +The number assigned is the next unused number. To avoid confusion, +it is best to avoid mixing named and numbered parameters.
    @AAAAAn "at" sign works exactly like a colon.
    $AAAAA dollar-sign followed by an identifier name also holds a spot for a named +parameter with the name AAAA. The identifier name in this case can include +one or more occurrences of "::" and a suffix enclosed in "(...)" containing +any text at all. This syntax is the form of a variable name in the Tcl +programming language.
    + + +

    Parameters that are not assigned values using +sqlite3_bind are treated +as NULL.

    + + +

    The LIKE operator does a pattern matching comparison. The operand +to the right contains the pattern, the left hand operand contains the +string to match against the pattern. +} +puts "A percent symbol [Operator %] in the pattern matches any +sequence of zero or more characters in the string. An underscore +[Operator _] in the pattern matches any single character in the +string. Any other character matches itself or its lower/upper case +equivalent (i.e. case-insensitive matching). (A bug: SQLite only +understands upper/lower case for 7-bit Latin characters. Hence the +LIKE operator is case sensitive for 8-bit iso8859 characters or UTF-8 +characters. For example, the expression 'a' LIKE 'A' +is TRUE but 'æ' LIKE 'Æ' is FALSE.).

    " + +puts { +

    If the optional ESCAPE clause is present, then the expression +following the ESCAPE keyword must evaluate to a string consisting of +a single character. This character may be used in the LIKE pattern +to include literal percent or underscore characters. The escape +character followed by a percent symbol, underscore or itself matches a +literal percent symbol, underscore or escape character in the string, +respectively. The infix LIKE operator is implemented by calling the +user function like(X,Y).

    +} + +puts { +The LIKE operator is not case sensitive and will match upper case +characters on one side against lower case characters on the other. +(A bug: SQLite only understands upper/lower case for 7-bit Latin +characters. Hence the LIKE operator is case sensitive for 8-bit +iso8859 characters or UTF-8 characters. For example, the expression +'a' LIKE 'A' is TRUE but +'æ' LIKE 'Æ' is FALSE.).

    + +

    The infix LIKE +operator is implemented by calling the user function +like(X,Y). If an ESCAPE clause is present, it adds +a third parameter to the function call. The functionality of LIKE can be +overridden by defining an alternative implementation of the +like() SQL function.

    +

    + + +

    The GLOB operator is similar to LIKE but uses the Unix +file globbing syntax for its wildcards. Also, GLOB is case +sensitive, unlike LIKE. Both GLOB and LIKE may be preceded by +the NOT keyword to invert the sense of the test. The infix GLOB +operator is implemented by calling the user function +glob(X,Y) and can be modified by overriding +that function.

    + + +

    The REGEXP operator is a special syntax for the regexp() +user function. No regexp() user function is defined by default +and so use of the REGEXP operator will normally result in an +error message. If a user-defined function named "regexp" +is added at run-time, that function will be called in order +to implement the REGEXP operator.

    + + +

    The MATCH operator is a special syntax for the match() +user function. The default match() function implementation +raises an exception and is not really useful for anything. +But extensions can override the match() function with more +helpful logic.

    + +

    A column name can be any of the names defined in the CREATE TABLE +statement or one of the following special identifiers: "ROWID", +"OID", or "_ROWID_". +These special identifiers all describe the +unique integer key (the "row key") associated with every +row of every table. +The special identifiers only refer to the row key if the CREATE TABLE +statement does not define a real column with the same name. Row keys +act like read-only columns. A row key can be used anywhere a regular +column can be used, except that you cannot change the value +of a row key in an UPDATE or INSERT statement. +"SELECT * ..." does not return the row key.

    + +

    SELECT statements can appear in expressions as either the +right-hand operand of the IN operator, as a scalar quantity, or +as the operand of an EXISTS operator. +As a scalar quantity or the operand of an IN operator, +the SELECT should have only a single column in its +result. Compound SELECTs (connected with keywords like UNION or +EXCEPT) are allowed. +With the EXISTS operator, the columns in the result set of the SELECT are +ignored and the expression returns TRUE if one or more rows exist +and FALSE if the result set is empty. +If no terms in the SELECT expression refer to value in the containing +query, then the expression is evaluated once prior to any other +processing and the result is reused as necessary. If the SELECT expression +does contain variables from the outer query, then the SELECT is reevaluated +every time it is needed.

    + +

    When a SELECT is the right operand of the IN operator, the IN +operator returns TRUE if the result of the left operand is any of +the values generated by the select. The IN operator may be preceded +by the NOT keyword to invert the sense of the test.

    + +

    When a SELECT appears within an expression but is not the right +operand of an IN operator, then the first row of the result of the +SELECT becomes the value used in the expression. If the SELECT yields +more than one result row, all rows after the first are ignored. If +the SELECT yields no rows, then the value of the SELECT is NULL.

    + +

    A CAST expression changes the datatype of the into the +type specified by <type>. +<type> can be any non-empty type name that is valid +for the type in a column definition of a CREATE TABLE statement.

    + +

    Both simple and aggregate functions are supported. A simple +function can be used in any expression. Simple functions return +a result immediately based on their inputs. Aggregate functions +may only be used in a SELECT statement. Aggregate functions compute +their result across all rows of the result set.

    + + +Core Functions + +

    The core functions shown below are available by default. Additional +functions may be written in C and added to the database engine using +the sqlite3_create_function() +API.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    abs(X)Return the absolute value of argument X.
    coalesce(X,Y,...)Return a copy of the first non-NULL argument. If +all arguments are NULL then NULL is returned. There must be at least +2 arguments.
    + +glob(X,Y)This function is used to implement the +"X GLOB Y" syntax of SQLite. The +sqlite3_create_function() +interface can +be used to override this function and thereby change the operation +of the GLOB operator.
    ifnull(X,Y)Return a copy of the first non-NULL argument. If +both arguments are NULL then NULL is returned. This behaves the same as +coalesce() above.
    + +hex(X)The argument is interpreted as a BLOB. The result +is a hexadecimal rendering of the content of that blob.
    last_insert_rowid()Return the ROWID +of the last row insert from this +connection to the database. This is the same value that would be returned +from the sqlite_last_insert_rowid() API function.
    length(X)Return the string length of X in characters. +If SQLite is configured to support UTF-8, then the number of UTF-8 +characters is returned, not the number of bytes.
    + +like(X,Y)
    +like(X,Y,Z)
    +This function is used to implement the "X LIKE Y [ESCAPE Z]" +syntax of SQL. If the optional ESCAPE clause is present, then the +user-function is invoked with three arguments. Otherwise, it is +invoked with two arguments only. The + +sqlite_create_function() interface can be used to override this +function and thereby change the operation of the LIKE operator. When doing this, it may be important +to override both the two and three argument versions of the like() +function. Otherwise, different code may be called to implement the +LIKE operator depending on whether or not an ESCAPE clause was +specified.
    load_extension(X)
    +load_extension(X,Y)
    Load SQLite extensions out of the shared library +file named X using the entry point Y. The result +is a NULL. If Y is omitted then the default entry point +of sqlite3_extension_init is used. This function raises +an exception if the extension fails to load or initialize correctly. + +

    This function will fail if the extension attempts to modify +or delete a SQL function or collating sequence. The +extension can add new functions or collating sequences, but cannot +modify or delete existing functions or collating sequences because +those functions and/or collating sequences might be used elsewhere +in the currently running SQL statement. To load an extension that +changes or deletes functions or collating sequences, use the +sqlite3_load_extension() +C-language API.

    +
lower(X)Return a copy of string X with all characters +converted to lower case. The C library tolower() routine is used +for the conversion, which means that this function might not +work correctly on UTF-8 characters.
    + +ltrim(X)
    ltrim(X,Y)
    Return a string formed by removing any and all +characters that appear in Y from the left side of X. +If the Y argument is omitted, spaces are removed.
    max(X,Y,...)Return the argument with the maximum value. Arguments +may be strings in addition to numbers. The maximum value is determined +by the usual sort order. Note that max() is a simple function when +it has 2 or more arguments but converts to an aggregate function if given +only a single argument.
    min(X,Y,...)Return the argument with the minimum value. Arguments +may be strings in addition to numbers. The minimum value is determined +by the usual sort order. Note that min() is a simple function when +it has 2 or more arguments but converts to an aggregate function if given +only a single argument.
    nullif(X,Y)Return the first argument if the arguments are different, +otherwise return NULL.
    quote(X)This routine returns a string which is the value of +its argument suitable for inclusion into another SQL statement. +Strings are surrounded by single-quotes with escapes on interior quotes +as needed. BLOBs are encoded as hexadecimal literals. +The current implementation of VACUUM uses this function. The function +is also useful when writing triggers to implement undo/redo functionality. +
    random(*)Return a pseudo-random integer +between -9223372036854775808 and +9223372036854775807.
+ +replace(X,Y,Z)Return a string formed by substituting string Z for +every occurrence of string Y in string X. The BINARY +collating sequence is used for comparisons.
+ +randomblob(N)Return an N-byte blob containing pseudo-random bytes. +N should be a positive integer.
    round(X)
    round(X,Y)
    Round off the number X to Y digits to the +right of the decimal point. If the Y argument is omitted, 0 is +assumed.
    + +rtrim(X)
    rtrim(X,Y)
    Return a string formed by removing any and all +characters that appear in Y from the right side of X. +If the Y argument is omitted, spaces are removed.
soundex(X)Compute the soundex encoding of the string X. +The string "?000" is returned if the argument is NULL. +This function is omitted from SQLite by default. +It is only available if the -DSQLITE_SOUNDEX=1 compiler option +is used when SQLite is built.
    sqlite_version(*)Return the version string for the SQLite library +that is running. Example: "2.8.0"
    + substr(X,Y,Z)
    + substr(X,Y)
Return a substring of input string X that begins +with the Y-th character and which is Z characters long. +If Z is omitted then all characters through the end of the string +are returned. +The left-most character of X is number 1. If Y is negative +then the first character of the substring is found by counting from the +right rather than the left. If X is a string +then character indices refer to actual UTF-8 characters. If +X is a BLOB then the indices refer to bytes.
    + +trim(X)
    trim(X,Y)
    Return a string formed by removing any and all +characters that appear in Y from both ends of X. +If the Y argument is omitted, spaces are removed.
    typeof(X)Return the type of the expression X. The only +return values are "null", "integer", "real", "text", and "blob". +SQLite's type handling is +explained in Datatypes in SQLite Version 3.
    upper(X)Return a copy of input string X converted to all +upper-case letters. The implementation of this function uses the C library +routine toupper() which means it may not work correctly on +UTF-8 strings.
    zeroblob(N) +Return a BLOB consisting of N bytes of 0x00. SQLite +manages these zeroblobs very efficiently. Zeroblobs can be used to +reserve space for a BLOB that is later written using +incremental BLOB I/O.
    + +Date And Time Functions + +

    Date and time functions are documented in the + +SQLite Wiki.

    + + +Aggregate Functions + +

    +The aggregate functions shown below are available by default. Additional +aggregate functions written in C may be added using the +sqlite3_create_function() +API.

    + +

+In any aggregate function that takes a single argument, that argument +can be preceded by the keyword DISTINCT. In such cases, duplicate +elements are filtered before being passed into the aggregate function. +For example, the function "count(distinct X)" will return the number +of distinct values of column X instead of the total number of non-null +values in column X. +

    + + + + + + + + + + + + + + + + + + + + + + + + + +
    avg(X)Return the average value of all non-NULL X within a +group. String and BLOB values that do not look like numbers are +interpreted as 0. +The result of avg() is always a floating point value even if all +inputs are integers.

    count(X)
    count(*)
The first form returns a count of the number of times +that X is not NULL in a group. The second form (with no argument) +returns the total number of rows in the group.
    max(X)Return the maximum value of all values in the group. +The usual sort order is used to determine the maximum.
    min(X)Return the minimum non-NULL value of all values in the group. +The usual sort order is used to determine the minimum. NULL is only returned +if all values in the group are NULL.
    sum(X)
    total(X)
    Return the numeric sum of all non-NULL values in the group. + If there are no non-NULL input rows then sum() returns + NULL but total() returns 0.0. + NULL is not normally a helpful result for the sum of no rows + but the SQL standard requires it and most other + SQL database engines implement sum() that way so SQLite does it in the + same way in order to be compatible. The non-standard total() function + is provided as a convenient way to work around this design problem + in the SQL language.

    + +

    The result of total() is always a floating point value. + The result of sum() is an integer value if all non-NULL inputs are integers. + If any input to sum() is neither an integer or a NULL + then sum() returns a floating point value + which might be an approximation to the true sum.

    + +

    Sum() will throw an "integer overflow" exception if all inputs + are integers or NULL + and an integer overflow occurs at any point during the computation. + Total() never throws an exception.

    +
    +} + + +Section INSERT insert + +Syntax {sql-statement} { +INSERT [OR ] INTO [ .] [()] VALUES() | +INSERT [OR ] INTO [ .] [()] +} + +puts { +

    The INSERT statement comes in two basic forms. The first form +(with the "VALUES" keyword) creates a single new row in an existing table. +If no column-list is specified then the number of values must +be the same as the number of columns in the table. If a column-list +is specified, then the number of values must match the number of +specified columns. Columns of the table that do not appear in the +column list are filled with the default value, or with NULL if no +default value is specified. +

    + +

The second form of the INSERT statement takes its data from a +SELECT statement. The number of columns in the result of the +SELECT must exactly match the number of columns in the table if +no column list is specified, or it must match the number of columns +named in the column list. A new entry is made in the table +for every row of the SELECT result. The SELECT may be simple +or compound.

    + +

    The optional conflict-clause allows the specification of an alternative +constraint conflict resolution algorithm to use during this one command. +See the section titled +ON CONFLICT for additional information. +For compatibility with MySQL, the parser allows the use of the +single keyword REPLACE as an alias for "INSERT OR REPLACE". +

    +} + + +Section {ON CONFLICT clause} conflict + +Syntax {conflict-clause} { +ON CONFLICT +} {conflict-algorithm} { +ROLLBACK | ABORT | FAIL | IGNORE | REPLACE +} + +puts { +

    The ON CONFLICT clause is not a separate SQL command. It is a +non-standard clause that can appear in many other SQL commands. +It is given its own section in this document because it is not +part of standard SQL and therefore might not be familiar.

    + +

    The syntax for the ON CONFLICT clause is as shown above for +the CREATE TABLE command. For the INSERT and +UPDATE commands, the keywords "ON CONFLICT" are replaced by "OR", to make +the syntax seem more natural. For example, instead of +"INSERT ON CONFLICT IGNORE" we have "INSERT OR IGNORE". +The keywords change but the meaning of the clause is the same +either way.

    + +

    The ON CONFLICT clause specifies an algorithm used to resolve +constraint conflicts. There are five choices: ROLLBACK, ABORT, +FAIL, IGNORE, and REPLACE. The default algorithm is ABORT. This +is what they mean:

    + +
    +
    ROLLBACK
    +

    When a constraint violation occurs, an immediate ROLLBACK +occurs, thus ending the current transaction, and the command aborts +with a return code of SQLITE_CONSTRAINT. If no transaction is +active (other than the implied transaction that is created on every +command) then this algorithm works the same as ABORT.

    + +
    ABORT
    +

    When a constraint violation occurs, the command backs out +any prior changes it might have made and aborts with a return code +of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes +from prior commands within the same transaction +are preserved. This is the default behavior.

    + +
    FAIL
    +

    When a constraint violation occurs, the command aborts with a +return code SQLITE_CONSTRAINT. But any changes to the database that +the command made prior to encountering the constraint violation +are preserved and are not backed out. For example, if an UPDATE +statement encountered a constraint violation on the 100th row that +it attempts to update, then the first 99 row changes are preserved +but changes to rows 100 and beyond never occur.

    + +
    IGNORE
    +

    When a constraint violation occurs, the one row that contains +the constraint violation is not inserted or changed. But the command +continues executing normally. Other rows before and after the row that +contained the constraint violation continue to be inserted or updated +normally. No error is returned.

    + +
    REPLACE
    +

    When a UNIQUE constraint violation occurs, the pre-existing rows +that are causing the constraint violation are removed prior to inserting +or updating the current row. Thus the insert or update always occurs. +The command continues executing normally. No error is returned. +If a NOT NULL constraint violation occurs, the NULL value is replaced +by the default value for that column. If the column has no default +value, then the ABORT algorithm is used. If a CHECK constraint violation +occurs then the IGNORE algorithm is used.

    + +

    When this conflict resolution strategy deletes rows in order to +satisfy a constraint, it does not invoke delete triggers on those +rows. This behavior might change in a future release.

    +
    + +

    The algorithm specified in the OR clause of a INSERT or UPDATE +overrides any algorithm specified in a CREATE TABLE. +If no algorithm is specified anywhere, the ABORT algorithm is used.

    +} + +Section REINDEX reindex + +Syntax {sql-statement} { + REINDEX +} +Syntax {sql-statement} { + REINDEX [ .] +} + +puts { +

    The REINDEX command is used to delete and recreate indices from scratch. +This is useful when the definition of a collation sequence has changed. +

    + +

    In the first form, all indices in all attached databases that use the +named collation sequence are recreated. In the second form, if +[database-name.]table/index-name identifies a table, then all indices +associated with the table are rebuilt. If an index is identified, then only +this specific index is deleted and recreated. +

    + +

    If no database-name is specified and there exists both a table or +index and a collation sequence of the specified name, then indices associated +with the collation sequence only are reconstructed. This ambiguity may be +dispelled by always specifying a database-name when reindexing a +specific table or index. +} + +Section REPLACE replace + +Syntax {sql-statement} { +REPLACE INTO [ .] [( )] VALUES ( ) | +REPLACE INTO [ .] [( )] +} + +puts { +

    The REPLACE command is an alias for the "INSERT OR REPLACE" variant +of the INSERT command. This alias is provided for +compatibility with MySQL. See the +INSERT command documentation for additional +information.

    +} + + +Section SELECT select + +Syntax {sql-statement} { +SELECT [ALL | DISTINCT] [FROM ] +[WHERE ] +[GROUP BY ] +[HAVING ] +[
    [
    ]* +} {table} { + [AS ] | +(
    + + + + + + +
    'keyword'A keyword in single quotes is interpreted as a literal string + if it occurs in a context where a string literal is allowed, otherwise + it is understood as an identifier.
    "keyword"A keyword in double-quotes is interpreted as an identifier if + it matches a known identifier. Otherwise it is interpreted as a + string literal.
    [keyword]A keyword enclosed in square brackets is always understood as + an identifier. This is not standard SQL. This quoting mechanism + is used by MS Access and SQL Server and is included in SQLite for + compatibility.
    + +

    + +

    Quoted keywords are unaesthetic. +To help you avoid them, SQLite allows many keywords to be used unquoted +as the names of databases, tables, indices, triggers, views, columns, +user-defined functions, collations, attached databases, and virtual +function modules. +In the list of keywords that follows, those that can be used as identifiers +are shown in an italic font. Keywords that must be quoted in order to be +used as identifiers are shown in bold.

    + +

+SQLite adds new keywords from time to time when it takes on new features. +So to prevent your code from being broken by future enhancements, you should +normally quote any identifier that is an English language word, even if +you do not have to. +

    + +

    +The following are the keywords currently recognized by SQLite: +

    + +
    + + +
    +} + +set n [llength $keyword_list] +set nCol 5 +set nRow [expr {($n+$nCol-1)/$nCol}] +set i 0 +foreach word $keyword_list { + if {[string index $word end]=="*"} { + set word [string range $word 0 end-1] + set font i + } else { + set font b + } + if {$i==$nRow} { + puts "" + set i 1 + } else { + incr i + } + puts "<$font>$word
    " +} + +puts { +
    + +

    Special names

    + +

    The following are not keywords in SQLite, but are used as names of +system objects. They can be used as an identifier for a different +type of object.

    + +
    + _ROWID_
    + MAIN
    + OID
    + ROWID
    + SQLITE_MASTER
    + SQLITE_SEQUENCE
    + SQLITE_TEMP_MASTER
    + TEMP
    +
    +} + +puts {
    } +footer $rcsid +if {[string length $outputdir]} { + footer $rcsid +} +puts {
    } +} +
    ADDED pages/limits.in Index: pages/limits.in ================================================================== --- /dev/null +++ pages/limits.in @@ -0,0 +1,309 @@ +Implementation Limits For SQLite + +

    Limits In SQLite

    + +

    +"Limits" in the context of this article means sizes or +quantities that can not be exceeded. We are concerned +with things like the maximum number of bytes in a +BLOB or the maximum number of columns in a table. +

    + +

    +SQLite was originally designed with a policy of avoiding +arbitrary limits. +Of course, every program that runs on a machine with finite +memory and disk space has limits of some kind. But in SQLite, +those limits +were not well defined. The policy was that if it would fit +in memory and you could count it with a 32-bit integer, then +it should work. +

    + +

    +Unfortunately, the no-limits policy has been shown to create +problems. Because the upper bounds were not well +defined, they were not tested, and bugs (including possible +security exploits) were often found when pushing SQLite to +extremes. For this reason, newer versions of SQLite have +well-defined limits and those limits are tested as part of +the test suite. +

    + +

+This article defines what the limits of SQLite are and how they +can be customized for specific applications. The default settings +for limits are normally quite large and adequate for almost every +application. Some applications may want to increase a limit here +or there, but we expect such needs to be rare. More commonly, +an application might want to recompile SQLite with much lower +limits to avoid excess resource utilization in the event of +a bug in higher-level SQL statement generators or to help thwart +attackers who inject malicious SQL statements. +

    +} +proc limititem {title text} { + puts "
  • $title

    \n$text
  • " +} +puts { +
      +} + +limititem {Maximum length of a string or BLOB} { +

      +The maximum number of bytes in a string or BLOB in SQLite is defined +by the preprocessor macro SQLITE_MAX_LENGTH. The default value +of this macro is 1 billion (1 thousand million or 1,000,000,000). +You can raise or lower this value at compile-time using a command-line +option like this: +

      + +
      -DSQLITE_MAX_LENGTH=123456789
      + +

      +The current implementation will only support a string or BLOB +length up to 231-1 or 2147483647. And +some built-in functions such as hex() might fail well before that +point. In security-sensitive applications it is best not to +try to increase the maximum string and blob length. In fact, +you might do well to lower the maximum string and blob length +to something more in the range of a few million if that is +possible. +

      + +

      +During part of SQLite's INSERT and SELECT processing, the complete +content of each row in the database is encoded as a single BLOB. +So the SQLITE_MAX_LENGTH parameter also determines the maximum +number of bytes in a row. +

      +} + +limititem {Maximum Number Of Columns} { +

      +The SQLITE_MAX_COLUMN compile-time parameter is used to set an upper +bound on: +

      + +
        +
      • The number of columns in a table
      • +
      • The number of columns in an index
      • +
      • The number of columns in a view
      • +
      • The number of terms in the SET clause of an UPDATE statement
      • +
      • The number of columns in the result set of a SELECT statement
      • +
      • The number of terms in a GROUP BY or ORDER BY clause
      • +
      • The number of values in an INSERT statement
      • +
      + +

+The default setting for SQLITE_MAX_COLUMN is 2000. You can change it +at compile time to values as large as 32767. You might be able to +redefine this value to be as large as billions, though nobody has ever +tried doing that so we do not know if it will work. On the other hand, there +are people who will argue that a well-normalized database design +will never need a value larger than about 100. +

      + +

      +In most applications, the number of columns is small - a few dozen. +There are places in the SQLite code generator that use algorithms +that are O(N²) where N is the number of columns. +So if you redefine SQLITE_MAX_COLUMN to be a +really huge number and you generate SQL that uses a large number of +columns, you may find that +sqlite3_prepare_v2() +runs slowly. +} + +limititem {Maximum Length Of An SQL Statement} { +

      +The maximum number of bytes in the text of an SQL statement is +limited to SQLITE_MAX_SQL_LENGTH which defaults to 1000000. You +can redefine this limit to be as large as the smaller of SQLITE_MAX_LENGTH +and 1073741824. +

      + +

      +If an SQL statement is limited to be a million bytes in length, then +obviously you will not be able to insert multi-million byte strings +by embedding them as literals inside of INSERT statements. But +you should not do that anyway. Use host parameters +for your data. Prepare short SQL statements like this: +

      + +
      +INSERT INTO tab1 VALUES(?,?,?); +
      + +

+Then use the +sqlite3_bind_XXXX() functions +to bind your large string values to the SQL statement. The use of binding +obviates the need to escape quote characters in the string, reducing the +risk of SQL injection attacks. It also runs faster since the large +string does not need to be parsed or copied as much. +

      +} + +limititem {Maximum Number Of Tables In A Join} { +

      +SQLite does not support joins containing more than 64 tables. +This limit arises from the fact that the SQLite code generator +uses bitmaps with one bit per join-table in the query optimizer. +

      +} + +limititem {Maximum Depth Of An Expression Tree} { +

      +SQLite parses expressions into a tree for processing. During +code generation, SQLite walks this tree recursively. The depth +of expression trees is therefore limited in order to avoid +using too much stack space. +

      + +

      +The SQLITE_MAX_EXPR_DEPTH parameter determines the maximum expression +tree depth. If the value is 0, then no limit is enforced. The +current implementation has a default value of 1000. +

      +} + +limititem {Maximum Number Of Arguments On A Function} { +

      +The SQLITE_MAX_FUNCTION_ARG parameter determines the maximum number +of parameters that can be passed to an SQL function. The default value +of this limit is 100. We know of no +technical reason why SQLite would not work with functions that have +millions of parameters. However, we suspect that anybody who tries +to invoke a function with millions of parameters is really +trying to find security exploits in systems that use SQLite, +not do useful work, +and so for that reason we have set this parameter relatively low. +} + +limititem {Maximum Number Of Terms In A Compound SELECT Statement} { +

      +A compound SELECT statement is two or more SELECT statements connected +by operators UNION, UNION ALL, EXCEPT, or INTERSECT. We call each +individual SELECT statement within a compound SELECT a "term". +

      + +

      +The code generator in SQLite processes compound SELECT statements using +a recursive algorithm. In order to limit the size of the stack, we +therefore limit the number of terms in a compound SELECT. The maximum +number of terms is SQLITE_MAX_COMPOUND_SELECT which defaults to 500. +We think this is a generous allotment since in practice we almost +never see the number of terms in a compound select exceed single digits. +

      +} + +limititem {Maximum Length Of A LIKE Or GLOB Pattern} { +

      +The pattern matching algorithm used in the default LIKE and GLOB +implementation of SQLite can exhibit O(N²) performance (where +N is the number of characters in the pattern) for certain pathological +cases. To avoid denial-of-service attacks from miscreants who are able +to specify their own LIKE or GLOB patterns, the length of the LIKE +or GLOB pattern is limited to SQLITE_MAX_LIKE_PATTERN_LENGTH bytes. +The default value of this limit is 50000. A modern workstation can +evaluate even a pathological LIKE or GLOB pattern of 50000 bytes +relatively quickly. The denial of service problem only comes into +play when the pattern length gets into millions of bytes. Nevertheless, +since most useful LIKE or GLOB patterns are at most a few dozen bytes +in length, paranoid application developers may want to reduce this +parameter to something in the range of a few hundred if they know that +external users are able to generate arbitrary patterns. +

      +} + +limititem {Maximum Number Of Host Parameters In A Single SQL Statement} { +

      +A host parameter is a place-holder in an SQL statement that is filled +in using one of the +sqlite3_bind_XXXX() interfaces. +Many SQL programmers are familiar with using a question mark ("?") as a +host parameter. SQLite also supports named host parameters prefaced +by ":", "$", or "@" and numbered host parameters of the form "?123". +

      + +

      +Each host parameter in an SQLite statement is assigned a number. The +numbers normally begin with 1 and increase by one with each new +parameter. However, when the "?123" form is used, the host parameter +number is the number that follows the question mark. +

      + +

      +The maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER. +This setting defaults to 999. +

      +} + +limititem {Maximum Number Of Attached Databases} { +

+The ATTACH statement is an SQLite extension +that allows two or more databases to be associated to the same database +connection and to operate as if they were a single database. The number +of simultaneously attached databases is limited to SQLITE_MAX_ATTACHED +which is set to 10 by default. +The code generator in SQLite uses bitmaps +to keep track of attached databases. That means that the number of +attached databases cannot be increased above 30 on a 32-bit machine +or 62 on a 64-bit machine. +} + +limititem {Maximum Database Page Size} { +

      +An SQLite database file is organized as pages. The size of each +page is a power of 2 between 512 and SQLITE_MAX_PAGE_SIZE. +The default value for SQLITE_MAX_PAGE_SIZE is 32768. The current +implementation will not support a larger value. +

      + +

      +It used to be the case that SQLite would allocate some stack +structures whose size was proportional to the maximum page size. +For this reason, SQLite would sometimes be compiled with a smaller +maximum page size on embedded devices with limited stack memory. But +more recent versions of SQLite put these large structures on the +heap, not on the stack, so reducing the maximum page size is no +longer necessary on embedded devices. +

      +} + +limititem {Maximum Number Of Pages In A Database File} { +

      +SQLite is able to limit the size of a database file to prevent +the database file from growing too large and consuming too much +disk or flash space. +The SQLITE_MAX_PAGE_COUNT parameter, which is normally set to +1073741823, is the maximum number of pages allowed in a single +database file. An attempt to insert new data that would cause +the database file to grow larger than this will return +SQLITE_FULL. +

      + +

      +The +max_page_count PRAGMA can be used to raise or lower this +limit at run-time. +

      + +

      +Note that the transaction processing in SQLite requires two bits +of heap memory for every page in the database file. For databases +of a few megabytes in size, this amounts to only a few hundred +bytes of heap memory. But for gigabyte-sized databases the amount +of heap memory required is getting into the kilobyte range and +for terabyte-sized databases, megabytes of heap memory must be +allocated and zeroed at each transaction. SQLite will +support very large databases in theory, but the current implementation +is optimized for the common SQLite use cases of embedded devices +and persistent stores for desktop applications. In other words, +SQLite is designed for use with databases sized in kilobytes or +megabytes not gigabytes. If you are building an application to +work with databases that are hundreds of gigabytes or more +in size, then you should perhaps consider using a different database +engine that is explicitly designed for such large data sets. +

      ADDED pages/lockingv3.in Index: pages/lockingv3.in ================================================================== --- /dev/null +++ pages/lockingv3.in @@ -0,0 +1,564 @@ +File Locking And Concurrency In SQLite Version 3 + + +proc HEADING {level title {label {}}} { + global pnum + incr pnum($level) + foreach i [array names pnum] { + if {$i>$level} {set pnum($i) 0} + } + set h [expr {$level+1}] + if {$h>6} {set h 6} + set n $pnum(1).$pnum(2) + for {set i 3} {$i<=$level} {incr i} { + append n .$pnum($i) + } + if {$label!=""} { + puts "" + } + puts "$n $title" +} +set pnum(1) 0 +set pnum(2) 0 +set pnum(3) 0 +set pnum(4) 0 +set pnum(5) 0 +set pnum(6) 0 +set pnum(7) 0 +set pnum(8) 0 + +HEADING 1 {File Locking And Concurrency In SQLite Version 3} + +puts { +

      Version 3 of SQLite introduces a more complex locking and journaling +mechanism designed to improve concurrency and reduce the writer starvation +problem. The new mechanism also allows atomic commits of transactions +involving multiple database files. +This document describes the new locking mechanism. +The intended audience is programmers who want to understand and/or modify +the pager code and reviewers working to verify the design +of SQLite version 3. +

      +} + +HEADING 1 {Overview} overview + +puts { +

+Locking and concurrency control are handled by the + +pager module. +The pager module is responsible for making SQLite "ACID" (Atomic, +Consistent, Isolated, and Durable). The pager module makes sure changes +happen all at once, that either all changes occur or none of them do, +that two or more processes do not try to access the database +in incompatible ways at the same time, and that once changes have been +written they persist until explicitly deleted. The pager also provides +a memory cache of some of the contents of the disk file.

      + +

The pager is unconcerned +with the details of B-Trees, text encodings, indices, and so forth. +From the point of view of the pager the database consists of +a single file of uniform-sized blocks. Each block is called a +"page" and is usually 1024 bytes in size. The pages are numbered +beginning with 1. So the first 1024 bytes of the database are called +"page 1" and the second 1024 bytes are called "page 2" and so forth. All +other encoding details are handled by higher layers of the library. +The pager communicates with the operating system using one of several +modules +(Examples: + +os_unix.c, + +os_win.c) +that provides a uniform abstraction for operating system services. +

      + +

      The pager module effectively controls access for separate threads, or +separate processes, or both. Throughout this document whenever the +word "process" is written you may substitute the word "thread" without +changing the truth of the statement.

      +} + +HEADING 1 {Locking} locking + +puts { +

      +From the point of view of a single process, a database file +can be in one of five locking states: +

      + +

      + + + + + + + + + + + + + + + +
      UNLOCKED +No locks are held on the database. The database may be neither read nor +written. Any internally cached data is considered suspect and subject to +verification against the database file before being used. Other +processes can read or write the database as their own locking states +permit. This is the default state. +
      SHARED +The database may be read but not written. Any number of +processes can hold SHARED locks at the same time, hence there can be +many simultaneous readers. But no other thread or process is allowed +to write to the database file while one or more SHARED locks are active. +
      RESERVED +A RESERVED lock means that the process is planning on writing to the +database file at some point in the future but that it is currently just +reading from the file. Only a single RESERVED lock may be active at one +time, though multiple SHARED locks can coexist with a single RESERVED lock. +RESERVED differs from PENDING in that new SHARED locks can be acquired +while there is a RESERVED lock. +
      PENDING +A PENDING lock means that the process holding the lock wants to write +to the database as soon as possible and is just waiting on all current +SHARED locks to clear so that it can get an EXCLUSIVE lock. No new +SHARED locks are permitted against the database if +a PENDING lock is active, though existing SHARED locks are allowed to +continue. +
      EXCLUSIVE +An EXCLUSIVE lock is needed in order to write to the database file. +Only one EXCLUSIVE lock is allowed on the file and no other locks of +any kind are allowed to coexist with an EXCLUSIVE lock. In order to +maximize concurrency, SQLite works to minimize the amount of time that +EXCLUSIVE locks are held. +
      +

      + +

      +The operating system interface layer understands and tracks all five +locking states described above. +The pager module only tracks four of the five locking states. +A PENDING lock is always just a temporary +stepping stone on the path to an EXCLUSIVE lock and so the pager module +does not track PENDING locks. +

      +} + +HEADING 1 {The Rollback Journal} rollback + +puts { +

Any time a process wants to make a change to a database file, it +first records enough information in the rollback journal to +restore the database file back to its initial condition. Thus, before +altering any page of the database, the original contents of that page +must be written into the journal. The journal also records the initial +size of the database so that if the database file grows it can be truncated +back to its original size on a rollback.

      + +

The rollback journal is an ordinary disk file that has the same name as +the database file with the suffix "-journal" added.

      + +

If SQLite is working with multiple databases at the same time +(using the ATTACH command) then each database has its own journal. +But there is also a separate aggregate journal +called the master journal. +The master journal does not contain page data used for rolling back +changes. Instead the master journal contains the names of the +individual file journals for each of the ATTACHed databases. Each of +the individual file journals also contains the name of the master journal. +If there are no ATTACHed databases (or if none of the ATTACHed databases +is participating in the current transaction) no master journal is +created and the normal rollback journal contains an empty string +in the place normally reserved for recording the name of the master +journal.

      + +

An individual file journal is said to be hot +if it needs to be rolled back +in order to restore the integrity of its database. +A hot journal is created when a process is in the middle of a database +update and a program or operating system crash or power failure prevents +the update from completing. +Hot journals are an exception condition. +Hot journals exist to recover from crashes and power failures. +If everything is working correctly +(that is, if there are no crashes or power failures) +you will never get a hot journal. +

      + +

      +If no master journal is involved, then +a journal is hot if it exists and its corresponding database file +does not have a RESERVED lock. +If a master journal is named in the file journal, then the file journal +is hot if its master journal exists and there is no RESERVED +lock on the corresponding database file. +It is important to understand when a journal is hot so the +preceding rules will be repeated in bullets: +

      + +
        +
      • A journal is hot if... +
          +
        • It exists, and
        • +
• Its master journal exists or the master journal name is an + empty string, and
        • +
        • There is no RESERVED lock on the corresponding database file.
        • +
        +
      • +
      +} + +HEADING 2 {Dealing with hot journals} hot_journals + +puts { +

+Before reading from a database file, SQLite always checks to see if that +database file has a hot journal. If the file does have a hot journal, then +the journal is rolled back before the file is read. In this way, we ensure +that the database file is in a consistent state before it is read. +

      + +

When a process wants to read from a database file, it follows +the following sequence of steps: +

      + +
        +
      1. Open the database file and obtain a SHARED lock. If the SHARED lock + cannot be obtained, fail immediately and return SQLITE_BUSY.
      2. +
      3. Check to see if the database file has a hot journal. If the file + does not have a hot journal, we are done. Return immediately. + If there is a hot journal, that journal must be rolled back by + the subsequent steps of this algorithm.
      4. +
      5. Acquire a PENDING lock then an EXCLUSIVE lock on the database file. + (Note: Do not acquire a RESERVED lock because that would make + other processes think the journal was no longer hot.) If we + fail to acquire these locks it means another process + is already trying to do the rollback. In that case, + drop all locks, close the database, and return SQLITE_BUSY.
      6. +
      7. Read the journal file and roll back the changes.
      8. +
      9. Wait for the rolled back changes to be written onto + the surface of the disk. This protects the integrity of the database + in case another power failure or crash occurs.
      10. +
      11. Delete the journal file.
      12. +
      13. Delete the master journal file if it is safe to do so. + This step is optional. It is here only to prevent stale + master journals from cluttering up the disk drive. + See the discussion below for details.
      14. +
      15. Drop the EXCLUSIVE and PENDING locks but retain the SHARED lock.
      16. +
      + +

      After the algorithm above completes successfully, it is safe to +read from the database file. Once all reading has completed, the +SHARED lock is dropped.

      +} + +HEADING 2 {Deleting stale master journals} stale_master_journals + +puts { +

      A stale master journal is a master journal that is no longer being +used for anything. There is no requirement that stale master journals +be deleted. The only reason for doing so is to free up disk space.

      + +

      A master journal is stale if no individual file journals are pointing +to it. To figure out if a master journal is stale, we first read the +master journal to obtain the names of all of its file journals. Then +we check each of those file journals. If any of the file journals named +in the master journal exists and points back to the master journal, then +the master journal is not stale. If all file journals are either missing +or refer to other master journals or no master journal at all, then the +master journal we are testing is stale and can be safely deleted.

      +} + +HEADING 1 {Writing to a database file} writing + +puts { +

      To write to a database, a process must first acquire a SHARED lock +as described above (possibly rolling back incomplete changes if there +is a hot journal). +After a SHARED lock is obtained, a RESERVED lock must be acquired. +The RESERVED lock signals that the process intends to write to the +database at some point in the future. Only one process at a time +can hold a RESERVED lock. But other processes can continue to read +the database while the RESERVED lock is held. +

      + +

      If the process that wants to write is unable to obtain a RESERVED +lock, it must mean that another process already has a RESERVED lock. +In that case, the write attempt fails and returns SQLITE_BUSY.

      + +

      After obtaining a RESERVED lock, the process that wants to write +creates a rollback journal. The header of the journal is initialized +with the original size of the database file. Space in the journal header +is also reserved for a master journal name, though the master journal +name is initially empty.

      + +

      Before making changes to any page of the database, the process writes +the original content of that page into the rollback journal. Changes +to pages are held in memory at first and are not written to the disk. +The original database file remains unaltered, which means that other +processes can continue to read the database.

      + +

      Eventually, the writing process will want to update the database +file, either because its memory cache has filled up or because it is +ready to commit its changes. Before this happens, the writer must +make sure no other process is reading the database and that the rollback +journal data is safely on the disk surface so that it can be used to +rollback incomplete changes in the event of a power failure. +The steps are as follows:

      + +
        +
      1. Make sure all rollback journal data has actually been written to + the surface of the disk (and is not just being held in the operating + system's or disk controllers cache) so that if a power failure occurs + the data will still be there after power is restored.
      2. +
3. Obtain a PENDING lock and then an EXCLUSIVE lock on the database file. + If other processes still have SHARED locks, the writer might have + to wait until those SHARED locks clear before it is able to obtain + an EXCLUSIVE lock.
      4. +
      5. Write all page modifications currently held in memory out to the + original database disk file.
      6. +
      + +

      +If the reason for writing to the database file is because the memory +cache was full, then the writer will not commit right away. Instead, +the writer might continue to make changes to other pages. Before +subsequent changes are written to the database file, the rollback +journal must be flushed to disk again. Note also that the EXCLUSIVE +lock that the writer obtained in order to write to the database initially +must be held until all changes are committed. That means that no other +processes are able to access the database from the +time the memory cache first spills to disk until the transaction +commits. +

      + +

      +When a writer is ready to commit its changes, it executes the following +steps: +

      + +
        +
      1. + Obtain an EXCLUSIVE lock on the database file and + make sure all memory changes have been written to the database file + using the algorithm of steps 1-3 above.
      2. +
      3. Flush all database file changes to the disk. Wait for those changes + to actually be written onto the disk surface.
      4. +
      5. Delete the journal file. This is the instant when the changes are + committed. Prior to deleting the journal file, if a power failure + or crash occurs, the next process to open the database will see that + it has a hot journal and will roll the changes back. + After the journal is deleted, there will no longer be a hot journal + and the changes will persist. +
      6. +
      7. Drop the EXCLUSIVE and PENDING locks from the database file. +
      8. +
      + +

As soon as the PENDING lock is released from the database file, other +processes can begin reading the database again. In the current implementation, +the RESERVED lock is also released, but that is not essential. Future +versions of SQLite might provide a "CHECKPOINT" SQL command that will +commit all changes made so far within a transaction but retain the +RESERVED lock so that additional changes can be made without giving +any other process an opportunity to write.

      + +

      If a transaction involves multiple databases, then a more complex +commit sequence is used, as follows:

      + +
        +
      1. + Make sure all individual database files have an EXCLUSIVE lock and a + valid journal. +
      2. Create a master-journal. The name of the master-journal is arbitrary. + (The current implementation appends random suffixes to the name of the + main database file until it finds a name that does not previously exist.) + Fill the master journal with the names of all the individual journals + and flush its contents to disk. +
      3. Write the name of the master journal into + all individual journals (in space set aside for that purpose in the + headers of the individual journals) and flush the contents of the + individual journals to disk and wait for those changes to reach the + disk surface. +
      4. Flush all database file changes to the disk. Wait for those changes + to actually be written onto the disk surface.
      5. +
      6. Delete the master journal file. This is the instant when the changes are + committed. Prior to deleting the master journal file, if a power failure + or crash occurs, the individual file journals will be considered hot + and will be rolled back by the next process that + attempts to read them. After the master journal has been deleted, + the file journals will no longer be considered hot and the changes + will persist. +
      7. +
      8. Delete all individual journal files. +
      9. Drop the EXCLUSIVE and PENDING locks from all database files. +
      10. +
      +} + +HEADING 2 {Writer starvation} writer_starvation + +puts { +

      In SQLite version 2, if many processes are reading from the database, +it might be the case that there is never a time when there are +no active readers. And if there is always at least one read lock on the +database, no process would ever be able to make changes to the database +because it would be impossible to acquire a write lock. This situation +is called writer starvation.

      + +

      SQLite version 3 seeks to avoid writer starvation through the use of +the PENDING lock. The PENDING lock allows existing readers to continue +but prevents new readers from connecting to the database. So when a +process wants to write a busy database, it can set a PENDING lock which +will prevent new readers from coming in. Assuming existing readers do +eventually complete, all SHARED locks will eventually clear and the +writer will be given a chance to make its changes.

      +} + +HEADING 1 {How To Corrupt Your Database Files} how_to_corrupt + +puts { +

      The pager module is robust but it is not completely failsafe. +It can be subverted. This section attempts to identify and explain +the risks.

      + +

      +Clearly, a hardware or operating system fault that introduces incorrect data +into the middle of the database file or journal will cause problems. +Likewise, +if a rogue process opens a database file or journal and writes malformed +data into the middle of it, then the database will become corrupt. +There is not much that can be done about these kinds of problems +so they are given no further attention. +

      + +

      +SQLite uses POSIX advisory locks to implement locking on Unix. On +windows it uses the LockFile(), LockFileEx(), and UnlockFile() system +calls. SQLite assumes that these system calls all work as advertised. If +that is not the case, then database corruption can result. One should +note that POSIX advisory locking is known to be buggy or even unimplemented +on many NFS implementations (including recent versions of Mac OS X) +and that there are reports of locking problems +for network filesystems under windows. Your best defense is to not +use SQLite for files on a network filesystem. +

      + +

+SQLite uses the fsync() system call to flush data to the disk under Unix and +it uses the FlushFileBuffers() to do the same under windows. Once again, +SQLite assumes that these operating system services function as advertised. +But it has been reported that fsync() and FlushFileBuffers() do not always +work correctly, especially with inexpensive IDE disks. Apparently some +manufacturers of IDE disks have defective controller chips that report +that data has reached the disk surface when in fact the data is still +in volatile cache memory in the disk drive electronics. There are also +reports that windows sometimes chooses to ignore FlushFileBuffers() for +unspecified reasons. The author cannot verify any of these reports. +But if they are true, it means that database corruption is a possibility +following an unexpected power loss. These are hardware and/or operating +system bugs that SQLite is unable to defend against. +

      + +

      +If a crash or power failure occurs and results in a hot journal but that +journal is deleted, the next process to open the database will not +know that it contains changes that need to be rolled back. The rollback +will not occur and the database will be left in an inconsistent state. +Rollback journals might be deleted for any number of reasons: +

      + +
        +
      • An administrator might be cleaning up after an OS crash or power failure, + see the journal file, think it is junk, and delete it.
      • +
      • Someone (or some process) might rename the database file but fail to + also rename its associated journal.
      • +
      • If the database file has aliases (hard or soft links) and the file + is opened by a different alias than the one used to create the journal, + then the journal will not be found. To avoid this problem, you should + not create links to SQLite database files.
      • +
      • Filesystem corruption following a power failure might cause the + journal to be renamed or deleted.
      • +
      + +

+The last (fourth) bullet above merits additional comment. When SQLite creates +a journal file on Unix, it opens the directory that contains that file and +calls fsync() on the directory, in an effort to push the directory information +to disk. But suppose some other process is adding or removing unrelated +files to the directory that contains the database and journal at the +moment of a power failure. The supposedly unrelated actions of this other +process might result in the journal file being dropped from the directory and +moved into "lost+found". This is an unlikely scenario, but it could happen. +The best defenses are to use a journaling filesystem or to keep the +database and journal in a directory by themselves. +

      + +

      +For a commit involving multiple databases and a master journal, if the +various databases were on different disk volumes and a power failure occurs +during the commit, then when the machine comes back up the disks might +be remounted with different names. Or some disks might not be mounted +at all. When this happens the individual file journals and the master +journal might not be able to find each other. The worst outcome from +this scenario is that the commit ceases to be atomic. +Some databases might be rolled back and others might not. +All databases will continue to be self-consistent. +To defend against this problem, keep all databases +on the same disk volume and/or remount disks using exactly the same names +after a power failure. +

      +} + +HEADING 1 {Transaction Control At The SQL Level} transaction_control + +puts { +

      +The changes to locking and concurrency control in SQLite version 3 also +introduce some subtle changes in the way transactions work at the SQL +language level. +By default, SQLite version 3 operates in autocommit mode. +In autocommit mode, +all changes to the database are committed as soon as all operations associated +with the current database connection complete.

      + +

The SQL command "BEGIN TRANSACTION" (the TRANSACTION keyword +is optional) is used to take SQLite out of autocommit mode. +Note that the BEGIN command does not acquire any locks on the database. +After a BEGIN command, a SHARED lock will be acquired when the first +SELECT statement is executed. A RESERVED lock will be acquired when +the first INSERT, UPDATE, or DELETE statement is executed. No EXCLUSIVE +lock is acquired until either the memory cache fills up and must +be spilled to disk or until the transaction commits. In this way, +the system delays blocking read access to the file until the +last possible moment. +

      + +

      The SQL command "COMMIT" does not actually commit the changes to +disk. It just turns autocommit back on. Then, at the conclusion of +the command, the regular autocommit logic takes over and causes the +actual commit to disk to occur. +The SQL command "ROLLBACK" also operates by turning autocommit back on, +but it also sets a flag that tells the autocommit logic to rollback rather +than commit.

      + +

If the SQL COMMIT command turns autocommit on and the autocommit logic +then tries to commit changes but fails because some other process is holding +a SHARED lock, then autocommit is turned back off automatically. This +allows the user to retry the COMMIT at a later time after the SHARED lock +has had an opportunity to clear.

      + +

      If multiple commands are being executed against the same SQLite database +connection at the same time, the autocommit is deferred until the very +last command completes. For example, if a SELECT statement is being +executed, the execution of the command will pause as each row of the +result is returned. During this pause other INSERT, UPDATE, or DELETE +commands can be executed against other tables in the database. But none +of these changes will commit until the original SELECT statement finishes. +

      +} +
      ADDED pages/mingw.in Index: pages/mingw.in ================================================================== --- /dev/null +++ pages/mingw.in @@ -0,0 +1,144 @@ +Notes On How To Build MinGW As A Cross-Compiler + +

      +Notes On How To Build MinGW As A Cross-Compiler +

      + +

      MinGW or +Minimalist GNU For Windows +is a version of the popular GCC compiler that builds Win95/Win98/WinNT +binaries. See the website for details.

      + +

      This page describes how you can build MinGW +from sources as a cross-compiler +running under Linux. Doing so will allow you to construct +WinNT binaries from the comfort and convenience of your +Unix desktop.

      + + +proc Link {path {file {}}} { + if {$file!=""} { + set path $path/$file + } else { + set file $path + } + puts "$file" +} + + +

      Here are the steps:

      + +
        +
      1. +

        Get a copy of source code. You will need the binutils, the +compiler, and the MinGW runtime. Each are available separately. +As of this writing, Mumit Khan has collected everything you need +together in one FTP site: + + +set ftpsite \ + ftp://ftp.nanotech.wisc.edu/pub/khan/gnu-win32/mingw32/snapshots/gcc-2.95.2-1 +Link $ftpsite + + +The three files you will need are:

        +
          +
        • + + Link $ftpsite binutils-19990818-1-src.tar.gz + puts
        • + Link $ftpsite gcc-2.95.2-1-src.tar.gz + puts
        • + Link $ftpsite mingw-20000203.zip + +
        • +
        + +

        Put all the downloads in a directory out of the way. The sequel +will assume all downloads are in a directory named +~/mingw/download.

        +
      2. + +
      3. +

        +Create a directory in which to install the new compiler suite and make +the new directory writable. +Depending on what directory you choose, you might need to become +root. The example shell commands that follow +will assume the installation directory is +/opt/mingw and that your user ID is drh.

        +
        +su
        +mkdir /opt/mingw
        +chown drh /opt/mingw
        +exit
        +
        +
      4. + +
      5. +

        Unpack the source tarballs into a separate directory.

        +
        +mkdir ~/mingw/src
        +cd ~/mingw/src
        +tar xzf ../download/binutils-*.tar.gz
        +tar xzf ../download/gcc-*.tar.gz
        +unzip ../download/mingw-*.zip
        +
        +
      6. + +
      7. +

        Create a directory in which to put all the build products.

        +
        +mkdir ~/mingw/bld
        +
        +
      8. + +
      9. +

        Configure and build binutils and add the results to your PATH.

        +
        +mkdir ~/mingw/bld/binutils
        +cd ~/mingw/bld/binutils
        +../../src/binutils/configure --prefix=/opt/mingw --target=i386-mingw32 -v
        +make 2>&1 | tee make.out
        +make install 2>&1 | tee make-install.out
        +export PATH=$PATH:/opt/mingw/bin
        +
        +
      10. + +
      11. +

        Manually copy the runtime include files into the installation directory +before trying to build the compiler.

        +
        +mkdir /opt/mingw/i386-mingw32/include
        +cd ~/mingw/src/mingw-runtime*/mingw/include
        +cp -r * /opt/mingw/i386-mingw32/include
        +
        +
      12. + +
      13. +

        Configure and build the compiler

        +
        +mkdir ~/mingw/bld/gcc
        +cd ~/mingw/bld/gcc
        +../../src/gcc-*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
        +cd gcc
        +make installdirs
        +cd ..
        +make 2>&1 | tee make.out
        +make install
        +
        +
      14. + +
      15. +

        Configure and build the MinGW runtime

        +
        +mkdir ~/mingw/bld/runtime
        +cd ~/mingw/bld/runtime
        +../../src/mingw-runtime*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
        +make install-target-w32api
        +make install
        +
        +
      16. +
      + +

      And you are done...

      ADDED pages/nulls.in Index: pages/nulls.in ================================================================== --- /dev/null +++ pages/nulls.in @@ -0,0 +1,321 @@ +NULL Handling in SQLite + +

      NULL Handling in SQLite Versus Other Database Engines

      + +

      +The goal is +to make SQLite handle NULLs in a standards-compliant way. +But the descriptions in the SQL standards on how to handle +NULLs seem ambiguous. +It is not clear from the standards documents exactly how NULLs should +be handled in all circumstances. +

      + +

      +So instead of going by the standards documents, various popular +SQL engines were tested to see how they handle NULLs. The idea +was to make SQLite work like all the other engines. +A SQL test script was developed and run by volunteers on various +SQL RDBMSes and the results of those tests were used to deduce +how each engine processed NULL values. +The original tests were run in May of 2002. +A copy of the test script is found at the end of this document. +

      + +

+SQLite was originally coded in such a way that the answer to +all questions in the chart below would be "Yes". But the +experiments run on other SQL engines showed that none of them +worked this way. So SQLite was modified to work the same as +Oracle, PostgreSQL, and DB2. This involved making NULLs +indistinct for the purposes of the SELECT DISTINCT statement and +for the UNION operator in a SELECT. NULLs are still distinct +in a UNIQUE column. This seems somewhat arbitrary, but the desire +to be compatible with other engines outweighed that objection. +

      + +

      +It is possible to make SQLite treat NULLs as distinct for the +purposes of the SELECT DISTINCT and UNION. To do so, one should +change the value of the NULL_ALWAYS_DISTINCT #define in the +sqliteInt.h source file and recompile. +

      + +
      +

+Update 2003-07-13: +Since this document was originally written some of the database engines +tested have been updated and users have been kind enough to send in +corrections to the chart below. The original data showed a wide variety +of behaviors, but over time the range of behaviors has converged toward +the PostgreSQL/Oracle model. The only significant difference +is that Informix and MS-SQL both treat NULLs as +indistinct in a UNIQUE column. +

      + +

      +The fact that NULLs are distinct for UNIQUE columns but are indistinct for +SELECT DISTINCT and UNION continues to be puzzling. It seems that NULLs +should be either distinct everywhere or nowhere. And the SQL standards +documents suggest that NULLs should be distinct everywhere. Yet as of +this writing, no SQL engine tested treats NULLs as distinct in a SELECT +DISTINCT statement or in a UNION. +

      +
      + + +

      +The following table shows the results of the NULL handling experiments. +

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        SQLitePostgreSQLOracleInformixDB2MS-SQLOCELOT
      Adding anything to null gives nullYesYesYesYesYesYesYes
      Multiplying null by zero gives nullYesYesYesYesYesYesYes
      nulls are distinct in a UNIQUE columnYesYesYesNo(Note 4)NoYes
      nulls are distinct in SELECT DISTINCTNoNoNoNoNoNoNo
      nulls are distinct in a UNIONNoNoNoNoNoNoNo
      "CASE WHEN null THEN 1 ELSE 0 END" is 0?YesYesYesYesYesYesYes
      "null OR true" is trueYesYesYesYesYesYesYes
      "not (null AND false)" is trueYesYesYesYesYesYesYes
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        MySQL
      3.23.41
      MySQL
      4.0.16
      FirebirdSQL
      Anywhere
      Borland
      Interbase
      Adding anything to null gives nullYesYesYesYesYes
      Multiplying null by zero gives nullYesYesYesYesYes
      nulls are distinct in a UNIQUE columnYesYesYes(Note 4)(Note 4)
      nulls are distinct in SELECT DISTINCTNoNoNo (Note 1)NoNo
      nulls are distinct in a UNION(Note 3)NoNo (Note 1)NoNo
      "CASE WHEN null THEN 1 ELSE 0 END" is 0?YesYesYesYes(Note 5)
      "null OR true" is trueYesYesYesYesYes
      "not (null AND false)" is trueNoYesYesYesYes
      + + + + + + + + + + + + + + + + + + + +
      Notes:  1. Older versions of firebird omits all NULLs from SELECT DISTINCT +and from UNION.
      2. Test data unavailable.
      3. MySQL version 3.23.41 does not support UNION.
      4. DB2, SQL Anywhere, and Borland Interbase +do not allow NULLs in a UNIQUE column.
      5. Borland Interbase does not support CASE expressions.
      +
      + +

       

      +

      +The following script was used to gather information for the table +above. +

      + +
      +-- I have about decided that SQL's treatment of NULLs is capricious and cannot be
      +-- deduced by logic.  It must be discovered by experiment.  To that end, I have 
      +-- prepared the following script to test how various SQL databases deal with NULL.
      +-- My aim is to use the information gathered from this script to make SQLite as much
      +-- like other databases as possible.
      +--
      +-- If you could please run this script in your database engine and mail the results
      +-- to me at drh@hwaci.com, that will be a big help.  Please be sure to identify the
      +-- database engine you use for this test.  Thanks.
      +--
      +-- If you have to change anything to get this script to run with your database
      +-- engine, please send your revised script together with your results.
      +--
      +
      +-- Create a test table with data
      +create table t1(a int, b int, c int);
      +insert into t1 values(1,0,0);
      +insert into t1 values(2,0,1);
      +insert into t1 values(3,1,0);
      +insert into t1 values(4,1,1);
      +insert into t1 values(5,null,0);
      +insert into t1 values(6,null,1);
      +insert into t1 values(7,null,null);
      +
      +-- Check to see what CASE does with NULLs in its test expressions
      +select a, case when b<>0 then 1 else 0 end from t1;
      +select a+10, case when not b<>0 then 1 else 0 end from t1;
      +select a+20, case when b<>0 and c<>0 then 1 else 0 end from t1;
      +select a+30, case when not (b<>0 and c<>0) then 1 else 0 end from t1;
      +select a+40, case when b<>0 or c<>0 then 1 else 0 end from t1;
      +select a+50, case when not (b<>0 or c<>0) then 1 else 0 end from t1;
      +select a+60, case b when c then 1 else 0 end from t1;
      +select a+70, case c when b then 1 else 0 end from t1;
      +
      +-- What happens when you multiply a NULL by zero?
      +select a+80, b*0 from t1;
      +select a+90, b*c from t1;
      +
      +-- What happens to NULL for other operators?
      +select a+100, b+c from t1;
      +
      +-- Test the treatment of aggregate operators
      +select count(*), count(b), sum(b), avg(b), min(b), max(b) from t1;
      +
      +-- Check the behavior of NULLs in WHERE clauses
      +select a+110 from t1 where b<10;
      +select a+120 from t1 where not b>10;
      +select a+130 from t1 where b<10 OR c=1;
      +select a+140 from t1 where b<10 AND c=1;
      +select a+150 from t1 where not (b<10 AND c=1);
      +select a+160 from t1 where not (c=1 AND b<10);
      +
      +-- Check the behavior of NULLs in a DISTINCT query
      +select distinct b from t1;
      +
      +-- Check the behavior of NULLs in a UNION query
      +select b from t1 union select b from t1;
      +
      +-- Create a new table with a unique column.  Check to see if NULLs are considered
      +-- to be distinct.
      +create table t2(a int, b int unique);
      +insert into t2 values(1,1);
      +insert into t2 values(2,null);
      +insert into t2 values(3,null);
      +select * from t2;
      +
      +drop table t1;
      +drop table t2;
      +
      ADDED pages/oldnews.in Index: pages/oldnews.in ================================================================== --- /dev/null +++ pages/oldnews.in @@ -0,0 +1,508 @@ +SQLite Older News + + +proc newsitem {date title text} { + puts "

      $date - $title

      " + regsub -all "\n( *\n)+" $text "

      \n\n

      " txt + puts "

      $txt

      " + puts "
      " +} + +newsitem {2007-Aug-13} {Version 3.4.2} { + While stress-testing the + soft_heap_limit + feature, a bug that could lead to + database + corruption was + discovered and fixed. + Though the consequences of this bug are severe, the chances of hitting + it in a typical application are remote. Upgrading is recommended + only if you use the + sqlite3_soft_heap_limit + interface. +} + +newsitem {2007-Jly-20} {Version 3.4.1} { + This release fixes a bug in VACUUM that + can lead to + database corruption. The bug was introduced in version + 3.3.14. + Upgrading is recommended for all users. Also included are a slew of + other more routine + enhancements and bug fixes. +} + +newsitem {2007-Jun-18} {Version 3.4.0} { + This release fixes two separate bugs either of which + can lead to database corruption. Upgrading + is strongly recommended. If you must continue using an older version + of SQLite, please at least read about how to avoid these bugs + at + + CorruptionFollowingBusyError and + ticket #2418 +

      + This release also adds explicit limits on the + sizes and quantities of things SQLite will handle.  The new limits might + cause compatibility problems for existing applications that + use excessively large strings, BLOBs, tables, or SQL statements. + The new limits can be increased at compile-time to work around any problems + that arise.  Nevertheless, the version number of this release is + 3.4.0 instead of 3.3.18 in order to call attention to the possible + incompatibility. +

      + There are also new features, including + incremental BLOB I/O and + incremental vacuum. + See the change log + for additional information. +} + +newsitem {2007-Apr-25} {Version 3.3.17} { + This version fixes a bug in the forwards-compatibility logic of SQLite + that was causing a database to become unreadable when it should have + been read-only. Upgrade from 3.3.16 only if you plan to deploy into + a product that might need to be upgraded in the future. For day to day + use, it probably does not matter. +} + +newsitem {2007-Apr-18} {Version 3.3.16} { + Performance improvements added in 3.3.14 but mistakenly turned off + in 3.3.15 have been reinstated. A bug has been fixed that prevented + VACUUM from running if a NULL value was in a UNIQUE column. +} + +newsitem {2007-Apr-09} {Version 3.3.15} { + An annoying bug introduced in 3.3.14 has been fixed. There are + also many enhancements to the test suite. +} + +newsitem {2007-Apr-02} {Version 3.3.14} { + This version focuses on performance improvements. If you recompile + + the amalgamation using GCC option -O3 (the precompiled binaries + use -O2) you may see performance + improvements of 35% or more over version 3.3.13 depending on your + workload. This version also + adds support for + exclusive access mode. +} + +newsitem {2007-Feb-13} {Version 3.3.13} { + This version fixes a subtle bug in the ORDER BY optimizer that can + occur when using joins. There are also a few minor enhancements. + Upgrading is recommended. +} + +newsitem {2007-Jan-27} {Version 3.3.12} { + The first published build of the previous version used the wrong + set of source files. Consequently, many people downloaded a build + that was labeled as "3.3.11" but was really 3.3.10. Version 3.3.12 + is released to clear up the ambiguity. A couple more bugs have + also been fixed and + PRAGMA integrity_check has been enhanced. 
+} + +newsitem {2007-Jan-22} {Version 3.3.11} { + Version 3.3.11 fixes for a few more problems in version 3.3.9 that + version 3.3.10 failed to catch. Upgrading is recommended. +} + +newsitem {2007-Jan-9} {Version 3.3.10} { + Version 3.3.10 fixes several bugs that were introduced by the previous + release. Upgrading is recommended. +} + +newsitem {2007-Jan-4} {Version 3.3.9} { + Version 3.3.9 fixes bugs that can lead to database corruption under + obscure and difficult to reproduce circumstances. See + + DatabaseCorruption in the + wiki for details. + This release also adds the new + sqlite3_prepare_v2() + API and includes important bug fixes in the command-line + shell and enhancements to the query optimizer. Upgrading is + recommended. +} + +newsitem {2006-Oct-9} {Version 3.3.8} { + Version 3.3.8 adds support for full-text search using the + FTS1 + module. There are also minor bug fixes. Upgrade only if + you want to try out the new full-text search capabilities or if + you are having problems with 3.3.7. +} + +newsitem {2006-Aug-12} {Version 3.3.7} { + Version 3.3.7 includes support for loadable extensions and virtual + tables. But both features are still considered "beta" and their + APIs are subject to change in a future release. This release is + mostly to make available the minor bug fixes that have accumulated + since 3.3.6. Upgrading is not necessary. Do so only if you encounter + one of the obscure bugs that have been fixed or if you want to try + out the new features. +} + +newsitem {2006-Jun-19} {New Book About SQLite} { + + The Definitive Guide to SQLite, a new book by + Mike Owens. + is now available from Apress. + The books covers the latest SQLite internals as well as + the native C interface and bindings for PHP, Python, + Perl, Ruby, Tcl, and Java. Recommended. +} + + +newsitem {2006-Jun-6} {Version 3.3.6} { + Changes include improved tolerance for windows virus scanners + and faster :memory: databases. 
There are also fixes for several + obscure bugs. Upgrade if you are having problems. +} + +newsitem {2006-Apr-5} {Version 3.3.5} { + This release fixes many minor bugs and documentation typos and + provides some minor new features and performance enhancements. + Upgrade only if you are having problems or need one of the new features. +} + +newsitem {2006-Feb-11} {Version 3.3.4} { + This release fixes several bugs, including a + a blunder that might cause a deadlock on multithreaded systems. + Anyone using SQLite in a multithreaded environment should probably upgrade. +} + +newsitem {2006-Jan-31} {Version 3.3.3 stable} { + There have been no major problems discovered in version 3.3.2, so + we hereby declare the new APIs and language features to be stable + and supported. +} + +newsitem {2006-Jan-24} {Version 3.3.2 beta} { + More bug fixes and performance improvements as we move closer to + a production-ready version 3.3.x. +} + +newsitem {2006-Jan-16} {Version 3.3.1 alpha} { + Many bugs found in last week's alpha release have now been fixed and + the library is running much faster again. + + Database connections can now be moved between threads as long as the + connection holds no locks at the time it is moved. Thus the common + paradigm of maintaining a pool of database connections and handing + them off to transient worker threads is now supported. + Please help test this new feature. + See + the MultiThreading wiki page for additional + information. +} + +newsitem {2006-Jan-10} {Version 3.3.0 alpha} { + Version 3.3.0 adds support for CHECK constraints, DESC indices, + separate REAL and INTEGER column affinities, a new OS interface layer + design, and many other changes. The code passed a regression + test but should still be considered alpha. Please report any + problems. + + The file format for version 3.3.0 has changed slightly to support + descending indices and + a more efficient encoding of boolean values. 
SQLite 3.3.0 will read and + write legacy databases created with any prior version of SQLite 3. But + databases created by version 3.3.0 will not be readable or writable + by earlier versions of the SQLite. The older file format can be + specified at compile-time for those rare cases where it is needed. +} + +newsitem {2005-Dec-19} {Versions 3.2.8 and 2.8.17} { + These versions contain one-line changes to 3.2.7 and 2.8.16 to fix a bug + that has been present since March of 2002 and version 2.4.0. + That bug might possibly cause database corruption if a large INSERT or + UPDATE statement within a multi-statement transaction fails due to a + uniqueness constraint but the containing transaction commits. +} + + +newsitem {2005-Sep-24} {Version 3.2.7} { + This version fixes several minor and obscure bugs. + Upgrade only if you are having problems. +} + +newsitem {2005-Sep-16} {Version 3.2.6 - Critical Bug Fix} { + This version fixes a bug that can result in database + corruption if a VACUUM of a 1 gibibyte or larger database fails + (perhaps do to running out of disk space or an unexpected power loss) + and is later rolled back. +

      + Also in this release: + The ORDER BY and GROUP BY processing was rewritten to use less memory. + Support for COUNT(DISTINCT) was added. The LIKE operator can now be + used by the optimizer on columns with COLLATE NOCASE. +} + +newsitem {2005-Aug-27} {Version 3.2.5} { + This release fixes a few more lingering bugs in the new code. + We expect that this release will be stable and ready for production use. +} + +newsitem {2005-Aug-24} {Version 3.2.4} { + This release fixes a bug in the new optimizer that can lead to segfaults + when parsing very complex WHERE clauses. +} + +newsitem {2005-Aug-21} {Version 3.2.3} { + This release adds the ANALYZE command, + the CAST operator, and many + very substantial improvements to the query optimizer. See the + change log for additional + information. +} + +newsitem {2005-Aug-2} {2005 Open Source Award for SQLite} { + SQLite and its primary author D. Richard Hipp have been honored with + a 2005 Open Source + Award from Google and O'Reilly.
      +} + + +newsitem {2005-Jun-13} {Version 3.2.2} { + This release includes numerous minor bug fixes, speed improvements, + and code size reductions. There is no reason to upgrade unless you + are having problems or unless you just want to. +} + +newsitem {2005-Mar-29} {Version 3.2.1} { + This release fixes a memory allocation problem in the new + ALTER TABLE ADD COLUMN + command. +} + +newsitem {2005-Mar-21} {Version 3.2.0} { + The primary purpose for version 3.2.0 is to add support for + ALTER TABLE ADD COLUMN. + The new ADD COLUMN capability is made + possible by AOL developers supporting and embracing great + open-source software. Thanks, AOL! + + Version 3.2.0 also fixes an obscure but serious bug that was discovered + just prior to release. If you have a multi-statement transaction and + within that transaction an UPDATE or INSERT statement fails due to a + constraint, then you try to rollback the whole transaction, the rollback + might not work correctly. See + Ticket #1171 + for details. Upgrading is recommended for all users. +} + +newsitem {2005-Mar-16} {Version 3.1.6} { + Version 3.1.6 fixes a critical bug that can cause database corruption + when inserting rows into tables with around 125 columns. This bug was + introduced in version 3.0.0. See + Ticket #1163 + for additional information. +} + +newsitem {2005-Mar-11} {Versions 3.1.4 and 3.1.5 Released} { + Version 3.1.4 fixes a critical bug that could cause database corruption + if the autovacuum mode of version 3.1.0 is turned on (it is off by + default) and a CREATE UNIQUE INDEX is executed within a transaction but + fails because the indexed columns are not unique. Anyone using the + autovacuum feature and unique indices should upgrade. + + Version 3.1.5 adds the ability to disable + the F_FULLFSYNC ioctl() in OS-X by setting "PRAGMA synchronous=on" instead + of the default "PRAGMA synchronous=full". 
There was an attempt to add + this capability in 3.1.4 but it did not work due to a spelling error. +} + +newsitem {2005-Feb-19} {Version 3.1.3 Released} { + Version 3.1.3 cleans up some minor issues discovered in version 3.1.2. +} + +newsitem {2005-Feb-15} {Versions 2.8.16 and 3.1.2 Released} { + A critical bug in the VACUUM command that can lead to database + corruption has been fixed in both the 2.x branch and the main + 3.x line. This bug has existed in all prior versions of SQLite. + Even though it is unlikely you will ever encounter this bug, + it is suggested that all users upgrade. See + + ticket #1116. for additional information. + + Version 3.1.2 is also the first stable release of the 3.1 + series. SQLite 3.1 features added support for correlated + subqueries, autovacuum, autoincrement, ALTER TABLE, and + other enhancements. See the + release notes + for version 3.1.0 for a detailed description of the + changes available in the 3.1 series. +} + +newsitem {2005-Feb-01} {Version 3.1.1 (beta) Released} { + Version 3.1.1 (beta) is now available on the + website. Verison 3.1.1 is fully backwards compatible with the 3.0 series + and features many new features including Autovacuum and correlated + subqueries. The + release notes + From version 3.1.0 apply equally to this release beta. A stable release + is expected within a couple of weeks. +} + +newsitem {2005-Jan-21} {Version 3.1.0 (alpha) Released} { + Version 3.1.0 (alpha) is now available on the + website. Verison 3.1.0 is fully backwards compatible with the 3.0 series + and features many new features including Autovacuum and correlated + subqueries. See the + release notes + for details. + + This is an alpha release. A beta release is expected in about a week + with the first stable release to follow after two more weeks. 
+} + +newsitem {2004-Nov-09} {SQLite at the 2004 International PHP Conference} { + There was a talk on the architecture of SQLite and how to optimize + SQLite queries at the 2004 International PHP Conference in Frankfurt, + Germany. + + Slides from that talk are available. +} + +newsitem {2004-Oct-11} {Version 3.0.8} { + Version 3.0.8 of SQLite contains several code optimizations and minor + bug fixes and adds support for DEFERRED, IMMEDIATE, and EXCLUSIVE + transactions. This is an incremental release. There is no reason + to upgrade from version 3.0.7 if that version is working for you. +} + + +newsitem {2004-Oct-10} {SQLite at the 11th +Annual Tcl/Tk Conference} { + There will be a talk on the use of SQLite in Tcl/Tk at the + 11th Tcl/Tk Conference this week in + New Orleans. Visit + http://www.tcl.tk/ for details. + + Slides from the talk are available. +} + +newsitem {2004-Sep-18} {Version 3.0.7} { + Version 3.0 has now been in use by multiple projects for several + months with no major difficulties. We consider it stable and + ready for production use. +} + +newsitem {2004-Sep-02} {Version 3.0.6 (beta)} { + Because of some important changes to sqlite3_step(), + we have decided to + do an additional beta release prior to the first "stable" release. + If no serious problems are discovered in this version, we will + release version 3.0 "stable" in about a week. +} + + +newsitem {2004-Aug-29} {Version 3.0.5 (beta)} { + The fourth beta release of SQLite version 3.0 is now available. + The next release is expected to be called "stable". +} + + +newsitem {2004-Aug-08} {Version 3.0.4 (beta)} { + The third beta release of SQLite version 3.0 is now available. + This new beta fixes several bugs including a database corruption + problem that can occur when doing a DELETE while a SELECT is pending. + Expect at least one more beta before version 3.0 goes final. 
+} + +newsitem {2004-July-22} {Version 3.0.3 (beta)} { + The second beta release of SQLite version 3.0 is now available. + This new beta fixes many bugs and adds support for databases with + varying page sizes. The next 3.0 release will probably be called + a final or stable release. + + Version 3.0 adds support for internationalization and a new + more compact file format. + Details. + The API and file format have been fixed since 3.0.2. All + regression tests pass (over 100000 tests) and the test suite + exercises over 95% of the code. + + SQLite version 3.0 is made possible in part by AOL + developers supporting and embracing great Open-Source Software. +} + +newsitem {2004-Jly-22} {Version 2.8.15} { + SQLite version 2.8.15 is a maintenance release for the version 2.8 + series. Version 2.8 continues to be maintained with bug fixes, but + no new features will be added to version 2.8. All the changes in + this release are minor. If you are not having problems, there is + there is no reason to upgrade. +} + +newsitem {2004-Jun-30} {Version 3.0.2 (beta) Released} { + The first beta release of SQLite version 3.0 is now available. + Version 3.0 adds support for internationalization and a new + more compact file format. + Details. + As of this release, the API and file format are frozen. All + regression tests pass (over 100000 tests) and the test suite + exercises over 95% of the code. + + SQLite version 3.0 is made possible in part by AOL + developers supporting and embracing great Open-Source Software. +} + + +newsitem {2004-Jun-25} {Website hacked} { + The www.sqlite.org website was hacked sometime around 2004-Jun-22 + because the lead SQLite developer failed to properly patch CVS. + Evidence suggests that the attacker was unable to elevate privileges + above user "cvs". Nevertheless, as a precaution the entire website + has been reconstructed from scratch on a fresh machine. All services + should be back to normal as of 2004-Jun-28. 
+} + + +newsitem {2004-Jun-18} {Version 3.0.0 (alpha) Released} { + The first alpha release of SQLite version 3.0 is available for + public review and comment. Version 3.0 enhances internationalization support + through the use of UTF-16 and user-defined text collating sequences. + BLOBs can now be stored directly, without encoding. + A new file format results in databases that are 25% smaller (depending + on content). The code is also a little faster. In spite of the many + new features, the library footprint is still less than 240KB + (x86, gcc -O1). + Additional information. + + Our intent is to freeze the file format and API on 2004-Jul-01. + Users are encouraged to review and evaluate this alpha release carefully + and submit any feedback prior to that date. + + The 2.8 series of SQLite will continue to be supported with bug + fixes for the foreseeable future. +} + +newsitem {2004-Jun-09} {Version 2.8.14 Released} { + SQLite version 2.8.14 is a patch release to the stable 2.8 series. + There is no reason to upgrade if 2.8.13 is working ok for you. + This is only a bug-fix release. Most development effort is + going into version 3.0.0 which is due out soon. +} + +newsitem {2004-May-31} {CVS Access Temporarily Disabled} { + Anonymous access to the CVS repository will be suspended + for 2 weeks beginning on 2004-June-04. Everyone will still + be able to download + prepackaged source bundles, create or modify trouble tickets, or view + change logs during the CVS service interruption. Full open access to the + CVS repository will be restored on 2004-June-18. +} + +newsitem {2004-Apr-23} {Work Begins On SQLite Version 3} { + Work has begun on version 3 of SQLite. Version 3 is a major + changes to both the C-language API and the underlying file format + that will enable SQLite to better support internationalization. + The first beta is schedule for release on 2004-July-01. + + Plans are to continue to support SQLite version 2.8 with + bug fixes. 
But all new development will occur in version 3.0. +} + ADDED pages/omitted.in Index: pages/omitted.in ================================================================== --- /dev/null +++ pages/omitted.in @@ -0,0 +1,77 @@ +SQL Features That SQLite Does Not Implement + +

      SQL Features That SQLite Does Not Implement

      + +

      +Rather than try to list all the features of SQL92 that SQLite does +support, it is much easier to list those that it does not. +Unsupported features of SQL92 are shown below.

      + +

      +The order of this list gives some hint as to when a feature might +be added to SQLite. Those features near the top of the list are +likely to be added in the near future. There are no immediate +plans to add features near the bottom of the list. +

      + + + + +proc feature {name desc} { + puts "" + puts "" +} + +feature {FOREIGN KEY constraints} { + FOREIGN KEY constraints are parsed but are not enforced. +} + +feature {Complete trigger support} { + There is some support for triggers but it is not complete. Missing + subfeatures include FOR EACH STATEMENT triggers (currently all triggers + must be FOR EACH ROW), INSTEAD OF triggers on tables (currently + INSTEAD OF triggers are only allowed on views), and recursive + triggers - triggers that trigger themselves. +} + +feature {Complete ALTER TABLE support} { + Only the RENAME TABLE and ADD COLUMN variants of the + ALTER TABLE command are supported. Other kinds of ALTER TABLE operations + such as + DROP COLUMN, ALTER COLUMN, ADD CONSTRAINT, and so forth are omitted. +} + +feature {Nested transactions} { + The current implementation only allows a single active transaction. +} + +feature {RIGHT and FULL OUTER JOIN} { + LEFT OUTER JOIN is implemented, but not RIGHT OUTER JOIN or + FULL OUTER JOIN. +} + +feature {Writing to VIEWs} { + VIEWs in SQLite are read-only. You may not execute a DELETE, INSERT, or + UPDATE statement on a view. But you can create a trigger + that fires on an attempt to DELETE, INSERT, or UPDATE a view and do + what you need in the body of the trigger. +} + +feature {GRANT and REVOKE} { + Since SQLite reads and writes an ordinary disk file, the + only access permissions that can be applied are the normal + file access permissions of the underlying operating system. + The GRANT and REVOKE commands commonly found on client/server + RDBMSes are not implemented because they would be meaningless + for an embedded database engine. +} + +
      $name " + puts "$desc
      + +

      +If you find other SQL92 features that SQLite does not support, please +add them to the Wiki page at + +http://www.sqlite.org/cvstrac/wiki?p=Unsupported +

      ADDED pages/opcode.in Index: pages/opcode.in ================================================================== --- /dev/null +++ pages/opcode.in @@ -0,0 +1,239 @@ +SQLite Virtual Machine Opcodes + +

      SQLite Virtual Machine Opcodes

      + + +set fd [open $::SRC/src/vdbe.c r] +set file [read $fd] +close $fd +set current_op {} +foreach line [split $file \n] { + set line [string trim $line] + if {[string index $line 1]!="*"} { + set current_op {} + continue + } + if {[regexp {^/\* Opcode: } $line]} { + set current_op [lindex $line 2] + set txt [lrange $line 3 end] + regsub -all {>} $txt {\>} txt + regsub -all {<} $txt {\<} txt + set Opcode($current_op:args) $txt + lappend OpcodeList $current_op + continue + } + if {$current_op==""} continue + if {[regexp {^\*/} $line]} { + set current_op {} + continue + } + set line [string trim [string range $line 3 end]] + if {$line==""} { + append Opcode($current_op:text) \n

      + } else { + regsub -all {>} $line {\>} line + regsub -all {<} $line {\<} line + append Opcode($current_op:text) \n$line + } +} +unset file + + +

      Introduction

      + +

      In order to execute an SQL statement, the SQLite library first parses +the SQL, analyzes the statement, then generates a short program to execute +the statement. The program is generated for a "virtual machine" implemented +by the SQLite library. This document describes the operation of that +virtual machine.

      + +

      This document is intended as a reference, not a tutorial. +A separate Virtual Machine Tutorial is +available. If you are looking for a narrative description +of how the virtual machine works, you should read the tutorial +and not this document. Once you have a basic idea of what the +virtual machine does, you can refer back to this document for +the details on a particular opcode. +Unfortunately, the virtual machine tutorial was written for +SQLite version 1.0. There are substantial changes in the virtual +machine for version 2.0 and the document has not been updated. +

      + +

      The source code to the virtual machine is in the vdbe.c source +file. All of the opcode definitions further down in this document are +contained in comments in the source file. In fact, the opcode table +in this document +was generated by scanning the vdbe.c source file +and extracting the necessary information from comments. So the +source code comments are really the canonical source of information +about the virtual machine. When in doubt, refer to the source code.

      + +

      Each instruction in the virtual machine consists of an opcode and +up to three operands named P1, P2 and P3. P1 may be an arbitrary +integer. P2 must be a non-negative integer. P2 is always the +jump destination in any operation that might cause a jump. +P3 is a null-terminated +string or NULL. Some operators use all three operands. Some use +one or two. Some operators use none of the operands.

      + +

      The virtual machine begins execution on instruction number 0. +Execution continues until (1) a Halt instruction is seen, or +(2) the program counter becomes one greater than the address of +last instruction, or (3) there is an execution error. +When the virtual machine halts, all memory +that it allocated is released and all database cursors it may +have had open are closed. If the execution stopped due to an +error, any pending transactions are terminated and changes made +to the database are rolled back.

      + +

      The virtual machine also contains an operand stack of unlimited +depth. Many of the opcodes use operands from the stack. See the +individual opcode descriptions for details.

      + +

      The virtual machine can have zero or more cursors. Each cursor +is a pointer into a single table or index within the database. +There can be multiple cursors pointing at the same index or table. +All cursors operate independently, even cursors pointing to the same +indices or tables. +The only way for the virtual machine to interact with a database +file is through a cursor. +Instructions in the virtual +machine can create a new cursor (Open), read data from a cursor +(Column), advance the cursor to the next entry in the table +(Next) or index (NextIdx), and many other operations. +All cursors are automatically +closed when the virtual machine terminates.

      + +

      The virtual machine contains an arbitrary number of fixed memory +locations with addresses beginning at zero and growing upward. +Each memory location can hold an arbitrary string. The memory +cells are typically used to hold the result of a scalar SELECT +that is part of a larger expression.

      + +

      The virtual machine contains a single sorter. +The sorter is able to accumulate records, sort those records, +then play the records back in sorted order. The sorter is used +to implement the ORDER BY clause of a SELECT statement.

      + +

      The virtual machine contains a single "List". +The list stores a list of integers. The list is used to hold the +rowids for records of a database table that needs to be modified. +The WHERE clause of an UPDATE or DELETE statement scans through +the table and writes the rowid of every record to be modified +into the list. Then the list is played back and the table is modified +in a separate step.

      + +

      The virtual machine can contain an arbitrary number of "Sets". +Each set holds an arbitrary number of strings. Sets are used to +implement the IN operator with a constant right-hand side.

      + +

      The virtual machine can open a single external file for reading. +This external read file is used to implement the COPY command.

      + +

      Finally, the virtual machine can have a single set of aggregators. +An aggregator is a device used to implement the GROUP BY clause +of a SELECT. An aggregator has one or more slots that can hold +values being extracted by the select. The number of slots is the +same for all aggregators and is defined by the AggReset operation. +At any point in time a single aggregator is current or "has focus". +There are operations to read or write to memory slots of the aggregator +in focus. There are also operations to change the focus aggregator +and to scan through all aggregators.

      + +

      Viewing Programs Generated By SQLite

      + +

      Every SQL statement that SQLite interprets results in a program +for the virtual machine. But if you precede the SQL statement with +the keyword "EXPLAIN" the virtual machine will not execute the +program. Instead, the instructions of the program will be returned +like a query result. This feature is useful for debugging and +for learning how the virtual machine operates.

      + +

      You can use the sqlite command-line tool to see the +instructions generated by an SQL statement. The following is +an example:

      + + +proc Code {body} { + puts {
      } + regsub -all {&} [string trim $body] {\&} body + regsub -all {>} $body {\>} body + regsub -all {<} $body {\<} body + regsub -all {\(\(\(} $body {} body + regsub -all {\)\)\)} $body {} body + regsub -all { } $body {\ } body + regsub -all \n $body
      \n body + puts $body + puts {
      } +} + +Code { +$ (((sqlite ex1))) +sqlite> (((.explain))) +sqlite> (((explain delete from tbl1 where two<20;))) +addr opcode p1 p2 p3 +---- ------------ ----- ----- ---------------------------------------- +0 Transaction 0 0 +1 VerifyCookie 219 0 +2 ListOpen 0 0 +3 Open 0 3 tbl1 +4 Rewind 0 0 +5 Next 0 12 +6 Column 0 1 +7 Integer 20 0 +8 Ge 0 5 +9 Recno 0 0 +10 ListWrite 0 0 +11 Goto 0 5 +12 Close 0 0 +13 ListRewind 0 0 +14 OpenWrite 0 3 +15 ListRead 0 19 +16 MoveTo 0 0 +17 Delete 0 0 +18 Goto 0 15 +19 ListClose 0 0 +20 Commit 0 0 +} +
      + +

      All you have to do is add the "EXPLAIN" keyword to the front of the +SQL statement. But if you use the ".explain" command to sqlite +first, it will set up the output mode to make the program more easily +viewable.

      + +

      If sqlite has been compiled without the "-DNDEBUG=1" option +(that is, with the NDEBUG preprocessor macro not defined) then you +can put the SQLite virtual machine in a mode where it will trace its +execution by writing messages to standard output. The non-standard +SQL "PRAGMA" comments can be used to turn tracing on and off. To +turn tracing on, enter: +

      + +
      +PRAGMA vdbe_trace=on;
      +
      + +

      +You can turn tracing back off by entering a similar statement but +changing the value "on" to "off".

      + +

      The Opcodes

      + +

      There are currently puts [llength $OpcodeList] +opcodes defined by the virtual machine. +All currently defined opcodes are described in the table below. +This table was generated automatically by scanning the source code +from the file vdbe.c.

      + +

      + + + + foreach op [lsort -dictionary $OpcodeList] { + puts {" + } + +
      Opcode NameDescription
      } + puts "$op" + puts "[string trim $Opcode($op:text)]

      ADDED pages/optoverview.in Index: pages/optoverview.in ================================================================== --- /dev/null +++ pages/optoverview.in @@ -0,0 +1,513 @@ +The SQLite Query Optimizer Overview + + +proc CODE {text} { + puts "
      "
      +  puts $text
      +  puts "
      " +} +proc SYNTAX {text} { + puts "
      "
      +  set t2 [string map {& & < < > >} $text]
      +  regsub -all "/(\[^\n/\]+)/" $t2 {\1} t3
      +  puts "$t3"
      +  puts "
      " +} +proc IMAGE {name {caption {}}} { + puts "
      " + if {$caption!=""} { + puts "
      $caption" + } + puts "
      " +} +proc PARAGRAPH {text} { + # regsub -all "/(\[a-zA-Z0-9\]+)/" $text {\1} t2 + regsub -all "\\*(\[^\n*\]+)\\*" $text {\1} t3 + puts "

      $t3

      \n" +} +set level(0) 0 +set level(1) 0 +proc HEADING {n name {tag {}}} { + if {$tag!=""} { + puts "" + } + global level + incr level($n) + for {set i [expr {$n+1}]} {$i<10} {incr i} { + set level($i) 0 + } + if {$n==0} { + set num {} + } elseif {$n==1} { + set num $level(1).0 + } else { + set num $level(1) + for {set i 2} {$i<=$n} {incr i} { + append num .$level($i) + } + } + incr n 1 + puts "$num $name" +} + +HEADING 0 {The SQLite Query Optimizer Overview} + +PARAGRAPH { + This document provides a terse overview of how the query optimizer + for SQLite works. This is not a tutorial. The reader is likely to + need some prior knowledge of how database engines operate + in order to fully understand this text. +} + +HEADING 1 {WHERE clause analysis} where_clause + +PARAGRAPH { + The WHERE clause on a query is broken up into "terms" where each term + is separated from the others by an AND operator. +} +PARAGRAPH { + All terms of the WHERE clause are analyzed to see if they can be + satisfied using indices. + Terms that cannot be satisfied through the use of indices become + tests that are evaluated against each row of the relevant input + tables. No tests are done for terms that are completely satisfied by + indices. Sometimes + one or more terms will provide hints to indices but still must be + evaluated against each row of the input tables. +} + +PARAGRAPH { + The analysis of a term might cause new "virtual" terms to + be added to the WHERE clause. Virtual terms can be used with + indices to restrict a search. But virtual terms never generate code + that is tested against input rows. 
      +} + +PARAGRAPH { + To be usable by an index a term must be of one of the following + forms: +} +SYNTAX { + /column/ = /expression/ + /column/ > /expression/ + /column/ >= /expression/ + /column/ < /expression/ + /column/ <= /expression/ + /expression/ = /column/ + /expression/ > /column/ + /expression/ >= /column/ + /expression/ < /column/ + /expression/ <= /column/ + /column/ IN (/expression-list/) + /column/ IN (/subquery/) +} +PARAGRAPH { + If an index is created using a statement like this: +} +CODE { + CREATE INDEX idx_ex1 ON ex1(a,b,c,d,e,...,y,z); +} +PARAGRAPH { + Then the index might be used if the initial columns of the index + (columns a, b, and so forth) appear in WHERE clause terms. + All index columns must be used with + the *=* or *IN* operators except for + the right-most column which can use inequalities. For the right-most + column of an index that is used, there can be up to two inequalities + that must sandwich the allowed values of the column between two extremes. +} +PARAGRAPH { + It is not necessary for every column of an index to appear in a + WHERE clause term in order for that index to be used. + But there can not be gaps in the columns of the index that are used. + Thus for the example index above, if there is no WHERE clause term + that constrains column c, then terms that constrain columns a and b can + be used with the index but not terms that constrain columns d through z. + Similarly, no index column will be used (for indexing purposes) + that is to the right of a + column that is constrained only by inequalities. + For the index above and WHERE clause like this: +} +CODE { + ... WHERE a=5 AND b IN (1,2,3) AND c>12 AND d='hello' +} +PARAGRAPH { + Only columns a, b, and c of the index would be usable. The d column + would not be usable because it occurs to the right of c and c is + constrained only by inequalities. 
      +} + +HEADING 1 {The BETWEEN optimization} between_opt + +PARAGRAPH { + If a term of the WHERE clause is of the following form: +} +SYNTAX { + /expr1/ BETWEEN /expr2/ AND /expr3/ +} +PARAGRAPH { + Then two virtual terms are added as follows: +} +SYNTAX { + /expr1/ >= /expr2/ AND /expr1/ <= /expr3/ +} +PARAGRAPH { + If both virtual terms end up being used as constraints on an index, + then the original BETWEEN term is omitted and the corresponding test + is not performed on input rows. + Thus if the BETWEEN term ends up being used as an index constraint + no tests are ever performed on that term. + On the other hand, the + virtual terms themselves never cause tests to be performed on + input rows. + Thus if the BETWEEN term is not used as an index constraint and + instead must be used to test input rows, the expr1 expression is + only evaluated once. +} + +HEADING 1 {The OR optimization} or_opt + +PARAGRAPH { + If a term consists of multiple subterms containing a common column + name and separated by OR, like this: +} +SYNTAX { + /column/ = /expr1/ OR /column/ = /expr2/ OR /column/ = /expr3/ OR ... +} +PARAGRAPH { + Then the term is rewritten as follows: +} +SYNTAX { + /column/ IN (/expr1/,/expr2/,/expr3/,/expr4/,...) +} +PARAGRAPH { + The rewritten term then might go on to constrain an index using the + normal rules for *IN* operators. + Note that column must be the same column in every OR-connected subterm, + although the column can occur on either the left or the right side of + the *=* operator. +} + +HEADING 1 {The LIKE optimization} like_opt + +PARAGRAPH { + Terms that are composed of the LIKE or GLOB operator + can sometimes be used to constrain indices. + There are many conditions on this use: +} +PARAGRAPH { +
        +
      1. The left-hand side of the LIKE or GLOB operator must be the name + of an indexed column.
      2. +
      3. The right-hand side of the LIKE or GLOB must be a string literal + that does not begin with a wildcard character.
      4. +
      5. The ESCAPE clause cannot appear on the LIKE operator.
      6. +
      7. The built-in functions used to implement LIKE and GLOB must not + have been overloaded using the sqlite3_create_function() API.
      8. +
      9. For the GLOB operator, the column must use the default BINARY + collating sequence.
      10. +
      11. For the LIKE operator, if case_sensitive_like mode is enabled then + the column must use the default BINARY collating sequence, or if + case_sensitive_like mode is disabled then the column must use the + built-in NOCASE collating sequence.
      12. +
      +} +PARAGRAPH { + The LIKE operator has two modes that can be set by a pragma. The + default mode is for LIKE comparisons to be insensitive to differences + of case for latin1 characters. Thus, by default, the following + expression is true: +} +CODE { + 'a' LIKE 'A' +} +PARAGRAPH { + By turning on the case_sensitive_like pragma as follows: +} +CODE { + PRAGMA case_sensitive_like=ON; +} +PARAGRAPH { + Then the LIKE operator pays attention to case and the example above would + evaluate to false. Note that case insensitivity only applies to + latin1 characters - basically the upper and lower case letters of English + in the lower 127 byte codes of ASCII. International character sets + are case sensitive in SQLite unless a user-supplied collating + sequence is used. But if you employ a user-supplied collating sequence, + the LIKE optimization described here will never be taken. +} +PARAGRAPH { + The LIKE operator is case insensitive by default because this is what + the SQL standard requires. You can change the default behavior at + compile time by using the -DSQLITE_CASE_SENSITIVE_LIKE command-line option + to the compiler. +} +PARAGRAPH { + The LIKE optimization might occur if the column named on the left of the + operator uses the BINARY collating sequence (which is the default) and + case_sensitive_like is turned on. Or the optimization might occur if + the column uses the built-in NOCASE collating sequence and the + case_sensitive_like mode is off. These are the only two combinations + under which LIKE operators will be optimized. If the column on the + left-hand side of the LIKE operator uses any collating sequence other + than the built-in BINARY and NOCASE collating sequences, then no optimizations + will ever be attempted on the LIKE operator. +} +PARAGRAPH { + The GLOB operator is always case sensitive. 
      The column on the left side + of the GLOB operator must always use the built-in BINARY collating sequence + or no attempt will be made to optimize that operator with indices. +} +PARAGRAPH { + The right-hand side of the GLOB or LIKE operator must be a literal string + value that does not begin with a wildcard. If the right-hand side is a + parameter that is bound to a string, then no optimization is attempted. + If the right-hand side begins with a wildcard character then no + optimization is attempted. +} +PARAGRAPH { + Suppose the initial sequence of non-wildcard characters on the right-hand + side of the LIKE or GLOB operator is x. We are using a single + character to denote this non-wildcard prefix but the reader should + understand that the prefix can consist of more than 1 character. + Let y be the smallest string that is the same length as /x/ but which + compares greater than x. For example, if x is *hello* then + y would be *hellp*. + The LIKE and GLOB optimizations consist of adding two virtual terms + like this: +} +SYNTAX { + /column/ >= /x/ AND /column/ < /y/ +} +PARAGRAPH { + Under most circumstances, the original LIKE or GLOB operator is still + tested against each input row even if the virtual terms are used to + constrain an index. This is because we do not know what additional + constraints may be imposed by characters to the right + of the x prefix. However, if there is only a single global wildcard + to the right of x, then the original LIKE or GLOB test is disabled. + In other words, if the pattern is like this: +} +SYNTAX { + /column/ LIKE /x/% + /column/ GLOB /x/* +} +PARAGRAPH { + Then the original LIKE or GLOB tests are disabled when the virtual + terms constrain an index because in that case we know that all of the + rows selected by the index will pass the LIKE or GLOB test. +} + +HEADING 1 {Joins} joins + +PARAGRAPH { + The current implementation of + SQLite uses only loop joins. That is to say, joins are implemented as + nested loops. 
      +} +PARAGRAPH { + The default order of the nested loops in a join is for the left-most + table in the FROM clause to form the outer loop and the right-most + table to form the inner loop. + However, SQLite will nest the loops in a different order if doing so + will help it to select better indices. +} +PARAGRAPH { + Inner joins can be freely reordered. However a left outer join is + neither commutative nor associative and hence will not be reordered. + Inner joins to the left and right of the outer join might be reordered + if the optimizer thinks that is advantageous but the outer joins are + always evaluated in the order in which they occur. +} +PARAGRAPH { + When selecting the order of tables in a join, SQLite uses a greedy + algorithm that runs in polynomial time. +} +PARAGRAPH { + The ON and USING clauses of a join are converted into additional + terms of the WHERE clause prior to WHERE clause analysis described + above in paragraph 1.0. Thus + with SQLite, there is no advantage to use the newer SQL92 join syntax + over the older SQL89 comma-join syntax. They both end up accomplishing + exactly the same thing. +} +PARAGRAPH { + Join reordering is automatic and usually works well enough that + programmers do not have to think about it. But occasionally some + hints from the programmer are needed. For a description of when + hints might be necessary and how to provide those hints, see the + QueryPlans + page in the Wiki. +} + +HEADING 1 {Choosing between multiple indices} multi_index + +PARAGRAPH { + Each table in the FROM clause of a query can use at most one index, + and SQLite strives to use at least one index on each table. Sometimes, + two or more indices might be candidates for use on a single table. 
      + For example: +} +CODE { + CREATE TABLE ex2(x,y,z); + CREATE INDEX ex2i1 ON ex2(x); + CREATE INDEX ex2i2 ON ex2(y); + SELECT z FROM ex2 WHERE x=5 AND y=6; +} +PARAGRAPH { + For the SELECT statement above, the optimizer can use the ex2i1 index + to lookup rows of ex2 that contain x=5 and then test each row against + the y=6 term. Or it can use the ex2i2 index to lookup rows + of ex2 that contain y=6 then test each of those rows against the + x=5 term. +} +PARAGRAPH { + When faced with a choice of two or more indices, SQLite tries to estimate + the total amount of work needed to perform the query using each option. + It then selects the option that gives the least estimated work. +} +PARAGRAPH { + To help the optimizer get a more accurate estimate of the work involved + in using various indices, the user may optionally run the ANALYZE command. + The ANALYZE command scans all indices of a database where there might + be a choice between two or more indices and gathers statistics on the + selectiveness of those indices. The results of this scan are stored + in the sqlite_stat1 table. + The contents of the sqlite_stat1 table are not updated as the database + changes so after making significant changes it might be prudent to + rerun ANALYZE. + The results of an ANALYZE command are only available to database connections + that are opened after the ANALYZE command completes. +} +PARAGRAPH { + Once created, the sqlite_stat1 table cannot be dropped. But its + content can be viewed, modified, or erased. Erasing the entire content + of the sqlite_stat1 table has the effect of undoing the ANALYZE command. + Changing the content of the sqlite_stat1 table can get the optimizer + deeply confused and cause it to make silly index choices. Making + updates to the sqlite_stat1 table (except by running ANALYZE) is + not recommended. +} +PARAGRAPH { + Terms of the WHERE clause can be manually disqualified for use with + indices by prepending a unary *+* operator to the column name. 
The + unary *+* is a no-op and will not slow down the evaluation of the test + specified by the term. + But it will prevent the term from constraining an index. + So, in the example above, if the query were rewritten as: +} +CODE { + SELECT z FROM ex2 WHERE +x=5 AND y=6; +} +PARAGRAPH { + The *+* operator on the *x* column would prevent that term from + constraining an index. This would force the use of the ex2i2 index. +} + +HEADING 1 {Avoidance of table lookups} index_only + +PARAGRAPH { + When doing an indexed lookup of a row, the usual procedure is to + do a binary search on the index to find the index entry, then extract + the rowid from the index and use that rowid to do a binary search on + the original table. Thus a typical indexed lookup involves two + binary searches. + If, however, all columns that were to be fetched from the table are + already available in the index itself, SQLite will use the values + contained in the index and will never look up the original table + row. This saves one binary search for each row and can make many + queries run twice as fast. +} + +HEADING 1 {ORDER BY optimizations} order_by + +PARAGRAPH { + SQLite attempts to use an index to satisfy the ORDER BY clause of a + query when possible. + When faced with the choice of using an index to satisfy WHERE clause + constraints or satisfying an ORDER BY clause, SQLite does the same + work analysis described in section 6.0 + and chooses the index that it believes will result in the fastest answer. + +} + +HEADING 1 {Subquery flattening} flattening + +PARAGRAPH { + When a subquery occurs in the FROM clause of a SELECT, the default + behavior is to evaluate the subquery into a transient table, then run + the outer SELECT against the transient table. + This is problematic since the transient table will not have any indices + and the outer query (which is likely a join) will be forced to do a + full table scan on the transient table. 
+} +PARAGRAPH { + To overcome this problem, SQLite attempts to flatten subqueries in + the FROM clause of a SELECT. + This involves inserting the FROM clause of the subquery into the + FROM clause of the outer query and rewriting expressions in + the outer query that refer to the result set of the subquery. + For example: +} +CODE { + SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5 +} +PARAGRAPH { + Would be rewritten using query flattening as: +} +CODE { + SELECT x+y AS a FROM t1 WHERE z<100 AND a>5 +} +PARAGRAPH { + There is a long list of conditions that must all be met in order for + query flattening to occur. +} +PARAGRAPH { +
        +
      1. The subquery and the outer query do not both use aggregates.
      2. +
      3. The subquery is not an aggregate or the outer query is not a join.
      4. +
      5. The subquery is not the right operand of a left outer join, or + the subquery is not itself a join.
      6. +
      7. The subquery is not DISTINCT or the outer query is not a join.
      8. +
      9. The subquery is not DISTINCT or the outer query does not use + aggregates.
      10. +
      11. The subquery does not use aggregates or the outer query is not + DISTINCT.
      12. +
      13. The subquery has a FROM clause.
      14. +
      15. The subquery does not use LIMIT or the outer query is not a join.
      16. +
      17. The subquery does not use LIMIT or the outer query does not use + aggregates.
      18. +
      19. The subquery does not use aggregates or the outer query does not + use LIMIT.
      20. +
      21. The subquery and the outer query do not both have ORDER BY clauses.
      22. +
      23. The subquery is not the right term of a LEFT OUTER JOIN or the + subquery has no WHERE clause.
      24. +
      +} +PARAGRAPH { + The proof that query flattening may safely occur if all of the the + above conditions are met is left as an exercise to the reader. +} +PARAGRAPH { + Query flattening is an important optimization when views are used as + each use of a view is translated into a subquery. +} + +HEADING 1 {The MIN/MAX optimization} minmax + +PARAGRAPH { + Queries of the following forms will be optimized to run in logarithmic + time assuming appropriate indices exist: +} +CODE { + SELECT MIN(x) FROM table; + SELECT MAX(x) FROM table; +} +PARAGRAPH { + In order for these optimizations to occur, they must appear in exactly + the form shown above - changing only the name of the table and column. + It is not permissible to add a WHERE clause or do any arithmetic on the + result. The result set must contain a single column. + The column in the MIN or MAX function must be an indexed column. +} + ADDED pages/pragma.in Index: pages/pragma.in ================================================================== --- /dev/null +++ pages/pragma.in @@ -0,0 +1,620 @@ +Pragma statements supported by SQLite + + +proc Section {name {label {}}} { + puts "\n
      " + if {$label!=""} { + puts "" + } + puts "

      $name

      \n" +} +
      + +

      The PRAGMA command is a special command used to +modify the operation of the SQLite library or to query the library for +internal (non-table) data. The PRAGMA command is issued using the same +interface as other SQLite commands (e.g. SELECT, INSERT) but is +different in the following important respects: +

      +
        +
      • Specific pragma statements may be removed and others added in future + releases of SQLite. Use with caution! +
      • No error messages are generated if an unknown pragma is issued. + Unknown pragmas are simply ignored. This means if there is a typo in + a pragma statement the library does not inform the user of the fact. +
      • Some pragmas take effect during the SQL compilation stage, not the + execution stage. This means if using the C-language sqlite3_prepare(), + sqlite3_step(), sqlite3_finalize() API (or similar in a wrapper + interface), the pragma may be applied to the library during the + sqlite3_prepare() call. +
      • The pragma command is unlikely to be compatible with any other SQL + engine. +
      + +

      The available pragmas fall into four basic categories:

      + + + +Section {PRAGMA command syntax} syntax + +Syntax {sql-statement} { +PRAGMA [= ] | +PRAGMA () +} + + +

      The pragmas that take an integer value also accept +symbolic names. The strings "on", "true", and "yes" +are equivalent to 1. The strings "off", "false", +and "no" are equivalent to 0. These strings are case- +insensitive, and do not require quotes. An unrecognized string will be +treated as 1, and will not generate an error. When the value +is returned it is as an integer.

      + +Section {Pragmas to modify library operation} modify + +
        + +
      • PRAGMA auto_vacuum;
        + PRAGMA auto_vacuum =
        + 0 | none | 1 | full | 2 | incremental;

        +

        Query or set the auto-vacuum flag in the database.

        + +

        Normally, (that is to say when auto_vacuum is 0 or "none") + when a transaction that deletes data from a database is + committed, the database file remains the same size. Unused database file + pages are added to a "freelist" and are reused for subsequent inserts. The + database file does not shrink. + In this mode the VACUUM + command can be used to reclaim unused space.

        + +

        When the auto-vacuum flag is 1 (full), the freelist pages are + moved to the end of the file and the file is truncated to remove + the freelist pages at every commit. + Note, however, that auto-vacuum only truncates the freelist pages + from the file. Auto-vacuum does not defragment the database nor + repack individual database pages the way that the + VACUUM command does. In fact, because + it moves pages around within the file, auto-vacuum can actually + make fragmentation worse.

        + +

        Auto-vacuuming is only possible if the database stores some + additional information that allows each database page to be + traced backwards to its referrer. Therefore, auto-vacuuming must + be turned on before any tables are created. It is not possible + to enable or disable auto-vacuum after a table has been created.

        + +

        When the value of auto-vacuum is 2 (incremental) then the additional + information needed to do autovacuuming is stored in the database file + but autovacuuming does not occur automatically at each commit as it + does with auto_vacuum==full. In incremental mode, the separate + incremental_vacuum pragma must + be invoked to cause the vacuum to occur.

        + +

        The database connection can be changed between full and incremental + autovacuum mode at will. However, the connection cannot be changed + in and out of the "none" mode after any table has been created in the + database. +

      • + + +
      • PRAGMA cache_size; +
        PRAGMA cache_size =
        Number-of-pages;

        +

        Query or change the maximum number of database disk pages that SQLite + will hold in memory at once. Each page uses about 1.5K of memory. + The default cache size is 2000. If you are doing UPDATEs or DELETEs + that change many rows of a database and you do not mind if SQLite + uses more memory, you can increase the cache size for a possible speed + improvement.

        +

        When you change the cache size using the cache_size pragma, the + change only endures for the current session. The cache size reverts + to the default value when the database is closed and reopened. Use + the default_cache_size + pragma to set the cache size permanently.

      • + + +
      • PRAGMA case_sensitive_like; +
        PRAGMA case_sensitive_like =
        0 | 1;

        +

        The default behavior of the LIKE operator is to ignore case + for latin1 characters. Hence, by default 'a' LIKE 'A' is + true. The case_sensitive_like pragma can be turned on to change + this behavior. When case_sensitive_like is enabled, + 'a' LIKE 'A' is false but 'a' LIKE 'a' is still true.

        +
      • + + +
      • PRAGMA count_changes; +
        PRAGMA count_changes =
        0 | 1;

        +

        Query or change the count-changes flag. Normally, when the + count-changes flag is not set, INSERT, UPDATE and DELETE statements + return no data. When count-changes is set, each of these commands + returns a single row of data consisting of one integer value - the + number of rows inserted, modified or deleted by the command. The + returned change count does not include any insertions, modifications + or deletions performed by triggers.

        + + +
      • PRAGMA default_cache_size; +
        PRAGMA default_cache_size =
        Number-of-pages;

        +

        Query or change the maximum number of database disk pages that SQLite + will hold in memory at once. Each page uses 1K on disk and about + 1.5K in memory. + This pragma works like the + cache_size + pragma with the additional + feature that it changes the cache size persistently. With this pragma, + you can set the cache size once and that setting is retained and reused + every time you reopen the database.

      • + + +
      • PRAGMA default_synchronous;

        +

        This pragma was available in version 2.8 but was removed in version + 3.0. It is a dangerous pragma whose use is discouraged. To help + dissuade users of version 2.8 from employing this pragma, the documentation + will not tell you what it does.

      • + + + +
      • PRAGMA empty_result_callbacks; +
        PRAGMA empty_result_callbacks =
        0 | 1;

        +

        Query or change the empty-result-callbacks flag.

        +

        The empty-result-callbacks flag affects the sqlite3_exec API only. + Normally, when the empty-result-callbacks flag is cleared, the + callback function supplied to the sqlite3_exec() call is not invoked + for commands that return zero rows of data. When empty-result-callbacks + is set in this situation, the callback function is invoked exactly once, + with the third parameter set to 0 (NULL). This is to enable programs + that use the sqlite3_exec() API to retrieve column-names even when + a query returns no data. +

        + + +
      • PRAGMA encoding; +
        PRAGMA encoding = "UTF-8"; +
        PRAGMA encoding = "UTF-16"; +
        PRAGMA encoding = "UTF-16le"; +
        PRAGMA encoding = "UTF-16be";

        +

        In first form, if the main database has already been + created, then this pragma returns the text encoding used by the + main database, one of "UTF-8", "UTF-16le" (little-endian UTF-16 + encoding) or "UTF-16be" (big-endian UTF-16 encoding). If the main + database has not already been created, then the value returned is the + text encoding that will be used to create the main database, if + it is created by this session.

        +

        The second and subsequent forms of this pragma are only useful if + the main database has not already been created. In this case the + pragma sets the encoding that the main database will be created with if + it is created by this session. The string "UTF-16" is interpreted + as "UTF-16 encoding using native machine byte-ordering". If the second + and subsequent forms are used after the database file has already + been created, they have no effect and are silently ignored.

        + +

        Once an encoding has been set for a database, it cannot be changed.

        + +

        Databases created by the ATTACH command always use the same encoding + as the main database.

        +
      • + + +
      • PRAGMA full_column_names; +
        PRAGMA full_column_names =
        0 | 1;

        +

        Query or change the full-column-names flag. This flag affects + the way SQLite names columns of data returned by SELECT statements + when the expression for the column is a table-column name or the + wildcard "*". Normally, such result columns are named + <table-name/alias><column-name> if the SELECT statement joins + two or + more tables together, or simply <column-name> if the SELECT + statement queries a single table. When the full-column-names flag + is set, such columns are always named <table-name/alias> + <column-name> regardless of whether or not a join is performed. +

        +

        If both the short-column-names and full-column-names are set, + then the behaviour associated with the full-column-names flag is + exhibited. +

        +
      • + + +
      • PRAGMA fullfsync +
        PRAGMA fullfsync =
        0 | 1;

        +

        Query or change the fullfsync flag. This flag + determines whether or not the F_FULLFSYNC syncing method is used + on systems that support it. The default value is off. As of this + writing (2006-02-10) only Mac OS X supports F_FULLFSYNC. +

        +
      • + + +
      • PRAGMA incremental_vacuum(N);

        +

        The incremental_vacuum pragma causes up to N pages to + be removed from the freelist. The database file is truncated by + the same amount. The incremental_vacuum pragma has no effect if + the database is not in + auto_vacuum==incremental mode + or if there are no pages on the freelist. If there are fewer than + N pages on the freelist, then the entire freelist is cleared.

        + +

        As of version 3.4.0 (the first version that supports + incremental_vacuum) this feature is still experimental. Possible + future changes include enhancing incremental vacuum to do + defragmentation and node repacking just as the full-blown + VACUUM command does. And + incremental vacuum may be promoted from a pragma to a separate + SQL command, or perhaps some variation on the VACUUM command. + Programmers are cautioned to not become enamored with the + current syntax or functionality as it is likely to change.

        +
      • + + + +
      • PRAGMA legacy_file_format; +
        PRAGMA legacy_file_format = ON | OFF

        +

        This pragma sets or queries the value of the legacy_file_format + flag. When this flag is on, new SQLite databases are created in + a file format that is readable and writable by all versions of + SQLite going back to 3.0.0. When the flag is off, new databases + are created using the latest file format which might not be + readable or writable by older versions of SQLite.

        + +

        This flag only affects newly created databases. It has no + effect on databases that already exist.

        +
      • + + +
      • PRAGMA locking_mode; +
        PRAGMA locking_mode = NORMAL | EXCLUSIVE

        +

        This pragma sets or queries the database connection locking-mode. + The locking-mode is either NORMAL or EXCLUSIVE. + +

        In NORMAL locking-mode (the default), a database connection + unlocks the database file at the conclusion of each read or + write transaction. When the locking-mode is set to EXCLUSIVE, the + database connection never releases file-locks. The first time the + database is read in EXCLUSIVE mode, a shared lock is obtained and + held. The first time the database is written, an exclusive lock is + obtained and held.

        + +

        Database locks obtained by a connection in EXCLUSIVE mode may be + released either by closing the database connection, or by setting the + locking-mode back to NORMAL using this pragma and then accessing the + database file (for read or write). Simply setting the locking-mode to + NORMAL is not enough - locks are not released until the next time + the database file is accessed.

        + +

        There are two reasons to set the locking-mode to EXCLUSIVE. One + is if the application actually wants to prevent other processes from + accessing the database file. The other is that a small number of + filesystem operations are saved by optimizations enabled in this + mode. This may be significant in embedded environments.

        + +

        When the locking_mode pragma specifies a particular database, + for example:

        + +
        +PRAGMA main.locking_mode=EXCLUSIVE; +
        + +

        Then the locking mode applies only to the named database. If no + database name qualifier precedes the "locking_mode" keyword then + the locking mode is applied to all databases, including any new + databases added by subsequent ATTACH + commands.

        + +

        The "temp" database (in which TEMP tables and indices are stored) + always uses exclusive locking mode. The locking mode of temp cannot + be changed. All other databases use the normal locking mode by default + and are affected by this pragma.

        +
      • + + +
      • PRAGMA page_size; +
        PRAGMA page_size =
        bytes;

        +

        Query or set the page-size of the database. The page-size + may only be set if the database has not yet been created. The page + size must be a power of two greater than or equal to 512 and less + than or equal to 8192. The upper limit may be modified by setting + the value of macro SQLITE_MAX_PAGE_SIZE during compilation. The + maximum upper bound is 32768. +

        +
      • + + +
      • PRAGMA max_page_count; +
        PRAGMA max_page_count =
        N;

        +

        Query or set the maximum number of pages in the database file. + Both forms of the pragma return the maximum page count. The second + form attempts to modify the maximum page count. The maximum page + count cannot be reduced below the current database size. +

        +
      • + + +
      • PRAGMA read_uncommitted; +
        PRAGMA read_uncommitted =
        0 | 1;

        +

        Query, set, or clear READ UNCOMMITTED isolation. The default isolation + level for SQLite is SERIALIZABLE. Any process or thread can select + READ UNCOMMITTED isolation, but SERIALIZABLE will still be used except + between connections that share a common page and schema cache. + Cache sharing is enabled using the + + sqlite3_enable_shared_cache() API and is only available between + connections running the same thread. Cache sharing is off by default. +

        +
      • + + +
      • PRAGMA short_column_names; +
        PRAGMA short_column_names =
        0 | 1;

        +

        Query or change the short-column-names flag. This flag affects + the way SQLite names columns of data returned by SELECT statements + when the expression for the column is a table-column name or the + wildcard "*". Normally, such result columns are named + <table-name/alias><column-name> if the SELECT statement + joins two or more tables together, or simply <column-name> if + the SELECT statement queries a single table. When the short-column-names + flag is set, such columns are always named <column-name> + regardless of whether or not a join is performed. +

        +

        If both the short-column-names and full-column-names are set, + then the behaviour associated with the full-column-names flag is + exhibited. +

        +
      • + + +
      • PRAGMA synchronous; +
        PRAGMA synchronous = FULL;
        (2) +
        PRAGMA synchronous = NORMAL;
        (1) +
        PRAGMA synchronous = OFF;
        (0)

        +

        Query or change the setting of the "synchronous" flag. + The first (query) form will return the setting as an + integer. When synchronous is FULL (2), the SQLite database engine will + pause at critical moments to make sure that data has actually been + written to the disk surface before continuing. This ensures that if + the operating system crashes or if there is a power failure, the database + will be uncorrupted after rebooting. FULL synchronous is very + safe, but it is also slow. + When synchronous is NORMAL, the SQLite database + engine will still pause at the most critical moments, but less often + than in FULL mode. There is a very small (though non-zero) chance that + a power failure at just the wrong time could corrupt the database in + NORMAL mode. But in practice, you are more likely to suffer + a catastrophic disk failure or some other unrecoverable hardware + fault. + With synchronous OFF (0), SQLite continues without pausing + as soon as it has handed data off to the operating system. + If the application running SQLite crashes, the data will be safe, but + the database might become corrupted if the operating system + crashes or the computer loses power before that data has been written + to the disk surface. On the other hand, some + operations are as much as 50 or more times faster with synchronous OFF. +

        +

        In SQLite version 2, the default value is NORMAL. For version 3, the + default was changed to FULL. +

        +
      • + + + +
      • PRAGMA temp_store; +
        PRAGMA temp_store = DEFAULT;
        (0) +
        PRAGMA temp_store = FILE;
        (1) +
        PRAGMA temp_store = MEMORY;
        (2)

        +

        Query or change the setting of the "temp_store" parameter. + When temp_store is DEFAULT (0), the compile-time C preprocessor macro + TEMP_STORE is used to determine where temporary tables and indices + are stored. When + temp_store is MEMORY (2) temporary tables and indices are kept in memory. + When temp_store is FILE (1) temporary tables and indices are stored + in a file. The + temp_store_directory pragma can be used to specify the directory + containing this file when + FILE is specified. When the temp_store setting is changed, + all existing temporary tables, indices, triggers, and views are + immediately deleted.

        + +

        It is possible for the library compile-time C preprocessor symbol + TEMP_STORE to override this pragma setting. The following table summarizes + the interaction of the TEMP_STORE preprocessor macro and the + temp_store pragma:

        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        TEMP_STOREPRAGMA
        temp_store
        Storage used for
        TEMP tables and indices
        0anyfile
        10file
        11file
        12memory
        20memory
        21file
        22memory
        3anymemory
        +
        +
      • +
        + + +
      • PRAGMA temp_store_directory; +
        PRAGMA temp_store_directory = 'directory-name';

        +

        Query or change the setting of the "temp_store_directory" - the + directory where files used for storing temporary tables and indices + are kept. This setting lasts for the duration of the current connection + only and resets to its default value for each new connection opened. + +

        When the temp_store_directory setting is changed, all existing temporary + tables, indices, triggers, and views are immediately deleted. In + practice, temp_store_directory should be set immediately after the + database is opened.

        + +

        The value directory-name should be enclosed in single quotes. + To revert the directory to the default, set the directory-name to + an empty string, e.g., PRAGMA temp_store_directory = ''. An + error is raised if directory-name is not found or is not + writable.

        + +

        The default directory for temporary files depends on the OS. For + Unix/Linux/OSX, the default is the first writable directory found + in the list of: /var/tmp, /usr/tmp, /tmp, and + current-directory. For Windows NT, the default + directory is determined by Windows, generally + C:\Documents and Settings\user-name\Local Settings\Temp\. + Temporary files created by SQLite are unlinked immediately after + opening, so that the operating system can automatically delete the + files when the SQLite process exits. Thus, temporary files are not + normally visible through ls or dir commands.

        + +
      • +
      + +Section {Pragmas to query the database schema} schema + +
        + +
      • PRAGMA database_list;

        +

        For each open database, invoke the callback function once with + information about that database. Arguments include the index and + the name the database was attached with. The first row will be for + the main database. The second row will be for the database used to + store temporary tables.

      • + + +
      • PRAGMA foreign_key_list(table-name);

        +

        For each foreign key that references a column in the argument + table, invoke the callback function with information about that + foreign key. The callback function will be invoked once for each + column in each foreign key.

      • + + +
      • PRAGMA [database].freelist_count;

        +

        Return the number of unused pages in the database file. Running + a "PRAGMA incremental_vacuum(N);" + command with a large value of N will shrink the database file by this + number of pages.

      • + + +
      • PRAGMA index_info(index-name);

        +

        For each column that the named index references, invoke the + callback function + once with information about that column, including the column name, + and the column number.

      • + + +
      • PRAGMA index_list(table-name);

        +

        For each index on the named table, invoke the callback function + once with information about that index. Arguments include the + index name and a flag to indicate whether or not the index must be + unique.

      • + + +
      • PRAGMA table_info(table-name);

        +

        For each column in the named table, invoke the callback function + once with information about that column, including the column name, + data type, whether or not the column can be NULL, and the default + value for the column.

      • +
      + +Section {Pragmas to query/modify version values} version + +
        + + +
      • PRAGMA [database.]schema_version; +
        PRAGMA [database.]schema_version =
        integer ; +
        PRAGMA [database.]user_version; +
        PRAGMA [database.]user_version =
        integer ; + + +

        The pragmas schema_version and user_version are used to set or get + the value of the schema-version and user-version, respectively. Both + the schema-version and the user-version are 32-bit signed integers + stored in the database header.

        + +

        The schema-version is usually only manipulated internally by SQLite. + It is incremented by SQLite whenever the database schema is modified + (by creating or dropping a table or index). The schema version is + used by SQLite each time a query is executed to ensure that the + internal cache of the schema used when compiling the SQL query matches + the schema of the database against which the compiled query is actually + executed. Subverting this mechanism by using "PRAGMA schema_version" + to modify the schema-version is potentially dangerous and may lead + to program crashes or database corruption. Use with caution!

        + +

        The user-version is not used internally by SQLite. It may be used by + applications for any purpose.

        +
      • +
      + +Section {Pragmas to debug the library} debug + +
        + +
      • PRAGMA integrity_check; +
        PRAGMA integrity_check(
        integer)

        +

        The command does an integrity check of the entire database. It + looks for out-of-order records, missing pages, malformed records, and + corrupt indices. + If any problems are found, then strings are returned (as multiple + rows with a single column per row) which describe + the problems. At most integer errors will be reported + before the analysis quits. The default value for integer + is 100. If no errors are found, a single row with the value "ok" is + returned.

      • + + +
      • PRAGMA parser_trace = ON; (1) +
        PRAGMA parser_trace = OFF;
        (0)

        +

        Turn tracing of the SQL parser inside of the + SQLite library on and off. This is used for debugging. + This only works if the library is compiled without the NDEBUG macro. +

      • + + +
      • PRAGMA vdbe_trace = ON; (1) +
        PRAGMA vdbe_trace = OFF;
        (0)

        +

        Turn tracing of the virtual database engine inside of the + SQLite library on and off. This is used for debugging. See the + VDBE documentation for more + information.

      • + + +
      • PRAGMA vdbe_listing = ON; (1) +
        PRAGMA vdbe_listing = OFF;
        (0)

        +

        Turn listings of virtual machine programs on and off. + When listing is on, the entire content of a program is printed + just prior to beginning execution. This is like automatically + executing an EXPLAIN prior to each statement. The statement + executes normally after the listing is printed. + This is used for debugging. See the + VDBE documentation for more + information.

      • +
      ADDED pages/quickstart.in Index: pages/quickstart.in ================================================================== --- /dev/null +++ pages/quickstart.in @@ -0,0 +1,103 @@ +SQLite In 5 Minutes Or Less + +

      Here is what you do to start experimenting with SQLite without having +to do a lot of tedious reading and configuration:

      + +

      Download The Code

      + +
        +
      • Get a copy of the prebuilt binaries for your machine, or get a copy +of the sources and compile them yourself. Visit +the download page for more information.

      • +
      + +

      Create A New Database

      + +
        +
      • At a shell or DOS prompt, enter: "sqlite3 test.db". This will +create a new database named "test.db". (You can use a different name if +you like.)

      • +
      • Enter SQL commands at the prompt to create and populate the +new database.

      • +
      • Additional documentation is available here

      • +
      + +

      Write Programs That Use SQLite

      + +
        +
      • Below is a simple TCL program that demonstrates how to use +the TCL interface to SQLite. The program executes the SQL statements +given as the second argument on the database defined by the first +argument. The commands to watch for are the sqlite3 command +on line 7 which opens an SQLite database and creates +a new TCL command named "db" to access that database, the +invocation of the db command on line 8 to execute +SQL commands against the database, and the closing of the database connection +on the last line of the script.

        + +
        +#!/usr/bin/tclsh
        +if {$argc!=2} {
        +  puts stderr "Usage: %s DATABASE SQL-STATEMENT"
        +  exit 1
        +}
        +load /usr/lib/tclsqlite3.so Sqlite3
        +sqlite3 db [lindex $argv 0]
        +db eval [lindex $argv 1] x {
        +  foreach v $x(*) {
        +    puts "$v = $x($v)"
        +  }
        +  puts ""
        +}
        +db close
        +
        +
      • + +
      • Below is a simple C program that demonstrates how to use +the C/C++ interface to SQLite. The name of a database is given by +the first argument and the second argument is one or more SQL statements +to execute against the database. The function calls to pay attention +to here are the call to sqlite3_open() on line 22 which opens +the database, sqlite3_exec() on line 27 that executes SQL +commands against the database, and sqlite3_close() on line 31 +that closes the database connection.

        + +
        +#include <stdio.h>
        +#include <sqlite3.h>
        +
        +static int callback(void *NotUsed, int argc, char **argv, char **azColName){
        +  int i;
        +  for(i=0; i<argc; i++){
        +    printf("%s = %s\n", azColName[i], argv[i] ? argv[i] : "NULL");
        +  }
        +  printf("\n");
        +  return 0;
        +}
        +
        +int main(int argc, char **argv){
        +  sqlite3 *db;
        +  char *zErrMsg = 0;
        +  int rc;
        +
        +  if( argc!=3 ){
        +    fprintf(stderr, "Usage: %s DATABASE SQL-STATEMENT\n", argv[0]);
        +    exit(1);
        +  }
        +  rc = sqlite3_open(argv[1], &db);
        +  if( rc ){
        +    fprintf(stderr, "Can't open database: %s\n", sqlite3_errmsg(db));
        +    sqlite3_close(db);
        +    exit(1);
        +  }
        +  rc = sqlite3_exec(db, argv[2], callback, 0, &zErrMsg);
        +  if( rc!=SQLITE_OK ){
        +    fprintf(stderr, "SQL error: %s\n", zErrMsg);
        +    sqlite3_free(zErrMsg);
        +  }
        +  sqlite3_close(db);
        +  return 0;
        +}
        +
        +
      • +
      ADDED pages/sharedcache.in Index: pages/sharedcache.in ================================================================== --- /dev/null +++ pages/sharedcache.in @@ -0,0 +1,199 @@ +SQLite Shared-Cache Mode + + +proc HEADING {level title} { + global pnum + incr pnum($level) + foreach i [array names pnum] { + if {$i>$level} {set pnum($i) 0} + } + set h [expr {$level+1}] + if {$h>6} {set h 6} + set n $pnum(1).$pnum(2) + for {set i 3} {$i<=$level} {incr i} { + append n .$pnum($i) + } + puts "$n $title" +} +set pnum(1) 0 +set pnum(2) 0 +set pnum(3) 0 +set pnum(4) 0 +set pnum(5) 0 +set pnum(6) 0 +set pnum(7) 0 +set pnum(8) 0 + + +HEADING 1 {SQLite Shared-Cache Mode} + +

      Starting with version 3.3.0, SQLite includes a special "shared-cache" +mode (disabled by default) intended for use in embedded servers. If +shared-cache mode is enabled and a thread establishes multiple connections +to the same database, the connections share a single data and schema cache. +This can significantly reduce the quantity of memory and IO required by +the system.

      + +

      Using shared-cache mode imposes some extra restrictions on +passing database handles between threads and changes the semantics +of the locking model in some cases. These details are described in full by +this document. A basic understanding of the normal SQLite locking model (see +File Locking And Concurrency In SQLite Version 3 +for details) is assumed.

      + +HEADING 1 {Shared-Cache Locking Model} + +

      Externally, from the point of view of another process or thread, two +or more database connections using a shared-cache appear as a single +connection. The locking protocol used to arbitrate between multiple +shared-caches or regular database users is described elsewhere. +

      + + +
      + + +
      +

      Figure 1

      + +

      Figure 1 depicts an example runtime configuration where three +database connections have been established. Connection 1 is a normal +SQLite database connection. Connections 2 and 3 share a cache (and so must +have been established by the same process thread). The normal locking +protocol is used to serialize database access between connection 1 and +the shared cache. The internal protocol used to serialize (or not, see +"Read-Uncommitted Isolation Mode" below) access to the shared-cache by +connections 2 and 3 is described in the remainder of this section. +

      + +

      There are three levels to the shared-cache locking model, +transaction level locking, table level locking and schema level locking. +They are described in the following three sub-sections.

      + +HEADING 2 {Transaction Level Locking} + +

      SQLite connections can open two kinds of transactions, read and write +transactions. This is not done explicitly, a transaction is implicitly a +read-transaction until it first writes to a database table, at which point +it becomes a write-transaction. +

      +

      At most one connection to a single shared cache may open a +write transaction at any one time. This may co-exist with any number of read +transactions. +

      + +HEADING 2 {Table Level Locking} + +

      When two or more connections use a shared-cache, locks are used to +serialize concurrent access attempts on a per-table basis. Tables support +two types of locks, "read-locks" and "write-locks". Locks are granted to +connections - at any one time, each database connection has either a +read-lock, write-lock or no lock on each database table. +

      + +

      At any one time, a single table may have any number of active read-locks +or a single active write lock. To read data from a table, a connection must +first obtain a read-lock. To write to a table, a connection must obtain a +write-lock on that table. If a required table lock cannot be obtained, +the query fails and SQLITE_LOCKED is returned to the caller. +

      + +

      Once a connection obtains a table lock, it is not released until the +current transaction (read or write) is concluded. +

      + +HEADING 3 {Read-Uncommitted Isolation Mode} + +

      The behaviour described above may be modified slightly by using the +read_uncommitted pragma to change the isolation level from serialized +(the default), to read-uncommitted.

      + +

      A database connection in read-uncommitted mode does not attempt +to obtain read-locks before reading from database tables as described +above. This can lead to inconsistent query results if another database +connection modifies a table while it is being read, but it also means that +a read-transaction opened by a connection in read-uncommitted mode can +neither block nor be blocked by any other connection.

      + +

      Read-uncommitted mode has no effect on the locks required to write to +database tables (i.e. read-uncommitted connections must still obtain +write-locks and hence database writes may still block or be blocked). +Also, read-uncommitted mode has no effect on the sqlite_master +locks required by the rules enumerated below (see section +"Schema (sqlite_master) Level Locking"). +

      + +
      +  /* Set the value of the read-uncommitted flag:
      +  **
      +  **   True  -> Set the connection to read-uncommitted mode.
      +  **   False -> Set the connection to serialized (the default) mode.
      +  */
      +  PRAGMA read_uncommitted = <boolean>;
      +
      +  /* Retrieve the current value of the read-uncommitted flag */
      +  PRAGMA read_uncommitted;
      +
      + +HEADING 2 {Schema (sqlite_master) Level Locking} + +

      The sqlite_master table supports shared-cache read and write +locks in the same way as all other database tables (see description +above). The following special rules also apply: +

      + +
        +
      • A connection must obtain a read-lock on sqlite_master before +accessing any database tables or obtaining any other read or write locks.
      • +
      • Before executing a statement that modifies the database schema (i.e. +a CREATE or DROP TABLE statement), a connection must obtain a write-lock on +sqlite_master. +
      • +
      • A connection may not compile an SQL statement if any other connection +is holding a write-lock on the sqlite_master table of any attached +database (including the default database, "main"). +
      • +
      + +HEADING 1 {Thread Related Issues} + +

      When shared-cache mode is enabled, a database connection may only be +used by the thread that called sqlite3_open() to create it. If another +thread attempts to use the database connection, in most cases an +SQLITE_MISUSE error is returned. However this is not guaranteed and +programs should not depend on this behaviour, in some cases a segfault +may result. +

      + +HEADING 1 {Enabling Shared-Cache Mode} + +

      Shared-cache mode is enabled on a thread-wide basis. Using the C +interface, the following API can be used to enable or disable shared-cache +mode for the calling thread: +

      + +
      +int sqlite3_enable_shared_cache(int);
      +
      + +

      It is illegal to call sqlite3_enable_shared_cache() if one or more +open database connections were opened by the calling thread. If the argument +is non-zero, shared-cache mode is enabled. If the argument is zero, +shared-cache mode is disabled. The return value is either SQLITE_OK (if the +operation was successful), SQLITE_NOMEM (if a malloc() failed), or +SQLITE_MISUSE (if the thread has open database connections). +

      ADDED pages/speed.in Index: pages/speed.in ================================================================== --- /dev/null +++ pages/speed.in @@ -0,0 +1,486 @@ +SQLite Database Speed Comparison + +

      Database Speed Comparison

      + + +Note: This document is old. It describes a speed comparison between +an older version of SQLite against archaic versions of MySQL and PostgreSQL. +Readers are invited to contribute more up-to-date speed comparisons +on the SQLite Wiki. +

      +The numbers here are old enough to be nearly meaningless. Until it is +updated, use this document only as proof that SQLite is not a +sluggard. + + +

      Executive Summary

      + +

      A series of tests were run to measure the relative performance of +SQLite 2.7.6, PostgreSQL 7.1.3, and MySQL 3.23.41. +The following are general +conclusions drawn from these experiments: +

      + +
        +
      • + SQLite 2.7.6 is significantly faster (sometimes as much as 10 or + 20 times faster) than the default PostgreSQL 7.1.3 installation + on RedHat 7.2 for most common operations. +

      • +
      • + SQLite 2.7.6 is often faster (sometimes + more than twice as fast) than MySQL 3.23.41 + for most common operations. +

      • +
      • + SQLite does not execute CREATE INDEX or DROP TABLE as fast as + the other databases. But this is not seen as a problem because + those are infrequent operations. +

      • +
      • + SQLite works best if you group multiple operations together into + a single transaction. +

      • +
      + +

      +The results presented here come with the following caveats: +

      + +
        +
      • + These tests did not attempt to measure multi-user performance or + optimization of complex queries involving multiple joins and subqueries. +

      • +
      • + These tests are on a relatively small (approximately 14 megabyte) database. + They do not measure how well the database engines scale to larger problems. +

      • +
      + +

      Test Environment

      + +

      +The platform used for these tests is a 1.6GHz Athlon with 1GB of memory +and an IDE disk drive. The operating system is RedHat Linux 7.2 with +a stock kernel. +

      + +

      +The PostgreSQL and MySQL servers used were as delivered by default on +RedHat 7.2. (PostgreSQL version 7.1.3 and MySQL version 3.23.41.) +No effort was made to tune these engines. Note in particular +that the default MySQL configuration on RedHat 7.2 does not support +transactions. Not having to support transactions gives MySQL a +big speed advantage, but SQLite is still able to hold its own on most +tests. +

      + +

      +I am told that the default PostgreSQL configuration in RedHat 7.3 +is unnecessarily conservative (it is designed to +work on a machine with 8MB of RAM) and that PostgreSQL could +be made to run a lot faster with some knowledgeable configuration +tuning. +Matt Sergeant reports that he has tuned his PostgreSQL installation +and rerun the tests shown below. His results show that +PostgreSQL and MySQL run at about the same speed. For Matt's +results, visit +

      + +
      + +http://www.sergeant.org/sqlite_vs_pgsync.html +
      + +

      +SQLite was tested in the same configuration that it appears +on the website. It was compiled with -O6 optimization and with +the -DNDEBUG=1 switch which disables the many "assert()" statements +in the SQLite code. The -DNDEBUG=1 compiler option roughly doubles +the speed of SQLite. +

      + +

      +All tests are conducted on an otherwise quiescent machine. +A simple Tcl script was used to generate and run all the tests. +A copy of this Tcl script can be found in the SQLite source tree +in the file tools/speedtest.tcl. +

      + +

      +The times reported on all tests represent wall-clock time +in seconds. Two separate time values are reported for SQLite. +The first value is for SQLite in its default configuration with +full disk synchronization turned on. With synchronization turned +on, SQLite executes +an fsync() system call (or the equivalent) at key points +to make certain that critical data has +actually been written to the disk drive surface. Synchronization +is necessary to guarantee the integrity of the database if the +operating system crashes or the computer powers down unexpectedly +in the middle of a database update. The second time reported for SQLite is +when synchronization is turned off. With synchronization off, +SQLite is sometimes much faster, but there is a risk that an +operating system crash or an unexpected power failure could +damage the database. Generally speaking, the synchronous SQLite +times are for comparison against PostgreSQL (which is also +synchronous) and the asynchronous SQLite times are for +comparison against the asynchronous MySQL engine. +

      + +

      Test 1: 1000 INSERTs

      +
      +CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));
      +INSERT INTO t1 VALUES(1,13153,'thirteen thousand one hundred fifty three');
      +INSERT INTO t1 VALUES(2,75560,'seventy five thousand five hundred sixty');
      +... 995 lines omitted
      +INSERT INTO t1 VALUES(998,66289,'sixty six thousand two hundred eighty nine');
      +INSERT INTO t1 VALUES(999,24322,'twenty four thousand three hundred twenty two');
      +INSERT INTO t1 VALUES(1000,94142,'ninety four thousand one hundred forty two');
      + +
      + + + + +
      PostgreSQL:   4.373
      MySQL:   0.114
      SQLite 2.7.6:   13.061
      SQLite 2.7.6 (nosync):   0.223
      + +

      +Because it does not have a central server to coordinate access, +SQLite must close and reopen the database file, and thus invalidate +its cache, for each transaction. In this test, each SQL statement +is a separate transaction so the database file must be opened and closed +and the cache must be flushed 1000 times. In spite of this, the asynchronous +version of SQLite is still nearly as fast as MySQL. Notice how much slower +the synchronous version is, however. SQLite calls fsync() after +each synchronous transaction to make sure that all data is safely on +the disk surface before continuing. For most of the 13 seconds in the +synchronous test, SQLite was sitting idle waiting on disk I/O to complete.

      + + +

      Test 2: 25000 INSERTs in a transaction

      +
      +BEGIN;
      +CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));
      +INSERT INTO t2 VALUES(1,59672,'fifty nine thousand six hundred seventy two');
      +... 24997 lines omitted
      +INSERT INTO t2 VALUES(24999,89569,'eighty nine thousand five hundred sixty nine');
      +INSERT INTO t2 VALUES(25000,94666,'ninety four thousand six hundred sixty six');
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   4.900
      MySQL:   2.184
      SQLite 2.7.6:   0.914
      SQLite 2.7.6 (nosync):   0.757
      + +

+When all the INSERTs are put in a transaction, SQLite no longer has to +close and reopen the database or invalidate its cache between each statement. +It also does not +have to do any fsync()s until the very end. When unshackled in +this way, SQLite is much faster than either PostgreSQL or MySQL. +

      + +

      Test 3: 25000 INSERTs into an indexed table

      +
      +BEGIN;
      +CREATE TABLE t3(a INTEGER, b INTEGER, c VARCHAR(100));
      +CREATE INDEX i3 ON t3(c);
      +... 24998 lines omitted
      +INSERT INTO t3 VALUES(24999,88509,'eighty eight thousand five hundred nine');
      +INSERT INTO t3 VALUES(25000,84791,'eighty four thousand seven hundred ninety one');
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   8.175
      MySQL:   3.197
      SQLite 2.7.6:   1.555
      SQLite 2.7.6 (nosync):   1.402
      + +

      +There were reports that SQLite did not perform as well on an indexed table. +This test was recently added to disprove those rumors. It is true that +SQLite is not as fast at creating new index entries as the other engines +(see Test 6 below) but its overall speed is still better. +

      + +

      Test 4: 100 SELECTs without an index

      +
      +BEGIN;
      +SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<1000;
      +SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<1100;
      +... 96 lines omitted
      +SELECT count(*), avg(b) FROM t2 WHERE b>=9800 AND b<10800;
      +SELECT count(*), avg(b) FROM t2 WHERE b>=9900 AND b<10900;
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   3.629
      MySQL:   2.760
      SQLite 2.7.6:   2.494
      SQLite 2.7.6 (nosync):   2.526
      + + +

      +This test does 100 queries on a 25000 entry table without an index, +thus requiring a full table scan. Prior versions of SQLite used to +be slower than PostgreSQL and MySQL on this test, but recent performance +enhancements have increased its speed so that it is now the fastest +of the group. +

      + +

      Test 5: 100 SELECTs on a string comparison

      +
      +BEGIN;
      +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one%';
      +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%two%';
      +... 96 lines omitted
      +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety nine%';
      +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one hundred%';
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   13.409
      MySQL:   4.640
      SQLite 2.7.6:   3.362
      SQLite 2.7.6 (nosync):   3.372
      + +

+This test still does 100 full table scans but it uses +string comparisons instead of numerical comparisons. +SQLite is over three times faster than PostgreSQL here and about 30% +faster than MySQL. +

      + +

      Test 6: Creating an index

      +
      +CREATE INDEX i2a ON t2(a);
      CREATE INDEX i2b ON t2(b); +
      + + + + +
      PostgreSQL:   0.381
      MySQL:   0.318
      SQLite 2.7.6:   0.777
      SQLite 2.7.6 (nosync):   0.659
      + +

      +SQLite is slower at creating new indices. This is not a huge problem +(since new indices are not created very often) but it is something that +is being worked on. Hopefully, future versions of SQLite will do better +here. +

      + +

      Test 7: 5000 SELECTs with an index

      +
      +SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<100;
      +SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<200;
      +SELECT count(*), avg(b) FROM t2 WHERE b>=200 AND b<300;
      +... 4994 lines omitted
      +SELECT count(*), avg(b) FROM t2 WHERE b>=499700 AND b<499800;
      +SELECT count(*), avg(b) FROM t2 WHERE b>=499800 AND b<499900;
      +SELECT count(*), avg(b) FROM t2 WHERE b>=499900 AND b<500000;
      + +
      + + + + +
      PostgreSQL:   4.614
      MySQL:   1.270
      SQLite 2.7.6:   1.121
      SQLite 2.7.6 (nosync):   1.162
      + +

      +All three database engines run faster when they have indices to work with. +But SQLite is still the fastest. +

      + +

      Test 8: 1000 UPDATEs without an index

      +
      +BEGIN;
      +UPDATE t1 SET b=b*2 WHERE a>=0 AND a<10;
      +UPDATE t1 SET b=b*2 WHERE a>=10 AND a<20;
      +... 996 lines omitted
      +UPDATE t1 SET b=b*2 WHERE a>=9980 AND a<9990;
      +UPDATE t1 SET b=b*2 WHERE a>=9990 AND a<10000;
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   1.739
      MySQL:   8.410
      SQLite 2.7.6:   0.637
      SQLite 2.7.6 (nosync):   0.638
      + +

      +For this particular UPDATE test, MySQL is consistently +five or ten times +slower than PostgreSQL and SQLite. I do not know why. MySQL is +normally a very fast engine. Perhaps this problem has been addressed +in later versions of MySQL. +

      + +

      Test 9: 25000 UPDATEs with an index

      +
      +BEGIN;
      +UPDATE t2 SET b=468026 WHERE a=1;
      +UPDATE t2 SET b=121928 WHERE a=2;
      +... 24996 lines omitted
      +UPDATE t2 SET b=35065 WHERE a=24999;
      +UPDATE t2 SET b=347393 WHERE a=25000;
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   18.797
      MySQL:   8.134
      SQLite 2.7.6:   3.520
      SQLite 2.7.6 (nosync):   3.104
      + +

+As recently as version 2.7.0, SQLite ran at about the same speed as +MySQL on this test. But recent optimizations to SQLite have more +than doubled the speed of UPDATEs. +

      + +

      Test 10: 25000 text UPDATEs with an index

      +
      +BEGIN;
      +UPDATE t2 SET c='one hundred forty eight thousand three hundred eighty two' WHERE a=1;
      +UPDATE t2 SET c='three hundred sixty six thousand five hundred two' WHERE a=2;
      +... 24996 lines omitted
      +UPDATE t2 SET c='three hundred eighty three thousand ninety nine' WHERE a=24999;
      +UPDATE t2 SET c='two hundred fifty six thousand eight hundred thirty' WHERE a=25000;
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   48.133
      MySQL:   6.982
      SQLite 2.7.6:   2.408
      SQLite 2.7.6 (nosync):   1.725
      + +

      +Here again, version 2.7.0 of SQLite used to run at about the same speed +as MySQL. But now version 2.7.6 is over two times faster than MySQL and +over twenty times faster than PostgreSQL. +

      + +

      +In fairness to PostgreSQL, it started thrashing on this test. A +knowledgeable administrator might be able to get PostgreSQL to run a lot +faster here by tweaking and tuning the server a little. +

      + +

      Test 11: INSERTs from a SELECT

      +
      +BEGIN;
      INSERT INTO t1 SELECT b,a,c FROM t2;
      INSERT INTO t2 SELECT b,a,c FROM t1;
      COMMIT; +
      + + + + +
      PostgreSQL:   61.364
      MySQL:   1.537
      SQLite 2.7.6:   2.787
      SQLite 2.7.6 (nosync):   1.599
      + +

      +The asynchronous SQLite is just a shade slower than MySQL on this test. +(MySQL seems to be especially adept at INSERT...SELECT statements.) +The PostgreSQL engine is still thrashing - most of the 61 seconds it used +were spent waiting on disk I/O. +

      + +

      Test 12: DELETE without an index

      +
      +DELETE FROM t2 WHERE c LIKE '%fifty%'; +
      + + + + +
      PostgreSQL:   1.509
      MySQL:   0.975
      SQLite 2.7.6:   4.004
      SQLite 2.7.6 (nosync):   0.560
      + +

      +The synchronous version of SQLite is the slowest of the group in this test, +but the asynchronous version is the fastest. +The difference is the extra time needed to execute fsync(). +

      + +

      Test 13: DELETE with an index

      +
      +DELETE FROM t2 WHERE a>10 AND a<20000; +
      + + + + +
      PostgreSQL:   1.316
      MySQL:   2.262
      SQLite 2.7.6:   2.068
      SQLite 2.7.6 (nosync):   0.752
      + +

+This test is significant because it is one of the few where +PostgreSQL is faster than MySQL. The asynchronous SQLite is, +however, faster than both the other two. +

      + +

      Test 14: A big INSERT after a big DELETE

      +
      +INSERT INTO t2 SELECT * FROM t1; +
      + + + + +
      PostgreSQL:   13.168
      MySQL:   1.815
      SQLite 2.7.6:   3.210
      SQLite 2.7.6 (nosync):   1.485
      + +

      +Some older versions of SQLite (prior to version 2.4.0) +would show decreasing performance after a +sequence of DELETEs followed by new INSERTs. As this test shows, the +problem has now been resolved. +

      + +

      Test 15: A big DELETE followed by many small INSERTs

      +
      +BEGIN;
      +DELETE FROM t1;
      +INSERT INTO t1 VALUES(1,10719,'ten thousand seven hundred nineteen');
      +... 11997 lines omitted
      +INSERT INTO t1 VALUES(11999,72836,'seventy two thousand eight hundred thirty six');
      +INSERT INTO t1 VALUES(12000,64231,'sixty four thousand two hundred thirty one');
      +COMMIT;
      + +
      + + + + +
      PostgreSQL:   4.556
      MySQL:   1.704
      SQLite 2.7.6:   0.618
      SQLite 2.7.6 (nosync):   0.406
      + +

      +SQLite is very good at doing INSERTs within a transaction, which probably +explains why it is so much faster than the other databases at this test. +

      + +

      Test 16: DROP TABLE

      +
      +DROP TABLE t1;
      DROP TABLE t2;
      DROP TABLE t3; +
      + + + + +
      PostgreSQL:   0.135
      MySQL:   0.015
      SQLite 2.7.6:   0.939
      SQLite 2.7.6 (nosync):   0.254
      + +

      +SQLite is slower than the other databases when it comes to dropping tables. +This probably is because when SQLite drops a table, it has to go through and +erase the records in the database file that deal with that table. MySQL and +PostgreSQL, on the other hand, use separate files to represent each table +so they can drop a table simply by deleting a file, which is much faster. +

      + +

      +On the other hand, dropping tables is not a very common operation +so if SQLite takes a little longer, that is not seen as a big problem. +

      ADDED pages/sqlite.in Index: pages/sqlite.in ================================================================== --- /dev/null +++ pages/sqlite.in @@ -0,0 +1,565 @@ +sqlite3: A command-line access program for SQLite databases + +

      sqlite3: A command-line access program for SQLite databases

      + +

      The SQLite library includes a simple command-line utility named +sqlite3 that allows the user to manually enter and execute SQL +commands against an SQLite database. This document provides a brief +introduction on how to use sqlite3. + +

      Getting Started

      + +

To start the sqlite3 program, just type "sqlite3" followed by +the name of the file that holds the SQLite database. If the file does +not exist, a new one is created automatically. +The sqlite3 program will +then prompt you to enter SQL. Type in SQL statements (terminated by a +semicolon), press "Enter" and the SQL will be executed.

      + +

      For example, to create a new SQLite database named "ex1" +with a single table named "tbl1", you might do this:

      + + +proc Code {body} { + puts {
      } + regsub -all {&} [string trim $body] {\&} body + regsub -all {>} $body {\>} body + regsub -all {<} $body {\<} body + regsub -all {\(\(\(} $body {} body + regsub -all {\)\)\)} $body {} body + regsub -all { } $body {\ } body + regsub -all \n $body
      \n body + puts $body + puts {
      } +} + +Code { +$ (((sqlite3 ex1))) +SQLite version 3.3.10 +Enter ".help" for instructions +sqlite> (((create table tbl1(one varchar(10), two smallint);))) +sqlite> (((insert into tbl1 values('hello!',10);))) +sqlite> (((insert into tbl1 values('goodbye', 20);))) +sqlite> (((select * from tbl1;))) +hello!|10 +goodbye|20 +sqlite> +} +
      + +

You can terminate the sqlite3 program by typing your system's +End-Of-File character (usually a Control-D) or the interrupt +character (usually a Control-C).

      + +

      Make sure you type a semicolon at the end of each SQL command! +The sqlite3 program looks for a semicolon to know when your SQL command is +complete. If you omit the semicolon, sqlite3 will give you a +continuation prompt and wait for you to enter more text to be +added to the current SQL command. This feature allows you to +enter SQL commands that span multiple lines. For example:

      + + +Code { +sqlite> (((CREATE TABLE tbl2 ())) + ...> ((( f1 varchar(30) primary key,))) + ...> ((( f2 text,))) + ...> ((( f3 real))) + ...> ((();))) +sqlite> +} + +

      Aside: Querying the SQLITE_MASTER table

      + +

      The database schema in an SQLite database is stored in +a special table named "sqlite_master". +You can execute "SELECT" statements against the +special sqlite_master table just like any other table +in an SQLite database. For example:

+ +Code { +$ (((sqlite3 ex1))) +SQLite version 3.3.10 +Enter ".help" for instructions +sqlite> (((select * from sqlite_master;))) + type = table + name = tbl1 +tbl_name = tbl1 +rootpage = 3 + sql = create table tbl1(one varchar(10), two smallint) +sqlite> +} + +

      +But you cannot execute DROP TABLE, UPDATE, INSERT or DELETE against +the sqlite_master table. The sqlite_master +table is updated automatically as you create or drop tables and +indices from the database. You can not make manual changes +to the sqlite_master table. +

      + +

      +The schema for TEMPORARY tables is not stored in the "sqlite_master" table +since TEMPORARY tables are not visible to applications other than the +application that created the table. The schema for TEMPORARY tables +is stored in another special table named "sqlite_temp_master". The +"sqlite_temp_master" table is temporary itself. +

      + +

      Special commands to sqlite3

      + +

      +Most of the time, sqlite3 just reads lines of input and passes them +on to the SQLite library for execution. +But if an input line begins with a dot ("."), then +that line is intercepted and interpreted by the sqlite3 program itself. +These "dot commands" are typically used to change the output format +of queries, or to execute certain prepackaged query statements. +

      + +

      +For a listing of the available dot commands, you can enter ".help" +at any time. For example: +

      + +Code { +sqlite> (((.help))) +.bail ON|OFF Stop after hitting an error. Default OFF +.databases List names and files of attached databases +.dump ?TABLE? ... Dump the database in an SQL text format +.echo ON|OFF Turn command echo on or off +.exit Exit this program +.explain ON|OFF Turn output mode suitable for EXPLAIN on or off. +.header(s) ON|OFF Turn display of headers on or off +.help Show this message +.import FILE TABLE Import data from FILE into TABLE +.indices TABLE Show names of all indices on TABLE +.load FILE ?ENTRY? Load an extension library +.mode MODE ?TABLE? Set output mode where MODE is one of: + csv Comma-separated values + column Left-aligned columns. (See .width) + html HTML code + insert SQL insert statements for TABLE + line One value per line + list Values delimited by .separator string + tabs Tab-separated values + tcl TCL list elements +.nullvalue STRING Print STRING in place of NULL values +.output FILENAME Send output to FILENAME +.output stdout Send output to the screen +.prompt MAIN CONTINUE Replace the standard prompts +.quit Exit this program +.read FILENAME Execute SQL in FILENAME +.schema ?TABLE? Show the CREATE statements +.separator STRING Change separator used by output mode and .import +.show Show the current values for various settings +.tables ?PATTERN? List names of tables matching a LIKE pattern +.timeout MS Try opening locked tables for MS milliseconds +.width NUM NUM ... Set column widths for "column" mode +sqlite> +} + +

      Changing Output Formats

      + +

The sqlite3 program is able to show the results of a query +in eight different formats: "csv", "column", "html", "insert", +"line", "list", "tabs", and "tcl". +You can use the ".mode" dot command to switch between these output +formats.

      + +

      The default output mode is "list". In +list mode, each record of a query result is written on one line of +output and each column within that record is separated by a specific +separator string. The default separator is a pipe symbol ("|"). +List mode is especially useful when you are going to send the output +of a query to another program (such as AWK) for additional processing.

      + +Code { +sqlite> (((.mode list))) +sqlite> (((select * from tbl1;))) +hello|10 +goodbye|20 +sqlite> +} + +

      You can use the ".separator" dot command to change the separator +for list mode. For example, to change the separator to a comma and +a space, you could do this:

      + +Code { +sqlite> (((.separator ", "))) +sqlite> (((select * from tbl1;))) +hello, 10 +goodbye, 20 +sqlite> +} + +

      In "line" mode, each column in a row of the database +is shown on a line by itself. Each line consists of the column +name, an equal sign and the column data. Successive records are +separated by a blank line. Here is an example of line mode +output:

      + +Code { +sqlite> (((.mode line))) +sqlite> (((select * from tbl1;))) +one = hello +two = 10 + +one = goodbye +two = 20 +sqlite> +} + +

      In column mode, each record is shown on a separate line with the +data aligned in columns. For example:

      + +Code { +sqlite> (((.mode column))) +sqlite> (((select * from tbl1;))) +one two +---------- ---------- +hello 10 +goodbye 20 +sqlite> +} + +

      By default, each column is at least 10 characters wide. +Data that is too wide to fit in a column is truncated. You can +adjust the column widths using the ".width" command. Like this:

      + +Code { +sqlite> (((.width 12 6))) +sqlite> (((select * from tbl1;))) +one two +------------ ------ +hello 10 +goodbye 20 +sqlite> +} + +

The ".width" command in the example above sets the width of the first +column to 12 and the width of the second column to 6. All other column +widths were unaltered. You can give as many arguments to ".width" as +necessary to specify the widths of as many columns as are in your +query results.

      + +

      If you specify a column a width of 0, then the column +width is automatically adjusted to be the maximum of three +numbers: 10, the width of the header, and the width of the +first row of data. This makes the column width self-adjusting. +The default width setting for every column is this +auto-adjusting 0 value.

      + +

      The column labels that appear on the first two lines of output +can be turned on and off using the ".header" dot command. In the +examples above, the column labels are on. To turn them off you +could do this:

      + +Code { +sqlite> (((.header off))) +sqlite> (((select * from tbl1;))) +hello 10 +goodbye 20 +sqlite> +} + +

      Another useful output mode is "insert". In insert mode, the output +is formatted to look like SQL INSERT statements. You can use insert +mode to generate text that can later be used to input data into a +different database.

      + +

      When specifying insert mode, you have to give an extra argument +which is the name of the table to be inserted into. For example:

      + +Code { +sqlite> (((.mode insert new_table))) +sqlite> (((select * from tbl1;))) +INSERT INTO 'new_table' VALUES('hello',10); +INSERT INTO 'new_table' VALUES('goodbye',20); +sqlite> +} + + +

      The last output mode is "html". In this mode, sqlite3 writes +the results of the query as an XHTML table. The beginning +<TABLE> and the ending </TABLE> are not written, but +all of the intervening <TR>s, <TH>s, and <TD>s +are. The html output mode is envisioned as being useful for +CGI.

      + + + +

      Writing results to a file

      + +

      By default, sqlite3 sends query results to standard output. You +can change this using the ".output" command. Just put the name of +an output file as an argument to the .output command and all subsequent +query results will be written to that file. Use ".output stdout" to +begin writing to standard output again. For example:

      + +Code { +sqlite> (((.mode list))) +sqlite> (((.separator |))) +sqlite> (((.output test_file_1.txt))) +sqlite> (((select * from tbl1;))) +sqlite> (((.exit))) +$ (((cat test_file_1.txt))) +hello|10 +goodbye|20 +$ +} + + +

      Querying the database schema

      + +

      The sqlite3 program provides several convenience commands that +are useful for looking at the schema of the database. There is +nothing that these commands do that cannot be done by some other +means. These commands are provided purely as a shortcut.

      + +

      For example, to see a list of the tables in the database, you +can enter ".tables".

      + + +Code { +sqlite> (((.tables))) +tbl1 +tbl2 +sqlite> +} + + +

      The ".tables" command is similar to setting list mode then +executing the following query:

      + +
      +SELECT name FROM sqlite_master 
      +WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%'
      +UNION ALL 
      +SELECT name FROM sqlite_temp_master 
      +WHERE type IN ('table','view') 
      +ORDER BY 1
      +
      + +

      In fact, if you look at the source code to the sqlite3 program +(found in the source tree in the file src/shell.c) you'll find +exactly the above query.

      + +

The ".indices" command works in a similar way to list all of +the indices for a particular table. The ".indices" command takes +a single argument which is the name of the table for which the +indices are desired. Last, but not least, is the ".schema" command. +With no arguments, the ".schema" command shows the original CREATE TABLE +and CREATE INDEX statements that were used to build the current database. +If you give the name of a table to ".schema", it shows the original +CREATE statement used to make that table and all of its indices. +We have:

      + +Code { +sqlite> (((.schema))) +create table tbl1(one varchar(10), two smallint) +CREATE TABLE tbl2 ( + f1 varchar(30) primary key, + f2 text, + f3 real +) +sqlite> (((.schema tbl2))) +CREATE TABLE tbl2 ( + f1 varchar(30) primary key, + f2 text, + f3 real +) +sqlite> +} + + +

      The ".schema" command accomplishes the same thing as setting +list mode, then entering the following query:

      + +
      +SELECT sql FROM 
      +   (SELECT * FROM sqlite_master UNION ALL
      +    SELECT * FROM sqlite_temp_master)
      +WHERE type!='meta'
      +ORDER BY tbl_name, type DESC, name
      +
      + +

      Or, if you give an argument to ".schema" because you only +want the schema for a single table, the query looks like this:

      + +
      +SELECT sql FROM
      +   (SELECT * FROM sqlite_master UNION ALL
      +    SELECT * FROM sqlite_temp_master)
      +WHERE type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
      +ORDER BY substr(type,2,1), name
      +
      + +

      +You can supply an argument to the .schema command. If you do, the +query looks like this: +

      + +
      +SELECT sql FROM
      +   (SELECT * FROM sqlite_master UNION ALL
      +    SELECT * FROM sqlite_temp_master)
      +WHERE tbl_name LIKE '%s'
      +  AND type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
      +ORDER BY substr(type,2,1), name
      +
      + +

The "%s" in the query is replaced by your argument. This allows you +to view the schema for some subset of the database.

      + + +Code { +sqlite> (((.schema %abc%))) +} + + +

      +Along these same lines, +the ".table" command also accepts a pattern as its first argument. +If you give an argument to the .table command, a "%" is both +appended and prepended and a LIKE clause is added to the query. +This allows you to list only those tables that match a particular +pattern.

      + +

      The ".databases" command shows a list of all databases open in +the current connection. There will always be at least 2. The first +one is "main", the original database opened. The second is "temp", +the database used for temporary tables. There may be additional +databases listed for databases attached using the ATTACH statement. +The first output column is the name the database is attached with, +and the second column is the filename of the external file.

      + +Code { +sqlite> (((.databases))) +} + + +

      Converting An Entire Database To An ASCII Text File

      + +

      Use the ".dump" command to convert the entire contents of a +database into a single ASCII text file. This file can be converted +back into a database by piping it back into sqlite3.

      + +

      A good way to make an archival copy of a database is this:

      + + +Code { +$ (((echo '.dump' | sqlite3 ex1 | gzip -c >ex1.dump.gz))) +} + + +

      This generates a file named ex1.dump.gz that contains everything +you need to reconstruct the database at a later time, or on another +machine. To reconstruct the database, just type:

      + + +Code { +$ (((zcat ex1.dump.gz | sqlite3 ex2))) +} + + +

      The text format is pure SQL so you +can also use the .dump command to export an SQLite database +into other popular SQL database engines. Like this:

      + + +Code { +$ (((createdb ex2))) +$ (((sqlite3 ex1 .dump | psql ex2))) +} + + +

      Other Dot Commands

      + +

      The ".explain" dot command can be used to set the output mode +to "column" and to set the column widths to values that are reasonable +for looking at the output of an EXPLAIN command. The EXPLAIN command +is an SQLite-specific SQL extension that is useful for debugging. If any +regular SQL is prefaced by EXPLAIN, then the SQL command is parsed and +analyzed but is not executed. Instead, the sequence of virtual machine +instructions that would have been used to execute the SQL command are +returned like a query result. For example:

      + +Code { +sqlite> (((.explain))) +sqlite> (((explain delete from tbl1 where two<20;))) +addr opcode p1 p2 p3 +---- ------------ ----- ----- ------------------------------------- +0 ListOpen 0 0 +1 Open 0 1 tbl1 +2 Next 0 9 +3 Field 0 1 +4 Integer 20 0 +5 Ge 0 2 +6 Key 0 0 +7 ListWrite 0 0 +8 Goto 0 2 +9 Noop 0 0 +10 ListRewind 0 0 +11 ListRead 0 14 +12 Delete 0 0 +13 Goto 0 11 +14 ListClose 0 0 +} + + + +

      The ".timeout" command sets the amount of time that the sqlite3 +program will wait for locks to clear on files it is trying to access +before returning an error. The default value of the timeout is zero so +that an error is returned immediately if any needed database table or +index is locked.

      + +

      And finally, we mention the ".exit" command which causes the +sqlite3 program to exit.

      + +

      Using sqlite3 in a shell script

      + +

      +One way to use sqlite3 in a shell script is to use "echo" or +"cat" to generate a sequence of commands in a file, then invoke sqlite3 +while redirecting input from the generated command file. This +works fine and is appropriate in many circumstances. But as +an added convenience, sqlite3 allows a single SQL command to be +entered on the command line as a second argument after the +database name. When the sqlite3 program is launched with two +arguments, the second argument is passed to the SQLite library +for processing, the query results are printed on standard output +in list mode, and the program exits. This mechanism is designed +to make sqlite3 easy to use in conjunction with programs like +"awk". For example:

      + +Code { +$ (((sqlite3 ex1 'select * from tbl1' |))) +> ((( awk '{printf "
      %s%s\n",$1,$2 }'))) +
      hello10 +
      goodbye20 +$ +} + + +

      Ending shell commands

      + +

      +SQLite commands are normally terminated by a semicolon. In the shell +you can also use the word "GO" (case-insensitive) or a slash character +"/" on a line by itself to end a command. These are used by SQL Server +and Oracle, respectively. These won't work in sqlite3_exec(), +because the shell translates these into a semicolon before passing them +to that function.

      + + + +

      Compiling the sqlite3 program from sources

      + +

      +The sqlite3 program is built automatically when you compile the +SQLite library. Just get a copy of the source tree, run +"configure" and then "make".

      + +footer $rcsid ADDED pages/support.in Index: pages/support.in ================================================================== --- /dev/null +++ pages/support.in @@ -0,0 +1,74 @@ +SQLite Support Options + +

      SQLite Support Options

      + + +

      Mailing List

      +

+A mailing list has been set up for asking questions and +for open discussion of problems +and issues by the SQLite user community. +To subscribe to the mailing list, send an email to + +sqlite-users-subscribe@sqlite.org. +If you would prefer to get digests rather than individual +emails, send a message to + +sqlite-users-digest-subscribe@sqlite.org. +For additional information about operating and using this +mailing list, send a message to + +sqlite-users-help@sqlite.org and instructions will be +sent to you by return email. +

      + +

      +There are multiple archives of the mailing list: +

      + +
      + +http://www.mail-archive.com/sqlite-users%40sqlite.org
      + +http://marc.info/?l=sqlite-users&r=1&w=2
      + +http://news.gmane.org/gmane.comp.db.sqlite.general +
      + +

      + + +

      Direct E-Mail To The Author

      + +

      +Use the mailing list. +Please do not send email directly to the author of SQLite +unless: +

        +
      • You have or intend to acquire a professional support contract +as described below, or
      • +
      • You are working on an open source project.
      • +
+You are welcome to use SQLite in closed source, proprietary, and/or +commercial projects and to ask questions about such use on the public +mailing list. But please do not ask to receive free direct technical +support. The software is free; direct technical support is not. +

      + + +

      Professional Support

      + +

      +If you would like professional support for SQLite +or if you want custom modifications to SQLite performed by the +original author, these services are available for a modest fee. +For additional information visit + +http://www.hwaci.com/sw/sqlite/prosupport.html or contact:

      + +
      +D. Richard Hipp
      +Hwaci - Applied Software Research
      +704.948.4565
      +drh@hwaci.com +
      ADDED pages/tclsqlite.in Index: pages/tclsqlite.in ================================================================== --- /dev/null +++ pages/tclsqlite.in @@ -0,0 +1,660 @@ +The Tcl interface to the SQLite library + +proc METHOD {name text} { + puts "\n

      The \"$name\" method

      \n" + puts $text +} +puts { +

      The Tcl interface to the SQLite library

      + +

      The SQLite library is designed to be very easy to use from +a Tcl or Tcl/Tk script. This document gives an overview of the Tcl +programming interface.

      + +

      The API

      + +

+The interface to the SQLite library consists of a single +Tcl command named sqlite3. +Because there is only this +one command, the interface is not placed in a separate +namespace.

      + +

      The sqlite3 command is used as follows:

      + +
      +sqlite3  dbcmd  database-name +
      + +

      +The sqlite3 command opens the database named in the second +argument. If the database does not already exist, it is +automatically created. +The sqlite3 command also creates a new Tcl +command to control the database. The name of the new Tcl command +is given by the first argument. This approach is similar to the +way widgets are created in Tk. +

      + +

      +The name of the database is just the name of a disk file in which +the database is stored. If the name of the database is an empty +string or the special name ":memory:" then a new database is created +in memory. +

      + +

+Once an SQLite database is open, it can be controlled using +methods of the dbcmd. There are currently 27 methods +defined.

      + +

      +

        +} +foreach m [lsort { + authorizer + busy + cache + changes + close + collate + collation_needed + commit_hook + complete + copy + enable_load_extension + errorcode + eval + exists + function + last_insert_rowid + nullvalue + onecolumn + profile + progress + rollback_hook + timeout + total_changes + trace + transaction + update_hook + version +}] { + puts "
      • $m
      • " +} +puts { +
      +

      + +

      The use of each of these methods will be explained in the sequel, though +not in the order shown above.

      + +} + +############################################################################## +METHOD eval { +

      +The most useful dbcmd method is "eval". The eval method is used +to execute SQL on the database. The syntax of the eval method looks +like this:

      + +
      +dbcmd  eval  sql +    ?array-name ? ?script? +
      + +

      +The job of the eval method is to execute the SQL statement or statements +given in the second argument. For example, to create a new table in +a database, you can do this:

      + +
      +sqlite3 db1 ./testdb
      +db1 eval {CREATE TABLE t1(a int, b text)}
      +
      + +

      The above code creates a new table named t1 with columns +a and b. What could be simpler?

      + +

      Query results are returned as a list of column values. If a +query requests 2 columns and there are 3 rows matching the query, +then the returned list will contain 6 elements. For example:

      + +
      +db1 eval {INSERT INTO t1 VALUES(1,'hello')}
      +db1 eval {INSERT INTO t1 VALUES(2,'goodbye')}
      +db1 eval {INSERT INTO t1 VALUES(3,'howdy!')}
      +set x [db1 eval {SELECT * FROM t1 ORDER BY a}]
      +
      + +

      The variable $x is set by the above code to

      + +
      +1 hello 2 goodbye 3 howdy! +
      + +

      You can also process the results of a query one row at a time +by specifying the name of an array variable and a script following +the SQL code. For each row of the query result, the values of all +columns will be inserted into the array variable and the script will +be executed. For instance:

      + +
      +db1 eval {SELECT * FROM t1 ORDER BY a} values {
      +    parray values
      +    puts ""
      +}
      +
      + +

      This last code will give the following output:

      + +
      +values(*) = a b
      +values(a) = 1
      +values(b) = hello

      + +values(*) = a b
      +values(a) = 2
      +values(b) = goodbye

      + +values(*) = a b
      +values(a) = 3
      +values(b) = howdy!
      +

      + +

+For each column in a row of the result, the name of that column +is used as an index into the array. The value of the column is stored +in the corresponding array entry. The special array index * is +used to store a list of column names in the order that they appear. +

      + +

      +If the array variable name is omitted or is the empty string, then the value of +each column is stored in a variable with the same name as the column +itself. For example: +

      + +
      +db1 eval {SELECT * FROM t1 ORDER BY a} {
      +    puts "a=$a b=$b"
      +}
      +
      + +

      +From this we get the following output +

      + +
      +a=1 b=hello
      +a=2 b=goodbye
      +a=3 b=howdy!
      +
      + +

+Tcl variable names can appear in the SQL statement of the second argument +in any position where it is legal to put a string or number literal. The +value of the variable is substituted for the variable name. If the +variable does not exist a NULL value is used. For example: +

      + +
      +db1 eval {INSERT INTO t1 VALUES(5,$bigstring)} +
      + +

      +Note that it is not necessary to quote the $bigstring value. That happens +automatically. If $bigstring is a large string or binary object, this +technique is not only easier to write, it is also much more efficient +since it avoids making a copy of the content of $bigstring. +

      + +

+If the $bigstring variable has both a string and a "bytearray" representation, +then TCL inserts the value as a string. If it has only a "bytearray" +representation, then the value is inserted as a BLOB. To force a +value to be inserted as a BLOB even if it also has a text representation, +use a "@" character in place of the "$". Like this: +

      + +
      +db1 eval {INSERT INTO t1 VALUES(5,@bigstring)} +
      + +

      +If the variable does not have a bytearray representation, then "@" works +just like "$". +

      + +} + +############################################################################## +METHOD close { + +

      +As its name suggests, the "close" method to an SQLite database just +closes the database. This has the side-effect of deleting the +dbcmd Tcl command. Here is an example of opening and then +immediately closing a database: +

      + +
      +sqlite3 db1 ./testdb
      +db1 close
      +
      + +

      +If you delete the dbcmd directly, that has the same effect +as invoking the "close" method. So the following code is equivalent +to the previous:

      + +
      +sqlite3 db1 ./testdb
      +rename db1 {}
      +
      +} + +############################################################################## +METHOD transaction { + +

      +The "transaction" method is used to execute a TCL script inside an SQLite +database transaction. The transaction is committed when the script completes, +or it rolls back if the script fails. If the transaction occurs within +another transaction (even one that is started manually using BEGIN) it +is a no-op. +

      + +

      +The transaction command can be used to group together several SQLite +commands in a safe way. You can always start transactions manually using +BEGIN, of +course. But if an error occurs so that the COMMIT or ROLLBACK are never +run, then the database will remain locked indefinitely. Also, BEGIN +does not nest, so you have to make sure no other transactions are active +before starting a new one. The "transaction" method takes care of +all of these details automatically. +

      + +

      +The syntax looks like this: +

      + +
+dbcmd  transaction  ?transaction-type? +  SCRIPT +
      + + +

      +The transaction-type can be one of deferred, +exclusive or immediate. The default is deferred. +

      +} + +############################################################################## +METHOD cache { + +

      +The "eval" method described above keeps a cache of +prepared statements +for recently evaluated SQL commands. +The "cache" method is used to control this cache. +The first form of this command is:

      + +
      +dbcmd  cache size  N +
      + +

      This sets the maximum number of statements that can be cached. +The upper limit is 100. The default is 10. If you set the cache size +to 0, no caching is done.

      + +

      The second form of the command is this:

      + + +
      +dbcmd  cache flush +
      + +

      The cache-flush method +finalizes +all prepared statements currently +in the cache.

      + +} + +############################################################################## +METHOD complete { + +

      +The "complete" method takes a string of supposed SQL as its only argument. +It returns TRUE if the string is a complete statement of SQL and FALSE if +there is more to be entered.

      + +

      The "complete" method is useful when building interactive applications +in order to know when the user has finished entering a line of SQL code. +This is really just an interface to the +sqlite3_complete() C +function. +} + +############################################################################## +METHOD copy { + +

      +The "copy" method copies data from a file into a table. +It returns the number of rows processed successfully from the file. +The syntax of the copy method looks like this:

      + +
      +dbcmd  copy  conflict-algorithm +  table-name   file-name  +    ?column-separator ? +  ?null-indicator? +
      + +

Conflict-algorithm must be one of the SQLite conflict algorithms for +the INSERT statement: rollback, abort, +fail, ignore, or replace. See the SQLite Language +section for ON CONFLICT for more information. +The conflict-algorithm must be specified in lower case. +

      + +

Table-name must already exist as a table. File-name must exist, and +each row must contain the same number of columns as defined in the table. +If a line in the file contains more or less than the number of columns defined, +the copy method rolls back any inserts, and returns an error.

      + +

      Column-separator is an optional column separator string. The default is +the ASCII tab character \t.

      + +

Null-indicator is an optional string that indicates a column value is null. +The default is an empty string. Note that column-separator and +null-indicator are optional positional arguments; if null-indicator +is specified, a column-separator argument must be specified and +precede the null-indicator argument.

      + +

      The copy method implements similar functionality to the .import +SQLite shell command. +The SQLite 2.x COPY statement +(using the PostgreSQL COPY file format) +can be implemented with this method as:

      + +
      +dbcmd  copy  $conflictalgo +  $tablename   $filename  +    \t  +  \\N +
      + +} + +############################################################################## +METHOD timeout { + +

The "timeout" method is used to control how long the SQLite library +will wait for locks to clear before giving up on a database transaction. +The default timeout is 0 milliseconds. (In other words, the default behavior +is not to wait at all.)

      + +

+The SQLite database allows multiple simultaneous +readers or a single writer but not both. If any process is writing to +the database no other process is allowed to read or write. If any process +is reading the database other processes are allowed to read but not write. +The entire database shares a single lock.

      + +

      When SQLite tries to open a database and finds that it is locked, it +can optionally delay for a short while and try to open the file again. +This process repeats until the query times out and SQLite returns a +failure. The timeout is adjustable. It is set to 0 by default so that +if the database is locked, the SQL statement fails immediately. But you +can use the "timeout" method to change the timeout value to a positive +number. For example:

      + +
      db1 timeout 2000
      + +

      The argument to the timeout method is the maximum number of milliseconds +to wait for the lock to clear. So in the example above, the maximum delay +would be 2 seconds.

      +} + +############################################################################## +METHOD busy { + +

      The "busy" method, like "timeout", only comes into play when the +database is locked. But the "busy" method gives the programmer much more +control over what action to take. The "busy" method specifies a callback +Tcl procedure that is invoked whenever SQLite tries to open a locked +database. This callback can do whatever is desired. Presumably, the +callback will do some other useful work for a short while (such as service +GUI events) then return +so that the lock can be tried again. The callback procedure should +return "0" if it wants SQLite to try again to open the database and +should return "1" if it wants SQLite to abandon the current operation. +} + +############################################################################## +METHOD exists { + +

      The "exists" method is similar to "onecolumn" and "eval" in that +it executes SQL statements. The difference is that the "exists" method +always returns a boolean value which is TRUE if a query in the SQL +statement it executes returns one or more rows and FALSE if the SQL +returns an empty set.

      + +

The "exists" method is often used to test for the existence of +rows in a table. For example:

      + +
      +if {[db exists {SELECT 1 FROM table1 WHERE user=$user}]} {
      +   # Processing if $user exists
      +} else {
      +   # Processing if $user does not exist
      +} +
      +} + + +############################################################################## +METHOD last_insert_rowid { + +

      The "last_insert_rowid" method returns an integer which is the ROWID +of the most recently inserted database row.

      +} + +############################################################################## +METHOD function { + +

      The "function" method registers new SQL functions with the SQLite engine. +The arguments are the name of the new SQL function and a TCL command that +implements that function. Arguments to the function are appended to the +TCL command before it is invoked.

      + +

+The following example creates a new SQL function named "hex" that converts +its numeric argument into a hexadecimal encoded string: +

      + +
      +db function hex {format 0x%X} +
      + +} + +############################################################################## +METHOD nullvalue { + +

      +The "nullvalue" method changes the representation for NULL returned +as result of the "eval" method.

      + +
      +db1 nullvalue NULL +
      + +

The "nullvalue" method is useful for distinguishing between NULL and empty +column values as Tcl lacks a NULL representation. The default +representation for NULL values is an empty string.

      +} + + + +############################################################################## +METHOD onecolumn { + +

      The "onecolumn" method works like +"eval" in that it evaluates the +SQL query statement given as its argument. The difference is that +"onecolumn" returns a single element which is the first column of the +first row of the query result.

      + +

      This is a convenience method. It saves the user from having to +do a "[lindex ... 0]" on the results of an "eval" +in order to extract a single column result.

      +} + +############################################################################## +METHOD changes { + +

      The "changes" method returns an integer which is the number of rows +in the database that were inserted, deleted, and/or modified by the most +recent "eval" method.

      +} + +############################################################################## +METHOD total_changes { + +

      The "total_changes" method returns an integer which is the number of rows +in the database that were inserted, deleted, and/or modified since the +current database connection was first opened.

      +} + +############################################################################## +METHOD authorizer { + +

      The "authorizer" method provides access to the +sqlite3_set_authorizer +C/C++ interface. The argument to authorizer is the name of a procedure that +is called when SQL statements are being compiled in order to authorize +certain operations. The callback procedure takes 5 arguments which describe +the operation being coded. If the callback returns the text string +"SQLITE_OK", then the operation is allowed. If it returns "SQLITE_IGNORE", +then the operation is silently disabled. If the return is "SQLITE_DENY" +then the compilation fails with an error. +

      + +

      If the argument is an empty string then the authorizer is disabled. +If the argument is omitted, then the current authorizer is returned.

      +} + +############################################################################## +METHOD progress { + +

      This method registers a callback that is invoked periodically during +query processing. There are two arguments: the number of SQLite virtual +machine opcodes between invocations, and the TCL command to invoke. +Setting the progress callback to an empty string disables it.

      + +

      The progress callback can be used to display the status of a lengthy +query or to process GUI events during a lengthy query.

      +} + + +############################################################################## +METHOD collate { + +

      This method registers new text collating sequences. There are +two arguments: the name of the collating sequence and the name of a +TCL procedure that implements a comparison function for the collating +sequence. +

      + +

      For example, the following code implements a collating sequence called +"NOCASE" that sorts in text order without regard to case: +

      + +
      +proc nocase_compare {a b} {
      +    return [string compare [string tolower $a] [string tolower $b]]
      +}
      +db collate NOCASE nocase_compare
      +
      +} + +############################################################################## +METHOD collation_needed { + +

      This method registers a callback routine that is invoked when the SQLite +engine needs a particular collating sequence but does not have that +collating sequence registered. The callback can register the collating +sequence. The callback is invoked with a single parameter which is the +name of the needed collating sequence.

      +} + +############################################################################## +METHOD commit_hook { + +

      This method registers a callback routine that is invoked just before +SQLite tries to commit changes to a database. If the callback throws +an exception or returns a non-zero result, then the transaction rolls back +rather than commit.

      +} + +############################################################################## +METHOD rollback_hook { + +

      This method registers a callback routine that is invoked just before +SQLite tries to do a rollback. The script argument is run without change.

      +} + +############################################################################## +METHOD update_hook { + +

      This method registers a callback routine that is invoked just before +each row is modified by an UPDATE, INSERT, or DELETE statement. Four +arguments are appended to the callback before it is invoked:

      + +
        +
      • The keyword "INSERT", "UPDATE", or "DELETE", as appropriate
      • +
      • The name of the database which is being changed
      • +
      • The table that is being changed
      • +
      • The rowid of the row in the table being changed
      • +
      +} + +############################################################################## +METHOD incrblob { + +

      This method opens a TCL channel that can be used to read or write +into a preexisting BLOB in the database. The syntax is like this:

      + +
+dbcmd  incrblob  ?-readonly? +  ?DB?  TABLE  COLUMN  ROWID +
      + +

+The command returns a new TCL channel for reading or writing to the BLOB. +The channel is opened using the underlying +sqlite3_blob_open() C-language +interface. Close the channel using the close command of TCL. +

      +} + +############################################################################## +METHOD errorcode { + +

      This method returns the numeric error code that resulted from the most +recent SQLite operation.

      +} + +############################################################################## +METHOD trace { + +

      The "trace" method registers a callback that is invoked as each SQL +statement is compiled. The text of the SQL is appended as a single string +to the command before it is invoked. This can be used (for example) to +keep a log of all SQL operations that an application performs. +

      +} +
      ADDED pages/vdbe.in Index: pages/vdbe.in ================================================================== --- /dev/null +++ pages/vdbe.in @@ -0,0 +1,1984 @@ +The Virtual Database Engine of SQLite + +puts { +

      The Virtual Database Engine of SQLite

      + +
      +This document describes the virtual machine used in SQLite version 2.8.0. +The virtual machine in SQLite version 3.0 and 3.1 is very similar in +concept but many of the opcodes have changed and the algorithms are +somewhat different. Use this document as a rough guide to the idea +behind the virtual machine in SQLite version 3, not as a reference on +how the virtual machine works. +
      +} + +puts { +

      If you want to know how the SQLite library works internally, +you need to begin with a solid understanding of the Virtual Database +Engine or VDBE. The VDBE occurs right in the middle of the +processing stream (see the architecture diagram) +and so it seems to touch most parts of the library. Even +parts of the code that do not directly interact with the VDBE +are usually in a supporting role. The VDBE really is the heart of +SQLite.

      + +

This article is a brief introduction to how the VDBE +works and in particular how the various VDBE instructions +(documented here) work together +to do useful things with the database. The style is tutorial, +beginning with simple tasks and working toward solving more +complex problems. Along the way we will visit most +submodules in the SQLite library. After completing this tutorial, +you should have a pretty good understanding of how SQLite works +and will be ready to begin studying the actual source code.

      + +

      Preliminaries

      + +

      The VDBE implements a virtual computer that runs a program in +its virtual machine language. The goal of each program is to +interrogate or change the database. Toward this end, the machine +language that the VDBE implements is specifically designed to +search, read, and modify databases.

      + +

      Each instruction of the VDBE language contains an opcode and +three operands labeled P1, P2, and P3. Operand P1 is an arbitrary +integer. P2 is a non-negative integer. P3 is a pointer to a data +structure or null-terminated string, possibly null. Only a few VDBE +instructions use all three operands. Many instructions use only +one or two operands. A significant number of instructions use +no operands at all but instead take their data and store their results +on the execution stack. The details of what each instruction +does and which operands it uses are described in the separate +opcode description document.

      + +

      A VDBE program begins +execution on instruction 0 and continues with successive instructions +until it either (1) encounters a fatal error, (2) executes a +Halt instruction, or (3) advances the program counter past the +last instruction of the program. When the VDBE completes execution, +all open database cursors are closed, all memory is freed, and +everything is popped from the stack. +So there are never any worries about memory leaks or +undeallocated resources.

      + +

      If you have done any assembly language programming or have +worked with any kind of abstract machine before, all of these +details should be familiar to you. So let's jump right in and +start looking as some code.

      + + +

      Inserting Records Into The Database

      + +

      We begin with a problem that can be solved using a VDBE program +that is only a few instructions long. Suppose we have an SQL +table that was created like this:

      + +
      +CREATE TABLE examp(one text, two int);
      +
      + +

      In words, we have a database table named "examp" that has two +columns of data named "one" and "two". Now suppose we want to insert a single +record into this table. Like this:

      + +
      +INSERT INTO examp VALUES('Hello, World!',99);
      +
      + +

      We can see the VDBE program that SQLite uses to implement this +INSERT using the sqlite command-line utility. First start +up sqlite on a new, empty database, then create the table. +Next change the output format of sqlite to a form that +is designed to work with VDBE program dumps by entering the +".explain" command. +Finally, enter the INSERT statement shown above, but precede the +INSERT with the special keyword "EXPLAIN". The EXPLAIN keyword +will cause sqlite to print the VDBE program rather than +execute it. We have:

      +} +proc Code {body} { + puts {
      } + regsub -all {&} [string trim $body] {\&} body + regsub -all {>} $body {\>} body + regsub -all {<} $body {\<} body + regsub -all {\(\(\(} $body {} body + regsub -all {\)\)\)} $body {} body + regsub -all { } $body {\ } body + regsub -all \n $body
      \n body + puts $body + puts {
      } +} + +Code { +$ (((sqlite test_database_1))) +sqlite> (((CREATE TABLE examp(one text, two int);))) +sqlite> (((.explain))) +sqlite> (((EXPLAIN INSERT INTO examp VALUES('Hello, World!',99);))) +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 Transaction 0 0 +1 VerifyCookie 0 81 +2 Transaction 1 0 +3 Integer 0 0 +4 OpenWrite 0 3 examp +5 NewRecno 0 0 +6 String 0 0 Hello, World! +7 Integer 99 0 99 +8 MakeRecord 2 0 +9 PutIntKey 0 1 +10 Close 0 0 +11 Commit 0 0 +12 Halt 0 0 +} + +puts {

      As you can see above, our simple insert statement is +implemented in 12 instructions. The first 3 and last 2 instructions are +a standard prologue and epilogue, so the real work is done in the middle +7 instructions. There are no jumps, so the program executes once through +from top to bottom. Let's now look at each instruction in detail.

      +} + +Code { +0 Transaction 0 0 +1 VerifyCookie 0 81 +2 Transaction 1 0 +} +puts { +

      The instruction Transaction +begins a transaction. The transaction ends when a Commit or Rollback +opcode is encountered. P1 is the index of the database file on which +the transaction is started. Index 0 is the main database file. A write +lock is obtained on the database file when a transaction is started. +No other process can read or write the file while the transaction is +underway. Starting a transaction also creates a rollback journal. A +transaction must be started before any changes can be made to the +database.

      + +

      The instruction VerifyCookie +checks cookie 0 (the database schema version) to make sure it is equal +to P2 (the value obtained when the database schema was last read). +P1 is the database number (0 for the main database). This is done to +make sure the database schema hasn't been changed by another thread, in +which case it has to be reread.

      + +

      The second Transaction +instruction begins a transaction and starts a rollback journal for +database 1, the database used for temporary tables.

      +} + +proc stack args { + puts "
      " + foreach elem $args { + puts "" + } + puts "
      $elem
      " +} + +Code { +3 Integer 0 0 +4 OpenWrite 0 3 examp +} +puts { +

      The instruction Integer pushes +the integer value P1 (0) onto the stack. Here 0 is the number of the +database to use in the following OpenWrite instruction. If P3 is not +NULL then it is a string representation of the same integer. Afterwards +the stack looks like this:

      +} +stack {(integer) 0} + +puts { +

      The instruction OpenWrite opens +a new read/write cursor with handle P1 (0 in this case) on table "examp", +whose root page is P2 (3, in this database file). Cursor handles can be +any non-negative integer. But the VDBE allocates cursors in an array +with the size of the array being one more than the largest cursor. So +to conserve memory, it is best to use handles beginning with zero and +working upward consecutively. Here P3 ("examp") is the name of the +table being opened, but this is unused, and only generated to make the +code easier to read. This instruction pops the database number to use +(0, the main database) from the top of the stack, so afterwards the +stack is empty again.

      +} + +Code { +5 NewRecno 0 0 +} +puts { +

      The instruction NewRecno creates +a new integer record number for the table pointed to by cursor P1. The +record number is one not currently used as a key in the table. The new +record number is pushed onto the stack. Afterwards the stack looks like +this:

      +} +stack {(integer) new record key} + +Code { +6 String 0 0 Hello, World! +} +puts { +

      The instruction String pushes its +P3 operand onto the stack. Afterwards the stack looks like this:

      +} +stack {(string) "Hello, World!"} \ + {(integer) new record key} + +Code { +7 Integer 99 0 99 +} +puts { +

      The instruction Integer pushes +its P1 operand (99) onto the stack. Afterwards the stack looks like +this:

      +} +stack {(integer) 99} \ + {(string) "Hello, World!"} \ + {(integer) new record key} + +Code { +8 MakeRecord 2 0 +} +puts { +

      The instruction MakeRecord pops +the top P1 elements off the stack (2 in this case) and converts them into +the binary format used for storing records in a database file. +(See the file format description for +details.) The new record generated by the MakeRecord instruction is +pushed back onto the stack. Afterwards the stack looks like this:

      + +} +stack {(record) "Hello, World!", 99} \ + {(integer) new record key} + +Code { +9 PutIntKey 0 1 +} +puts { +

      The instruction PutIntKey uses +the top 2 stack entries to write an entry into the table pointed to by +cursor P1. A new entry is created if it doesn't already exist or the +data for an existing entry is overwritten. The record data is the top +stack entry, and the key is the next entry down. The stack is popped +twice by this instruction. Because operand P2 is 1 the row change count +is incremented and the rowid is stored for subsequent return by the +sqlite_last_insert_rowid() function. If P2 is 0 the row change count is +unmodified. This instruction is where the insert actually occurs.

      +} + +Code { +10 Close 0 0 +} +puts { +

      The instruction Close closes a +cursor previously opened as P1 (0, the only open cursor). If P1 is not +currently open, this instruction is a no-op.

      +} + +Code { +11 Commit 0 0 +} +puts { +

      The instruction Commit causes all +modifications to the database that have been made since the last +Transaction to actually take effect. No additional modifications are +allowed until another transaction is started. The Commit instruction +deletes the journal file and releases the write lock on the database. +A read lock continues to be held if there are still cursors open.

      +} + +Code { +12 Halt 0 0 +} +puts { +

      The instruction Halt causes the VDBE +engine to exit immediately. All open cursors, Lists, Sorts, etc are +closed automatically. P1 is the result code returned by sqlite_exec(). +For a normal halt, this should be SQLITE_OK (0). For errors, it can be +some other value. The operand P2 is only used when there is an error. +There is an implied "Halt 0 0 0" instruction at the end of every +program, which the VDBE appends when it prepares a program to run.

      + + + +

      Tracing VDBE Program Execution

      + +

      If the SQLite library is compiled without the NDEBUG preprocessor +macro, then the PRAGMA vdbe_trace + causes the VDBE to trace the execution of programs. Though this +feature was originally intended for testing and debugging, it can also +be useful in learning about how the VDBE operates. +Use "PRAGMA vdbe_trace=ON;" to turn tracing on and +"PRAGMA vdbe_trace=OFF" to turn tracing back off. +Like this:

      +} + +Code { +sqlite> (((PRAGMA vdbe_trace=ON;))) + 0 Halt 0 0 +sqlite> (((INSERT INTO examp VALUES('Hello, World!',99);))) + 0 Transaction 0 0 + 1 VerifyCookie 0 81 + 2 Transaction 1 0 + 3 Integer 0 0 +Stack: i:0 + 4 OpenWrite 0 3 examp + 5 NewRecno 0 0 +Stack: i:2 + 6 String 0 0 Hello, World! +Stack: t[Hello,.World!] i:2 + 7 Integer 99 0 99 +Stack: si:99 t[Hello,.World!] i:2 + 8 MakeRecord 2 0 +Stack: s[...Hello,.World!.99] i:2 + 9 PutIntKey 0 1 + 10 Close 0 0 + 11 Commit 0 0 + 12 Halt 0 0 +} + +puts { +

      With tracing mode on, the VDBE prints each instruction prior +to executing it. After the instruction is executed, the top few +entries in the stack are displayed. The stack display is omitted +if the stack is empty.

      + +

      On the stack display, most entries are shown with a prefix +that tells the datatype of that stack entry. Integers begin +with "i:". Floating point values begin with "r:". +(The "r" stands for "real-number".) Strings begin with either +"s:", "t:", "e:" or "z:". +The difference among the string prefixes is caused by how their +memory is allocated. The z: strings are stored in memory obtained +from malloc(). The t: strings are statically allocated. +The e: strings are ephemeral. All other strings have the s: prefix. +This doesn't make any difference to you, +the observer, but it is vitally important to the VDBE since the +z: strings need to be passed to free() when they are +popped to avoid a memory leak. Note that only the first 10 +characters of string values are displayed and that binary +values (such as the result of the MakeRecord instruction) are +treated as strings. The only other datatype that can be stored +on the VDBE stack is a NULL, which is display without prefix +as simply "NULL". If an integer has been placed on the +stack as both an integer and a string, its prefix is "si:". + + + +

      Simple Queries

      + +

      At this point, you should understand the basics of how the VDBE +writes to a database. Now let's look at how it does queries. +We will use the following simple SELECT statement as our example:

      + +
      +SELECT * FROM examp;
      +
      + +

      The VDBE program generated for this SQL statement is as follows:

      +} + +Code { +sqlite> (((EXPLAIN SELECT * FROM examp;))) +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 ColumnName 0 0 one +1 ColumnName 1 0 two +2 Integer 0 0 +3 OpenRead 0 3 examp +4 VerifyCookie 0 81 +5 Rewind 0 10 +6 Column 0 0 +7 Column 0 1 +8 Callback 2 0 +9 Next 0 6 +10 Close 0 0 +11 Halt 0 0 +} + +puts { +

      Before we begin looking at this problem, let's briefly review +how queries work in SQLite so that we will know what we are trying +to accomplish. For each row in the result of a query, +SQLite will invoke a callback function with the following +prototype:

      + +
      +int Callback(void *pUserData, int nColumn, char *azData[], char *azColumnName[]);
      +
      + +

      The SQLite library supplies the VDBE with a pointer to the callback function +and the pUserData pointer. (Both the callback and the user data were +originally passed in as arguments to the sqlite_exec() API function.) +The job of the VDBE is to +come up with values for nColumn, azData[], +and azColumnName[]. +nColumn is the number of columns in the results, of course. +azColumnName[] is an array of strings where each string is the name +of one of the result columns. azData[] is an array of strings holding +the actual data.

      +} + +Code { +0 ColumnName 0 0 one +1 ColumnName 1 0 two +} +puts { +

      The first two instructions in the VDBE program for our query are +concerned with setting up values for azColumn. +The ColumnName instructions tell +the VDBE what values to fill in for each element of the azColumnName[] +array. Every query will begin with one ColumnName instruction for each +column in the result, and there will be a matching Column instruction for +each one later in the query. +

      +} + +Code { +2 Integer 0 0 +3 OpenRead 0 3 examp +4 VerifyCookie 0 81 +} +puts { +

      Instructions 2 and 3 open a read cursor on the database table that is +to be queried. This works the same as the OpenWrite instruction in the +INSERT example except that the cursor is opened for reading this time +instead of for writing. Instruction 4 verifies the database schema as +in the INSERT example.

      +} + +Code { +5 Rewind 0 10 +} +puts { +

      The Rewind instruction initializes +a loop that iterates over the "examp" table. It rewinds the cursor P1 +to the first entry in its table. This is required by the the Column and +Next instructions, which use the cursor to iterate through the table. +If the table is empty, then jump to P2 (10), which is the instruction just +past the loop. If the table is not empty, fall through to the following +instruction at 6, which is the beginning of the loop body.

      +} + +Code { +6 Column 0 0 +7 Column 0 1 +8 Callback 2 0 +} +puts { +

      The instructions 6 through 8 form the body of the loop that will +execute once for each record in the database file. + +The Column instructions at addresses 6 +and 7 each take the P2-th column from the P1-th cursor and push it onto +the stack. In this example, the first Column instruction is pushing the +value for the column "one" onto the stack and the second Column +instruction is pushing the value for column "two". + +The Callback instruction at address 8 +invokes the callback() function. The P1 operand to Callback becomes the +value for nColumn. The Callback instruction pops P1 values from +the stack and uses them to fill the azData[] array.

      +} + +Code { +9 Next 0 6 +} +puts { +

      The instruction at address 9 implements the branching part of the +loop. Together with the Rewind at address 5 it forms the loop logic. +This is a key concept that you should pay close attention to. +The Next instruction advances the cursor +P1 to the next record. If the cursor advance was successful, then jump +immediately to P2 (6, the beginning of the loop body). If the cursor +was at the end, then fall through to the following instruction, which +ends the loop.

      +} + +Code { +10 Close 0 0 +11 Halt 0 0 +} +puts { +

      The Close instruction at the end of the program closes the +cursor that points into the table "examp". It is not really necessary +to call Close here since all cursors will be automatically closed +by the VDBE when the program halts. But we needed an instruction +for the Rewind to jump to so we might as well go ahead and have that +instruction do something useful. +The Halt instruction ends the VDBE program.

      + +

      Note that the program for this SELECT query didn't contain the +Transaction and Commit instructions used in the INSERT example. Because +the SELECT is a read operation that doesn't alter the database, it +doesn't require a transaction.

      +} + + +puts { + +

      A Slightly More Complex Query

      + +

      The key points of the previous example were the use of the Callback +instruction to invoke the callback function, and the use of the Next +instruction to implement a loop over all records of the database file. +This example attempts to drive home those ideas by demonstrating a +slightly more complex query that involves more columns of +output, some of which are computed values, and a WHERE clause that +limits which records actually make it to the callback function. +Consider this query:

      + +
      +SELECT one, two, one || two AS 'both'
      +FROM examp
      +WHERE one LIKE 'H%'
      +
      + +

      This query is perhaps a bit contrived, but it does serve to +illustrate our points. The result will have three column with +names "one", "two", and "both". The first two columns are direct +copies of the two columns in the table and the third result +column is a string formed by concatenating the first and +second columns of the table. +Finally, the +WHERE clause says that we will only chose rows for the +results where the "one" column begins with an "H". +Here is what the VDBE program looks like for this query:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 ColumnName 0 0 one +1 ColumnName 1 0 two +2 ColumnName 2 0 both +3 Integer 0 0 +4 OpenRead 0 3 examp +5 VerifyCookie 0 81 +6 Rewind 0 18 +7 String 0 0 H% +8 Column 0 0 +9 Function 2 0 ptr(0x7f1ac0) +10 IfNot 1 17 +11 Column 0 0 +12 Column 0 1 +13 Column 0 0 +14 Column 0 1 +15 Concat 2 0 +16 Callback 3 0 +17 Next 0 7 +18 Close 0 0 +19 Halt 0 0 +} + +puts { +

      Except for the WHERE clause, the structure of the program for +this example is very much like the prior example, just with an +extra column. There are now 3 columns, instead of 2 as before, +and there are three ColumnName instructions. +A cursor is opened using the OpenRead instruction, just like in the +prior example. The Rewind instruction at address 6 and the +Next at address 17 form a loop over all records of the table. +The Close instruction at the end is there to give the +Rewind instruction something to jump to when it is done. All of +this is just like in the first query demonstration.

      + +

      The Callback instruction in this example has to generate +data for three result columns instead of two, but is otherwise +the same as in the first query. When the Callback instruction +is invoked, the left-most column of the result should be +the lowest in the stack and the right-most result column should +be the top of the stack. We can see the stack being set up +this way at addresses 11 through 15. The Column instructions at +11 and 12 push the values for the first two columns in the result. +The two Column instructions at 13 and 14 pull in the values needed +to compute the third result column and the Concat instruction at +15 joins them together into a single entry on the stack.

      + +

      The only thing that is really new about the current example +is the WHERE clause which is implemented by instructions at +addresses 7 through 10. Instructions at address 7 and 8 push +onto the stack the value of the "one" column from the table +and the literal string "H%". +The Function instruction at address 9 +pops these two values from the stack and pushes the result of the LIKE() +function back onto the stack. +The IfNot instruction pops the top stack +value and causes an immediate jump forward to the Next instruction if the +top value was false (not not like the literal string "H%"). +Taking this jump effectively skips the callback, which is the whole point +of the WHERE clause. If the result +of the comparison is true, the jump is not taken and control +falls through to the Callback instruction below.

      + +

      Notice how the LIKE operator is implemented. It is a user-defined +function in SQLite, so the address of its function definition is +specified in P3. The operand P1 is the number of function arguments for +it to take from the stack. In this case the LIKE() function takes 2 +arguments. The arguments are taken off the stack in reverse order +(right-to-left), so the pattern to match is the top stack element, and +the next element is the data to compare. The return value is pushed +onto the stack.

      + + + +

      A Template For SELECT Programs

      + +

      The first two query examples illustrate a kind of template that +every SELECT program will follow. Basically, we have:

      + +

      +

        +
      1. Initialize the azColumnName[] array for the callback.
      2. +
      3. Open a cursor into the table to be queried.
      4. +
      5. For each record in the table, do: +
          +
        1. If the WHERE clause evaluates to FALSE, then skip the steps that + follow and continue to the next record.
        2. +
        3. Compute all columns for the current row of the result.
        4. +
        5. Invoke the callback function for the current row of the result.
        6. +
        +
      6. Close the cursor.
      7. +
      +

      + +

      This template will be expanded considerably as we consider +additional complications such as joins, compound selects, using +indices to speed the search, sorting, and aggregate functions +with and without GROUP BY and HAVING clauses. +But the same basic ideas will continue to apply.

      + +

      UPDATE And DELETE Statements

      + +

      The UPDATE and DELETE statements are coded using a template +that is very similar to the SELECT statement template. The main +difference, of course, is that the end action is to modify the +database rather than invoke a callback function. Because it modifies +the database it will also use transactions. Let's begin +by looking at a DELETE statement:

      + +
      +DELETE FROM examp WHERE two<50;
      +
      + +

      This DELETE statement will remove every record from the "examp" +table where the "two" column is less than 50. +The code generated to do this is as follows:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 Transaction 1 0 +1 Transaction 0 0 +2 VerifyCookie 0 178 +3 Integer 0 0 +4 OpenRead 0 3 examp +5 Rewind 0 12 +6 Column 0 1 +7 Integer 50 0 50 +8 Ge 1 11 +9 Recno 0 0 +10 ListWrite 0 0 +11 Next 0 6 +12 Close 0 0 +13 ListRewind 0 0 +14 Integer 0 0 +15 OpenWrite 0 3 +16 ListRead 0 20 +17 NotExists 0 19 +18 Delete 0 1 +19 Goto 0 16 +20 ListReset 0 0 +21 Close 0 0 +22 Commit 0 0 +23 Halt 0 0 +} + +puts { +

      Here is what the program must do. First it has to locate all of +the records in the table "examp" that are to be deleted. This is +done using a loop very much like the loop used in the SELECT examples +above. Once all records have been located, then we can go back through +and delete them one by one. Note that we cannot delete each record +as soon as we find it. We have to locate all records first, then +go back and delete them. This is because the SQLite database +backend might change the scan order after a delete operation. +And if the scan +order changes in the middle of the scan, some records might be +visited more than once and other records might not be visited at all.

      + +

      So the implemention of DELETE is really in two loops. The first loop +(instructions 5 through 11) locates the records that are to be deleted +and saves their keys onto a temporary list, and the second loop +(instructions 16 through 19) uses the key list to delete the records one +by one.

      +} + + +Code { +0 Transaction 1 0 +1 Transaction 0 0 +2 VerifyCookie 0 178 +3 Integer 0 0 +4 OpenRead 0 3 examp +} +puts { +

      Instructions 0 though 4 are as in the INSERT example. They start +transactions for the main and temporary databases, verify the database +schema for the main database, and open a read cursor on the table +"examp". Notice that the cursor is opened for reading, not writing. At +this stage of the program we are only going to be scanning the table, +not changing it. We will reopen the same table for writing later, at +instruction 15.

      +} + +Code { +5 Rewind 0 12 +} +puts { +

      As in the SELECT example, the Rewind +instruction rewinds the cursor to the beginning of the table, readying +it for use in the loop body.

      +} + +Code { +6 Column 0 1 +7 Integer 50 0 50 +8 Ge 1 11 +} +puts { +

      The WHERE clause is implemented by instructions 6 through 8. +The job of the where clause is to skip the ListWrite if the WHERE +condition is false. To this end, it jumps ahead to the Next instruction +if the "two" column (extracted by the Column instruction) is +greater than or equal to 50.

      + +

      As before, the Column instruction uses cursor P1 and pushes the data +record in column P2 (1, column "two") onto the stack. The Integer +instruction pushes the value 50 onto the top of the stack. After these +two instructions the stack looks like:

      +} +stack {(integer) 50} \ + {(record) current record for column "two" } + +puts { +

      The Ge operator compares the top two +elements on the stack, pops them, and then branches based on the result +of the comparison. If the second element is >= the top element, then +jump to address P2 (the Next instruction at the end of the loop). +Because P1 is true, if either operand is NULL (and thus the result is +NULL) then take the jump. If we don't jump, just advance to the next +instruction.

      +} + +Code { +9 Recno 0 0 +10 ListWrite 0 0 +} +puts { +

      The Recno instruction pushes onto the +stack an integer which is the first 4 bytes of the the key to the current +entry in a sequential scan of the table pointed to by cursor P1. +The ListWrite instruction writes the +integer on the top of the stack into a temporary storage list and pops +the top element. This is the important work of this loop, to store the +keys of the records to be deleted so we can delete them in the second +loop. After this ListWrite instruction the stack is empty again.

      +} + +Code { +11 Next 0 6 +12 Close 0 0 +} +puts { +

      The Next instruction increments the cursor to point to the next +element in the table pointed to by cursor P0, and if it was successful +branches to P2 (6, the beginning of the loop body). The Close +instruction closes cursor P1. It doesn't affect the temporary storage +list because it isn't associated with cursor P1; it is instead a global +working list (which can be saved with ListPush).

      +} + +Code { +13 ListRewind 0 0 +} +puts { +

      The ListRewind instruction +rewinds the temporary storage list to the beginning. This prepares it +for use in the second loop.

      +} + +Code { +14 Integer 0 0 +15 OpenWrite 0 3 +} +puts { +

      As in the INSERT example, we push the database number P1 (0, the main +database) onto the stack and use OpenWrite to open the cursor P1 on table +P2 (base page 3, "examp") for modification.

      +} + +Code { +16 ListRead 0 20 +17 NotExists 0 19 +18 Delete 0 1 +19 Goto 0 16 +} +puts { +

      This loop does the actual deleting. It is organized differently from +the one in the UPDATE example. The ListRead instruction plays the role +that the Next did in the INSERT loop, but because it jumps to P2 on +failure, and Next jumps on success, we put it at the start of the loop +instead of the end. This means that we have to put a Goto at the end of +the loop to jump back to the the loop test at the beginning. So this +loop has the form of a C while(){...} loop, while the loop in the INSERT +example had the form of a do{...}while() loop. The Delete instruction +fills the role that the callback function did in the preceding examples. +

      +

      The ListRead instruction reads an +element from the temporary storage list and pushes it onto the stack. +If this was successful, it continues to the next instruction. If this +fails because the list is empty, it branches to P2, which is the +instruction just after the loop. Afterwards the stack looks like:

      +} +stack {(integer) key for current record} + +puts { +

      Notice the similarity between the ListRead and Next instructions. +Both operations work according to this rule: +

      +
      +Push the next "thing" onto the stack and fall through OR jump to P2, +depending on whether or not there is a next "thing" to push. +
      +

      One difference between Next and ListRead is their idea of a "thing". +The "things" for the Next instruction are records in a database file. +"Things" for ListRead are integer keys in a list. Another difference +is whether to jump or fall through if there is no next "thing". In this +case, Next falls through, and ListRead jumps. Later on, we will see +other looping instructions (NextIdx and SortNext) that operate using the +same principle.

      + +

      The NotExists instruction pops +the top stack element and uses it as an integer key. If a record with +that key does not exist in table P1, then jump to P2. If a record does +exist, then fall thru to the next instruction. In this case P2 takes +us to the Goto at the end of the loop, which jumps back to the ListRead +at the beginning. This could have been coded to have P2 be 16, the +ListRead at the start of the loop, but the SQLite parser which generated +this code didn't make that optimization.

      +

      The Delete does the work of this +loop; it pops an integer key off the stack (placed there by the +preceding ListRead) and deletes the record of cursor P1 that has that key. +Because P2 is true, the row change counter is incremented.

      +

      The Goto jumps back to the beginning +of the loop. This is the end of the loop.

      +} + +Code { +20 ListReset 0 0 +21 Close 0 0 +22 Commit 0 0 +23 Halt 0 0 +} +puts { +

      This block of instruction cleans up the VDBE program. Three of these +instructions aren't really required, but are generated by the SQLite +parser from its code templates, which are designed to handle more +complicated cases.

      +

      The ListReset instruction empties +the temporary storage list. This list is emptied automatically when the +VDBE program terminates, so it isn't necessary in this case. The Close +instruction closes the cursor P1. Again, this is done by the VDBE +engine when it is finished running this program. The Commit ends the +current transaction successfully, and causes all changes that occurred +in this transaction to be saved to the database. The final Halt is also +unneccessary, since it is added to every VDBE program when it is +prepared to run.

      + + +

      UPDATE statements work very much like DELETE statements except +that instead of deleting the record they replace it with a new one. +Consider this example: +

      + +
      +UPDATE examp SET one= '(' || one || ')' WHERE two < 50;
      +
      + +

      Instead of deleting records where the "two" column is less than +50, this statement just puts the "one" column in parentheses +The VDBE program to implement this statement follows:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 Transaction 1 0 +1 Transaction 0 0 +2 VerifyCookie 0 178 +3 Integer 0 0 +4 OpenRead 0 3 examp +5 Rewind 0 12 +6 Column 0 1 +7 Integer 50 0 50 +8 Ge 1 11 +9 Recno 0 0 +10 ListWrite 0 0 +11 Next 0 6 +12 Close 0 0 +13 Integer 0 0 +14 OpenWrite 0 3 +15 ListRewind 0 0 +16 ListRead 0 28 +17 Dup 0 0 +18 NotExists 0 16 +19 String 0 0 ( +20 Column 0 0 +21 Concat 2 0 +22 String 0 0 ) +23 Concat 2 0 +24 Column 0 1 +25 MakeRecord 2 0 +26 PutIntKey 0 1 +27 Goto 0 16 +28 ListReset 0 0 +29 Close 0 0 +30 Commit 0 0 +31 Halt 0 0 +} + +puts { +

      This program is essentially the same as the DELETE program except +that the body of the second loop has been replace by a sequence of +instructions (at addresses 17 through 26) that update the record rather +than delete it. Most of this instruction sequence should already be +familiar to you, but there are a couple of minor twists so we will go +over it briefly. Also note that the order of some of the instructions +before and after the 2nd loop has changed. This is just the way the +SQLite parser chose to output the code using a different template.

      + +

      As we enter the interior of the second loop (at instruction 17) +the stack contains a single integer which is the key of the +record we want to modify. We are going to need to use this +key twice: once to fetch the old value of the record and +a second time to write back the revised record. So the first instruction +is a Dup to make a duplicate of the key on the top of the stack. The +Dup instruction will duplicate any element of the stack, not just the top +element. You specify which element to duplication using the +P1 operand. When P1 is 0, the top of the stack is duplicated. +When P1 is 1, the next element down on the stack duplication. +And so forth.

      + +

      After duplicating the key, the next instruction, NotExists, +pops the stack once and uses the value popped as a key to +check the existence of a record in the database file. If there is no record +for this key, it jumps back to the ListRead to get another key.

      + +

      Instructions 19 through 25 construct a new database record +that will be used to replace the existing record. This is +the same kind of code that we saw +in the description of INSERT and will not be described further. +After instruction 25 executes, the stack looks like this:

      +} + +stack {(record) new data record} {(integer) key} + +puts { +

      The PutIntKey instruction (also described +during the discussion about INSERT) writes an entry into the +database file whose data is the top of the stack and whose key +is the next on the stack, and then pops the stack twice. The +PutIntKey instruction will overwrite the data of an existing record +with the same key, which is what we want here. Overwriting was not +an issue with INSERT because with INSERT the key was generated +by the NewRecno instruction which is guaranteed to provide a key +that has not been used before.

      +} + +if 0 {

      (By the way, since keys must +all be unique and each key is a 32-bit integer, a single +SQLite database table can have no more than 232 +rows. Actually, the Key instruction starts to become +very inefficient as you approach this upper bound, so it +is best to keep the number of entries below 231 +or so. Surely a couple billion records will be enough for +most applications!)

      +} + +puts { +

      CREATE and DROP

      + +

      Using CREATE or DROP to create or destroy a table or index is +really the same as doing an INSERT or DELETE from the special +"sqlite_master" table, at least from the point of view of the VDBE. +The sqlite_master table is a special table that is automatically +created for every SQLite database. It looks like this:

      + +
      +CREATE TABLE sqlite_master (
      +  type      TEXT,    -- either "table" or "index"
      +  name      TEXT,    -- name of this table or index
      +  tbl_name  TEXT,    -- for indices: name of associated table
      +  sql       TEXT     -- SQL text of the original CREATE statement
      +)
      +
      + +

      Every table (except the "sqlite_master" table itself) +and every named index in an SQLite database has an entry +in the sqlite_master table. You can query this table using +a SELECT statement just like any other table. But you are +not allowed to directly change the table using UPDATE, INSERT, +or DELETE. Changes to sqlite_master have to occur using +the CREATE and DROP commands because SQLite also has to update +some of its internal data structures when tables and indices +are added or destroyed.

      + +

      But from the point of view of the VDBE, a CREATE works +pretty much like an INSERT and a DROP works like a DELETE. +When the SQLite library opens to an existing database, +the first thing it does is a SELECT to read the "sql" +columns from all entries of the sqlite_master table. +The "sql" column contains the complete SQL text of the +CREATE statement that originally generated the index or +table. This text is fed back into the SQLite parser +and used to reconstruct the +internal data structures describing the index or table.

      + +

      Using Indexes To Speed Searching

      + +

      In the example queries above, every row of the table being +queried must be loaded off of the disk and examined, even if only +a small percentage of the rows end up in the result. This can +take a long time on a big table. To speed things up, SQLite +can use an index.

      + +

      An SQLite file associates a key with some data. For an SQLite +table, the database file is set up so that the key is an integer +and the data is the information for one row of the table. +Indices in SQLite reverse this arrangement. The index key +is (some of) the information being stored and the index data +is an integer. +To access a table row that has some particular +content, we first look up the content in the index table to find +its integer index, then we use that integer to look up the +complete record in the table.

      + +

      Note that SQLite uses b-trees, which are a sorted data structure, +so indices can be used when the WHERE clause of the SELECT statement +contains tests for equality or inequality. Queries like the following +can use an index if it is available:

      + +
      +SELECT * FROM examp WHERE two==50;
      +SELECT * FROM examp WHERE two<50;
      +SELECT * FROM examp WHERE two IN (50, 100);
      +
      + +

      If there exists an index that maps the "two" column of the "examp" +table into integers, then SQLite will use that index to find the integer +keys of all rows in examp that have a value of 50 for column two, or +all rows that are less than 50, etc. +But the following queries cannot use the index:

      + +
      +SELECT * FROM examp WHERE two%50 == 10;
      +SELECT * FROM examp WHERE two&127 == 3;
      +
      + +

      Note that the SQLite parser will not always generate code to use an +index, even if it is possible to do so. The following queries will not +currently use the index:

      + +
      +SELECT * FROM examp WHERE two+10 == 50;
      +SELECT * FROM examp WHERE two==50 OR two==100;
      +
      + +

      To understand better how indices work, lets first look at how +they are created. Let's go ahead and put an index on the two +column of the examp table. We have:

      + +
      +CREATE INDEX examp_idx1 ON examp(two);
      +
      + +

      The VDBE code generated by the above statement looks like the +following:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 Transaction 1 0 +1 Transaction 0 0 +2 VerifyCookie 0 178 +3 Integer 0 0 +4 OpenWrite 0 2 +5 NewRecno 0 0 +6 String 0 0 index +7 String 0 0 examp_idx1 +8 String 0 0 examp +9 CreateIndex 0 0 ptr(0x791380) +10 Dup 0 0 +11 Integer 0 0 +12 OpenWrite 1 0 +13 String 0 0 CREATE INDEX examp_idx1 ON examp(tw +14 MakeRecord 5 0 +15 PutIntKey 0 0 +16 Integer 0 0 +17 OpenRead 2 3 examp +18 Rewind 2 24 +19 Recno 2 0 +20 Column 2 1 +21 MakeIdxKey 1 0 n +22 IdxPut 1 0 indexed columns are not unique +23 Next 2 19 +24 Close 2 0 +25 Close 1 0 +26 Integer 333 0 +27 SetCookie 0 0 +28 Close 0 0 +29 Commit 0 0 +30 Halt 0 0 +} + +puts { +

      Remember that every table (except sqlite_master) and every named +index has an entry in the sqlite_master table. Since we are creating +a new index, we have to add a new entry to sqlite_master. This is +handled by instructions 3 through 15. Adding an entry to sqlite_master +works just like any other INSERT statement so we will not say anymore +about it here. In this example, we want to focus on populating the +new index with valid data, which happens on instructions 16 through +23.

      +} + +Code { +16 Integer 0 0 +17 OpenRead 2 3 examp +} +puts { +

      The first thing that happens is that we open the table being +indexed for reading. In order to construct an index for a table, +we have to know what is in that table. The index has already been +opened for writing using cursor 0 by instructions 3 and 4.

      +} + +Code { +18 Rewind 2 24 +19 Recno 2 0 +20 Column 2 1 +21 MakeIdxKey 1 0 n +22 IdxPut 1 0 indexed columns are not unique +23 Next 2 19 +} +puts { +

      Instructions 18 through 23 implement a loop over every row of the +table being indexed. For each table row, we first extract the integer +key for that row using Recno in instruction 19, then get the value of +the "two" column using Column in instruction 20. +The MakeIdxKey instruction at 21 +converts data from the "two" column (which is on the top of the stack) +into a valid index key. For an index on a single column, this is +basically a no-op. But if the P1 operand to MakeIdxKey had been +greater than one multiple entries would have been popped from the stack +and converted into a single index key. +The IdxPut instruction at 22 is what +actually creates the index entry. IdxPut pops two elements from the +stack. The top of the stack is used as a key to fetch an entry from the +index table. Then the integer which was second on stack is added to the +set of integers for that index and the new record is written back to the +database file. Note +that the same index entry can store multiple integers if there +are two or more table entries with the same value for the two +column. +

      + +

      Now let's look at how this index will be used. Consider the +following query:

      + +
      +SELECT * FROM examp WHERE two==50;
      +
      + +

      SQLite generates the following VDBE code to handle this query:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 ColumnName 0 0 one +1 ColumnName 1 0 two +2 Integer 0 0 +3 OpenRead 0 3 examp +4 VerifyCookie 0 256 +5 Integer 0 0 +6 OpenRead 1 4 examp_idx1 +7 Integer 50 0 50 +8 MakeKey 1 0 n +9 MemStore 0 0 +10 MoveTo 1 19 +11 MemLoad 0 0 +12 IdxGT 1 19 +13 IdxRecno 1 0 +14 MoveTo 0 0 +15 Column 0 0 +16 Column 0 1 +17 Callback 2 0 +18 Next 1 11 +19 Close 0 0 +20 Close 1 0 +21 Halt 0 0 +} + +puts { +

      The SELECT begins in a familiar fashion. First the column +names are initialized and the table being queried is opened. +Things become different beginning with instructions 5 and 6 where +the index file is also opened. Instructions 7 and 8 make +a key with the value of 50. +The MemStore instruction at 9 stores +the index key in VDBE memory location 0. The VDBE memory is used to +avoid having to fetch a value from deep in the stack, which can be done, +but makes the program harder to generate. The following instruction +MoveTo at address 10 pops the key off +the stack and moves the index cursor to the first row of the index with +that key. This initializes the cursor for use in the following loop.

      + +

      Instructions 11 through 18 implement a loop over all index records +with the key that was fetched by instruction 8. All of the index +records with this key will be contiguous in the index table, so we walk +through them and fetch the corresponding table key from the index. +This table key is then used to move the cursor to that row in the table. +The rest of the loop is the same as the loop for the non-indexed SELECT +query.

      + +

      The loop begins with the MemLoad +instruction at 11 which pushes a copy of the index key back onto the +stack. The instruction IdxGT at 12 +compares the key to the key in the current index record pointed to by +cursor P1. If the index key at the current cursor location is greater +than the index we are looking for, then jump out of the loop.

      + +

      The instruction IdxRecno at 13 +pushes onto the stack the table record number from the index. The +following MoveTo pops it and moves the table cursor to that row. The +next 3 instructions select the column data the same way as in the non- +indexed case. The Column instructions fetch the column data and the +callback function is invoked. The final Next instruction advances the +index cursor, not the table cursor, to the next row, and then branches +back to the start of the loop if there are any index records left.

      + +

      Since the index is used to look up values in the table, +it is important that the index and table be kept consistent. +Now that there is an index on the examp table, we will have +to update that index whenever data is inserted, deleted, or +changed in the examp table. Remember the first example above +where we were able to insert a new row into the "examp" table using +12 VDBE instructions. Now that this table is indexed, 19 +instructions are required. The SQL statement is this:

      + +
      +INSERT INTO examp VALUES('Hello, World!',99);
      +
      + +

      And the generated code looks like this:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 Transaction 1 0 +1 Transaction 0 0 +2 VerifyCookie 0 256 +3 Integer 0 0 +4 OpenWrite 0 3 examp +5 Integer 0 0 +6 OpenWrite 1 4 examp_idx1 +7 NewRecno 0 0 +8 String 0 0 Hello, World! +9 Integer 99 0 99 +10 Dup 2 1 +11 Dup 1 1 +12 MakeIdxKey 1 0 n +13 IdxPut 1 0 +14 MakeRecord 2 0 +15 PutIntKey 0 1 +16 Close 0 0 +17 Close 1 0 +18 Commit 0 0 +19 Halt 0 0 +} + +puts { +

      At this point, you should understand the VDBE well enough to +figure out on your own how the above program works. So we will +not discuss it further in this text.

      + +

      Joins

      + +

      In a join, two or more tables are combined to generate a single +result. The result table consists of every possible combination +of rows from the tables being joined. The easiest and most natural +way to implement this is with nested loops.

      + +

      Recall the query template discussed above where there was a +single loop that searched through every record of the table. +In a join we have basically the same thing except that there +are nested loops. For example, to join two tables, the query +template might look something like this:

      + +

      +

        +
      1. Initialize the azColumnName[] array for the callback.
      2. +
      3. Open two cursors, one to each of the two tables being queried.
      4. +
      5. For each record in the first table, do: +
          +
        1. For each record in the second table do: +
            +
          1. If the WHERE clause evaluates to FALSE, then skip the steps that + follow and continue to the next record.
          2. +
          3. Compute all columns for the current row of the result.
          4. +
          5. Invoke the callback function for the current row of the result.
          6. +
        2. +
        +
      6. Close both cursors.
      7. +
      +

      + +

      This template will work, but it is likely to be slow since we +are now dealing with an O(N2) loop. But it often works +out that the WHERE clause can be factored into terms and that one or +more of those terms will involve only columns in the first table. +When this happens, we can factor part of the WHERE clause test out of +the inner loop and gain a lot of efficiency. So a better template +would be something like this:

      + +

      +

        +
      1. Initialize the azColumnName[] array for the callback.
      2. +
      3. Open two cursors, one to each of the two tables being queried.
      4. +
      5. For each record in the first table, do: +
          +
        1. Evaluate terms of the WHERE clause that only involve columns from + the first table. If any term is false (meaning that the whole + WHERE clause must be false) then skip the rest of this loop and + continue to the next record.
        2. +
        3. For each record in the second table do: +
            +
          1. If the WHERE clause evaluates to FALSE, then skip the steps that + follow and continue to the next record.
          2. +
          3. Compute all columns for the current row of the result.
          4. +
          5. Invoke the callback function for the current row of the result.
          6. +
        4. +
        +
      6. Close both cursors.
      7. +
      +

      + +

      Additional speed-up can occur if an index can be used to speed +the search of either of the two loops.

      + +

      SQLite always constructs the loops in the same order as the +tables appear in the FROM clause of the SELECT statement. The +left-most table becomes the outer loop and the right-most table +becomes the inner loop. It is possible, in theory, to reorder +the loops in some circumstances to speed the evaluation of the +join. But SQLite does not attempt this optimization.

      + +

      You can see how SQLite constructs nested loops in the following +example:

      + +
      +CREATE TABLE examp2(three int, four int);
      +SELECT * FROM examp, examp2 WHERE two<50 AND four==two;
      +
      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 ColumnName 0 0 examp.one +1 ColumnName 1 0 examp.two +2 ColumnName 2 0 examp2.three +3 ColumnName 3 0 examp2.four +4 Integer 0 0 +5 OpenRead 0 3 examp +6 VerifyCookie 0 909 +7 Integer 0 0 +8 OpenRead 1 5 examp2 +9 Rewind 0 24 +10 Column 0 1 +11 Integer 50 0 50 +12 Ge 1 23 +13 Rewind 1 23 +14 Column 1 1 +15 Column 0 1 +16 Ne 1 22 +17 Column 0 0 +18 Column 0 1 +19 Column 1 0 +20 Column 1 1 +21 Callback 4 0 +22 Next 1 14 +23 Next 0 10 +24 Close 0 0 +25 Close 1 0 +26 Halt 0 0 +} + +puts { +

      The outer loop over table examp is implemented by instructions +7 through 23. The inner loop is instructions 13 through 22. +Notice that the "two&lt;50" term of the WHERE expression involves +only columns from the first table and can be factored out of +the inner loop. SQLite does this and implements the "two&lt;50" +test in instructions 10 through 12. The "four==two" test is +implemented by instructions 14 through 16 in the inner loop.

      + +

      SQLite does not impose any arbitrary limits on the tables in +a join. It also allows a table to be joined with itself.

      + +

      The ORDER BY clause

      + +

      For historical reasons, and for efficiency, all sorting is currently +done in memory.

      + +

      SQLite implements the ORDER BY clause using a special +set of instructions to control an object called a sorter. In the +inner-most loop of the query, where there would normally be +a Callback instruction, instead a record is constructed that +contains both callback parameters and a key. This record +is added to the sorter (in a linked list). After the query loop +finishes, the list of records is sorted and this list is walked. For +each record on the list, the callback is invoked. Finally, the sorter +is closed and memory is deallocated.

      + +

      We can see the process in action in the following query:

      + +
      +SELECT * FROM examp ORDER BY one DESC, two;
      +
      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 ColumnName 0 0 one +1 ColumnName 1 0 two +2 Integer 0 0 +3 OpenRead 0 3 examp +4 VerifyCookie 0 909 +5 Rewind 0 14 +6 Column 0 0 +7 Column 0 1 +8 SortMakeRec 2 0 +9 Column 0 0 +10 Column 0 1 +11 SortMakeKey 2 0 D+ +12 SortPut 0 0 +13 Next 0 6 +14 Close 0 0 +15 Sort 0 0 +16 SortNext 0 19 +17 SortCallback 2 0 +18 Goto 0 16 +19 SortReset 0 0 +20 Halt 0 0 +} + +puts { +

      There is only one sorter object, so there are no instructions to open +or close it. It is opened automatically when needed, and it is closed +when the VDBE program halts.

      + +

      The query loop is built from instructions 5 through 13. Instructions +6 through 8 build a record that contains the azData[] values for a single +invocation of the callback. A sort key is generated by instructions +9 through 11. Instruction 12 combines the invocation record and the +sort key into a single entry and puts that entry on the sort list.

      + +

      The P3 argument of instruction 11 is of particular interest. The +sort key is formed by prepending one character from P3 to each string +and concatenating all the strings. The sort comparison function will +look at this character to determine whether the sort order is +ascending or descending, and whether to sort as a string or number. +In this example, the first column should be sorted as a string +in descending order so its prefix is "D" and the second column should +be sorted numerically in ascending order so its prefix is "+". Ascending +string sorting uses "A", and descending numeric sorting uses "-".

      + +

      After the query loop ends, the table being queried is closed at +instruction 14. This is done early in order to allow other processes +or threads to access that table, if desired. The list of records +that was built up inside the query loop is sorted by the instruction +at 15. Instructions 16 through 18 walk through the record list +(which is now in sorted order) and invoke the callback once for +each record. Finally, the sorter is closed at instruction 19.

      + +

      Aggregate Functions And The GROUP BY and HAVING Clauses

      + +

      To compute aggregate functions, the VDBE implements a special +data structure and instructions for controlling that data structure. +The data structure is an unordered set of buckets, where each bucket +has a key and one or more memory locations. Within the query +loop, the GROUP BY clause is used to construct a key and the bucket +with that key is brought into focus. A new bucket is created with +the key if one did not previously exist. Once the bucket is in +focus, the memory locations of the bucket are used to accumulate +the values of the various aggregate functions. After the query +loop terminates, each bucket is visited once to generate a +single row of the results.

      + +

      An example will help to clarify this concept. Consider the +following query:

      + +
      +SELECT three, min(three+four)+avg(four) 
      +FROM examp2
      +GROUP BY three;
      +
      + + +

      The VDBE code generated for this query is as follows:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 ColumnName 0 0 three +1 ColumnName 1 0 min(three+four)+avg(four) +2 AggReset 0 3 +3 AggInit 0 1 ptr(0x7903a0) +4 AggInit 0 2 ptr(0x790700) +5 Integer 0 0 +6 OpenRead 0 5 examp2 +7 VerifyCookie 0 909 +8 Rewind 0 23 +9 Column 0 0 +10 MakeKey 1 0 n +11 AggFocus 0 14 +12 Column 0 0 +13 AggSet 0 0 +14 Column 0 0 +15 Column 0 1 +16 Add 0 0 +17 Integer 1 0 +18 AggFunc 0 1 ptr(0x7903a0) +19 Column 0 1 +20 Integer 2 0 +21 AggFunc 0 1 ptr(0x790700) +22 Next 0 9 +23 Close 0 0 +24 AggNext 0 31 +25 AggGet 0 0 +26 AggGet 0 1 +27 AggGet 0 2 +28 Add 0 0 +29 Callback 2 0 +30 Goto 0 24 +31 Noop 0 0 +32 Halt 0 0 +} + +puts { +

      The first instruction of interest is the +AggReset at 2. +The AggReset instruction initializes the set of buckets to be the +empty set and specifies the number of memory slots available in each +bucket as P2. In this example, each bucket will hold 3 memory slots. +It is not obvious, but if you look closely at the rest of the program +you can figure out what each of these slots is intended for.

      + +
      + + + + +
      Memory SlotIntended Use Of This Memory Slot
      0The "three" column -- the key to the bucket
      1The minimum "three+four" value
      2The sum of all "four" values. This is used to compute + "avg(four)".
      + +

      The query loop is implemented by instructions 8 through 22. +The aggregate key specified by the GROUP BY clause is computed +by instructions 9 and 10. Instruction 11 causes the appropriate +bucket to come into focus. If a bucket with the given key does +not already exist, a new bucket is created and control falls +through to instructions 12 and 13 which initialize the bucket. +If the bucket does already exist, then a jump is made to instruction +14. The values of aggregate functions are updated by the instructions +between 11 and 21. Instructions 14 through 18 update memory +slot 1 to hold the next value "min(three+four)". Then the sum of the +"four" column is updated by instructions 19 through 21.

      + +

      After the query loop is finished, the table "examp2" is closed at +instruction 23 so that its lock will be released and it can be +used by other threads or processes. The next step is to loop +over all aggregate buckets and output one row of the result for +each bucket. This is done by the loop at instructions 24 +through 30. The AggNext instruction at 24 brings the next bucket +into focus, or jumps to the end of the loop if all buckets have +been examined already. The 3 columns of the result are fetched from +the aggregator bucket in order at instructions 25 through 27. +Finally, the callback is invoked at instruction 29.

      + +

      In summary then, any query with aggregate functions is implemented +by two loops. The first loop scans the input table and computes +aggregate information into buckets and the second loop scans through +all the buckets to compute the final result.

      + +

      The realization that an aggregate query is really two consecutive +loops makes it much easier to understand the difference between +a WHERE clause and a HAVING clause in an SQL query statement. The +WHERE clause is a restriction on the first loop and the HAVING +clause is a restriction on the second loop. You can see this +by adding both a WHERE and a HAVING clause to our example query:

      + + +
      +SELECT three, min(three+four)+avg(four) 
      +FROM examp2
      +WHERE three>four
      +GROUP BY three
      +HAVING avg(four)<10;
      +
      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 ColumnName 0 0 three +1 ColumnName 1 0 min(three+four)+avg(four) +2 AggReset 0 3 +3 AggInit 0 1 ptr(0x7903a0) +4 AggInit 0 2 ptr(0x790700) +5 Integer 0 0 +6 OpenRead 0 5 examp2 +7 VerifyCookie 0 909 +8 Rewind 0 26 +9 Column 0 0 +10 Column 0 1 +11 Le 1 25 +12 Column 0 0 +13 MakeKey 1 0 n +14 AggFocus 0 17 +15 Column 0 0 +16 AggSet 0 0 +17 Column 0 0 +18 Column 0 1 +19 Add 0 0 +20 Integer 1 0 +21 AggFunc 0 1 ptr(0x7903a0) +22 Column 0 1 +23 Integer 2 0 +24 AggFunc 0 1 ptr(0x790700) +25 Next 0 9 +26 Close 0 0 +27 AggNext 0 37 +28 AggGet 0 2 +29 Integer 10 0 10 +30 Ge 1 27 +31 AggGet 0 0 +32 AggGet 0 1 +33 AggGet 0 2 +34 Add 0 0 +35 Callback 2 0 +36 Goto 0 27 +37 Noop 0 0 +38 Halt 0 0 +} + +puts { +

      The code generated in this last example is the same as the +previous except for the addition of two conditional jumps used +to implement the extra WHERE and HAVING clauses. The WHERE +clause is implemented by instructions 9 through 11 in the query +loop. The HAVING clause is implemented by instruction 28 through +30 in the output loop.

      + +

      Using SELECT Statements As Terms In An Expression

      + +

      The very name "Structured Query Language" tells us that SQL should +support nested queries. And, in fact, two different kinds of nesting +are supported. Any SELECT statement that returns a single-row, single-column +result can be used as a term in an expression of another SELECT statement. +And, a SELECT statement that returns a single-column, multi-row result +can be used as the right-hand operand of the IN and NOT IN operators. +We will begin this section with an example of the first kind of nesting, +where a single-row, single-column SELECT is used as a term in an expression +of another SELECT. Here is our example:

      + +
      +SELECT * FROM examp
      +WHERE two!=(SELECT three FROM examp2
      +            WHERE four=5);
      +
      + +

      The way SQLite deals with this is to first run the inner SELECT +(the one against examp2) and store its result in a private memory +cell. SQLite then substitutes the value of this private memory +cell for the inner SELECT when it evaluates the outer SELECT. +The code looks like this:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 String 0 0 +1 MemStore 0 1 +2 Integer 0 0 +3 OpenRead 1 5 examp2 +4 VerifyCookie 0 909 +5 Rewind 1 13 +6 Column 1 1 +7 Integer 5 0 5 +8 Ne 1 12 +9 Column 1 0 +10 MemStore 0 1 +11 Goto 0 13 +12 Next 1 6 +13 Close 1 0 +14 ColumnName 0 0 one +15 ColumnName 1 0 two +16 Integer 0 0 +17 OpenRead 0 3 examp +18 Rewind 0 26 +19 Column 0 1 +20 MemLoad 0 0 +21 Eq 1 25 +22 Column 0 0 +23 Column 0 1 +24 Callback 2 0 +25 Next 0 19 +26 Close 0 0 +27 Halt 0 0 +} + +puts { +

      The private memory cell is initialized to NULL by the first +two instructions. Instructions 2 through 13 implement the inner +SELECT statement against the examp2 table. Notice that instead of +sending the result to a callback or storing the result on a sorter, +the result of the query is pushed into the memory cell by instruction +10 and the loop is abandoned by the jump at instruction 11. +The jump at instruction 11 is vestigial and never executes.

      + +

      The outer SELECT is implemented by instructions 14 through 25. +In particular, the WHERE clause that contains the nested select +is implemented by instructions 19 through 21. You can see that +the result of the inner select is loaded onto the stack by instruction +20 and used by the conditional jump at 21.

      + +

      When the result of a sub-select is a scalar, a single private memory +cell can be used, as shown in the previous +example. But when the result of a sub-select is a vector, such +as when the sub-select is the right-hand operand of IN or NOT IN, +a different approach is needed. In this case, +the result of the sub-select is +stored in a transient table and the contents of that table +are tested using the Found or NotFound operators. Consider this +example:

      + +
      +SELECT * FROM examp
      +WHERE two IN (SELECT three FROM examp2);
      +
      + +

      The code generated to implement this last query is as follows:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 OpenTemp 1 1 +1 Integer 0 0 +2 OpenRead 2 5 examp2 +3 VerifyCookie 0 909 +4 Rewind 2 10 +5 Column 2 0 +6 IsNull -1 9 +7 String 0 0 +8 PutStrKey 1 0 +9 Next 2 5 +10 Close 2 0 +11 ColumnName 0 0 one +12 ColumnName 1 0 two +13 Integer 0 0 +14 OpenRead 0 3 examp +15 Rewind 0 25 +16 Column 0 1 +17 NotNull -1 20 +18 Pop 1 0 +19 Goto 0 24 +20 NotFound 1 24 +21 Column 0 0 +22 Column 0 1 +23 Callback 2 0 +24 Next 0 16 +25 Close 0 0 +26 Halt 0 0 +} + +puts { +

      The transient table in which the results of the inner SELECT are +stored is created by the OpenTemp +instruction at 0. This opcode is used for tables that exist for the +duration of a single SQL statement only. The transient cursor is always +opened read/write even if the main database is read-only. The transient +table is deleted automatically when the cursor is closed. The P2 value +of 1 means the cursor points to a BTree index, which has no data but can +have an arbitrary key.

      + +

      The inner SELECT statement is implemented by instructions 1 through 10. +All this code does is make an entry in the temporary table for each +row of the examp2 table with a non-NULL value for the "three" column. +The key for each temporary table entry is the "three" column of examp2 +and the data is an empty string since it is never used.

      + +

      The outer SELECT is implemented by instructions 11 through 25. In +particular, the WHERE clause containing the IN operator is implemented +by instructions at 16, 17, and 20. Instruction 16 pushes the value of +the "two" column for the current row onto the stack and instruction 17 +checks to see that it is non-NULL. If this is successful, execution +jumps to 20, where it tests to see if top of the stack matches any key +in the temporary table. The rest of the code is the same as what has +been shown before.

      + +

      Compound SELECT Statements

      + +

      SQLite also allows two or more SELECT statements to be joined as +peers using operators UNION, UNION ALL, INTERSECT, and EXCEPT. These +compound select statements are implemented using transient tables. +The implementation is slightly different for each operator, but the +basic ideas are the same. For an example we will use the EXCEPT +operator.

      + +
      +SELECT two FROM examp
      +EXCEPT
      +SELECT four FROM examp2;
      +
      + +

      The result of this last example should be every unique value +of the "two" column in the examp table, except any value that is +in the "four" column of examp2 is removed. The code to implement +this query is as follows:

      +} + +Code { +addr opcode p1 p2 p3 +---- ------------ ----- ----- ----------------------------------- +0 OpenTemp 0 1 +1 KeyAsData 0 1 +2 Integer 0 0 +3 OpenRead 1 3 examp +4 VerifyCookie 0 909 +5 Rewind 1 11 +6 Column 1 1 +7 MakeRecord 1 0 +8 String 0 0 +9 PutStrKey 0 0 +10 Next 1 6 +11 Close 1 0 +12 Integer 0 0 +13 OpenRead 2 5 examp2 +14 Rewind 2 20 +15 Column 2 1 +16 MakeRecord 1 0 +17 NotFound 0 19 +18 Delete 0 0 +19 Next 2 15 +20 Close 2 0 +21 ColumnName 0 0 four +22 Rewind 0 26 +23 Column 0 0 +24 Callback 1 0 +25 Next 0 23 +26 Close 0 0 +27 Halt 0 0 +} + +puts { +

      The transient table in which the result is built is created by +instruction 0. Three loops then follow. The loop at instructions +5 through 10 implements the first SELECT statement. The second +SELECT statement is implemented by the loop at instructions 14 through +19. Finally, a loop at instructions 22 through 25 reads the transient +table and invokes the callback once for each row in the result.

      + +

      Instruction 1 is of particular importance in this example. Normally, +the Column instruction extracts the value of a column from a larger +record in the data of an SQLite file entry. Instruction 1 sets a flag on +the transient table so that Column will instead treat the key of the +SQLite file entry as if it were data and extract column information from +the key.

      + +

      Here is what is going to happen: The first SELECT statement +will construct rows of the result and save each row as the key of +an entry in the transient table. The data for each entry in the +transient table is never used so we fill it in with an empty string. +The second SELECT statement also constructs rows, but the rows +constructed by the second SELECT are removed from the transient table. +That is why we want the rows to be stored in the key of the SQLite file +instead of in the data -- so they can be easily located and deleted.

      + +

      Let's look more closely at what is happening here. The first +SELECT is implemented by the loop at instructions 5 through 10. +Instruction 5 initializes the loop by rewinding its cursor. +Instruction 6 extracts the value of the "two" column from "examp" +and instruction 7 converts this into a row. Instruction 8 pushes +an empty string onto the stack. Finally, instruction 9 writes the +row into the temporary table. But remember, the PutStrKey opcode uses +the top of the stack as the record data and the next on stack as the +key. For an INSERT statement, the row generated by the +MakeRecord opcode is the record data and the record key is an integer +created by the NewRecno opcode. But here the roles are reversed and +the row created by MakeRecord is the record key and the record data is +just an empty string.

      + +

      The second SELECT is implemented by instructions 14 through 19. +Instruction 14 initializes the loop by rewinding its cursor. +A new result row is created from the "four" column of table "examp2" +by instructions 15 and 16. But instead of using PutStrKey to write this +new row into the temporary table, we instead call Delete to remove +it from the temporary table if it exists.

      + +

      The result of the compound select is sent to the callback routine +by the loop at instructions 22 through 25. There is nothing new +or remarkable about this loop, except for the fact that the Column +instruction at 23 will be extracting a column out of the record key +rather than the record data.

      + +

      Summary

      + +

      This article has reviewed all of the major techniques used by +SQLite's VDBE to implement SQL statements. What has not been shown +is that most of these techniques can be used in combination to +generate code for an appropriately complex query statement. For +example, we have shown how sorting is accomplished on a simple query +and we have shown how to implement a compound query. But we did +not give an example of sorting in a compound query. This is because +sorting a compound query does not introduce any new concepts: it +merely combines two previous ideas (sorting and compounding) +in the same VDBE program.

      + +

      For additional information on how the SQLite library +functions, the reader is directed to look at the SQLite source +code directly. If you understand the material in this article, +you should not have much difficulty in following the sources. +Serious students of the internals of SQLite will probably +also want to make a careful study of the VDBE opcodes +as documented here. Most of the +opcode documentation is extracted from comments in the source +code using a script so you can also get information about the +various opcodes directly from the vdbe.c source file. +If you have successfully read this far, you should have little +difficulty understanding the rest.

      + +

      If you find errors in either the documentation or the code, +feel free to fix them and/or contact the author at +drh@hwaci.com. Your bug fixes or +suggestions are always welcomed.

      +} +
      ADDED pages/version3.in Index: pages/version3.in ================================================================== --- /dev/null +++ pages/version3.in @@ -0,0 +1,287 @@ +SQLite Version 3 Overview + +

      SQLite Version 3 Overview

      + +

      +SQLite version 3.0 introduces important changes to the library, including: +

      + +
        +
      • A more compact format for database files.
      • +
      • Manifest typing and BLOB support.
      • +
      • Support for both UTF-8 and UTF-16 text.
      • +
      • User-defined text collating sequences.
      • +
      • 64-bit ROWIDs.
      • +
      • Improved Concurrency.
      • +
      + +

      +This document is a quick introduction to the changes for SQLite 3.0 +for users who are already familiar with SQLite version 2.8. +

      + +

      Naming Changes

      + +

      +SQLite version 2.8 will continue to be supported with bug fixes +for the foreseeable future. In order to allow SQLite version 2.8 +and SQLite version 3.0 to peacefully coexist, the names of key files +and APIs in SQLite version 3.0 have been changed to include the +character "3". For example, the include file used by C programs +has been changed from "sqlite.h" to "sqlite3.h". And the name of +the shell program used to interact with databases has been changed +from "sqlite.exe" to "sqlite3.exe". With these changes, it is possible +to have both SQLite 2.8 and SQLite 3.0 installed on the same system at +the same time. And it is possible for the same C program to link +against both SQLite 2.8 and SQLite 3.0 at the same time and to use +both libraries at the same time. +

      + +

      New File Format

      + +

      +The format used by SQLite database files has been completely revised. +The old version 2.1 format and the new 3.0 format are incompatible with +one another. Version 2.8 of SQLite will not read a version 3.0 database +file and version 3.0 of SQLite will not read a version 2.8 database file. +

      + +

      +To convert an SQLite 2.8 database into an SQLite 3.0 database, have +ready the command-line shells for both version 2.8 and 3.0. Then +enter a command like the following: +

      + +
      +sqlite OLD.DB .dump | sqlite3 NEW.DB
      +
      + +

      +The new database file format uses B+trees for tables. In a B+tree, all +data is stored in the leaves of the tree instead of in both the leaves and +the intermediate branch nodes. The use of B+trees for tables allows for +better scalability and the storage of larger data fields without the use of +overflow pages. Traditional B-trees are still used for indices.

      + +

      +The new file format also supports variable page sizes between 512 and +32768 bytes. The size of a page is stored in the file header so the +same library can read databases with different page sizes, in theory, +though this feature has not yet been implemented in practice. +

      + +

      +The new file format omits unused fields from its disk images. For example, +indices use only the key part of a B-tree record and not the data. So +for indices, the field that records the length of the data is omitted. +Integer values such as the length of key and data are stored using +a variable-length encoding so that only one or two bytes are required to +store the most common cases but up to 64-bits of information can be encoded +if needed. +Integer and floating point data is stored on the disk in binary rather +than being converted into ASCII as in SQLite version 2.8. +These changes taken together result in database files that are typically +25% to 35% smaller than the equivalent files in SQLite version 2.8. +

      + +

      +Details of the low-level B-tree format used in SQLite version 3.0 can +be found in header comments to the +btree.c +source file. +

      + +

      Manifest Typing and BLOB Support

      + +

      +SQLite version 2.8 will deal with data in various formats internally, +but when writing to the disk or interacting through its API, SQLite 2.8 +always converts data into ASCII text. SQLite 3.0, in contrast, exposes +its internal data representations to the user and stores binary representations +to disk when appropriate. The exposing of non-ASCII representations was +added in order to support BLOBs. +

      + +

      +SQLite version 2.8 had the feature that any type of data could be stored +in any table column regardless of the declared type of that column. This +feature is retained in version 3.0, though in a slightly modified form. +Each table column will store any type of data, though columns have an +affinity for the format of data defined by their declared datatype. +When data is inserted into a column, that column will make an attempt +to convert the data format into the column's declared type. All SQL +database engines do this. The difference is that SQLite 3.0 will +still store the data even if a format conversion is not possible. +

      + +

      +For example, if you have a table column declared to be of type "INTEGER" +and you try to insert a string, the column will look at the text string +and see if it looks like a number. If the string does look like a number +it is converted into a number and into an integer if the number does not +have a fractional part, and stored that way. But if the string is not +a well-formed number it is still stored as a string. A column with a +type of "TEXT" tries to convert numbers into an ASCII-Text representation +before storing them. But BLOBs are stored in TEXT columns as BLOBs because +you cannot in general convert a BLOB into text. +

      + +

      +In most other SQL database engines the datatype is associated with +the table column that holds the data - with the data container. +In SQLite 3.0, the datatype is associated with the data itself, not +with its container. +Paul Graham in his book +ANSI Common Lisp +calls this property "Manifest Typing". +Other writers have other definitions for the term "manifest typing", +so beware of confusion. But by whatever name, that is the datatype +model supported by SQLite 3.0. +

      + +

      +Additional information about datatypes in SQLite version 3.0 is +available +separately. +

      + +

      Support for UTF-8 and UTF-16

      + +

      +The new API for SQLite 3.0 contains routines that accept text as +both UTF-8 and UTF-16 in the native byte order of the host machine. +Each database file manages text as either UTF-8, UTF-16BE (big-endian), +or UTF-16LE (little-endian). Internally and in the disk file, the +same text representation is used everywhere. If the text representation +specified by the database file (in the file header) does not match +the text representation required by the interface routines, then text +is converted on-the-fly. +Constantly converting text from one representation to another can be +computationally expensive, so it is suggested that programmers choose a +single representation and stick with it throughout their application. +

      + +

      +In the current implementation of SQLite, the SQL parser only works +with UTF-8 text. So if you supply UTF-16 text it will be converted. +This is just an implementation issue and there is nothing to prevent +future versions of SQLite from parsing UTF-16 encoded SQL natively. +

      + +

      +When creating new user-defined SQL functions and collating sequences, +each function or collating sequence can specify if it works with +UTF-8, UTF-16be, or UTF-16le. Separate implementations can be registered +for each encoding. If an SQL function or collating sequence is required +but a version for the current text encoding is not available, then +the text is automatically converted. As before, this conversion takes +computation time, so programmers are advised to pick a single +encoding and stick with it in order to minimize the amount of unnecessary +format juggling. +

      + +

      +SQLite is not particular about the text it receives and is more than +happy to process text strings that are not normalized or even +well-formed UTF-8 or UTF-16. Thus, programmers who want to store +ISO8859 data can do so using the UTF-8 interfaces. As long as no +attempts are made to use a UTF-16 collating sequence or SQL function, +the byte sequence of the text will not be modified in any way. +

      + +

      User-defined Collating Sequences

      + +

      +A collating sequence is just a defined order for text. When SQLite 3.0 +sorts (or uses a comparison operator like "<" or ">=") the sort order +is first determined by the data type. +

      + +
        +
      • NULLs sort first
      • +
      • Numeric values sort next in numerical order
      • +
      • Text values come after numerics
      • +
      • BLOBs sort last
      • +
      + +

      +Collating sequences are used for comparing two text strings. +The collating sequence does not change the ordering of NULLs, numbers, +or BLOBs, only text. +

      + +

      +A collating sequence is implemented as a function that takes the +two strings being compared as inputs and returns negative, zero, or +positive if the first string is less than, equal to, or greater than +the second. +SQLite 3.0 comes with a single built-in collating sequence named "BINARY" +which is implemented using the memcmp() routine from the standard C library. +The BINARY collating sequence works well for English text. For other +languages or locales, alternative collating sequences may be preferred. +

      + +

      +The decision of which collating sequence to use is controlled by the +COLLATE clause in SQL. A COLLATE clause can occur on a table definition, +to define a default collating sequence to a table column, or on field +of an index, or in the ORDER BY clause of a SELECT statement. +Planned enhancements to SQLite are to include standard CAST() syntax +to allow the collating sequence of an expression to be defined. +

      + +

      64-bit ROWIDs

      + +

      +Every row of a table has a unique rowid. +If the table defines a column with the type "INTEGER PRIMARY KEY" then that +column becomes an alias for the rowid. But with or without an INTEGER PRIMARY +KEY column, every row still has a rowid. +

      + +

      +In SQLite version 3.0, the rowid is a 64-bit signed integer. +This is an expansion of SQLite version 2.8 which only permitted +rowids of 32-bits. +

      + +

      +To minimize storage space, the 64-bit rowid is stored as a variable length +integer. Rowids between 0 and 127 use only a single byte. +Rowids between 0 and 16383 use just 2 bytes. Up to 2097152 uses three +bytes. And so forth. Negative rowids are allowed but they always use +nine bytes of storage and so their use is discouraged. When rowids +are generated automatically by SQLite, they will always be non-negative. +

      + +

      Improved Concurrency

      + +

      +SQLite version 2.8 allowed multiple simultaneous readers or a single +writer but not both. SQLite version 3.0 allows one process to begin +writing the database while other processes continue to read. The +writer must still obtain an exclusive lock on the database for a brief +interval in order to commit its changes, but the exclusive lock is no +longer required for the entire write operation. +A more detailed report on the locking +behavior of SQLite version 3.0 is available separately. +

      + +

      +A limited form of table-level locking is now also available in SQLite. +If each table is stored in a separate database file, those separate +files can be attached to the main database (using the ATTACH command) +and the combined databases will function as one. But locks will only +be acquired on individual files as needed. So if you redefine "database" +to mean two or more database files, then it is entirely possible for +two processes to be writing to the same database at the same time. +To further support this capability, commits of transactions involving +two or more ATTACHed database are now atomic. +

      + +

      Credits

      + +

      +SQLite version 3.0 is made possible in part by AOL developers +supporting and embracing great Open-Source Software. +

      ADDED pages/whentouse.in Index: pages/whentouse.in ================================================================== --- /dev/null +++ pages/whentouse.in @@ -0,0 +1,247 @@ +Appropriate Uses For SQLite + +

      Appropriate Uses For SQLite

      + +

      +SQLite is different from most other SQL database engines in that its +primary design goal is to be simple: +

      + +
        +
      • Simple to administer
      • +
      • Simple to operate
      • +
      • Simple to embed in a larger program
      • +
      • Simple to maintain and customize
      • +
      + +

      +Many people like SQLite because it is small and fast. But those +qualities are just happy accidents. +Users also find that SQLite is very reliable. Reliability is +a consequence of simplicity. With less complication, there is +less to go wrong. So, yes, SQLite is small, fast, and reliable, +but first and foremost, SQLite strives to be simple. +

      + +

      +Simplicity in a database engine can be either a strength or a +weakness, depending on what you are trying to do. In order to +achieve simplicity, SQLite has had to sacrifice other characteristics +that some people find useful, such as high concurrency, fine-grained +access control, a rich set of built-in functions, stored procedures, +esoteric SQL language features, XML and/or Java extensions, +tera- or peta-byte scalability, and so forth. If you need some of these +features and do not mind the added complexity that they +bring, then SQLite is probably not the database for you. +SQLite is not intended to be an enterprise database engine. It is +not designed to compete with Oracle or PostgreSQL. +

      + +

      +The basic rule of thumb for when it is appropriate to use SQLite is +this: Use SQLite in situations where simplicity of administration, +implementation, and maintenance are more important than the countless +complex features that enterprise database engines provide. +As it turns out, situations where simplicity is the better choice +are more common than many people realize. +

      + +

      Situations Where SQLite Works Well

      + +
        +
      • Websites

        + +

        SQLite usually will work great as the database engine for low to +medium traffic websites (which is to say, 99.9% of all websites). +The amount of web traffic that SQLite can handle depends, of course, +on how heavily the website uses its database. Generally +speaking, any site that gets fewer than 100000 hits/day should work +fine with SQLite. +The 100000 hits/day figure is a conservative estimate, not a +hard upper bound. +SQLite has been demonstrated to work with 10 times that amount +of traffic.

        +
      • + +
      • Embedded devices and applications

        + +

        Because an SQLite database requires little or no administration, +SQLite is a good choice for devices or services that must work +unattended and without human support. SQLite is a good fit for +use in cellphones, PDAs, set-top boxes, and/or appliances. It also +works well as an embedded database in downloadable consumer applications. +

        +
      • + +
      • Application File Format

        + +

        +SQLite has been used with great success as the on-disk file format +for desktop applications such as financial analysis tools, CAD +packages, record keeping programs, and so forth. The traditional +File/Open operation does an sqlite3_open() and executes a +BEGIN TRANSACTION to get exclusive access to the content. File/Save +does a COMMIT followed by another BEGIN TRANSACTION. The use +of transactions guarantees that updates to the application file are atomic, +durable, isolated, and consistent. +

        + +

        +Temporary triggers can be added to the database to record all +changes into a (temporary) undo/redo log table. These changes can then +be played back when the user presses the Undo and Redo buttons. Using +this technique, an unlimited depth undo/redo implementation can be written +in surprisingly little code. +

        +
      • + +
      • Replacement for ad hoc disk files

        + +

        Many programs use fopen(), fread(), and fwrite() to create and +manage files of data in home-grown formats. SQLite works +particularly well as a +replacement for these ad hoc data files.

        +
      • + +
      • Internal or temporary databases

        + +

        +For programs that have a lot of data that must be sifted and sorted +in diverse ways, it is often easier and quicker to load the data into +an in-memory SQLite database and use queries with joins and ORDER BY +clauses to extract the data in the form and order needed rather than +to try to code the same operations manually. +Using an SQL database internally in this way also gives the program +greater flexibility since new columns and indices can be added without +having to recode every query. +

        +
      • + +
      • Command-line dataset analysis tool

        + +

        +Experienced SQL users can employ +the command-line sqlite program to analyze miscellaneous +datasets. Raw data can be imported from CSV files, then that +data can be sliced and diced to generate a myriad of summary +reports. Possible uses include website log analysis, sports +statistics analysis, compilation of programming metrics, and +analysis of experimental results. +

        + +

        +You can also do the same thing with an enterprise client/server +database, of course. The advantages to using SQLite in this situation +are that SQLite is much easier to set up and the resulting database +is a single file that you can store on a floppy disk or flash-memory stick +or email to a colleague. +

        +
      • + +
      • Stand-in for an enterprise database during demos or testing

        + +

        +If you are writing a client application for an enterprise database engine, +it makes sense to use a generic database backend that allows you to connect +to many different kinds of SQL database engines. It makes even better +sense to +go ahead and include SQLite in the mix of supported database and to statically +link the SQLite engine in with the client. That way the client program +can be used standalone with an SQLite data file for testing or for +demonstrations. +

        +
      • + +
      • Database Pedagogy

        + +

        +Because it is simple to setup and use (installation is trivial: just +copy the sqlite or sqlite.exe executable to the target machine +and run it) SQLite makes a good database engine for use in teaching SQL. +Students can easily create as many databases as they like and can +email databases to the instructor for comments or grading. For more +advanced students who are interested in studying how an RDBMS is +implemented, the modular and well-commented and documented SQLite code +can serve as a good basis. This is not to say that SQLite is an accurate +model of how other database engines are implemented, but rather a student who +understands how SQLite works can more quickly comprehend the operational +principles of other systems. +

        +
      • + +
      • Experimental SQL language extensions

        + +

        The simple, modular design of SQLite makes it a good platform for +prototyping new, experimental database language features or ideas. +

        +
      • + + +
      + +

      Situations Where Another RDBMS May Work Better

      + +
        +
      • Client/Server Applications

        + +

        If you have many client programs accessing a common database +over a network, you should consider using a client/server database +engine instead of SQLite. SQLite will work over a network filesystem, +but because of the latency associated with most network filesystems, +performance will not be great. Also, the file locking logic of +many network filesystem implementations contains bugs (on both Unix +and Windows). If file locking does not work like it should, +it might be possible for two or more client programs to modify the +same part of the same database at the same time, resulting in +database corruption. Because this problem results from bugs in +the underlying filesystem implementation, there is nothing SQLite +can do to prevent it.

        + +

        A good rule of thumb is that you should avoid using SQLite +in situations where the same database will be accessed simultaneously +from many computers over a network filesystem.

        +
      • + +
      • High-volume Websites

        + +

        SQLite will normally work fine as the database backend to a website. +But if your website is so busy that you are thinking of splitting the +database component off onto a separate machine, then you should +definitely consider using an enterprise-class client/server database +engine instead of SQLite.

        +
      • + +
      • Very large datasets

        + +

        When you start a transaction in SQLite (which happens automatically +before any write operation that is not within an explicit BEGIN...COMMIT) +the engine has to allocate a bitmap of dirty pages in the disk file to +help it manage its rollback journal. SQLite needs 256 bytes of RAM for +every 1MiB of database (assuming a 1024-byte page size: less memory is +used with larger page sizes, of course). +For smaller databases, the amount of memory +required is not a problem, but when databases begin to grow into the +multi-gigabyte range, the size of the bitmap can get quite large. If +you need to store and modify more than a few dozen GB of data, you should +consider using a different database engine. +

        +
      • + +
      • High Concurrency

        + +

        +SQLite uses reader/writer locks on the entire database file. That means +if any process is reading from any part of the database, all other +processes are prevented from writing any other part of the database. +Similarly, if any one process is writing to the database, +all other processes are prevented from reading any other part of the +database. +For many situations, this is not a problem. Each application +does its database work quickly and moves on, and no lock lasts for more +than a few dozen milliseconds. But there are some applications that require +more concurrency, and those applications may need to seek a different +solution. +

        +
      • + +
      DELETED pragma.tcl Index: pragma.tcl ================================================================== --- pragma.tcl +++ /dev/null @@ -1,635 +0,0 @@ -# -# Run this Tcl script to generate the pragma.html file. -# -set rcsid {$Id: pragma.tcl,v 1.28 2007/08/28 08:19:49 danielk1977 Exp $} -source common.tcl -header {Pragma statements supported by SQLite} - -proc Section {name {label {}}} { - puts "\n
      " - if {$label!=""} { - puts "" - } - puts "

      $name

      \n" -} - -puts { -

      The PRAGMA command is a special command used to -modify the operation of the SQLite library or to query the library for -internal (non-table) data. The PRAGMA command is issued using the same -interface as other SQLite commands (e.g. SELECT, INSERT) but is -different in the following important respects: -

      -
        -
      • Specific pragma statements may be removed and others added in future - releases of SQLite. Use with caution! -
      • No error messages are generated if an unknown pragma is issued. - Unknown pragmas are simply ignored. This means if there is a typo in - a pragma statement the library does not inform the user of the fact. -
      • Some pragmas take effect during the SQL compilation stage, not the - execution stage. This means if using the C-language sqlite3_prepare(), - sqlite3_step(), sqlite3_finalize() API (or similar in a wrapper - interface), the pragma may be applied to the library during the - sqlite3_prepare() call. -
      • The pragma command is unlikely to be compatible with any other SQL - engine. -
      - -

      The available pragmas fall into four basic categories:

      - -} - -Section {PRAGMA command syntax} syntax - -Syntax {sql-statement} { -PRAGMA [= ] | -PRAGMA () -} - -puts { -

      The pragmas that take an integer value also accept -symbolic names. The strings "on", "true", and "yes" -are equivalent to 1. The strings "off", "false", -and "no" are equivalent to 0. These strings are case- -insensitive, and do not require quotes. An unrecognized string will be -treated as 1, and will not generate an error. When the value -is returned it is as an integer.

      -} - -Section {Pragmas to modify library operation} modify - -puts { -
        - -
      • PRAGMA auto_vacuum;
        - PRAGMA auto_vacuum =
        - 0 | none | 1 | full | 2 | incremental;

        -

        Query or set the auto-vacuum flag in the database.

        - -

        Normally, (that is to say when auto_vacuum is 0 or "none") - when a transaction that deletes data from a database is - committed, the database file remains the same size. Unused database file - pages are added to a "freelist" and are reused for subsequent inserts. The - database file does not shrink. - In this mode the VACUUM - command can be used to reclaim unused space.

        - -

        When the auto-vacuum flag is 1 (full), the freelist pages are - moved to the end of the file and the file is truncated to remove - the freelist pages at every commit. - Note, however, that auto-vacuum only truncates the freelist pages - from the file. Auto-vacuum does not defragment the database nor - repack individual database pages the way that the - VACUUM command does. In fact, because - it moves pages around within the file, auto-vacuum can actually - make fragmentation worse.

        - -

        Auto-vacuuming is only possible if the database stores some - additional information that allows each database page to be - traced backwards to its referer. Therefore, auto-vacuuming must - be turned on before any tables are created. It is not possible - to enable or disable auto-vacuum after a table has been created.

        - -

        When the value of auto-vacuum is 2 (incremental) then the additional - information needed to do autovacuuming is stored in the database file - but autovacuuming does not occur automatically at each commit as it - does with auto_vacuum==full. In incremental mode, the separate - incremental_vacuum pragma must - be invoked to cause the vacuum to occur.

        - -

        The database connection can be changed between full and incremental - autovacuum mode at will. However, the connection cannot be changed - in and out of the "none" mode after any table has been created in the - database. -

      • - - -
      • PRAGMA cache_size; -
        PRAGMA cache_size =
        Number-of-pages;

        -

        Query or change the maximum number of database disk pages that SQLite - will hold in memory at once. Each page uses about 1.5K of memory. - The default cache size is 2000. If you are doing UPDATEs or DELETEs - that change many rows of a database and you do not mind if SQLite - uses more memory, you can increase the cache size for a possible speed - improvement.

        -

        When you change the cache size using the cache_size pragma, the - change only endures for the current session. The cache size reverts - to the default value when the database is closed and reopened. Use - the default_cache_size - pragma to set the cache size permanently.

      • - - -
      • PRAGMA case_sensitive_like; -
        PRAGMA case_sensitive_like =
        0 | 1;

        -

        The default behavior of the LIKE operator is to ignore case - for latin1 characters. Hence, by default 'a' LIKE 'A' is - true. The case_sensitive_like pragma can be turned on to change - this behavior. When case_sensitive_like is enabled, - 'a' LIKE 'A' is false but 'a' LIKE 'a' is still true.

        -
      • - - -
      • PRAGMA count_changes; -
        PRAGMA count_changes =
        0 | 1;

        -

        Query or change the count-changes flag. Normally, when the - count-changes flag is not set, INSERT, UPDATE and DELETE statements - return no data. When count-changes is set, each of these commands - returns a single row of data consisting of one integer value - the - number of rows inserted, modified or deleted by the command. The - returned change count does not include any insertions, modifications - or deletions performed by triggers.

        - - -
      • PRAGMA default_cache_size; -
        PRAGMA default_cache_size =
        Number-of-pages;

        -

        Query or change the maximum number of database disk pages that SQLite - will hold in memory at once. Each page uses 1K on disk and about - 1.5K in memory. - This pragma works like the - cache_size - pragma with the additional - feature that it changes the cache size persistently. With this pragma, - you can set the cache size once and that setting is retained and reused - every time you reopen the database.

      • - - -
      • PRAGMA default_synchronous;

        -

        This pragma was available in version 2.8 but was removed in version - 3.0. It is a dangerous pragma whose use is discouraged. To help - dissuade users of version 2.8 from employing this pragma, the documentation - will not tell you what it does.

      • - - - -
      • PRAGMA empty_result_callbacks; -
        PRAGMA empty_result_callbacks =
        0 | 1;

        -

        Query or change the empty-result-callbacks flag.

        -

        The empty-result-callbacks flag affects the sqlite3_exec API only. - Normally, when the empty-result-callbacks flag is cleared, the - callback function supplied to the sqlite3_exec() call is not invoked - for commands that return zero rows of data. When empty-result-callbacks - is set in this situation, the callback function is invoked exactly once, - with the third parameter set to 0 (NULL). This is to enable programs - that use the sqlite3_exec() API to retrieve column-names even when - a query returns no data. -

        - - -
      • PRAGMA encoding; -
        PRAGMA encoding = "UTF-8"; -
        PRAGMA encoding = "UTF-16"; -
        PRAGMA encoding = "UTF-16le"; -
        PRAGMA encoding = "UTF-16be";

        -

        In first form, if the main database has already been - created, then this pragma returns the text encoding used by the - main database, one of "UTF-8", "UTF-16le" (little-endian UTF-16 - encoding) or "UTF-16be" (big-endian UTF-16 encoding). If the main - database has not already been created, then the value returned is the - text encoding that will be used to create the main database, if - it is created by this session.

        -

        The second and subsequent forms of this pragma are only useful if - the main database has not already been created. In this case the - pragma sets the encoding that the main database will be created with if - it is created by this session. The string "UTF-16" is interpreted - as "UTF-16 encoding using native machine byte-ordering". If the second - and subsequent forms are used after the database file has already - been created, they have no effect and are silently ignored.

        - -

        Once an encoding has been set for a database, it cannot be changed.

        - -

        Databases created by the ATTACH command always use the same encoding - as the main database.

        -
      • - - -
      • PRAGMA full_column_names; -
        PRAGMA full_column_names =
        0 | 1;

        -

        Query or change the full-column-names flag. This flag affects - the way SQLite names columns of data returned by SELECT statements - when the expression for the column is a table-column name or the - wildcard "*". Normally, such result columns are named - <table-name/alias><column-name> if the SELECT statement joins - two or - more tables together, or simply <column-name> if the SELECT - statement queries a single table. When the full-column-names flag - is set, such columns are always named <table-name/alias> - <column-name> regardless of whether or not a join is performed. -

        -

        If both the short-column-names and full-column-names are set, - then the behaviour associated with the full-column-names flag is - exhibited. -

        -
      • - - -
      • PRAGMA fullfsync -
        PRAGMA fullfsync =
        0 | 1;

        -

        Query or change the fullfsync flag. This flag - determines whether or not the F_FULLFSYNC syncing method is used - on systems that support it. The default value is off. As of this - writing (2006-02-10) only Mac OS X supports F_FULLFSYNC. -

        -
      • - - -
      • PRAGMA incremental_vacuum(N);

        -

        The incremental_vacuum pragma causes up to N pages to - be removed from the freelist. The database file is truncated by - the same amount. The incremental_vacuum pragma has no effect if - the database is not in - auto_vacuum==incremental mode - or if there are no pages on the freelist. If there are fewer than - N pages on the freelist, then the entire freelist is cleared.

        - -

        As of version 3.4.0 (the first version that supports - incremental_vacuum) this feature is still experimental. Possible - future changes include enhancing incremental vacuum to do - defragmentation and node repacking just as the full-blown - VACUUM command does. And - incremental vacuum may be promoted from a pragma to a separate - SQL command, or perhaps some variation on the VACUUM command. - Programmers are cautioned to not become enamored with the - current syntax or functionality as it is likely to change.

        -
      • - - - -
      • PRAGMA legacy_file_format; -
        PRAGMA legacy_file_format = ON | OFF

        -

        This pragma sets or queries the value of the legacy_file_format - flag. When this flag is on, new SQLite databases are created in - a file format that is readable and writable by all versions of - SQLite going back to 3.0.0. When the flag is off, new databases - are created using the latest file format which might not be - readable or writable by older versions of SQLite.

        - -

        This flag only affects newly created databases. It has no - effect on databases that already exist.

        -
      • - - -
      • PRAGMA locking_mode; -
        PRAGMA locking_mode = NORMAL | EXCLUSIVE

        -

        This pragma sets or queries the database connection locking-mode. - The locking-mode is either NORMAL or EXCLUSIVE. - -

        In NORMAL locking-mode (the default), a database connection - unlocks the database file at the conclusion of each read or - write transaction. When the locking-mode is set to EXCLUSIVE, the - database connection never releases file-locks. The first time the - database is read in EXCLUSIVE mode, a shared lock is obtained and - held. The first time the database is written, an exclusive lock is - obtained and held.

        - -

        Database locks obtained by a connection in EXCLUSIVE mode may be - released either by closing the database connection, or by setting the - locking-mode back to NORMAL using this pragma and then accessing the - database file (for read or write). Simply setting the locking-mode to - NORMAL is not enough - locks are not be released until the next time - the database file is accessed.

        - -

        There are two reasons to set the locking-mode to EXCLUSIVE. One - is if the application actually wants to prevent other processes from - accessing the database file. The other is that a small number of - filesystem operations are saved by optimizations enabled in this - mode. This may be significant in embedded environments.

        - -

        When the locking_mode pragma specifies a particular database, - for example:

        - -
        -PRAGMA main.locking_mode=EXCLUSIVE; -
        - -

        Then the locking mode applies only to the named database. If no - database name qualifier preceeds the "locking_mode" keyword then - the locking mode is applied to all databases, including any new - databases added by subsequent ATTACH - commands.

        - -

        The "temp" database (in which TEMP tables and indices are stored) - always uses exclusive locking mode. The locking mode of temp cannot - be changed. All other databases use the normal locking mode by default - and are affected by this pragma.

        -
      • - - -
      • PRAGMA page_size; -
        PRAGMA page_size =
        bytes;

        -

        Query or set the page-size of the database. The page-size - may only be set if the database has not yet been created. The page - size must be a power of two greater than or equal to 512 and less - than or equal to 8192. The upper limit may be modified by setting - the value of macro SQLITE_MAX_PAGE_SIZE during compilation. The - maximum upper bound is 32768. -

        -
      • - - -
      • PRAGMA max_page_count; -
        PRAGMA max_page_count =
        N;

        -

        Query or set the maximum number of pages in the database file. - Both forms of the pragma return the maximum page count. The second - form attempts to modify the maximum page count. The maximum page - count cannot be reduced below the current database size. -

        -
      • - - -
      • PRAGMA read_uncommitted; -
        PRAGMA read_uncommitted =
        0 | 1;

        -

        Query, set, or clear READ UNCOMMITTED isolation. The default isolation - level for SQLite is SERIALIZABLE. Any process or thread can select - READ UNCOMMITTED isolation, but SERIALIZABLE will still be used except - between connections that share a common page and schema cache. - Cache sharing is enabled using the - - sqlite3_enable_shared_cache() API and is only available between - connections running the same thread. Cache sharing is off by default. -

        -
      • - - -
      • PRAGMA short_column_names; -
        PRAGMA short_column_names =
        0 | 1;

        -

        Query or change the short-column-names flag. This flag affects - the way SQLite names columns of data returned by SELECT statements - when the expression for the column is a table-column name or the - wildcard "*". Normally, such result columns are named - <table-name/alias>lt;column-name> if the SELECT statement - joins two or more tables together, or simply <column-name> if - the SELECT statement queries a single table. When the short-column-names - flag is set, such columns are always named <column-name> - regardless of whether or not a join is performed. -

        -

        If both the short-column-names and full-column-names are set, - then the behaviour associated with the full-column-names flag is - exhibited. -

        -
      • - - -
      • PRAGMA synchronous; -
        PRAGMA synchronous = FULL;
        (2) -
        PRAGMA synchronous = NORMAL;
        (1) -
        PRAGMA synchronous = OFF;
        (0)

        -

        Query or change the setting of the "synchronous" flag. - The first (query) form will return the setting as an - integer. When synchronous is FULL (2), the SQLite database engine will - pause at critical moments to make sure that data has actually been - written to the disk surface before continuing. This ensures that if - the operating system crashes or if there is a power failure, the database - will be uncorrupted after rebooting. FULL synchronous is very - safe, but it is also slow. - When synchronous is NORMAL, the SQLite database - engine will still pause at the most critical moments, but less often - than in FULL mode. There is a very small (though non-zero) chance that - a power failure at just the wrong time could corrupt the database in - NORMAL mode. But in practice, you are more likely to suffer - a catastrophic disk failure or some other unrecoverable hardware - fault. - With synchronous OFF (0), SQLite continues without pausing - as soon as it has handed data off to the operating system. - If the application running SQLite crashes, the data will be safe, but - the database might become corrupted if the operating system - crashes or the computer loses power before that data has been written - to the disk surface. On the other hand, some - operations are as much as 50 or more times faster with synchronous OFF. -

        -

        In SQLite version 2, the default value is NORMAL. For version 3, the - default was changed to FULL. -

        -
      • - - - -
      • PRAGMA temp_store; -
        PRAGMA temp_store = DEFAULT;
        (0) -
        PRAGMA temp_store = FILE;
        (1) -
        PRAGMA temp_store = MEMORY;
        (2)

        -

        Query or change the setting of the "temp_store" parameter. - When temp_store is DEFAULT (0), the compile-time C preprocessor macro - TEMP_STORE is used to determine where temporary tables and indices - are stored. When - temp_store is MEMORY (2) temporary tables and indices are kept in memory. - When temp_store is FILE (1) temporary tables and indices are stored - in a file. The - temp_store_directory pragma can be used to specify the directory - containing this file. - FILE is specified. When the temp_store setting is changed, - all existing temporary tables, indices, triggers, and views are - immediately deleted.

        - -

        It is possible for the library compile-time C preprocessor symbol - TEMP_STORE to override this pragma setting. The following table summarizes - the interaction of the TEMP_STORE preprocessor macro and the - temp_store pragma:

        - -
        - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        TEMP_STOREPRAGMA
        temp_store
        Storage used for
        TEMP tables and indices
        0anyfile
        10file
        11file
        12memory
        20memory
        21file
        22memory
        3anymemory
        -
        -
      • -
        - - -
      • PRAGMA temp_store_directory; -
        PRAGMA temp_store_directory = 'directory-name';

        -

        Query or change the setting of the "temp_store_directory" - the - directory where files used for storing temporary tables and indices - are kept. This setting lasts for the duration of the current connection - only and resets to its default value for each new connection opened. - -

        When the temp_store_directory setting is changed, all existing temporary - tables, indices, triggers, and viewers are immediately deleted. In - practice, temp_store_directory should be set immediately after the - database is opened.

        - -

        The value directory-name should be enclosed in single quotes. - To revert the directory to the default, set the directory-name to - an empty string, e.g., PRAGMA temp_store_directory = ''. An - error is raised if directory-name is not found or is not - writable.

        - -

        The default directory for temporary files depends on the OS. For - Unix/Linux/OSX, the default is the is the first writable directory found - in the list of: /var/tmp, /usr/tmp, /tmp, and - current-directory. For Windows NT, the default - directory is determined by Windows, generally - C:\Documents and Settings\user-name\Local Settings\Temp\. - Temporary files created by SQLite are unlinked immediately after - opening, so that the operating system can automatically delete the - files when the SQLite process exits. Thus, temporary files are not - normally visible through ls or dir commands.

        - -
      • -
      -} - -Section {Pragmas to query the database schema} schema - -puts { -
        - -
      • PRAGMA database_list;

        -

        For each open database, invoke the callback function once with - information about that database. Arguments include the index and - the name the database was attached with. The first row will be for - the main database. The second row will be for the database used to - store temporary tables.

      • - - -
      • PRAGMA foreign_key_list(table-name);

        -

        For each foreign key that references a column in the argument - table, invoke the callback function with information about that - foreign key. The callback function will be invoked once for each - column in each foreign key.

      • - - -
      • PRAGMA [database].freelist_count;

        -

        Return the number of unused pages in the database file. Running - a "PRAGMA incremental_vaccum(N);" - command with a large value of N will shrink the database file by this - number of pages.

      • - - -
      • PRAGMA index_info(index-name);

        -

        For each column that the named index references, invoke the - callback function - once with information about that column, including the column name, - and the column number.

      • - - -
      • PRAGMA index_list(table-name);

        -

        For each index on the named table, invoke the callback function - once with information about that index. Arguments include the - index name and a flag to indicate whether or not the index must be - unique.

      • - - -
      • PRAGMA table_info(table-name);

        -

        For each column in the named table, invoke the callback function - once with information about that column, including the column name, - data type, whether or not the column can be NULL, and the default - value for the column.

      • -
      -} - -Section {Pragmas to query/modify version values} version - -puts { - -
        - - -
      • PRAGMA [database.]schema_version; -
        PRAGMA [database.]schema_version =
        integer ; -
        PRAGMA [database.]user_version; -
        PRAGMA [database.]user_version =
        integer ; - - -

        The pragmas schema_version and user_version are used to set or get - the value of the schema-version and user-version, respectively. Both - the schema-version and the user-version are 32-bit signed integers - stored in the database header.

        - -

        The schema-version is usually only manipulated internally by SQLite. - It is incremented by SQLite whenever the database schema is modified - (by creating or dropping a table or index). The schema version is - used by SQLite each time a query is executed to ensure that the - internal cache of the schema used when compiling the SQL query matches - the schema of the database against which the compiled query is actually - executed. Subverting this mechanism by using "PRAGMA schema_version" - to modify the schema-version is potentially dangerous and may lead - to program crashes or database corruption. Use with caution!

        - -

        The user-version is not used internally by SQLite. It may be used by - applications for any purpose.

        -
      • -
      -} - -Section {Pragmas to debug the library} debug - -puts { -
        - -
      • PRAGMA integrity_check; -
        PRAGMA integrity_check(
        integer)

        -

        The command does an integrity check of the entire database. It - looks for out-of-order records, missing pages, malformed records, and - corrupt indices. - If any problems are found, then strings are returned (as multiple - rows with a single column per row) which describe - the problems. At most integer errors will be reported - before the analysis quits. The default value for integer - is 100. If no errors are found, a single row with the value "ok" is - returned.

      • - - -
      • PRAGMA parser_trace = ON; (1) -
        PRAGMA parser_trace = OFF;
        (0)

        -

        Turn tracing of the SQL parser inside of the - SQLite library on and off. This is used for debugging. - This only works if the library is compiled without the NDEBUG macro. -

      • - - -
      • PRAGMA vdbe_trace = ON; (1) -
        PRAGMA vdbe_trace = OFF;
        (0)

        -

        Turn tracing of the virtual database engine inside of the - SQLite library on and off. This is used for debugging. See the - VDBE documentation for more - information.

      • - - -
      • PRAGMA vdbe_listing = ON; (1) -
        PRAGMA vdbe_listing = OFF;
        (0)

        -

        Turn listings of virtual machine programs on and off. - With listing is on, the entire content of a program is printed - just prior to beginning execution. This is like automatically - executing an EXPLAIN prior to each statement. The statement - executes normally after the listing is printed. - This is used for debugging. See the - VDBE documentation for more - information.

      • -
      - -} DELETED quickstart.tcl Index: quickstart.tcl ================================================================== --- quickstart.tcl +++ /dev/null @@ -1,110 +0,0 @@ -# -# Run this TCL script to generate HTML for the quickstart.html file. -# -set rcsid {$Id: quickstart.tcl,v 1.8 2006/06/13 11:27:22 drh Exp $} -source common.tcl -header {SQLite In 5 Minutes Or Less} -puts { -

      Here is what you do to start experimenting with SQLite without having -to do a lot of tedious reading and configuration:

      - -

      Download The Code

      - -
        -
      • Get a copy of the prebuilt binaries for your machine, or get a copy -of the sources and compile them yourself. Visit -the download page for more information.

      • -
      - -

      Create A New Database

      - -
        -
      • At a shell or DOS prompt, enter: "sqlite3 test.db". This will -create a new database named "test.db". (You can use a different name if -you like.)

      • -
      • Enter SQL commands at the prompt to create and populate the -new database.

      • -
      • Additional documentation is available here

      • -
      - -

      Write Programs That Use SQLite

      - -
        -
      • Below is a simple TCL program that demonstrates how to use -the TCL interface to SQLite. The program executes the SQL statements -given as the second argument on the database defined by the first -argument. The commands to watch for are the sqlite3 command -on line 7 which opens an SQLite database and creates -a new TCL command named "db" to access that database, the -invocation of the db command on line 8 to execute -SQL commands against the database, and the closing of the database connection -on the last line of the script.

        - -
        -#!/usr/bin/tclsh
        -if {$argc!=2} {
        -  puts stderr "Usage: %s DATABASE SQL-STATEMENT"
        -  exit 1
        -}
        -load /usr/lib/tclsqlite3.so Sqlite3
        -sqlite3 db [lindex $argv 0]
        -db eval [lindex $argv 1] x {
        -  foreach v $x(*) {
        -    puts "$v = $x($v)"
        -  }
        -  puts ""
        -}
        -db close
        -
        -
      • - -
      • Below is a simple C program that demonstrates how to use -the C/C++ interface to SQLite. The name of a database is given by -the first argument and the second argument is one or more SQL statements -to execute against the database. The function calls to pay attention -to here are the call to sqlite3_open() on line 22 which opens -the database, sqlite3_exec() on line 27 that executes SQL -commands against the database, and sqlite3_close() on line 31 -that closes the database connection.

        - -
        -#include <stdio.h>
        -#include <sqlite3.h>
        -
        -static int callback(void *NotUsed, int argc, char **argv, char **azColName){
        -  int i;
        -  for(i=0; i<argc; i++){
        -    printf("%s = %s\n", azColName[i], argv[i] ? argv[i] : "NULL");
        -  }
        -  printf("\n");
        -  return 0;
        -}
        -
        -int main(int argc, char **argv){
        -  sqlite3 *db;
        -  char *zErrMsg = 0;
        -  int rc;
        -
        -  if( argc!=3 ){
        -    fprintf(stderr, "Usage: %s DATABASE SQL-STATEMENT\n", argv[0]);
        -    exit(1);
        -  }
        -  rc = sqlite3_open(argv[1], &db);
        -  if( rc ){
        -    fprintf(stderr, "Can't open database: %s\n", sqlite3_errmsg(db));
        -    sqlite3_close(db);
        -    exit(1);
        -  }
        -  rc = sqlite3_exec(db, argv[2], callback, 0, &zErrMsg);
        -  if( rc!=SQLITE_OK ){
        -    fprintf(stderr, "SQL error: %s\n", zErrMsg);
        -    sqlite3_free(zErrMsg);
        -  }
        -  sqlite3_close(db);
        -  return 0;
        -}
        -
        -
      • -
      -} -footer {$Id: quickstart.tcl,v 1.8 2006/06/13 11:27:22 drh Exp $} ADDED rawpages/copyright-release.html Index: rawpages/copyright-release.html ================================================================== --- /dev/null +++ rawpages/copyright-release.html @@ -0,0 +1,109 @@ + + +

      +Copyright Release for
      +Contributions To SQLite +

      + +

      +SQLite is software that implements an embeddable SQL database engine. +SQLite is available for free download from http://www.sqlite.org/. +The principal author and maintainer of SQLite has disclaimed all +copyright interest in his contributions to SQLite +and thus released his contributions into the public domain. +In order to keep the SQLite software unencumbered by copyright +claims, the principal author asks others who may from time to +time contribute changes and enhancements to likewise disclaim +their own individual copyright interest. +

      + +

      +Because the SQLite software found at http://www.sqlite.org/ is in the +public domain, anyone is free to download the SQLite software +from that website, make changes to the software, use, distribute, +or sell the modified software, under either the original name or +under some new name, without any need to obtain permission, pay +royalties, acknowledge the original source of the software, or +in any other way compensate, identify, or notify the original authors. +Nobody is in any way compelled to contribute their SQLite changes and +enhancements back to the SQLite website. This document concerns +only changes and enhancements to SQLite that are intentionally and +deliberately contributed back to the SQLite website. +

      + +

      +For the purposes of this document, "SQLite software" shall mean any +computer source code, documentation, makefiles, test scripts, or +other information that is published on the SQLite website, +http://www.sqlite.org/. Precompiled binaries are excluded from +the definition of "SQLite software" in this document because the +process of compiling the software may introduce information from +outside sources which is not properly a part of SQLite. +

      + +

      +The header comments on the SQLite source files exhort the reader to +share freely and to never take more than one gives. +In the spirit of that exhortation I make the following declarations: +

      + +
        +
      1. +I dedicate to the public domain +any and all copyright interest in the SQLite software that +was publicly available on the SQLite website (http://www.sqlite.org/) prior +to the date of the signature below and any changes or enhancements to +the SQLite software +that I may cause to be published on that website in the future. +I make this dedication for the benefit of the public at large and +to the detriment of my heirs and successors. I intend this +dedication to be an overt act of relinquishment in perpetuity of +all present and future rights to the SQLite software under copyright +law. +

      2. + +
      3. +To the best of my knowledge and belief, the changes and enhancements that +I have contributed to SQLite are either originally written by me +or are derived from prior works which I have verified are also +in the public domain and are not subject to claims of copyright +by other parties. +

      4. + +
      5. +To the best of my knowledge and belief, no individual, business, organization, +government, or other entity has any copyright interest +in the SQLite software as it existed on the +SQLite website as of the date on the signature line below. +

      6. + +
      7. +I agree never to publish any additional information +to the SQLite website (by CVS, email, scp, FTP, or any other means) unless +that information is an original work of authorship by me or is derived from +prior published versions of SQLite. +I agree never to copy and paste code into the SQLite code base from +other sources. +I agree never to publish on the SQLite website any information that +would violate a law or breach a contract. +

      8. +
      + +

      + + + + + +
      +Signature: +

       

      +

       

      +

       

      +
      +Date: +
      +Name (printed): +
      + + ADDED rawpages/copyright-release.pdf Index: rawpages/copyright-release.pdf ================================================================== --- /dev/null +++ rawpages/copyright-release.pdf cannot compute difference between binary files DELETED shared.gif Index: shared.gif ================================================================== --- shared.gif +++ /dev/null cannot compute difference between binary files DELETED sharedcache.tcl Index: sharedcache.tcl ================================================================== --- sharedcache.tcl +++ /dev/null @@ -1,221 +0,0 @@ -# -# Run this script to generated a sharedcache.html output file -# -set rcsid {$Id: } -source common.tcl -header {SQLite Shared-Cache Mode} - -proc HEADING {level title} { - global pnum - incr pnum($level) - foreach i [array names pnum] { - if {$i>$level} {set pnum($i) 0} - } - set h [expr {$level+1}] - if {$h>6} {set h 6} - set n $pnum(1).$pnum(2) - for {set i 3} {$i<=$level} {incr i} { - append n .$pnum($i) - } - puts "$n $title" -} -set pnum(1) 0 -set pnum(2) 0 -set pnum(3) 0 -set pnum(4) 0 -set pnum(5) 0 -set pnum(6) 0 -set pnum(7) 0 -set pnum(8) 0 - -HEADING 1 {SQLite Shared-Cache Mode} - -puts { -

      Starting with version 3.3.0, SQLite includes a special "shared-cache" -mode (disabled by default) intended for use in embedded servers. If -shared-cache mode is enabled and a thread establishes multiple connections -to the same database, the connections share a single data and schema cache. -This can significantly reduce the quantity of memory and IO required by -the system.

      - -

      Using shared-cache mode imposes some extra restrictions on -passing database handles between threads and changes the semantics -of the locking model in some cases. These details are described in full by -this document. A basic understanding of the normal SQLite locking model (see -File Locking And Concurrency In SQLite Version 3 -for details) is assumed.

      -} - -HEADING 1 {Shared-Cache Locking Model} - -puts { -

      Externally, from the point of view of another process or thread, two -or more database connections using a shared-cache appear as a single -connection. The locking protocol used to arbitrate between multiple -shared-caches or regular database users is described elsewhere. -

      - - -
      - - -
      -

      Figure 1

      - -

      Figure 1 depicts an example runtime configuration where three -database connections have been established. Connection 1 is a normal -SQLite database connection. Connections 2 and 3 share a cache (and so must -have been established by the same process thread). The normal locking -protocol is used to serialize database access between connection 1 and -the shared cache. The internal protocol used to serialize (or not, see -"Read-Uncommitted Isolation Mode" below) access to the shared-cache by -connections 2 and 3 is described in the remainder of this section. -

      - -

      There are three levels to the shared-cache locking model, -transaction level locking, table level locking and schema level locking. -They are described in the following three sub-sections.

      - -} - -HEADING 2 {Transaction Level Locking} - -puts { -

      SQLite connections can open two kinds of transactions, read and write -transactions. This is not done explicitly, a transaction is implicitly a -read-transaction until it first writes to a database table, at which point -it becomes a write-transaction. -

      -

      At most one connection to a single shared cache may open a -write transaction at any one time. This may co-exist with any number of read -transactions. -

      -} - -HEADING 2 {Table Level Locking} - -puts { -

      When two or more connections use a shared-cache, locks are used to -serialize concurrent access attempts on a per-table basis. Tables support -two types of locks, "read-locks" and "write-locks". Locks are granted to -connections - at any one time, each database connection has either a -read-lock, write-lock or no lock on each database table. -

      - -

      At any one time, a single table may have any number of active read-locks -or a single active write lock. To read data a table, a connection must -first obtain a read-lock. To write to a table, a connection must obtain a -write-lock on that table. If a required table lock cannot be obtained, -the query fails and SQLITE_LOCKED is returned to the caller. -

      - -

      Once a connection obtains a table lock, it is not released until the -current transaction (read or write) is concluded. -

      -} - -HEADING 3 {Read-Uncommitted Isolation Mode} - -puts { -

      The behaviour described above may be modified slightly by using the -read_uncommitted pragma to change the isolation level from serialized -(the default), to read-uncommitted.

      - -

      A database connection in read-uncommitted mode does not attempt -to obtain read-locks before reading from database tables as described -above. This can lead to inconsistent query results if another database -connection modifies a table while it is being read, but it also means that -a read-transaction opened by a connection in read-uncommitted mode can -neither block nor be blocked by any other connection.

      - -

      Read-uncommitted mode has no effect on the locks required to write to -database tables (i.e. read-uncommitted connections must still obtain -write-locks and hence database writes may still block or be blocked). -Also, read-uncommitted mode has no effect on the sqlite_master -locks required by the rules enumerated below (see section -"Schema (sqlite_master) Level Locking"). -

      - -
      -  /* Set the value of the read-uncommitted flag:
      -  **
      -  **   True  -> Set the connection to read-uncommitted mode.
      -  **   False -> Set the connectino to serialized (the default) mode.
      -  */
      -  PRAGMA read_uncommitted = <boolean>;
      -
      -  /* Retrieve the current value of the read-uncommitted flag */
      -  PRAGMA read_uncommitted;
      -
      -} - -HEADING 2 {Schema (sqlite_master) Level Locking} - -puts { -

      The sqlite_master table supports shared-cache read and write -locks in the same way as all other database tables (see description -above). The following special rules also apply: -

      - -
        -
      • A connection must obtain a read-lock on sqlite_master before -accessing any database tables or obtaining any other read or write locks.
      • -
      • Before executing a statement that modifies the database schema (i.e. -a CREATE or DROP TABLE statement), a connection must obtain a write-lock on -sqlite_master. -
      • -
      • A connection may not compile an SQL statement if any other connection -is holding a write-lock on the sqlite_master table of any attached -database (including the default database, "main"). -
      • -
      -} - -HEADING 1 {Thread Related Issues} - -puts { -

      When shared-cache mode is enabled, a database connection may only be -used by the thread that called sqlite3_open() to create it. If another -thread attempts to use the database connection, in most cases an -SQLITE_MISUSE error is returned. However this is not guaranteed and -programs should not depend on this behaviour, in some cases a segfault -may result. -

      -} - -HEADING 1 {Enabling Shared-Cache Mode} - -puts { -

      Shared-cache mode is enabled on a thread-wide basis. Using the C -interface, the following API can be used to enable or disable shared-cache -mode for the calling thread: -

      - -
      -int sqlite3_enable_shared_cache(int);
      -
      - -

      It is illegal to call sqlite3_enable_shared_cache() if one or more -open database connections were opened by the calling thread. If the argument -is non-zero, shared-cache mode is enabled. If the argument is zero, -shared-cache mode is disabled. The return value is either SQLITE_OK (if the -operation was successful), SQLITE_NOMEM (if a malloc() failed), or -SQLITE_MISUSE (if the thread has open database connections). -

      -} - -footer $rcsid DELETED speed.tcl Index: speed.tcl ================================================================== --- speed.tcl +++ /dev/null @@ -1,495 +0,0 @@ -# -# Run this Tcl script to generate the speed.html file. -# -set rcsid {$Id: speed.tcl,v 1.17 2005/03/12 15:55:11 drh Exp $ } -source common.tcl -header {SQLite Database Speed Comparison} - -puts { -

      Database Speed Comparison

      - - -Note: This document is old. It describes a speed comparison between -an older version of SQLite against archaic versions of MySQL and PostgreSQL. -Readers are invited to contribute more up-to-date speed comparisons -on the SQLite Wiki. -

      -The numbers here are old enough to be nearly meaningless. Until it is -updated, use this document only as proof that SQLite is not a -sluggard. - - -

      Executive Summary

      - -

      A series of tests were run to measure the relative performance of -SQLite 2.7.6, PostgreSQL 7.1.3, and MySQL 3.23.41. -The following are general -conclusions drawn from these experiments: -

      - -
        -
      • - SQLite 2.7.6 is significantly faster (sometimes as much as 10 or - 20 times faster) than the default PostgreSQL 7.1.3 installation - on RedHat 7.2 for most common operations. -

      • -
      • - SQLite 2.7.6 is often faster (sometimes - more than twice as fast) than MySQL 3.23.41 - for most common operations. -

      • -
      • - SQLite does not execute CREATE INDEX or DROP TABLE as fast as - the other databases. But this is not seen as a problem because - those are infrequent operations. -

      • -
      • - SQLite works best if you group multiple operations together into - a single transaction. -

      • -
      - -

      -The results presented here come with the following caveats: -

      - -
        -
      • - These tests did not attempt to measure multi-user performance or - optimization of complex queries involving multiple joins and subqueries. -

      • -
      • - These tests are on a relatively small (approximately 14 megabyte) database. - They do not measure how well the database engines scale to larger problems. -

      • -
      - -

      Test Environment

      - -

      -The platform used for these tests is a 1.6GHz Athlon with 1GB or memory -and an IDE disk drive. The operating system is RedHat Linux 7.2 with -a stock kernel. -

      - -

      -The PostgreSQL and MySQL servers used were as delivered by default on -RedHat 7.2. (PostgreSQL version 7.1.3 and MySQL version 3.23.41.) -No effort was made to tune these engines. Note in particular -the the default MySQL configuration on RedHat 7.2 does not support -transactions. Not having to support transactions gives MySQL a -big speed advantage, but SQLite is still able to hold its own on most -tests. -

      - -

      -I am told that the default PostgreSQL configuration in RedHat 7.3 -is unnecessarily conservative (it is designed to -work on a machine with 8MB of RAM) and that PostgreSQL could -be made to run a lot faster with some knowledgeable configuration -tuning. -Matt Sergeant reports that he has tuned his PostgreSQL installation -and rerun the tests shown below. His results show that -PostgreSQL and MySQL run at about the same speed. For Matt's -results, visit -

      - -
      - -http://www.sergeant.org/sqlite_vs_pgsync.html -
      - -

      -SQLite was tested in the same configuration that it appears -on the website. It was compiled with -O6 optimization and with -the -DNDEBUG=1 switch which disables the many "assert()" statements -in the SQLite code. The -DNDEBUG=1 compiler option roughly doubles -the speed of SQLite. -

      - -

      -All tests are conducted on an otherwise quiescent machine. -A simple Tcl script was used to generate and run all the tests. -A copy of this Tcl script can be found in the SQLite source tree -in the file tools/speedtest.tcl. -

      - -

      -The times reported on all tests represent wall-clock time -in seconds. Two separate time values are reported for SQLite. -The first value is for SQLite in its default configuration with -full disk synchronization turned on. With synchronization turned -on, SQLite executes -an fsync() system call (or the equivalent) at key points -to make certain that critical data has -actually been written to the disk drive surface. Synchronization -is necessary to guarantee the integrity of the database if the -operating system crashes or the computer powers down unexpectedly -in the middle of a database update. The second time reported for SQLite is -when synchronization is turned off. With synchronization off, -SQLite is sometimes much faster, but there is a risk that an -operating system crash or an unexpected power failure could -damage the database. Generally speaking, the synchronous SQLite -times are for comparison against PostgreSQL (which is also -synchronous) and the asynchronous SQLite times are for -comparison against the asynchronous MySQL engine. -

      - -

      Test 1: 1000 INSERTs

      -
      -CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));
      -INSERT INTO t1 VALUES(1,13153,'thirteen thousand one hundred fifty three');
      -INSERT INTO t1 VALUES(2,75560,'seventy five thousand five hundred sixty');
      -... 995 lines omitted
      -INSERT INTO t1 VALUES(998,66289,'sixty six thousand two hundred eighty nine');
      -INSERT INTO t1 VALUES(999,24322,'twenty four thousand three hundred twenty two');
      -INSERT INTO t1 VALUES(1000,94142,'ninety four thousand one hundred forty two');
      - -
      - - - - -
      PostgreSQL:   4.373
      MySQL:   0.114
      SQLite 2.7.6:   13.061
      SQLite 2.7.6 (nosync):   0.223
      - -

      -Because it does not have a central server to coordinate access, -SQLite must close and reopen the database file, and thus invalidate -its cache, for each transaction. In this test, each SQL statement -is a separate transaction so the database file must be opened and closed -and the cache must be flushed 1000 times. In spite of this, the asynchronous -version of SQLite is still nearly as fast as MySQL. Notice how much slower -the synchronous version is, however. SQLite calls fsync() after -each synchronous transaction to make sure that all data is safely on -the disk surface before continuing. For most of the 13 seconds in the -synchronous test, SQLite was sitting idle waiting on disk I/O to complete.

      - - -

      Test 2: 25000 INSERTs in a transaction

      -
      -BEGIN;
      -CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));
      -INSERT INTO t2 VALUES(1,59672,'fifty nine thousand six hundred seventy two');
      -... 24997 lines omitted
      -INSERT INTO t2 VALUES(24999,89569,'eighty nine thousand five hundred sixty nine');
      -INSERT INTO t2 VALUES(25000,94666,'ninety four thousand six hundred sixty six');
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   4.900
      MySQL:   2.184
      SQLite 2.7.6:   0.914
      SQLite 2.7.6 (nosync):   0.757
      - -

      -When all the INSERTs are put in a transaction, SQLite no longer has to -close and reopen the database or invalidate its cache between each statement. -It also does not -have to do any fsync()s until the very end. When unshackled in -this way, SQLite is much faster than either PostgreSQL or MySQL. -

      - -

      Test 3: 25000 INSERTs into an indexed table

      -
      -BEGIN;
      -CREATE TABLE t3(a INTEGER, b INTEGER, c VARCHAR(100));
      -CREATE INDEX i3 ON t3(c);
      -... 24998 lines omitted
      -INSERT INTO t3 VALUES(24999,88509,'eighty eight thousand five hundred nine');
      -INSERT INTO t3 VALUES(25000,84791,'eighty four thousand seven hundred ninety one');
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   8.175
      MySQL:   3.197
      SQLite 2.7.6:   1.555
      SQLite 2.7.6 (nosync):   1.402
      - -

      -There were reports that SQLite did not perform as well on an indexed table. -This test was recently added to disprove those rumors. It is true that -SQLite is not as fast at creating new index entries as the other engines -(see Test 6 below) but its overall speed is still better. -

      - -

      Test 4: 100 SELECTs without an index

      -
      -BEGIN;
      -SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<1000;
      -SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<1100;
      -... 96 lines omitted
      -SELECT count(*), avg(b) FROM t2 WHERE b>=9800 AND b<10800;
      -SELECT count(*), avg(b) FROM t2 WHERE b>=9900 AND b<10900;
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   3.629
      MySQL:   2.760
      SQLite 2.7.6:   2.494
      SQLite 2.7.6 (nosync):   2.526
      - - -

      -This test does 100 queries on a 25000 entry table without an index, -thus requiring a full table scan. Prior versions of SQLite used to -be slower than PostgreSQL and MySQL on this test, but recent performance -enhancements have increased its speed so that it is now the fastest -of the group. -

      - -

      Test 5: 100 SELECTs on a string comparison

      -
      -BEGIN;
      -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one%';
      -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%two%';
      -... 96 lines omitted
      -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety nine%';
      -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one hundred%';
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   13.409
      MySQL:   4.640
      SQLite 2.7.6:   3.362
      SQLite 2.7.6 (nosync):   3.372
      - -

      -This test still does 100 full table scans but it uses -string comparisons instead of numerical comparisons. -SQLite is over three times faster than PostgreSQL here and about 30% -faster than MySQL. -

      - -

      Test 6: Creating an index

      -
      -CREATE INDEX i2a ON t2(a);
      CREATE INDEX i2b ON t2(b); -
      - - - - -
      PostgreSQL:   0.381
      MySQL:   0.318
      SQLite 2.7.6:   0.777
      SQLite 2.7.6 (nosync):   0.659
      - -

      -SQLite is slower at creating new indices. This is not a huge problem -(since new indices are not created very often) but it is something that -is being worked on. Hopefully, future versions of SQLite will do better -here. -

      - -

      Test 7: 5000 SELECTs with an index

      -
      -SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<100;
      -SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<200;
      -SELECT count(*), avg(b) FROM t2 WHERE b>=200 AND b<300;
      -... 4994 lines omitted
      -SELECT count(*), avg(b) FROM t2 WHERE b>=499700 AND b<499800;
      -SELECT count(*), avg(b) FROM t2 WHERE b>=499800 AND b<499900;
      -SELECT count(*), avg(b) FROM t2 WHERE b>=499900 AND b<500000;
      - -
      - - - - -
      PostgreSQL:   4.614
      MySQL:   1.270
      SQLite 2.7.6:   1.121
      SQLite 2.7.6 (nosync):   1.162
      - -

      -All three database engines run faster when they have indices to work with. -But SQLite is still the fastest. -

      - -

      Test 8: 1000 UPDATEs without an index

      -
      -BEGIN;
      -UPDATE t1 SET b=b*2 WHERE a>=0 AND a<10;
      -UPDATE t1 SET b=b*2 WHERE a>=10 AND a<20;
      -... 996 lines omitted
      -UPDATE t1 SET b=b*2 WHERE a>=9980 AND a<9990;
      -UPDATE t1 SET b=b*2 WHERE a>=9990 AND a<10000;
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   1.739
      MySQL:   8.410
      SQLite 2.7.6:   0.637
      SQLite 2.7.6 (nosync):   0.638
      - -

      -For this particular UPDATE test, MySQL is consistently -five or ten times -slower than PostgreSQL and SQLite. I do not know why. MySQL is -normally a very fast engine. Perhaps this problem has been addressed -in later versions of MySQL. -

      - -

      Test 9: 25000 UPDATEs with an index

      -
      -BEGIN;
      -UPDATE t2 SET b=468026 WHERE a=1;
      -UPDATE t2 SET b=121928 WHERE a=2;
      -... 24996 lines omitted
      -UPDATE t2 SET b=35065 WHERE a=24999;
      -UPDATE t2 SET b=347393 WHERE a=25000;
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   18.797
      MySQL:   8.134
      SQLite 2.7.6:   3.520
      SQLite 2.7.6 (nosync):   3.104
      - -

      -As recently as version 2.7.0, SQLite ran at about the same speed as -MySQL on this test. But recent optimizations to SQLite have more -than doubled the speed of UPDATEs. -

      - -

      Test 10: 25000 text UPDATEs with an index

      -
      -BEGIN;
      -UPDATE t2 SET c='one hundred forty eight thousand three hundred eighty two' WHERE a=1;
      -UPDATE t2 SET c='three hundred sixty six thousand five hundred two' WHERE a=2;
      -... 24996 lines omitted
      -UPDATE t2 SET c='three hundred eighty three thousand ninety nine' WHERE a=24999;
      -UPDATE t2 SET c='two hundred fifty six thousand eight hundred thirty' WHERE a=25000;
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   48.133
      MySQL:   6.982
      SQLite 2.7.6:   2.408
      SQLite 2.7.6 (nosync):   1.725
      - -

      -Here again, version 2.7.0 of SQLite used to run at about the same speed -as MySQL. But now version 2.7.6 is over two times faster than MySQL and -over twenty times faster than PostgreSQL. -

      - -

      -In fairness to PostgreSQL, it started thrashing on this test. A -knowledgeable administrator might be able to get PostgreSQL to run a lot -faster here by tweaking and tuning the server a little. -

      - -

      Test 11: INSERTs from a SELECT

      -
      -BEGIN;
      INSERT INTO t1 SELECT b,a,c FROM t2;
      INSERT INTO t2 SELECT b,a,c FROM t1;
      COMMIT; -
      - - - - -
      PostgreSQL:   61.364
      MySQL:   1.537
      SQLite 2.7.6:   2.787
      SQLite 2.7.6 (nosync):   1.599
      - -

      -The asynchronous SQLite is just a shade slower than MySQL on this test. -(MySQL seems to be especially adept at INSERT...SELECT statements.) -The PostgreSQL engine is still thrashing - most of the 61 seconds it used -were spent waiting on disk I/O. -

      - -

      Test 12: DELETE without an index

      -
      -DELETE FROM t2 WHERE c LIKE '%fifty%'; -
      - - - - -
      PostgreSQL:   1.509
      MySQL:   0.975
      SQLite 2.7.6:   4.004
      SQLite 2.7.6 (nosync):   0.560
      - -

      -The synchronous version of SQLite is the slowest of the group in this test, -but the asynchronous version is the fastest. -The difference is the extra time needed to execute fsync(). -

      - -

      Test 13: DELETE with an index

      -
      -DELETE FROM t2 WHERE a>10 AND a<20000; -
      - - - - -
      PostgreSQL:   1.316
      MySQL:   2.262
      SQLite 2.7.6:   2.068
      SQLite 2.7.6 (nosync):   0.752
      - -

      -This test is significant because it is one of the few where -PostgreSQL is faster than MySQL. The asynchronous SQLite is, -however, faster than both the other two. -

      - -

      Test 14: A big INSERT after a big DELETE

      -
      -INSERT INTO t2 SELECT * FROM t1; -
      - - - - -
      PostgreSQL:   13.168
      MySQL:   1.815
      SQLite 2.7.6:   3.210
      SQLite 2.7.6 (nosync):   1.485
      - -

      -Some older versions of SQLite (prior to version 2.4.0) -would show decreasing performance after a -sequence of DELETEs followed by new INSERTs. As this test shows, the -problem has now been resolved. -

      - -

      Test 15: A big DELETE followed by many small INSERTs

      -
      -BEGIN;
      -DELETE FROM t1;
      -INSERT INTO t1 VALUES(1,10719,'ten thousand seven hundred nineteen');
      -... 11997 lines omitted
      -INSERT INTO t1 VALUES(11999,72836,'seventy two thousand eight hundred thirty six');
      -INSERT INTO t1 VALUES(12000,64231,'sixty four thousand two hundred thirty one');
      -COMMIT;
      - -
      - - - - -
      PostgreSQL:   4.556
      MySQL:   1.704
      SQLite 2.7.6:   0.618
      SQLite 2.7.6 (nosync):   0.406
      - -

      -SQLite is very good at doing INSERTs within a transaction, which probably -explains why it is so much faster than the other databases at this test. -

      - -

      Test 16: DROP TABLE

      -
      -DROP TABLE t1;
      DROP TABLE t2;
      DROP TABLE t3; -
      - - - - -
      PostgreSQL:   0.135
      MySQL:   0.015
      SQLite 2.7.6:   0.939
      SQLite 2.7.6 (nosync):   0.254
      - -

      -SQLite is slower than the other databases when it comes to dropping tables. -This probably is because when SQLite drops a table, it has to go through and -erase the records in the database file that deal with that table. MySQL and -PostgreSQL, on the other hand, use separate files to represent each table -so they can drop a table simply by deleting a file, which is much faster. -

      - -

      -On the other hand, dropping tables is not a very common operation -so if SQLite takes a little longer, that is not seen as a big problem. -

      - -} -footer $rcsid DELETED sqlite.tcl Index: sqlite.tcl ================================================================== --- sqlite.tcl +++ /dev/null @@ -1,582 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: sqlite.tcl,v 1.25 2007/01/08 14:31:36 drh Exp $} -source common.tcl -header {sqlite3: A command-line access program for SQLite databases} -puts { -

      sqlite3: A command-line access program for SQLite databases

      - -

      The SQLite library includes a simple command-line utility named -sqlite3 that allows the user to manually enter and execute SQL -commands against an SQLite database. This document provides a brief -introduction on how to use sqlite3. - -

      Getting Started

      - -

      To start the sqlite3 program, just type "sqlite3" followed by -the name of the file that holds the SQLite database. If the file does -not exist, a new one is created automatically. -The sqlite3 program will -then prompt you to enter SQL. Type in SQL statements (terminated by a -semicolon), press "Enter" and the SQL will be executed.

      - -

      For example, to create a new SQLite database named "ex1" -with a single table named "tbl1", you might do this:

      -} - -proc Code {body} { - puts {
      } - regsub -all {&} [string trim $body] {\&} body - regsub -all {>} $body {\>} body - regsub -all {<} $body {\<} body - regsub -all {\(\(\(} $body {} body - regsub -all {\)\)\)} $body {} body - regsub -all { } $body {\ } body - regsub -all \n $body
      \n body - puts $body - puts {
      } -} - -Code { -$ (((sqlite3 ex1))) -SQLite version 3.3.10 -Enter ".help" for instructions -sqlite> (((create table tbl1(one varchar(10), two smallint);))) -sqlite> (((insert into tbl1 values('hello!',10);))) -sqlite> (((insert into tbl1 values('goodbye', 20);))) -sqlite> (((select * from tbl1;))) -hello!|10 -goodbye|20 -sqlite> -} - -puts { -

      You can terminate the sqlite3 program by typing your system's -End-Of-File character (usually a Control-D) or the interrupt -character (usually a Control-C).

      - -

      Make sure you type a semicolon at the end of each SQL command! -The sqlite3 program looks for a semicolon to know when your SQL command is -complete. If you omit the semicolon, sqlite3 will give you a -continuation prompt and wait for you to enter more text to be -added to the current SQL command. This feature allows you to -enter SQL commands that span multiple lines. For example:

      -} - -Code { -sqlite> (((CREATE TABLE tbl2 ())) - ...> ((( f1 varchar(30) primary key,))) - ...> ((( f2 text,))) - ...> ((( f3 real))) - ...> ((();))) -sqlite> -} - -puts { - -

      Aside: Querying the SQLITE_MASTER table

      - -

      The database schema in an SQLite database is stored in -a special table named "sqlite_master". -You can execute "SELECT" statements against the -special sqlite_master table just like any other table -in an SQLite database. For example:

      -} - -Code { -$ (((sqlite3 ex1))) -SQLite version 3.3.10 -Enter ".help" for instructions -sqlite> (((select * from sqlite_master;))) - type = table - name = tbl1 -tbl_name = tbl1 -rootpage = 3 - sql = create table tbl1(one varchar(10), two smallint) -sqlite> -

      -But you cannot execute DROP TABLE, UPDATE, INSERT or DELETE against -the sqlite_master table. The sqlite_master -table is updated automatically as you create or drop tables and -indices from the database. You can not make manual changes -to the sqlite_master table. -

      - -

      -The schema for TEMPORARY tables is not stored in the "sqlite_master" table -since TEMPORARY tables are not visible to applications other than the -application that created the table. The schema for TEMPORARY tables -is stored in another special table named "sqlite_temp_master". The -"sqlite_temp_master" table is temporary itself. -

      - -

      Special commands to sqlite3

      - -

      -Most of the time, sqlite3 just reads lines of input and passes them -on to the SQLite library for execution. -But if an input line begins with a dot ("."), then -that line is intercepted and interpreted by the sqlite3 program itself. -These "dot commands" are typically used to change the output format -of queries, or to execute certain prepackaged query statements. -

      - -

      -For a listing of the available dot commands, you can enter ".help" -at any time. For example: -

      } - -Code { -sqlite> (((.help))) -.bail ON|OFF Stop after hitting an error. Default OFF -.databases List names and files of attached databases -.dump ?TABLE? ... Dump the database in an SQL text format -.echo ON|OFF Turn command echo on or off -.exit Exit this program -.explain ON|OFF Turn output mode suitable for EXPLAIN on or off. -.header(s) ON|OFF Turn display of headers on or off -.help Show this message -.import FILE TABLE Import data from FILE into TABLE -.indices TABLE Show names of all indices on TABLE -.load FILE ?ENTRY? Load an extension library -.mode MODE ?TABLE? Set output mode where MODE is one of: - csv Comma-separated values - column Left-aligned columns. (See .width) - html HTML code - insert SQL insert statements for TABLE - line One value per line - list Values delimited by .separator string - tabs Tab-separated values - tcl TCL list elements -.nullvalue STRING Print STRING in place of NULL values -.output FILENAME Send output to FILENAME -.output stdout Send output to the screen -.prompt MAIN CONTINUE Replace the standard prompts -.quit Exit this program -.read FILENAME Execute SQL in FILENAME -.schema ?TABLE? Show the CREATE statements -.separator STRING Change separator used by output mode and .import -.show Show the current values for various settings -.tables ?PATTERN? List names of tables matching a LIKE pattern -.timeout MS Try opening locked tables for MS milliseconds -.width NUM NUM ... Set column widths for "column" mode -sqlite> -} - -puts { -

      Changing Output Formats

      - -

      The sqlite3 program is able to show the results of a query -in eight different formats: "csv", "column", "html", "insert", -"line", "list", "tabs", and "tcl". -You can use the ".mode" dot command to switch between these output -formats.

      - -

      The default output mode is "list". In -list mode, each record of a query result is written on one line of -output and each column within that record is separated by a specific -separator string. The default separator is a pipe symbol ("|"). -List mode is especially useful when you are going to send the output -of a query to another program (such as AWK) for additional processing.

      } - -Code { -sqlite> (((.mode list))) -sqlite> (((select * from tbl1;))) -hello|10 -goodbye|20 -sqlite> -} - -puts { -

      You can use the ".separator" dot command to change the separator -for list mode. For example, to change the separator to a comma and -a space, you could do this:

      } - -Code { -sqlite> (((.separator ", "))) -sqlite> (((select * from tbl1;))) -hello, 10 -goodbye, 20 -sqlite> -} - -puts { -

      In "line" mode, each column in a row of the database -is shown on a line by itself. Each line consists of the column -name, an equal sign and the column data. Successive records are -separated by a blank line. Here is an example of line mode -output:

      } - -Code { -sqlite> (((.mode line))) -sqlite> (((select * from tbl1;))) -one = hello -two = 10 - -one = goodbye -two = 20 -sqlite> -} - -puts { -

      In column mode, each record is shown on a separate line with the -data aligned in columns. For example:

      } - -Code { -sqlite> (((.mode column))) -sqlite> (((select * from tbl1;))) -one two ----------- ---------- -hello 10 -goodbye 20 -sqlite> -} - -puts { -

      By default, each column is at least 10 characters wide. -Data that is too wide to fit in a column is truncated. You can -adjust the column widths using the ".width" command. Like this:

      } - -Code { -sqlite> (((.width 12 6))) -sqlite> (((select * from tbl1;))) -one two ------------- ------ -hello 10 -goodbye 20 -sqlite> -} - -puts { -

      The ".width" command in the example above sets the width of the first -column to 12 and the width of the second column to 6. All other column -widths were unaltered. You can give as many arguments to ".width" as -necessary to specify the widths of as many columns as are in your -query results.

      - -

      If you specify a column a width of 0, then the column -width is automatically adjusted to be the maximum of three -numbers: 10, the width of the header, and the width of the -first row of data. This makes the column width self-adjusting. -The default width setting for every column is this -auto-adjusting 0 value.

      - -

      The column labels that appear on the first two lines of output -can be turned on and off using the ".header" dot command. In the -examples above, the column labels are on. To turn them off you -could do this:

      } - -Code { -sqlite> (((.header off))) -sqlite> (((select * from tbl1;))) -hello 10 -goodbye 20 -sqlite> -} - -puts { -

      Another useful output mode is "insert". In insert mode, the output -is formatted to look like SQL INSERT statements. You can use insert -mode to generate text that can later be used to input data into a -different database.

      - -

      When specifying insert mode, you have to give an extra argument -which is the name of the table to be inserted into. For example:

      -} - -Code { -sqlite> (((.mode insert new_table))) -sqlite> (((select * from tbl1;))) -INSERT INTO 'new_table' VALUES('hello',10); -INSERT INTO 'new_table' VALUES('goodbye',20); -sqlite> -} - -puts { -

      The last output mode is "html". In this mode, sqlite3 writes -the results of the query as an XHTML table. The beginning -<TABLE> and the ending </TABLE> are not written, but -all of the intervening <TR>s, <TH>s, and <TD>s -are. The html output mode is envisioned as being useful for -CGI.

      -} - -puts { -

      Writing results to a file

      - -

      By default, sqlite3 sends query results to standard output. You -can change this using the ".output" command. Just put the name of -an output file as an argument to the .output command and all subsequent -query results will be written to that file. Use ".output stdout" to -begin writing to standard output again. For example:

      } - -Code { -sqlite> (((.mode list))) -sqlite> (((.separator |))) -sqlite> (((.output test_file_1.txt))) -sqlite> (((select * from tbl1;))) -sqlite> (((.exit))) -$ (((cat test_file_1.txt))) -hello|10 -goodbye|20 -$ -} - -puts { -

      Querying the database schema

      - -

      The sqlite3 program provides several convenience commands that -are useful for looking at the schema of the database. There is -nothing that these commands do that cannot be done by some other -means. These commands are provided purely as a shortcut.

      - -

      For example, to see a list of the tables in the database, you -can enter ".tables".

      -} - -Code { -sqlite> (((.tables))) -tbl1 -tbl2 -sqlite> -} - -puts { -

      The ".tables" command is similar to setting list mode then -executing the following query:

      - -
      -SELECT name FROM sqlite_master 
      -WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%'
      -UNION ALL 
      -SELECT name FROM sqlite_temp_master 
      -WHERE type IN ('table','view') 
      -ORDER BY 1
      -
      - -

      In fact, if you look at the source code to the sqlite3 program -(found in the source tree in the file src/shell.c) you'll find -exactly the above query.

      - -

      The ".indices" command works in a similar way to list all of -the indices for a particular table. The ".indices" command takes -a single argument which is the name of the table for which the -indices are desired. Last, but not least, is the ".schema" command. -With no arguments, the ".schema" command shows the original CREATE TABLE -and CREATE INDEX statements that were used to build the current database. -If you give the name of a table to ".schema", it shows the original -CREATE statement used to make that table and all of its indices. -We have:

      } - -Code { -sqlite> (((.schema))) -create table tbl1(one varchar(10), two smallint) -CREATE TABLE tbl2 ( - f1 varchar(30) primary key, - f2 text, - f3 real -) -sqlite> (((.schema tbl2))) -CREATE TABLE tbl2 ( - f1 varchar(30) primary key, - f2 text, - f3 real -) -sqlite> -} - -puts { -

      The ".schema" command accomplishes the same thing as setting -list mode, then entering the following query:

      - -
      -SELECT sql FROM 
      -   (SELECT * FROM sqlite_master UNION ALL
      -    SELECT * FROM sqlite_temp_master)
      -WHERE type!='meta'
      -ORDER BY tbl_name, type DESC, name
      -
      - -

      Or, if you give an argument to ".schema" because you only -want the schema for a single table, the query looks like this:

      - -
      -SELECT sql FROM
      -   (SELECT * FROM sqlite_master UNION ALL
      -    SELECT * FROM sqlite_temp_master)
      -WHERE type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
      -ORDER BY substr(type,2,1), name
      -
      - -

      -You can supply an argument to the .schema command. If you do, the -query looks like this: -

      - -
      -SELECT sql FROM
      -   (SELECT * FROM sqlite_master UNION ALL
      -    SELECT * FROM sqlite_temp_master)
      -WHERE tbl_name LIKE '%s'
      -  AND type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
      -ORDER BY substr(type,2,1), name
      -
      - -

      The "%s" in the query is replaced by your argument. This allows you -to view the schema for some subset of the database.

      -} - -Code { -sqlite> (((.schema %abc%))) -} - -puts { -

      -Along these same lines, -the ".tables" command also accepts a pattern as its first argument. -If you give an argument to the .tables command, a "%" is both -appended and prepended and a LIKE clause is added to the query. -This allows you to list only those tables that match a particular -pattern.

      - -

      The ".databases" command shows a list of all databases open in -the current connection. There will always be at least 2. The first -one is "main", the original database opened. The second is "temp", -the database used for temporary tables. There may be additional -databases listed for databases attached using the ATTACH statement. -The first output column is the name the database is attached with, -and the second column is the filename of the external file.

      } - -Code { -sqlite> (((.databases))) -} - -puts { -

      Converting An Entire Database To An ASCII Text File

      - -

      Use the ".dump" command to convert the entire contents of a -database into a single ASCII text file. This file can be converted -back into a database by piping it back into sqlite3.

      - -

      A good way to make an archival copy of a database is this:

      -} - -Code { -$ (((echo '.dump' | sqlite3 ex1 | gzip -c >ex1.dump.gz))) -} - -puts { -

      This generates a file named ex1.dump.gz that contains everything -you need to reconstruct the database at a later time, or on another -machine. To reconstruct the database, just type:

      -} - -Code { -$ (((zcat ex1.dump.gz | sqlite3 ex2))) -} - -puts { -

      The text format is pure SQL so you -can also use the .dump command to export an SQLite database -into other popular SQL database engines. Like this:

      -} - -Code { -$ (((createdb ex2))) -$ (((sqlite3 ex1 .dump | psql ex2))) -} - -puts { -

      Other Dot Commands

      - -

      The ".explain" dot command can be used to set the output mode -to "column" and to set the column widths to values that are reasonable -for looking at the output of an EXPLAIN command. The EXPLAIN command -is an SQLite-specific SQL extension that is useful for debugging. If any -regular SQL is prefaced by EXPLAIN, then the SQL command is parsed and -analyzed but is not executed. Instead, the sequence of virtual machine -instructions that would have been used to execute the SQL command are -returned like a query result. For example:

      } - -Code { -sqlite> (((.explain))) -sqlite> (((explain delete from tbl1 where two<20;))) -addr opcode p1 p2 p3 ----- ------------ ----- ----- ------------------------------------- -0 ListOpen 0 0 -1 Open 0 1 tbl1 -2 Next 0 9 -3 Field 0 1 -4 Integer 20 0 -5 Ge 0 2 -6 Key 0 0 -7 ListWrite 0 0 -8 Goto 0 2 -9 Noop 0 0 -10 ListRewind 0 0 -11 ListRead 0 14 -12 Delete 0 0 -13 Goto 0 11 -14 ListClose 0 0 -} - -puts { - -

      The ".timeout" command sets the amount of time that the sqlite3 -program will wait for locks to clear on files it is trying to access -before returning an error. The default value of the timeout is zero so -that an error is returned immediately if any needed database table or -index is locked.

      - -

      And finally, we mention the ".exit" command which causes the -sqlite3 program to exit.

      - -

      Using sqlite3 in a shell script

      - -

      -One way to use sqlite3 in a shell script is to use "echo" or -"cat" to generate a sequence of commands in a file, then invoke sqlite3 -while redirecting input from the generated command file. This -works fine and is appropriate in many circumstances. But as -an added convenience, sqlite3 allows a single SQL command to be -entered on the command line as a second argument after the -database name. When the sqlite3 program is launched with two -arguments, the second argument is passed to the SQLite library -for processing, the query results are printed on standard output -in list mode, and the program exits. This mechanism is designed -to make sqlite3 easy to use in conjunction with programs like -"awk". For example:

      } - -Code { -$ (((sqlite3 ex1 'select * from tbl1' |))) -> ((( awk '{printf "
      %s%s\n",$1,$2 }'))) -
      hello10 -
      goodbye20 -$ -} - -puts { -

      Ending shell commands

      - -

      -SQLite commands are normally terminated by a semicolon. In the shell -you can also use the word "GO" (case-insensitive) or a slash character -"/" on a line by itself to end a command. These are used by SQL Server -and Oracle, respectively. These won't work in sqlite3_exec(), -because the shell translates these into a semicolon before passing them -to that function.

      -} - -puts { -

      Compiling the sqlite3 program from sources

      - -

      -The sqlite3 program is built automatically when you compile the -SQLite library. Just get a copy of the source tree, run -"configure" and then "make".

      -} -footer $rcsid DELETED support.tcl Index: support.tcl ================================================================== --- support.tcl +++ /dev/null @@ -1,79 +0,0 @@ -set rcsid {$Id: support.tcl,v 1.7 2007/06/21 13:30:40 drh Exp $} -source common.tcl -header {SQLite Support Options} -puts { -

      SQLite Support Options

      - - -

      Mailing List

      -

-A mailing list has been set up for asking questions and -for open discussion of problems -and issues by the SQLite user community. -To subscribe to the mailing list, send an email to - -sqlite-users-subscribe@sqlite.org. -If you would prefer to get digests rather than individual -emails, send a message to - -sqlite-users-digest-subscribe@sqlite.org. -For additional information about operating and using this -mailing list, send a message to - -sqlite-users-help@sqlite.org and instructions will be -sent to you by return email. -

      - -

      -There are multiple archives of the mailing list: -

      - -
      - -http://www.mail-archive.com/sqlite-users%40sqlite.org
      - -http://marc.info/?l=sqlite-users&r=1&w=2
      - -http://news.gmane.org/gmane.comp.db.sqlite.general -
      - -

      - - -

      Direct E-Mail To The Author

      - -

      -Use the mailing list. -Please do not send email directly to the author of SQLite -unless: -

        -
      • You have or intend to acquire a professional support contract -as described below, or
      • -
      • You are working on an open source project.
      • -
-You are welcome to use SQLite in closed source, proprietary, and/or -commercial projects and to ask questions about such use on the public -mailing list. But please do not ask to receive free direct technical -support. The software is free; direct technical support is not. -

      - - -

      Professional Support

      - -

      -If you would like professional support for SQLite -or if you want custom modifications to SQLite performed by the -original author, these services are available for a modest fee. -For additional information visit - -http://www.hwaci.com/sw/sqlite/prosupport.html or contact:

      - -
      -D. Richard Hipp
      -Hwaci - Applied Software Research
      -704.948.4565
      -drh@hwaci.com -
      - -} -footer $rcsid DELETED table-ex1b2.gif Index: table-ex1b2.gif ================================================================== --- table-ex1b2.gif +++ /dev/null cannot compute difference between binary files DELETED tclsqlite.tcl Index: tclsqlite.tcl ================================================================== --- tclsqlite.tcl +++ /dev/null @@ -1,666 +0,0 @@ -# -# Run this Tcl script to generate the tclsqlite.html file. -# -set rcsid {$Id: tclsqlite.tcl,v 1.17 2007/06/19 17:48:57 drh Exp $} -source common.tcl -header {The Tcl interface to the SQLite library} -proc METHOD {name text} { - puts "\n

      The \"$name\" method

      \n" - puts $text -} -puts { -

      The Tcl interface to the SQLite library

      - -

      The SQLite library is designed to be very easy to use from -a Tcl or Tcl/Tk script. This document gives an overview of the Tcl -programming interface.

      - -

      The API

      - -

The interface to the SQLite library consists of a single -tcl command named sqlite3. -Because there is only this -one command, the interface is not placed in a separate -namespace.

      - -

      The sqlite3 command is used as follows:

      - -
      -sqlite3  dbcmd  database-name -
      - -

      -The sqlite3 command opens the database named in the second -argument. If the database does not already exist, it is -automatically created. -The sqlite3 command also creates a new Tcl -command to control the database. The name of the new Tcl command -is given by the first argument. This approach is similar to the -way widgets are created in Tk. -

      - -

      -The name of the database is just the name of a disk file in which -the database is stored. If the name of the database is an empty -string or the special name ":memory:" then a new database is created -in memory. -

      - -

      -Once an SQLite database is open, it can be controlled using -methods of the dbcmd. There are currently 22 methods -defined.

      - -

      -

      -

      - -

      The use of each of these methods will be explained in the sequel, though -not in the order shown above.

      - -} - -############################################################################## -METHOD eval { -

      -The most useful dbcmd method is "eval". The eval method is used -to execute SQL on the database. The syntax of the eval method looks -like this:

      - -
      -dbcmd  eval  sql -    ?array-name ? ?script? -
      - -

      -The job of the eval method is to execute the SQL statement or statements -given in the second argument. For example, to create a new table in -a database, you can do this:

      - -
      -sqlite3 db1 ./testdb
      -db1 eval {CREATE TABLE t1(a int, b text)}
      -
      - -

      The above code creates a new table named t1 with columns -a and b. What could be simpler?

      - -

      Query results are returned as a list of column values. If a -query requests 2 columns and there are 3 rows matching the query, -then the returned list will contain 6 elements. For example:

      - -
      -db1 eval {INSERT INTO t1 VALUES(1,'hello')}
      -db1 eval {INSERT INTO t1 VALUES(2,'goodbye')}
      -db1 eval {INSERT INTO t1 VALUES(3,'howdy!')}
      -set x [db1 eval {SELECT * FROM t1 ORDER BY a}]
      -
      - -

      The variable $x is set by the above code to

      - -
      -1 hello 2 goodbye 3 howdy! -
      - -

      You can also process the results of a query one row at a time -by specifying the name of an array variable and a script following -the SQL code. For each row of the query result, the values of all -columns will be inserted into the array variable and the script will -be executed. For instance:

      - -
      -db1 eval {SELECT * FROM t1 ORDER BY a} values {
      -    parray values
      -    puts ""
      -}
      -
      - -

      This last code will give the following output:

      - -
      -values(*) = a b
      -values(a) = 1
      -values(b) = hello

      - -values(*) = a b
      -values(a) = 2
      -values(b) = goodbye

      - -values(*) = a b
      -values(a) = 3
      -values(b) = howdy!
      -

      - -

-For each column in a row of the result, the name of that column -is used as an index into the array. The value of the column is stored -in the corresponding array entry. The special array index * is -used to store a list of column names in the order that they appear. -

      - -

      -If the array variable name is omitted or is the empty string, then the value of -each column is stored in a variable with the same name as the column -itself. For example: -

      - -
      -db1 eval {SELECT * FROM t1 ORDER BY a} {
      -    puts "a=$a b=$b"
      -}
      -
      - -

      -From this we get the following output -

      - -
      -a=1 b=hello
      -a=2 b=goodbye
      -a=3 b=howdy!
      -
      - -

-Tcl variable names can appear in the SQL statement of the second argument -in any position where it is legal to put a string or number literal. The -value of the variable is substituted for the variable name. If the -variable does not exist a NULL value is used. For example: -

      - -
      -db1 eval {INSERT INTO t1 VALUES(5,$bigstring)} -
      - -

      -Note that it is not necessary to quote the $bigstring value. That happens -automatically. If $bigstring is a large string or binary object, this -technique is not only easier to write, it is also much more efficient -since it avoids making a copy of the content of $bigstring. -

      - -

-If the $bigstring variable has both a string and a "bytearray" representation, -then TCL inserts the value as a string. If it has only a "bytearray" -representation, then the value is inserted as a BLOB. To force a -value to be inserted as a BLOB even if it also has a text representation, -use a "@" character in place of the "$". Like this: -

      - -
      -db1 eval {INSERT INTO t1 VALUES(5,@bigstring)} -
      - -

      -If the variable does not have a bytearray representation, then "@" works -just like "$". -

      - -} - -############################################################################## -METHOD close { - -

      -As its name suggests, the "close" method to an SQLite database just -closes the database. This has the side-effect of deleting the -dbcmd Tcl command. Here is an example of opening and then -immediately closing a database: -

      - -
      -sqlite3 db1 ./testdb
      -db1 close
      -
      - -

      -If you delete the dbcmd directly, that has the same effect -as invoking the "close" method. So the following code is equivalent -to the previous:

      - -
      -sqlite3 db1 ./testdb
      -rename db1 {}
      -
      -} - -############################################################################## -METHOD transaction { - -

      -The "transaction" method is used to execute a TCL script inside an SQLite -database transaction. The transaction is committed when the script completes, -or it rolls back if the script fails. If the transaction occurs within -another transaction (even one that is started manually using BEGIN) it -is a no-op. -

      - -

      -The transaction command can be used to group together several SQLite -commands in a safe way. You can always start transactions manually using -BEGIN, of -course. But if an error occurs so that the COMMIT or ROLLBACK are never -run, then the database will remain locked indefinitely. Also, BEGIN -does not nest, so you have to make sure no other transactions are active -before starting a new one. The "transaction" method takes care of -all of these details automatically. -

      - -

      -The syntax looks like this: -

      - -
      -dbcmd  transaction  ?transaction-type? -  SCRIPT, -
      - - -

      -The transaction-type can be one of deferred, -exclusive or immediate. The default is deferred. -

      -} - -############################################################################## -METHOD cache { - -

      -The "eval" method described above keeps a cache of -prepared statements -for recently evaluated SQL commands. -The "cache" method is used to control this cache. -The first form of this command is:

      - -
      -dbcmd  cache size  N -
      - -

      This sets the maximum number of statements that can be cached. -The upper limit is 100. The default is 10. If you set the cache size -to 0, no caching is done.

      - -

      The second form of the command is this:

      - - -
      -dbcmd  cache flush -
      - -

      The cache-flush method -finalizes -all prepared statements currently -in the cache.

      - -} - -############################################################################## -METHOD complete { - -

      -The "complete" method takes a string of supposed SQL as its only argument. -It returns TRUE if the string is a complete statement of SQL and FALSE if -there is more to be entered.

      - -

      The "complete" method is useful when building interactive applications -in order to know when the user has finished entering a line of SQL code. -This is really just an interface to the -sqlite3_complete() C -function. -} - -############################################################################## -METHOD copy { - -

      -The "copy" method copies data from a file into a table. -It returns the number of rows processed successfully from the file. -The syntax of the copy method looks like this:

      - -
      -dbcmd  copy  conflict-algorithm -  table-name   file-name  -    ?column-separator ? -  ?null-indicator? -
      - -

Conflict-algorithm must be one of the SQLite conflict algorithms for -the INSERT statement: rollback, abort, -fail, ignore, or replace. See the SQLite Language -section for ON CONFLICT for more information. -The conflict-algorithm must be specified in lower case. -

      - -

Table-name must already exist as a table. File-name must exist, and -each row must contain the same number of columns as defined in the table. -If a line in the file contains more or less than the number of columns defined, -the copy method rolls back any inserts, and returns an error.

      - -

      Column-separator is an optional column separator string. The default is -the ASCII tab character \t.

      - -

Null-indicator is an optional string that indicates a column value is null. -The default is an empty string. Note that column-separator and -null-indicator are optional positional arguments; if null-indicator -is specified, a column-separator argument must be specified and -precede the null-indicator argument.

      - -

      The copy method implements similar functionality to the .import -SQLite shell command. -The SQLite 2.x COPY statement -(using the PostgreSQL COPY file format) -can be implemented with this method as:

      - -
      -dbcmd  copy  $conflictalgo -  $tablename   $filename  -    \t  -  \\N -
      - -} - -############################################################################## -METHOD timeout { - -

The "timeout" method is used to control how long the SQLite library -will wait for locks to clear before giving up on a database transaction. -The default timeout is 0 milliseconds. (In other words, the default behavior -is not to wait at all.)

      - -

The SQLite database allows multiple simultaneous -readers or a single writer but not both. If any process is writing to -the database no other process is allowed to read or write. If any process -is reading the database other processes are allowed to read but not write. -The entire database shares a single lock.

      - -

      When SQLite tries to open a database and finds that it is locked, it -can optionally delay for a short while and try to open the file again. -This process repeats until the query times out and SQLite returns a -failure. The timeout is adjustable. It is set to 0 by default so that -if the database is locked, the SQL statement fails immediately. But you -can use the "timeout" method to change the timeout value to a positive -number. For example:

      - -
      db1 timeout 2000
      - -

      The argument to the timeout method is the maximum number of milliseconds -to wait for the lock to clear. So in the example above, the maximum delay -would be 2 seconds.

      -} - -############################################################################## -METHOD busy { - -

      The "busy" method, like "timeout", only comes into play when the -database is locked. But the "busy" method gives the programmer much more -control over what action to take. The "busy" method specifies a callback -Tcl procedure that is invoked whenever SQLite tries to open a locked -database. This callback can do whatever is desired. Presumably, the -callback will do some other useful work for a short while (such as service -GUI events) then return -so that the lock can be tried again. The callback procedure should -return "0" if it wants SQLite to try again to open the database and -should return "1" if it wants SQLite to abandon the current operation. -} - -############################################################################## -METHOD exists { - -

      The "exists" method is similar to "onecolumn" and "eval" in that -it executes SQL statements. The difference is that the "exists" method -always returns a boolean value which is TRUE if a query in the SQL -statement it executes returns one or more rows and FALSE if the SQL -returns an empty set.

      - -

The "exists" method is often used to test for the existence of -rows in a table. For example:

      - -
      -if {[db exists {SELECT 1 FROM table1 WHERE user=$user}]} {
      -   # Processing if $user exists
      -} else {
      -   # Processing if $user does not exist
      -} -
      -} - - -############################################################################## -METHOD last_insert_rowid { - -

      The "last_insert_rowid" method returns an integer which is the ROWID -of the most recently inserted database row.

      -} - -############################################################################## -METHOD function { - -

      The "function" method registers new SQL functions with the SQLite engine. -The arguments are the name of the new SQL function and a TCL command that -implements that function. Arguments to the function are appended to the -TCL command before it is invoked.

      - -

-The following example creates a new SQL function named "hex" that converts -its numeric argument into a hexadecimal encoded string: -

      - -
      -db function hex {format 0x%X} -
      - -} - -############################################################################## -METHOD nullvalue { - -

      -The "nullvalue" method changes the representation for NULL returned -as result of the "eval" method.

      - -
      -db1 nullvalue NULL -
      - -

The "nullvalue" method is useful to distinguish between NULL and empty -column values as Tcl lacks a NULL representation. The default -representation for NULL values is an empty string.

      -} - - - -############################################################################## -METHOD onecolumn { - -

      The "onecolumn" method works like -"eval" in that it evaluates the -SQL query statement given as its argument. The difference is that -"onecolumn" returns a single element which is the first column of the -first row of the query result.

      - -

      This is a convenience method. It saves the user from having to -do a "[lindex ... 0]" on the results of an "eval" -in order to extract a single column result.

      -} - -############################################################################## -METHOD changes { - -

      The "changes" method returns an integer which is the number of rows -in the database that were inserted, deleted, and/or modified by the most -recent "eval" method.

      -} - -############################################################################## -METHOD total_changes { - -

      The "total_changes" method returns an integer which is the number of rows -in the database that were inserted, deleted, and/or modified since the -current database connection was first opened.

      -} - -############################################################################## -METHOD authorizer { - -

      The "authorizer" method provides access to the -sqlite3_set_authorizer -C/C++ interface. The argument to authorizer is the name of a procedure that -is called when SQL statements are being compiled in order to authorize -certain operations. The callback procedure takes 5 arguments which describe -the operation being coded. If the callback returns the text string -"SQLITE_OK", then the operation is allowed. If it returns "SQLITE_IGNORE", -then the operation is silently disabled. If the return is "SQLITE_DENY" -then the compilation fails with an error. -

      - -

      If the argument is an empty string then the authorizer is disabled. -If the argument is omitted, then the current authorizer is returned.

      -} - -############################################################################## -METHOD progress { - -

      This method registers a callback that is invoked periodically during -query processing. There are two arguments: the number of SQLite virtual -machine opcodes between invocations, and the TCL command to invoke. -Setting the progress callback to an empty string disables it.

      - -

      The progress callback can be used to display the status of a lengthy -query or to process GUI events during a lengthy query.

      -} - - -############################################################################## -METHOD collate { - -

      This method registers new text collating sequences. There are -two arguments: the name of the collating sequence and the name of a -TCL procedure that implements a comparison function for the collating -sequence. -

      - -

      For example, the following code implements a collating sequence called -"NOCASE" that sorts in text order without regard to case: -

      - -
      -proc nocase_compare {a b} {
      -    return [string compare [string tolower $a] [string tolower $b]]
      -}
      -db collate NOCASE nocase_compare
      -
      -} - -############################################################################## -METHOD collation_needed { - -

      This method registers a callback routine that is invoked when the SQLite -engine needs a particular collating sequence but does not have that -collating sequence registered. The callback can register the collating -sequence. The callback is invoked with a single parameter which is the -name of the needed collating sequence.

      -} - -############################################################################## -METHOD commit_hook { - -

      This method registers a callback routine that is invoked just before -SQLite tries to commit changes to a database. If the callback throws -an exception or returns a non-zero result, then the transaction rolls back -rather than commit.

      -} - -############################################################################## -METHOD rollback_hook { - -

      This method registers a callback routine that is invoked just before -SQLite tries to do a rollback. The script argument is run without change.

      -} - -############################################################################## -METHOD update_hook { - -

      This method registers a callback routine that is invoked just before -each row is modified by an UPDATE, INSERT, or DELETE statement. Four -arguments are appended to the callback before it is invoked:

      - -
        -
      • The keyword "INSERT", "UPDATE", or "DELETE", as appropriate
      • -
      • The name of the database which is being changed
      • -
      • The table that is being changed
      • -
      • The rowid of the row in the table being changed
      • -
      -} - -############################################################################## -METHOD incrblob { - -

      This method opens a TCL channel that can be used to read or write -into a preexisting BLOB in the database. The syntax is like this:

      - -
      -dbcmd  incrblob  ?-readonly?? -  ?DB?  TABLE  COLUMN  ROWID -
      - -

-The command returns a new TCL channel for reading or writing to the BLOB. -The channel is opened using the underlying -sqlite3_blob_open() C-language -interface. Close the channel using the close command of TCL. -

      -} - -############################################################################## -METHOD errorcode { - -

      This method returns the numeric error code that resulted from the most -recent SQLite operation.

      -} - -############################################################################## -METHOD trace { - -

      The "trace" method registers a callback that is invoked as each SQL -statement is compiled. The text of the SQL is appended as a single string -to the command before it is invoked. This can be used (for example) to -keep a log of all SQL operations that an application performs. -

      -} - - -footer $rcsid DELETED vdbe.tcl Index: vdbe.tcl ================================================================== --- vdbe.tcl +++ /dev/null @@ -1,1988 +0,0 @@ -# -# Run this Tcl script to generate the vdbe.html file. -# -set rcsid {$Id: vdbe.tcl,v 1.14 2005/03/12 15:55:11 drh Exp $} -source common.tcl -header {The Virtual Database Engine of SQLite} -puts { -

      The Virtual Database Engine of SQLite

      - -
      -This document describes the virtual machine used in SQLite version 2.8.0. -The virtual machine in SQLite version 3.0 and 3.1 is very similar in -concept but many of the opcodes have changed and the algorithms are -somewhat different. Use this document as a rough guide to the idea -behind the virtual machine in SQLite version 3, not as a reference on -how the virtual machine works. -
      -} - -puts { -

      If you want to know how the SQLite library works internally, -you need to begin with a solid understanding of the Virtual Database -Engine or VDBE. The VDBE occurs right in the middle of the -processing stream (see the architecture diagram) -and so it seems to touch most parts of the library. Even -parts of the code that do not directly interact with the VDBE -are usually in a supporting role. The VDBE really is the heart of -SQLite.

      - -

This article is a brief introduction to how the VDBE -works and in particular how the various VDBE instructions -(documented here) work together -to do useful things with the database. The style is tutorial, -beginning with simple tasks and working toward solving more -complex problems. Along the way we will visit most -submodules in the SQLite library. After completing this tutorial, -you should have a pretty good understanding of how SQLite works -and will be ready to begin studying the actual source code.

      - -

      Preliminaries

      - -

      The VDBE implements a virtual computer that runs a program in -its virtual machine language. The goal of each program is to -interrogate or change the database. Toward this end, the machine -language that the VDBE implements is specifically designed to -search, read, and modify databases.

      - -

      Each instruction of the VDBE language contains an opcode and -three operands labeled P1, P2, and P3. Operand P1 is an arbitrary -integer. P2 is a non-negative integer. P3 is a pointer to a data -structure or null-terminated string, possibly null. Only a few VDBE -instructions use all three operands. Many instructions use only -one or two operands. A significant number of instructions use -no operands at all but instead take their data and store their results -on the execution stack. The details of what each instruction -does and which operands it uses are described in the separate -opcode description document.

      - -

      A VDBE program begins -execution on instruction 0 and continues with successive instructions -until it either (1) encounters a fatal error, (2) executes a -Halt instruction, or (3) advances the program counter past the -last instruction of the program. When the VDBE completes execution, -all open database cursors are closed, all memory is freed, and -everything is popped from the stack. -So there are never any worries about memory leaks or -undeallocated resources.

      - -

      If you have done any assembly language programming or have -worked with any kind of abstract machine before, all of these -details should be familiar to you. So let's jump right in and -start looking as some code.

      - - -

      Inserting Records Into The Database

      - -

      We begin with a problem that can be solved using a VDBE program -that is only a few instructions long. Suppose we have an SQL -table that was created like this:

      - -
      -CREATE TABLE examp(one text, two int);
      -
      - -

      In words, we have a database table named "examp" that has two -columns of data named "one" and "two". Now suppose we want to insert a single -record into this table. Like this:

      - -
      -INSERT INTO examp VALUES('Hello, World!',99);
      -
      - -

      We can see the VDBE program that SQLite uses to implement this -INSERT using the sqlite command-line utility. First start -up sqlite on a new, empty database, then create the table. -Next change the output format of sqlite to a form that -is designed to work with VDBE program dumps by entering the -".explain" command. -Finally, enter the INSERT statement shown above, but precede the -INSERT with the special keyword "EXPLAIN". The EXPLAIN keyword -will cause sqlite to print the VDBE program rather than -execute it. We have:

      -} -proc Code {body} { - puts {
      } - regsub -all {&} [string trim $body] {\&} body - regsub -all {>} $body {\>} body - regsub -all {<} $body {\<} body - regsub -all {\(\(\(} $body {} body - regsub -all {\)\)\)} $body {} body - regsub -all { } $body {\ } body - regsub -all \n $body
      \n body - puts $body - puts {
      } -} - -Code { -$ (((sqlite test_database_1))) -sqlite> (((CREATE TABLE examp(one text, two int);))) -sqlite> (((.explain))) -sqlite> (((EXPLAIN INSERT INTO examp VALUES('Hello, World!',99);))) -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 0 0 -1 VerifyCookie 0 81 -2 Transaction 1 0 -3 Integer 0 0 -4 OpenWrite 0 3 examp -5 NewRecno 0 0 -6 String 0 0 Hello, World! -7 Integer 99 0 99 -8 MakeRecord 2 0 -9 PutIntKey 0 1 -10 Close 0 0 -11 Commit 0 0 -12 Halt 0 0 -} - -puts {

      As you can see above, our simple insert statement is -implemented in 12 instructions. The first 3 and last 2 instructions are -a standard prologue and epilogue, so the real work is done in the middle -7 instructions. There are no jumps, so the program executes once through -from top to bottom. Let's now look at each instruction in detail.

      -} - -Code { -0 Transaction 0 0 -1 VerifyCookie 0 81 -2 Transaction 1 0 -} -puts { -

      The instruction Transaction -begins a transaction. The transaction ends when a Commit or Rollback -opcode is encountered. P1 is the index of the database file on which -the transaction is started. Index 0 is the main database file. A write -lock is obtained on the database file when a transaction is started. -No other process can read or write the file while the transaction is -underway. Starting a transaction also creates a rollback journal. A -transaction must be started before any changes can be made to the -database.

      - -

      The instruction VerifyCookie -checks cookie 0 (the database schema version) to make sure it is equal -to P2 (the value obtained when the database schema was last read). -P1 is the database number (0 for the main database). This is done to -make sure the database schema hasn't been changed by another thread, in -which case it has to be reread.

      - -

      The second Transaction -instruction begins a transaction and starts a rollback journal for -database 1, the database used for temporary tables.

      -} - -proc stack args { - puts "
      " - foreach elem $args { - puts "" - } - puts "
      $elem
      " -} - -Code { -3 Integer 0 0 -4 OpenWrite 0 3 examp -} -puts { -

      The instruction Integer pushes -the integer value P1 (0) onto the stack. Here 0 is the number of the -database to use in the following OpenWrite instruction. If P3 is not -NULL then it is a string representation of the same integer. Afterwards -the stack looks like this:

      -} -stack {(integer) 0} - -puts { -

      The instruction OpenWrite opens -a new read/write cursor with handle P1 (0 in this case) on table "examp", -whose root page is P2 (3, in this database file). Cursor handles can be -any non-negative integer. But the VDBE allocates cursors in an array -with the size of the array being one more than the largest cursor. So -to conserve memory, it is best to use handles beginning with zero and -working upward consecutively. Here P3 ("examp") is the name of the -table being opened, but this is unused, and only generated to make the -code easier to read. This instruction pops the database number to use -(0, the main database) from the top of the stack, so afterwards the -stack is empty again.

      -} - -Code { -5 NewRecno 0 0 -} -puts { -

      The instruction NewRecno creates -a new integer record number for the table pointed to by cursor P1. The -record number is one not currently used as a key in the table. The new -record number is pushed onto the stack. Afterwards the stack looks like -this:

      -} -stack {(integer) new record key} - -Code { -6 String 0 0 Hello, World! -} -puts { -

      The instruction String pushes its -P3 operand onto the stack. Afterwards the stack looks like this:

      -} -stack {(string) "Hello, World!"} \ - {(integer) new record key} - -Code { -7 Integer 99 0 99 -} -puts { -

      The instruction Integer pushes -its P1 operand (99) onto the stack. Afterwards the stack looks like -this:

      -} -stack {(integer) 99} \ - {(string) "Hello, World!"} \ - {(integer) new record key} - -Code { -8 MakeRecord 2 0 -} -puts { -

      The instruction MakeRecord pops -the top P1 elements off the stack (2 in this case) and converts them into -the binary format used for storing records in a database file. -(See the file format description for -details.) The new record generated by the MakeRecord instruction is -pushed back onto the stack. Afterwards the stack looks like this:

      - -} -stack {(record) "Hello, World!", 99} \ - {(integer) new record key} - -Code { -9 PutIntKey 0 1 -} -puts { -

      The instruction PutIntKey uses -the top 2 stack entries to write an entry into the table pointed to by -cursor P1. A new entry is created if it doesn't already exist or the -data for an existing entry is overwritten. The record data is the top -stack entry, and the key is the next entry down. The stack is popped -twice by this instruction. Because operand P2 is 1 the row change count -is incremented and the rowid is stored for subsequent return by the -sqlite_last_insert_rowid() function. If P2 is 0 the row change count is -unmodified. This instruction is where the insert actually occurs.

      -} - -Code { -10 Close 0 0 -} -puts { -

      The instruction Close closes a -cursor previously opened as P1 (0, the only open cursor). If P1 is not -currently open, this instruction is a no-op.

      -} - -Code { -11 Commit 0 0 -} -puts { -

      The instruction Commit causes all -modifications to the database that have been made since the last -Transaction to actually take effect. No additional modifications are -allowed until another transaction is started. The Commit instruction -deletes the journal file and releases the write lock on the database. -A read lock continues to be held if there are still cursors open.

      -} - -Code { -12 Halt 0 0 -} -puts { -

      The instruction Halt causes the VDBE -engine to exit immediately. All open cursors, Lists, Sorts, etc are -closed automatically. P1 is the result code returned by sqlite_exec(). -For a normal halt, this should be SQLITE_OK (0). For errors, it can be -some other value. The operand P2 is only used when there is an error. -There is an implied "Halt 0 0 0" instruction at the end of every -program, which the VDBE appends when it prepares a program to run.

      - - - -

      Tracing VDBE Program Execution

      - -

      If the SQLite library is compiled without the NDEBUG preprocessor -macro, then the PRAGMA vdbe_trace - causes the VDBE to trace the execution of programs. Though this -feature was originally intended for testing and debugging, it can also -be useful in learning about how the VDBE operates. -Use "PRAGMA vdbe_trace=ON;" to turn tracing on and -"PRAGMA vdbe_trace=OFF" to turn tracing back off. -Like this:

      -} - -Code { -sqlite> (((PRAGMA vdbe_trace=ON;))) - 0 Halt 0 0 -sqlite> (((INSERT INTO examp VALUES('Hello, World!',99);))) - 0 Transaction 0 0 - 1 VerifyCookie 0 81 - 2 Transaction 1 0 - 3 Integer 0 0 -Stack: i:0 - 4 OpenWrite 0 3 examp - 5 NewRecno 0 0 -Stack: i:2 - 6 String 0 0 Hello, World! -Stack: t[Hello,.World!] i:2 - 7 Integer 99 0 99 -Stack: si:99 t[Hello,.World!] i:2 - 8 MakeRecord 2 0 -Stack: s[...Hello,.World!.99] i:2 - 9 PutIntKey 0 1 - 10 Close 0 0 - 11 Commit 0 0 - 12 Halt 0 0 -} - -puts { -

      With tracing mode on, the VDBE prints each instruction prior -to executing it. After the instruction is executed, the top few -entries in the stack are displayed. The stack display is omitted -if the stack is empty.

      - -

      On the stack display, most entries are shown with a prefix -that tells the datatype of that stack entry. Integers begin -with "i:". Floating point values begin with "r:". -(The "r" stands for "real-number".) Strings begin with either -"s:", "t:", "e:" or "z:". -The difference among the string prefixes is caused by how their -memory is allocated. The z: strings are stored in memory obtained -from malloc(). The t: strings are statically allocated. -The e: strings are ephemeral. All other strings have the s: prefix. -This doesn't make any difference to you, -the observer, but it is vitally important to the VDBE since the -z: strings need to be passed to free() when they are -popped to avoid a memory leak. Note that only the first 10 -characters of string values are displayed and that binary -values (such as the result of the MakeRecord instruction) are -treated as strings. The only other datatype that can be stored -on the VDBE stack is a NULL, which is display without prefix -as simply "NULL". If an integer has been placed on the -stack as both an integer and a string, its prefix is "si:". - - - -

      Simple Queries

      - -

      At this point, you should understand the basics of how the VDBE -writes to a database. Now let's look at how it does queries. -We will use the following simple SELECT statement as our example:

      - -
      -SELECT * FROM examp;
      -
      - -

      The VDBE program generated for this SQL statement is as follows:

      -} - -Code { -sqlite> (((EXPLAIN SELECT * FROM examp;))) -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 81 -5 Rewind 0 10 -6 Column 0 0 -7 Column 0 1 -8 Callback 2 0 -9 Next 0 6 -10 Close 0 0 -11 Halt 0 0 -} - -puts { -

      Before we begin looking at this problem, let's briefly review -how queries work in SQLite so that we will know what we are trying -to accomplish. For each row in the result of a query, -SQLite will invoke a callback function with the following -prototype:

      - -
      -int Callback(void *pUserData, int nColumn, char *azData[], char *azColumnName[]);
      -
      - -

      The SQLite library supplies the VDBE with a pointer to the callback function -and the pUserData pointer. (Both the callback and the user data were -originally passed in as arguments to the sqlite_exec() API function.) -The job of the VDBE is to -come up with values for nColumn, azData[], -and azColumnName[]. -nColumn is the number of columns in the results, of course. -azColumnName[] is an array of strings where each string is the name -of one of the result columns. azData[] is an array of strings holding -the actual data.

      -} - -Code { -0 ColumnName 0 0 one -1 ColumnName 1 0 two -} -puts { -

      The first two instructions in the VDBE program for our query are -concerned with setting up values for azColumn. -The ColumnName instructions tell -the VDBE what values to fill in for each element of the azColumnName[] -array. Every query will begin with one ColumnName instruction for each -column in the result, and there will be a matching Column instruction for -each one later in the query. -

      -} - -Code { -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 81 -} -puts { -

      Instructions 2 and 3 open a read cursor on the database table that is -to be queried. This works the same as the OpenWrite instruction in the -INSERT example except that the cursor is opened for reading this time -instead of for writing. Instruction 4 verifies the database schema as -in the INSERT example.

      -} - -Code { -5 Rewind 0 10 -} -puts { -

      The Rewind instruction initializes -a loop that iterates over the "examp" table. It rewinds the cursor P1 -to the first entry in its table. This is required by the Column and -Next instructions, which use the cursor to iterate through the table. -If the table is empty, then jump to P2 (10), which is the instruction just -past the loop. If the table is not empty, fall through to the following -instruction at 6, which is the beginning of the loop body.

      -} - -Code { -6 Column 0 0 -7 Column 0 1 -8 Callback 2 0 -} -puts { -

      The instructions 6 through 8 form the body of the loop that will -execute once for each record in the database file. - -The Column instructions at addresses 6 -and 7 each take the P2-th column from the P1-th cursor and push it onto -the stack. In this example, the first Column instruction is pushing the -value for the column "one" onto the stack and the second Column -instruction is pushing the value for column "two". - -The Callback instruction at address 8 -invokes the callback() function. The P1 operand to Callback becomes the -value for nColumn. The Callback instruction pops P1 values from -the stack and uses them to fill the azData[] array.

      -} - -Code { -9 Next 0 6 -} -puts { -

      The instruction at address 9 implements the branching part of the -loop. Together with the Rewind at address 5 it forms the loop logic. -This is a key concept that you should pay close attention to. -The Next instruction advances the cursor -P1 to the next record. If the cursor advance was successful, then jump -immediately to P2 (6, the beginning of the loop body). If the cursor -was at the end, then fall through to the following instruction, which -ends the loop.

      -} - -Code { -10 Close 0 0 -11 Halt 0 0 -} -puts { -

      The Close instruction at the end of the program closes the -cursor that points into the table "examp". It is not really necessary -to call Close here since all cursors will be automatically closed -by the VDBE when the program halts. But we needed an instruction -for the Rewind to jump to so we might as well go ahead and have that -instruction do something useful. -The Halt instruction ends the VDBE program.

      - -

      Note that the program for this SELECT query didn't contain the -Transaction and Commit instructions used in the INSERT example. Because -the SELECT is a read operation that doesn't alter the database, it -doesn't require a transaction.

      -} - - -puts { - -

      A Slightly More Complex Query

      - -

      The key points of the previous example were the use of the Callback -instruction to invoke the callback function, and the use of the Next -instruction to implement a loop over all records of the database file. -This example attempts to drive home those ideas by demonstrating a -slightly more complex query that involves more columns of -output, some of which are computed values, and a WHERE clause that -limits which records actually make it to the callback function. -Consider this query:

      - -
      -SELECT one, two, one || two AS 'both'
      -FROM examp
      -WHERE one LIKE 'H%'
      -
      - -

      This query is perhaps a bit contrived, but it does serve to -illustrate our points. The result will have three columns with -names "one", "two", and "both". The first two columns are direct -copies of the two columns in the table and the third result -column is a string formed by concatenating the first and -second columns of the table. -Finally, the -WHERE clause says that we will only choose rows for the -results where the "one" column begins with an "H". -Here is what the VDBE program looks like for this query:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 ColumnName 2 0 both -3 Integer 0 0 -4 OpenRead 0 3 examp -5 VerifyCookie 0 81 -6 Rewind 0 18 -7 String 0 0 H% -8 Column 0 0 -9 Function 2 0 ptr(0x7f1ac0) -10 IfNot 1 17 -11 Column 0 0 -12 Column 0 1 -13 Column 0 0 -14 Column 0 1 -15 Concat 2 0 -16 Callback 3 0 -17 Next 0 7 -18 Close 0 0 -19 Halt 0 0 -} - -puts { -

      Except for the WHERE clause, the structure of the program for -this example is very much like the prior example, just with an -extra column. There are now 3 columns, instead of 2 as before, -and there are three ColumnName instructions. -A cursor is opened using the OpenRead instruction, just like in the -prior example. The Rewind instruction at address 6 and the -Next at address 17 form a loop over all records of the table. -The Close instruction at the end is there to give the -Rewind instruction something to jump to when it is done. All of -this is just like in the first query demonstration.

      - -

      The Callback instruction in this example has to generate -data for three result columns instead of two, but is otherwise -the same as in the first query. When the Callback instruction -is invoked, the left-most column of the result should be -the lowest in the stack and the right-most result column should -be the top of the stack. We can see the stack being set up -this way at addresses 11 through 15. The Column instructions at -11 and 12 push the values for the first two columns in the result. -The two Column instructions at 13 and 14 pull in the values needed -to compute the third result column and the Concat instruction at -15 joins them together into a single entry on the stack.

      - -

      The only thing that is really new about the current example -is the WHERE clause which is implemented by instructions at -addresses 7 through 10. Instructions at address 7 and 8 push -onto the stack the value of the "one" column from the table -and the literal string "H%". -The Function instruction at address 9 -pops these two values from the stack and pushes the result of the LIKE() -function back onto the stack. -The IfNot instruction pops the top stack -value and causes an immediate jump forward to the Next instruction if the -top value was false (not not like the literal string "H%"). -Taking this jump effectively skips the callback, which is the whole point -of the WHERE clause. If the result -of the comparison is true, the jump is not taken and control -falls through to the Callback instruction below.

      - -

      Notice how the LIKE operator is implemented. It is a user-defined -function in SQLite, so the address of its function definition is -specified in P3. The operand P1 is the number of function arguments for -it to take from the stack. In this case the LIKE() function takes 2 -arguments. The arguments are taken off the stack in reverse order -(right-to-left), so the pattern to match is the top stack element, and -the next element is the data to compare. The return value is pushed -onto the stack.

      - - - -

      A Template For SELECT Programs

      - -

      The first two query examples illustrate a kind of template that -every SELECT program will follow. Basically, we have:

      - -

      -

        -
      1. Initialize the azColumnName[] array for the callback.
      2. -
      3. Open a cursor into the table to be queried.
      4. -
      5. For each record in the table, do: -
          -
        1. If the WHERE clause evaluates to FALSE, then skip the steps that - follow and continue to the next record.
        2. -
        3. Compute all columns for the current row of the result.
        4. -
        5. Invoke the callback function for the current row of the result.
        6. -
        -
      6. Close the cursor.
      7. -
      -

      - -

      This template will be expanded considerably as we consider -additional complications such as joins, compound selects, using -indices to speed the search, sorting, and aggregate functions -with and without GROUP BY and HAVING clauses. -But the same basic ideas will continue to apply.

      - -

      UPDATE And DELETE Statements

      - -

      The UPDATE and DELETE statements are coded using a template -that is very similar to the SELECT statement template. The main -difference, of course, is that the end action is to modify the -database rather than invoke a callback function. Because it modifies -the database it will also use transactions. Let's begin -by looking at a DELETE statement:

      - -
      -DELETE FROM examp WHERE two<50;
      -
      - -

      This DELETE statement will remove every record from the "examp" -table where the "two" column is less than 50. -The code generated to do this is as follows:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenRead 0 3 examp -5 Rewind 0 12 -6 Column 0 1 -7 Integer 50 0 50 -8 Ge 1 11 -9 Recno 0 0 -10 ListWrite 0 0 -11 Next 0 6 -12 Close 0 0 -13 ListRewind 0 0 -14 Integer 0 0 -15 OpenWrite 0 3 -16 ListRead 0 20 -17 NotExists 0 19 -18 Delete 0 1 -19 Goto 0 16 -20 ListReset 0 0 -21 Close 0 0 -22 Commit 0 0 -23 Halt 0 0 -} - -puts { -

      Here is what the program must do. First it has to locate all of -the records in the table "examp" that are to be deleted. This is -done using a loop very much like the loop used in the SELECT examples -above. Once all records have been located, then we can go back through -and delete them one by one. Note that we cannot delete each record -as soon as we find it. We have to locate all records first, then -go back and delete them. This is because the SQLite database -backend might change the scan order after a delete operation. -And if the scan -order changes in the middle of the scan, some records might be -visited more than once and other records might not be visited at all.

      - -

      So the implementation of DELETE is really in two loops. The first loop -(instructions 5 through 11) locates the records that are to be deleted -and saves their keys onto a temporary list, and the second loop -(instructions 16 through 19) uses the key list to delete the records one -by one.

      -} - - -Code { -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenRead 0 3 examp -} -puts { -

      Instructions 0 through 4 are as in the INSERT example. They start -transactions for the main and temporary databases, verify the database -schema for the main database, and open a read cursor on the table -"examp". Notice that the cursor is opened for reading, not writing. At -this stage of the program we are only going to be scanning the table, -not changing it. We will reopen the same table for writing later, at -instruction 15.

      -} - -Code { -5 Rewind 0 12 -} -puts { -

      As in the SELECT example, the Rewind -instruction rewinds the cursor to the beginning of the table, readying -it for use in the loop body.

      -} - -Code { -6 Column 0 1 -7 Integer 50 0 50 -8 Ge 1 11 -} -puts { -

      The WHERE clause is implemented by instructions 6 through 8. -The job of the where clause is to skip the ListWrite if the WHERE -condition is false. To this end, it jumps ahead to the Next instruction -if the "two" column (extracted by the Column instruction) is -greater than or equal to 50.

      - -

      As before, the Column instruction uses cursor P1 and pushes the data -record in column P2 (1, column "two") onto the stack. The Integer -instruction pushes the value 50 onto the top of the stack. After these -two instructions the stack looks like:

      -} -stack {(integer) 50} \ - {(record) current record for column "two" } - -puts { -

      The Ge operator compares the top two -elements on the stack, pops them, and then branches based on the result -of the comparison. If the second element is >= the top element, then -jump to address P2 (the Next instruction at the end of the loop). -Because P1 is true, if either operand is NULL (and thus the result is -NULL) then take the jump. If we don't jump, just advance to the next -instruction.

      -} - -Code { -9 Recno 0 0 -10 ListWrite 0 0 -} -puts { -

      The Recno instruction pushes onto the -stack an integer which is the first 4 bytes of the key to the current -entry in a sequential scan of the table pointed to by cursor P1. -The ListWrite instruction writes the -integer on the top of the stack into a temporary storage list and pops -the top element. This is the important work of this loop, to store the -keys of the records to be deleted so we can delete them in the second -loop. After this ListWrite instruction the stack is empty again.

      -} - -Code { -11 Next 0 6 -12 Close 0 0 -} -puts { -

      The Next instruction increments the cursor to point to the next -element in the table pointed to by cursor P1, and if it was successful -branches to P2 (6, the beginning of the loop body). The Close -instruction closes cursor P1. It doesn't affect the temporary storage -list because it isn't associated with cursor P1; it is instead a global -working list (which can be saved with ListPush).

      -} - -Code { -13 ListRewind 0 0 -} -puts { -

      The ListRewind instruction -rewinds the temporary storage list to the beginning. This prepares it -for use in the second loop.

      -} - -Code { -14 Integer 0 0 -15 OpenWrite 0 3 -} -puts { -

      As in the INSERT example, we push the database number P1 (0, the main -database) onto the stack and use OpenWrite to open the cursor P1 on table -P2 (base page 3, "examp") for modification.

      -} - -Code { -16 ListRead 0 20 -17 NotExists 0 19 -18 Delete 0 1 -19 Goto 0 16 -} -puts { -

      This loop does the actual deleting. It is organized differently from -the one in the UPDATE example. The ListRead instruction plays the role -that the Next did in the INSERT loop, but because it jumps to P2 on -failure, and Next jumps on success, we put it at the start of the loop -instead of the end. This means that we have to put a Goto at the end of -the loop to jump back to the loop test at the beginning. So this -loop has the form of a C while(){...} loop, while the loop in the INSERT -example had the form of a do{...}while() loop. The Delete instruction -fills the role that the callback function did in the preceding examples. -

      -

      The ListRead instruction reads an -element from the temporary storage list and pushes it onto the stack. -If this was successful, it continues to the next instruction. If this -fails because the list is empty, it branches to P2, which is the -instruction just after the loop. Afterwards the stack looks like:

      -} -stack {(integer) key for current record} - -puts { -

      Notice the similarity between the ListRead and Next instructions. -Both operations work according to this rule: -

      -
      -Push the next "thing" onto the stack and fall through OR jump to P2, -depending on whether or not there is a next "thing" to push. -
      -

      One difference between Next and ListRead is their idea of a "thing". -The "things" for the Next instruction are records in a database file. -"Things" for ListRead are integer keys in a list. Another difference -is whether to jump or fall through if there is no next "thing". In this -case, Next falls through, and ListRead jumps. Later on, we will see -other looping instructions (NextIdx and SortNext) that operate using the -same principle.

      - -

      The NotExists instruction pops -the top stack element and uses it as an integer key. If a record with -that key does not exist in table P1, then jump to P2. If a record does -exist, then fall thru to the next instruction. In this case P2 takes -us to the Goto at the end of the loop, which jumps back to the ListRead -at the beginning. This could have been coded to have P2 be 16, the -ListRead at the start of the loop, but the SQLite parser which generated -this code didn't make that optimization.

      -

      The Delete does the work of this -loop; it pops an integer key off the stack (placed there by the -preceding ListRead) and deletes the record of cursor P1 that has that key. -Because P2 is true, the row change counter is incremented.

      -

      The Goto jumps back to the beginning -of the loop. This is the end of the loop.

      -} - -Code { -20 ListReset 0 0 -21 Close 0 0 -22 Commit 0 0 -23 Halt 0 0 -} -puts { -

      This block of instructions cleans up the VDBE program. Three of these -instructions aren't really required, but are generated by the SQLite -parser from its code templates, which are designed to handle more -complicated cases.

      -

      The ListReset instruction empties -the temporary storage list. This list is emptied automatically when the -VDBE program terminates, so it isn't necessary in this case. The Close -instruction closes the cursor P1. Again, this is done by the VDBE -engine when it is finished running this program. The Commit ends the -current transaction successfully, and causes all changes that occurred -in this transaction to be saved to the database. The final Halt is also -unnecessary, since it is added to every VDBE program when it is -prepared to run.

      - - -

      UPDATE statements work very much like DELETE statements except -that instead of deleting the record they replace it with a new one. -Consider this example: -

      - -
      -UPDATE examp SET one= '(' || one || ')' WHERE two < 50;
      -
      - -

      Instead of deleting records where the "two" column is less than -50, this statement just puts the "one" column in parentheses. -The VDBE program to implement this statement follows:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenRead 0 3 examp -5 Rewind 0 12 -6 Column 0 1 -7 Integer 50 0 50 -8 Ge 1 11 -9 Recno 0 0 -10 ListWrite 0 0 -11 Next 0 6 -12 Close 0 0 -13 Integer 0 0 -14 OpenWrite 0 3 -15 ListRewind 0 0 -16 ListRead 0 28 -17 Dup 0 0 -18 NotExists 0 16 -19 String 0 0 ( -20 Column 0 0 -21 Concat 2 0 -22 String 0 0 ) -23 Concat 2 0 -24 Column 0 1 -25 MakeRecord 2 0 -26 PutIntKey 0 1 -27 Goto 0 16 -28 ListReset 0 0 -29 Close 0 0 -30 Commit 0 0 -31 Halt 0 0 -} - -puts { -

      This program is essentially the same as the DELETE program except -that the body of the second loop has been replaced by a sequence of -instructions (at addresses 17 through 26) that update the record rather -than delete it. Most of this instruction sequence should already be -familiar to you, but there are a couple of minor twists so we will go -over it briefly. Also note that the order of some of the instructions -before and after the 2nd loop has changed. This is just the way the -SQLite parser chose to output the code using a different template.

      - -

      As we enter the interior of the second loop (at instruction 17) -the stack contains a single integer which is the key of the -record we want to modify. We are going to need to use this -key twice: once to fetch the old value of the record and -a second time to write back the revised record. So the first instruction -is a Dup to make a duplicate of the key on the top of the stack. The -Dup instruction will duplicate any element of the stack, not just the top -element. You specify which element to duplicate using the -P1 operand. When P1 is 0, the top of the stack is duplicated. -When P1 is 1, the next element down on the stack is duplicated. -And so forth.

      - -

      After duplicating the key, the next instruction, NotExists, -pops the stack once and uses the value popped as a key to -check the existence of a record in the database file. If there is no record -for this key, it jumps back to the ListRead to get another key.

      - -

      Instructions 19 through 25 construct a new database record -that will be used to replace the existing record. This is -the same kind of code that we saw -in the description of INSERT and will not be described further. -After instruction 25 executes, the stack looks like this:

      -} - -stack {(record) new data record} {(integer) key} - -puts { -

      The PutIntKey instruction (also described -during the discussion about INSERT) writes an entry into the -database file whose data is the top of the stack and whose key -is the next on the stack, and then pops the stack twice. The -PutIntKey instruction will overwrite the data of an existing record -with the same key, which is what we want here. Overwriting was not -an issue with INSERT because with INSERT the key was generated -by the NewRecno instruction which is guaranteed to provide a key -that has not been used before.

      -} - -if 0 {

      (By the way, since keys must -all be unique and each key is a 32-bit integer, a single -SQLite database table can have no more than 232 -rows. Actually, the Key instruction starts to become -very inefficient as you approach this upper bound, so it -is best to keep the number of entries below 231 -or so. Surely a couple billion records will be enough for -most applications!)

      -} - -puts { -

      CREATE and DROP

      - -

      Using CREATE or DROP to create or destroy a table or index is -really the same as doing an INSERT or DELETE from the special -"sqlite_master" table, at least from the point of view of the VDBE. -The sqlite_master table is a special table that is automatically -created for every SQLite database. It looks like this:

      - -
      -CREATE TABLE sqlite_master (
      -  type      TEXT,    -- either "table" or "index"
      -  name      TEXT,    -- name of this table or index
      -  tbl_name  TEXT,    -- for indices: name of associated table
      -  sql       TEXT     -- SQL text of the original CREATE statement
      -)
      -
      - -

      Every table (except the "sqlite_master" table itself) -and every named index in an SQLite database has an entry -in the sqlite_master table. You can query this table using -a SELECT statement just like any other table. But you are -not allowed to directly change the table using UPDATE, INSERT, -or DELETE. Changes to sqlite_master have to occur using -the CREATE and DROP commands because SQLite also has to update -some of its internal data structures when tables and indices -are added or destroyed.

      - -

      But from the point of view of the VDBE, a CREATE works -pretty much like an INSERT and a DROP works like a DELETE. -When the SQLite library opens an existing database, -the first thing it does is a SELECT to read the "sql" -columns from all entries of the sqlite_master table. -The "sql" column contains the complete SQL text of the -CREATE statement that originally generated the index or -table. This text is fed back into the SQLite parser -and used to reconstruct the -internal data structures describing the index or table.

      - -

      Using Indexes To Speed Searching

      - -

      In the example queries above, every row of the table being -queried must be loaded off of the disk and examined, even if only -a small percentage of the rows end up in the result. This can -take a long time on a big table. To speed things up, SQLite -can use an index.

      - -

      An SQLite file associates a key with some data. For an SQLite -table, the database file is set up so that the key is an integer -and the data is the information for one row of the table. -Indices in SQLite reverse this arrangement. The index key -is (some of) the information being stored and the index data -is an integer. -To access a table row that has some particular -content, we first look up the content in the index table to find -its integer index, then we use that integer to look up the -complete record in the table.

      - -

      Note that SQLite uses b-trees, which are a sorted data structure, -so indices can be used when the WHERE clause of the SELECT statement -contains tests for equality or inequality. Queries like the following -can use an index if it is available:

      - -
      -SELECT * FROM examp WHERE two==50;
      -SELECT * FROM examp WHERE two<50;
      -SELECT * FROM examp WHERE two IN (50, 100);
      -
      - -

      If there exists an index that maps the "two" column of the "examp" -table into integers, then SQLite will use that index to find the integer -keys of all rows in examp that have a value of 50 for column two, or -all rows that are less than 50, etc. -But the following queries cannot use the index:

      - -
      -SELECT * FROM examp WHERE two%50 == 10;
      -SELECT * FROM examp WHERE two&127 == 3;
      -
      - -

      Note that the SQLite parser will not always generate code to use an -index, even if it is possible to do so. The following queries will not -currently use the index:

      - -
      -SELECT * FROM examp WHERE two+10 == 50;
      -SELECT * FROM examp WHERE two==50 OR two==100;
      -
      - -

      To understand better how indices work, lets first look at how -they are created. Let's go ahead and put an index on the two -column of the examp table. We have:

      - -
      -CREATE INDEX examp_idx1 ON examp(two);
      -
      - -

      The VDBE code generated by the above statement looks like the -following:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenWrite 0 2 -5 NewRecno 0 0 -6 String 0 0 index -7 String 0 0 examp_idx1 -8 String 0 0 examp -9 CreateIndex 0 0 ptr(0x791380) -10 Dup 0 0 -11 Integer 0 0 -12 OpenWrite 1 0 -13 String 0 0 CREATE INDEX examp_idx1 ON examp(tw -14 MakeRecord 5 0 -15 PutIntKey 0 0 -16 Integer 0 0 -17 OpenRead 2 3 examp -18 Rewind 2 24 -19 Recno 2 0 -20 Column 2 1 -21 MakeIdxKey 1 0 n -22 IdxPut 1 0 indexed columns are not unique -23 Next 2 19 -24 Close 2 0 -25 Close 1 0 -26 Integer 333 0 -27 SetCookie 0 0 -28 Close 0 0 -29 Commit 0 0 -30 Halt 0 0 -} - -puts { -

      Remember that every table (except sqlite_master) and every named -index has an entry in the sqlite_master table. Since we are creating -a new index, we have to add a new entry to sqlite_master. This is -handled by instructions 3 through 15. Adding an entry to sqlite_master -works just like any other INSERT statement so we will not say anymore -about it here. In this example, we want to focus on populating the -new index with valid data, which happens on instructions 16 through -23.

      -} - -Code { -16 Integer 0 0 -17 OpenRead 2 3 examp -} -puts { -

      The first thing that happens is that we open the table being -indexed for reading. In order to construct an index for a table, -we have to know what is in that table. The index has already been -opened for writing using cursor 0 by instructions 3 and 4.

      -} - -Code { -18 Rewind 2 24 -19 Recno 2 0 -20 Column 2 1 -21 MakeIdxKey 1 0 n -22 IdxPut 1 0 indexed columns are not unique -23 Next 2 19 -} -puts { -

      Instructions 18 through 23 implement a loop over every row of the -table being indexed. For each table row, we first extract the integer -key for that row using Recno in instruction 19, then get the value of -the "two" column using Column in instruction 20. -The MakeIdxKey instruction at 21 -converts data from the "two" column (which is on the top of the stack) -into a valid index key. For an index on a single column, this is -basically a no-op. But if the P1 operand to MakeIdxKey had been -greater than one multiple entries would have been popped from the stack -and converted into a single index key. -The IdxPut instruction at 22 is what -actually creates the index entry. IdxPut pops two elements from the -stack. The top of the stack is used as a key to fetch an entry from the -index table. Then the integer which was second on stack is added to the -set of integers for that index and the new record is written back to the -database file. Note -that the same index entry can store multiple integers if there -are two or more table entries with the same value for the two -column. -

      - -

      Now let's look at how this index will be used. Consider the -following query:

      - -
      -SELECT * FROM examp WHERE two==50;
      -
      - -

      SQLite generates the following VDBE code to handle this query:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 256 -5 Integer 0 0 -6 OpenRead 1 4 examp_idx1 -7 Integer 50 0 50 -8 MakeKey 1 0 n -9 MemStore 0 0 -10 MoveTo 1 19 -11 MemLoad 0 0 -12 IdxGT 1 19 -13 IdxRecno 1 0 -14 MoveTo 0 0 -15 Column 0 0 -16 Column 0 1 -17 Callback 2 0 -18 Next 1 11 -19 Close 0 0 -20 Close 1 0 -21 Halt 0 0 -} - -puts { -

      The SELECT begins in a familiar fashion. First the column -names are initialized and the table being queried is opened. -Things become different beginning with instructions 5 and 6 where -the index file is also opened. Instructions 7 and 8 make -a key with the value of 50. -The MemStore instruction at 9 stores -the index key in VDBE memory location 0. The VDBE memory is used to -avoid having to fetch a value from deep in the stack, which can be done, -but makes the program harder to generate. The following instruction -MoveTo at address 10 pops the key off -the stack and moves the index cursor to the first row of the index with -that key. This initializes the cursor for use in the following loop.

      - -

      Instructions 11 through 18 implement a loop over all index records -with the key that was fetched by instruction 8. All of the index -records with this key will be contiguous in the index table, so we walk -through them and fetch the corresponding table key from the index. -This table key is then used to move the cursor to that row in the table. -The rest of the loop is the same as the loop for the non-indexed SELECT -query.

      - -

      The loop begins with the MemLoad -instruction at 11 which pushes a copy of the index key back onto the -stack. The instruction IdxGT at 12 -compares the key to the key in the current index record pointed to by -cursor P1. If the index key at the current cursor location is greater -than the index we are looking for, then jump out of the loop.

      - -

      The instruction IdxRecno at 13 -pushes onto the stack the table record number from the index. The -following MoveTo pops it and moves the table cursor to that row. The -next 3 instructions select the column data the same way as in the non- -indexed case. The Column instructions fetch the column data and the -callback function is invoked. The final Next instruction advances the -index cursor, not the table cursor, to the next row, and then branches -back to the start of the loop if there are any index records left.

      - -

      Since the index is used to look up values in the table, -it is important that the index and table be kept consistent. -Now that there is an index on the examp table, we will have -to update that index whenever data is inserted, deleted, or -changed in the examp table. Remember the first example above -where we were able to insert a new row into the "examp" table using -12 VDBE instructions. Now that this table is indexed, 19 -instructions are required. The SQL statement is this:

      - -
      -INSERT INTO examp VALUES('Hello, World!',99);
      -
      - -

      And the generated code looks like this:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 256 -3 Integer 0 0 -4 OpenWrite 0 3 examp -5 Integer 0 0 -6 OpenWrite 1 4 examp_idx1 -7 NewRecno 0 0 -8 String 0 0 Hello, World! -9 Integer 99 0 99 -10 Dup 2 1 -11 Dup 1 1 -12 MakeIdxKey 1 0 n -13 IdxPut 1 0 -14 MakeRecord 2 0 -15 PutIntKey 0 1 -16 Close 0 0 -17 Close 1 0 -18 Commit 0 0 -19 Halt 0 0 -} - -puts { -

      At this point, you should understand the VDBE well enough to -figure out on your own how the above program works. So we will -not discuss it further in this text.

      - -

      Joins

      - -

      In a join, two or more tables are combined to generate a single -result. The result table consists of every possible combination -of rows from the tables being joined. The easiest and most natural -way to implement this is with nested loops.

      - -

      Recall the query template discussed above where there was a -single loop that searched through every record of the table. -In a join we have basically the same thing except that there -are nested loops. For example, to join two tables, the query -template might look something like this:

      - -

      -

        -
      1. Initialize the azColumnName[] array for the callback.
      2. -
      3. Open two cursors, one to each of the two tables being queried.
      4. -
      5. For each record in the first table, do: -
          -
        1. For each record in the second table do: -
            -
          1. If the WHERE clause evaluates to FALSE, then skip the steps that - follow and continue to the next record.
          2. -
          3. Compute all columns for the current row of the result.
          4. -
          5. Invoke the callback function for the current row of the result.
          6. -
        2. -
        -
      6. Close both cursors.
      7. -
      -

      - -

      This template will work, but it is likely to be slow since we -are now dealing with an O(N2) loop. But it often works -out that the WHERE clause can be factored into terms and that one or -more of those terms will involve only columns in the first table. -When this happens, we can factor part of the WHERE clause test out of -the inner loop and gain a lot of efficiency. So a better template -would be something like this:

      - -

      -

        -
      1. Initialize the azColumnName[] array for the callback.
      2. -
      3. Open two cursors, one to each of the two tables being queried.
      4. -
      5. For each record in the first table, do: -
          -
        1. Evaluate terms of the WHERE clause that only involve columns from - the first table. If any term is false (meaning that the whole - WHERE clause must be false) then skip the rest of this loop and - continue to the next record.
        2. -
        3. For each record in the second table do: -
            -
          1. If the WHERE clause evaluates to FALSE, then skip the steps that - follow and continue to the next record.
          2. -
          3. Compute all columns for the current row of the result.
          4. -
          5. Invoke the callback function for the current row of the result.
          6. -
        4. -
        -
      6. Close both cursors.
      7. -
      -

      - -

      Additional speed-up can occur if an index can be used to speed -the search of either of the two loops.

      - -

      SQLite always constructs the loops in the same order as the -tables appear in the FROM clause of the SELECT statement. The -left-most table becomes the outer loop and the right-most table -becomes the inner loop. It is possible, in theory, to reorder -the loops in some circumstances to speed the evaluation of the -join. But SQLite does not attempt this optimization.

      - -

      You can see how SQLite constructs nested loops in the following -example:

      - -
      -CREATE TABLE examp2(three int, four int);
      -SELECT * FROM examp, examp2 WHERE two<50 AND four==two;
      -
      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 examp.one -1 ColumnName 1 0 examp.two -2 ColumnName 2 0 examp2.three -3 ColumnName 3 0 examp2.four -4 Integer 0 0 -5 OpenRead 0 3 examp -6 VerifyCookie 0 909 -7 Integer 0 0 -8 OpenRead 1 5 examp2 -9 Rewind 0 24 -10 Column 0 1 -11 Integer 50 0 50 -12 Ge 1 23 -13 Rewind 1 23 -14 Column 1 1 -15 Column 0 1 -16 Ne 1 22 -17 Column 0 0 -18 Column 0 1 -19 Column 1 0 -20 Column 1 1 -21 Callback 4 0 -22 Next 1 14 -23 Next 0 10 -24 Close 0 0 -25 Close 1 0 -26 Halt 0 0 -} - -puts { -

      The outer loop over table examp is implemented by instructions -7 through 23. The inner loop is instructions 13 through 22. -Notice that the "two<50" term of the WHERE expression involves -only columns from the first table and can be factored out of -the inner loop. SQLite does this and implements the "two<50" -test in instructions 10 through 12. The "four==two" test is -implemented by instructions 14 through 16 in the inner loop.

      - -

      SQLite does not impose any arbitrary limits on the tables in -a join. It also allows a table to be joined with itself.

      - -

      The ORDER BY clause

      - -

      For historical reasons, and for efficiency, all sorting is currently -done in memory.

      - -

      SQLite implements the ORDER BY clause using a special -set of instructions to control an object called a sorter. In the -inner-most loop of the query, where there would normally be -a Callback instruction, instead a record is constructed that -contains both callback parameters and a key. This record -is added to the sorter (in a linked list). After the query loop -finishes, the list of records is sorted and this list is walked. For -each record on the list, the callback is invoked. Finally, the sorter -is closed and memory is deallocated.

      - -

      We can see the process in action in the following query:

      - -
      -SELECT * FROM examp ORDER BY one DESC, two;
      -
      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 909 -5 Rewind 0 14 -6 Column 0 0 -7 Column 0 1 -8 SortMakeRec 2 0 -9 Column 0 0 -10 Column 0 1 -11 SortMakeKey 2 0 D+ -12 SortPut 0 0 -13 Next 0 6 -14 Close 0 0 -15 Sort 0 0 -16 SortNext 0 19 -17 SortCallback 2 0 -18 Goto 0 16 -19 SortReset 0 0 -20 Halt 0 0 -} - -puts { -

      There is only one sorter object, so there are no instructions to open -or close it. It is opened automatically when needed, and it is closed -when the VDBE program halts.

      - -

      The query loop is built from instructions 5 through 13. Instructions -6 through 8 build a record that contains the azData[] values for a single -invocation of the callback. A sort key is generated by instructions -9 through 11. Instruction 12 combines the invocation record and the -sort key into a single entry and puts that entry on the sort list.

      - -

      The P3 argument of instruction 11 is of particular interest. The -sort key is formed by prepending one character from P3 to each string -and concatenating all the strings. The sort comparison function will -look at this character to determine whether the sort order is -ascending or descending, and whether to sort as a string or number. -In this example, the first column should be sorted as a string -in descending order so its prefix is "D" and the second column should be -sorted numerically in ascending order so its prefix is "+". Ascending -string sorting uses "A", and descending numeric sorting uses "-".

      - -

      After the query loop ends, the table being queried is closed at -instruction 14. This is done early in order to allow other processes -or threads to access that table, if desired. The list of records -that was built up inside the query loop is sorted by the instruction -at 15. Instructions 16 through 18 walk through the record list -(which is now in sorted order) and invoke the callback once for -each record. Finally, the sorter is closed at instruction 19.

      - -

      Aggregate Functions And The GROUP BY and HAVING Clauses

      - -

      To compute aggregate functions, the VDBE implements a special -data structure and instructions for controlling that data structure. -The data structure is an unordered set of buckets, where each bucket -has a key and one or more memory locations. Within the query -loop, the GROUP BY clause is used to construct a key and the bucket -with that key is brought into focus. A new bucket is created with -the key if one did not previously exist. Once the bucket is in -focus, the memory locations of the bucket are used to accumulate -the values of the various aggregate functions. After the query -loop terminates, each bucket is visited once to generate a -single row of the results.

      - -

      An example will help to clarify this concept. Consider the -following query:

      - -
      -SELECT three, min(three+four)+avg(four) 
      -FROM examp2
      -GROUP BY three;
      -
      - - -

      The VDBE code generated for this query is as follows:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 three -1 ColumnName 1 0 min(three+four)+avg(four) -2 AggReset 0 3 -3 AggInit 0 1 ptr(0x7903a0) -4 AggInit 0 2 ptr(0x790700) -5 Integer 0 0 -6 OpenRead 0 5 examp2 -7 VerifyCookie 0 909 -8 Rewind 0 23 -9 Column 0 0 -10 MakeKey 1 0 n -11 AggFocus 0 14 -12 Column 0 0 -13 AggSet 0 0 -14 Column 0 0 -15 Column 0 1 -16 Add 0 0 -17 Integer 1 0 -18 AggFunc 0 1 ptr(0x7903a0) -19 Column 0 1 -20 Integer 2 0 -21 AggFunc 0 1 ptr(0x790700) -22 Next 0 9 -23 Close 0 0 -24 AggNext 0 31 -25 AggGet 0 0 -26 AggGet 0 1 -27 AggGet 0 2 -28 Add 0 0 -29 Callback 2 0 -30 Goto 0 24 -31 Noop 0 0 -32 Halt 0 0 -} - -puts { -

      The first instruction of interest is the -AggReset at 2. -The AggReset instruction initializes the set of buckets to be the -empty set and specifies the number of memory slots available in each -bucket as P2. In this example, each bucket will hold 3 memory slots. -It is not obvious, but if you look closely at the rest of the program -you can figure out what each of these slots is intended for.

      - -
      - - - - -
      Memory SlotIntended Use Of This Memory Slot
      0The "three" column -- the key to the bucket
      1The minimum "three+four" value
      2The sum of all "four" values. This is used to compute - "avg(four)".
      - -

      The query loop is implemented by instructions 8 through 22. -The aggregate key specified by the GROUP BY clause is computed -by instructions 9 and 10. Instruction 11 causes the appropriate -bucket to come into focus. If a bucket with the given key does -not already exist, a new bucket is created and control falls -through to instructions 12 and 13 which initialize the bucket. -If the bucket does already exist, then a jump is made to instruction -14. The values of aggregate functions are updated by the instructions -between 11 and 21. Instructions 14 through 18 update memory -slot 1 to hold the next value "min(three+four)". Then the sum of the -"four" column is updated by instructions 19 through 21.

      - -

      After the query loop is finished, the table "examp2" is closed at -instruction 23 so that its lock will be released and it can be -used by other threads or processes. The next step is to loop -over all aggregate buckets and output one row of the result for -each bucket. This is done by the loop at instructions 24 -through 30. The AggNext instruction at 24 brings the next bucket -into focus, or jumps to the end of the loop if all buckets have -been examined already. The 3 columns of the result are fetched from -the aggregator bucket in order at instructions 25 through 27. -Finally, the callback is invoked at instruction 29.

      - -

      In summary then, any query with aggregate functions is implemented -by two loops. The first loop scans the input table and computes -aggregate information into buckets and the second loop scans through -all the buckets to compute the final result.

      - -

      The realization that an aggregate query is really two consecutive -loops makes it much easier to understand the difference between -a WHERE clause and a HAVING clause in an SQL query statement. The -WHERE clause is a restriction on the first loop and the HAVING -clause is a restriction on the second loop. You can see this -by adding both a WHERE and a HAVING clause to our example query:

      - - -
      -SELECT three, min(three+four)+avg(four) 
      -FROM examp2
      -WHERE three>four
      -GROUP BY three
      -HAVING avg(four)<10;
      -
      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 three -1 ColumnName 1 0 min(three+four)+avg(four) -2 AggReset 0 3 -3 AggInit 0 1 ptr(0x7903a0) -4 AggInit 0 2 ptr(0x790700) -5 Integer 0 0 -6 OpenRead 0 5 examp2 -7 VerifyCookie 0 909 -8 Rewind 0 26 -9 Column 0 0 -10 Column 0 1 -11 Le 1 25 -12 Column 0 0 -13 MakeKey 1 0 n -14 AggFocus 0 17 -15 Column 0 0 -16 AggSet 0 0 -17 Column 0 0 -18 Column 0 1 -19 Add 0 0 -20 Integer 1 0 -21 AggFunc 0 1 ptr(0x7903a0) -22 Column 0 1 -23 Integer 2 0 -24 AggFunc 0 1 ptr(0x790700) -25 Next 0 9 -26 Close 0 0 -27 AggNext 0 37 -28 AggGet 0 2 -29 Integer 10 0 10 -30 Ge 1 27 -31 AggGet 0 0 -32 AggGet 0 1 -33 AggGet 0 2 -34 Add 0 0 -35 Callback 2 0 -36 Goto 0 27 -37 Noop 0 0 -38 Halt 0 0 -} - -puts { -

      The code generated in this last example is the same as the -previous except for the addition of two conditional jumps used -to implement the extra WHERE and HAVING clauses. The WHERE -clause is implemented by instructions 9 through 11 in the query -loop. The HAVING clause is implemented by instructions 28 through -30 in the output loop.

      - -

      Using SELECT Statements As Terms In An Expression

      - -

      The very name "Structured Query Language" tells us that SQL should -support nested queries. And, in fact, two different kinds of nesting -are supported. Any SELECT statement that returns a single-row, single-column -result can be used as a term in an expression of another SELECT statement. -And, a SELECT statement that returns a single-column, multi-row result -can be used as the right-hand operand of the IN and NOT IN operators. -We will begin this section with an example of the first kind of nesting, -where a single-row, single-column SELECT is used as a term in an expression -of another SELECT. Here is our example:

      - -
      -SELECT * FROM examp
      -WHERE two!=(SELECT three FROM examp2
      -            WHERE four=5);
      -
      - -

      The way SQLite deals with this is to first run the inner SELECT -(the one against examp2) and store its result in a private memory -cell. SQLite then substitutes the value of this private memory -cell for the inner SELECT when it evaluates the outer SELECT. -The code looks like this:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 String 0 0 -1 MemStore 0 1 -2 Integer 0 0 -3 OpenRead 1 5 examp2 -4 VerifyCookie 0 909 -5 Rewind 1 13 -6 Column 1 1 -7 Integer 5 0 5 -8 Ne 1 12 -9 Column 1 0 -10 MemStore 0 1 -11 Goto 0 13 -12 Next 1 6 -13 Close 1 0 -14 ColumnName 0 0 one -15 ColumnName 1 0 two -16 Integer 0 0 -17 OpenRead 0 3 examp -18 Rewind 0 26 -19 Column 0 1 -20 MemLoad 0 0 -21 Eq 1 25 -22 Column 0 0 -23 Column 0 1 -24 Callback 2 0 -25 Next 0 19 -26 Close 0 0 -27 Halt 0 0 -} - -puts { -

      The private memory cell is initialized to NULL by the first -two instructions. Instructions 2 through 13 implement the inner -SELECT statement against the examp2 table. Notice that instead of -sending the result to a callback or storing the result on a sorter, -the result of the query is pushed into the memory cell by instruction -10 and the loop is abandoned by the jump at instruction 11. -The jump at instruction 11 is vestigial and never executes.

      - -

      The outer SELECT is implemented by instructions 14 through 25. -In particular, the WHERE clause that contains the nested select -is implemented by instructions 19 through 21. You can see that -the result of the inner select is loaded onto the stack by instruction -20 and used by the conditional jump at 21.

      - -

      When the result of a sub-select is a scalar, a single private memory -cell can be used, as shown in the previous -example. But when the result of a sub-select is a vector, such -as when the sub-select is the right-hand operand of IN or NOT IN, -a different approach is needed. In this case, -the result of the sub-select is -stored in a transient table and the contents of that table -are tested using the Found or NotFound operators. Consider this -example:

      - -
      -SELECT * FROM examp
      -WHERE two IN (SELECT three FROM examp2);
      -
      - -

      The code generated to implement this last query is as follows:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 OpenTemp 1 1 -1 Integer 0 0 -2 OpenRead 2 5 examp2 -3 VerifyCookie 0 909 -4 Rewind 2 10 -5 Column 2 0 -6 IsNull -1 9 -7 String 0 0 -8 PutStrKey 1 0 -9 Next 2 5 -10 Close 2 0 -11 ColumnName 0 0 one -12 ColumnName 1 0 two -13 Integer 0 0 -14 OpenRead 0 3 examp -15 Rewind 0 25 -16 Column 0 1 -17 NotNull -1 20 -18 Pop 1 0 -19 Goto 0 24 -20 NotFound 1 24 -21 Column 0 0 -22 Column 0 1 -23 Callback 2 0 -24 Next 0 16 -25 Close 0 0 -26 Halt 0 0 -} - -puts { -

      The transient table in which the results of the inner SELECT are -stored is created by the OpenTemp -instruction at 0. This opcode is used for tables that exist for the -duration of a single SQL statement only. The transient cursor is always -opened read/write even if the main database is read-only. The transient -table is deleted automatically when the cursor is closed. The P2 value -of 1 means the cursor points to a BTree index, which has no data but can -have an arbitrary key.

      - -

      The inner SELECT statement is implemented by instructions 1 through 10. -All this code does is make an entry in the temporary table for each -row of the examp2 table with a non-NULL value for the "three" column. -The key for each temporary table entry is the "three" column of examp2 -and the data is an empty string since it is never used.

      - -

      The outer SELECT is implemented by instructions 11 through 25. In -particular, the WHERE clause containing the IN operator is implemented -by instructions at 16, 17, and 20. Instruction 16 pushes the value of -the "two" column for the current row onto the stack and instruction 17 -checks to see that it is non-NULL. If this is successful, execution -jumps to 20, where it tests to see if top of the stack matches any key -in the temporary table. The rest of the code is the same as what has -been shown before.

      - -

      Compound SELECT Statements

      - -

      SQLite also allows two or more SELECT statements to be joined as -peers using operators UNION, UNION ALL, INTERSECT, and EXCEPT. These -compound select statements are implemented using transient tables. -The implementation is slightly different for each operator, but the -basic ideas are the same. For an example we will use the EXCEPT -operator.

      - -
      -SELECT two FROM examp
      -EXCEPT
      -SELECT four FROM examp2;
      -
      - -

      The result of this last example should be every unique value -of the "two" column in the examp table, except any value that is -in the "four" column of examp2 is removed. The code to implement -this query is as follows:

      -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 OpenTemp 0 1 -1 KeyAsData 0 1 -2 Integer 0 0 -3 OpenRead 1 3 examp -4 VerifyCookie 0 909 -5 Rewind 1 11 -6 Column 1 1 -7 MakeRecord 1 0 -8 String 0 0 -9 PutStrKey 0 0 -10 Next 1 6 -11 Close 1 0 -12 Integer 0 0 -13 OpenRead 2 5 examp2 -14 Rewind 2 20 -15 Column 2 1 -16 MakeRecord 1 0 -17 NotFound 0 19 -18 Delete 0 0 -19 Next 2 15 -20 Close 2 0 -21 ColumnName 0 0 four -22 Rewind 0 26 -23 Column 0 0 -24 Callback 1 0 -25 Next 0 23 -26 Close 0 0 -27 Halt 0 0 -} - -puts { -

      The transient table in which the result is built is created by -instruction 0. Three loops then follow. The loop at instructions -5 through 10 implements the first SELECT statement. The second -SELECT statement is implemented by the loop at instructions 14 through -19. Finally, a loop at instructions 22 through 25 reads the transient -table and invokes the callback once for each row in the result.

      - -

      Instruction 1 is of particular importance in this example. Normally, -the Column instruction extracts the value of a column from a larger -record in the data of an SQLite file entry. Instruction 1 sets a flag on -the transient table so that Column will instead treat the key of the -SQLite file entry as if it were data and extract column information from -the key.

      - -

      Here is what is going to happen: The first SELECT statement -will construct rows of the result and save each row as the key of -an entry in the transient table. The data for each entry in the -transient table is never used so we fill it in with an empty string. -The second SELECT statement also constructs rows, but the rows -constructed by the second SELECT are removed from the transient table. -That is why we want the rows to be stored in the key of the SQLite file -instead of in the data -- so they can be easily located and deleted.

      - -

      Let's look more closely at what is happening here. The first -SELECT is implemented by the loop at instructions 5 through 10. -Instruction 5 initializes the loop by rewinding its cursor. -Instruction 6 extracts the value of the "two" column from "examp" -and instruction 7 converts this into a row. Instruction 8 pushes -an empty string onto the stack. Finally, instruction 9 writes the -row into the temporary table. But remember, the PutStrKey opcode uses -the top of the stack as the record data and the next on stack as the -key. For an INSERT statement, the row generated by the -MakeRecord opcode is the record data and the record key is an integer -created by the NewRecno opcode. But here the roles are reversed and -the row created by MakeRecord is the record key and the record data is -just an empty string.

      - -

      The second SELECT is implemented by instructions 14 through 19. -Instruction 14 initializes the loop by rewinding its cursor. -A new result row is created from the "four" column of table "examp2" -by instructions 15 and 16. But instead of using PutStrKey to write this -new row into the temporary table, we instead call Delete to remove -it from the temporary table if it exists.

      - -

      The result of the compound select is sent to the callback routine -by the loop at instructions 22 through 25. There is nothing new -or remarkable about this loop, except for the fact that the Column -instruction at 23 will be extracting a column out of the record key -rather than the record data.

      - -

      Summary

      - -

      This article has reviewed all of the major techniques used by -SQLite's VDBE to implement SQL statements. What has not been shown -is that most of these techniques can be used in combination to -generate code for an appropriately complex query statement. For -example, we have shown how sorting is accomplished on a simple query -and we have shown how to implement a compound query. But we did -not give an example of sorting in a compound query. This is because -sorting a compound query does not introduce any new concepts: it -merely combines two previous ideas (sorting and compounding) -in the same VDBE program.

      - -

      For additional information on how the SQLite library -functions, the reader is directed to look at the SQLite source -code directly. If you understand the material in this article, -you should not have much difficulty in following the sources. -Serious students of the internals of SQLite will probably -also want to make a careful study of the VDBE opcodes -as documented here. Most of the -opcode documentation is extracted from comments in the source -code using a script so you can also get information about the -various opcodes directly from the vdbe.c source file. -If you have successfully read this far, you should have little -difficulty understanding the rest.

      - -

      If you find errors in either the documentation or the code, -feel free to fix them and/or contact the author at -drh@hwaci.com. Your bug fixes or -suggestions are always welcomed.

      -} -footer $rcsid DELETED version3.tcl Index: version3.tcl ================================================================== --- version3.tcl +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/tclsh -source common.tcl -header {SQLite Version 3 Overview} -puts { -

      SQLite Version 3 Overview

      - -

      -SQLite version 3.0 introduces important changes to the library, including: -

      - -
        -
      • A more compact format for database files.
      • -
      • Manifest typing and BLOB support.
      • -
      • Support for both UTF-8 and UTF-16 text.
      • -
      • User-defined text collating sequences.
      • -
      • 64-bit ROWIDs.
      • -
      • Improved Concurrency.
      • -
      - -

      -This document is a quick introduction to the changes for SQLite 3.0 -for users who are already familiar with SQLite version 2.8. -

      - -

      Naming Changes

      - -

      -SQLite version 2.8 will continue to be supported with bug fixes -for the foreseeable future. In order to allow SQLite version 2.8 -and SQLite version 3.0 to peacefully coexist, the names of key files -and APIs in SQLite version 3.0 have been changed to include the -character "3". For example, the include file used by C programs -has been changed from "sqlite.h" to "sqlite3.h". And the name of -the shell program used to interact with databases has been changed -from "sqlite.exe" to "sqlite3.exe". With these changes, it is possible -to have both SQLite 2.8 and SQLite 3.0 installed on the same system at -the same time. And it is possible for the same C program to link -against both SQLite 2.8 and SQLite 3.0 at the same time and to use -both libraries at the same time. -

      - -

      New File Format

      - -

      -The format used by SQLite database files has been completely revised. -The old version 2.1 format and the new 3.0 format are incompatible with -one another. Version 2.8 of SQLite will not read a version 3.0 database -files and version 3.0 of SQLite will not read a version 2.8 database file. -

      - -

      -To convert an SQLite 2.8 database into an SQLite 3.0 database, have -ready the command-line shells for both version 2.8 and 3.0. Then -enter a command like the following: -

      - -
      -sqlite OLD.DB .dump | sqlite3 NEW.DB
      -
      - -

      -The new database file format uses B+trees for tables. In a B+tree, all -data is stored in the leaves of the tree instead of in both the leaves and -the intermediate branch nodes. The use of B+trees for tables allows for -better scalability and the storage of larger data fields without the use of -overflow pages. Traditional B-trees are still used for indices.

      - -

      -The new file format also supports variable pages sizes between 512 and -32768 bytes. The size of a page is stored in the file header so the -same library can read databases with different pages sizes, in theory, -though this feature has not yet been implemented in practice. -

      - -

      -The new file format omits unused fields from its disk images. For example, -indices use only the key part of a B-tree record and not the data. So -for indices, the field that records the length of the data is omitted. -Integer values such as the length of key and data are stored using -a variable-length encoding so that only one or two bytes are required to -store the most common cases but up to 64-bits of information can be encoded -if needed. -Integer and floating point data is stored on the disk in binary rather -than being converted into ASCII as in SQLite version 2.8. -These changes taken together result in database files that are typically -25% to 35% smaller than the equivalent files in SQLite version 2.8. -

      - -

      -Details of the low-level B-tree format used in SQLite version 3.0 can -be found in header comments to the -btree.c -source file. -

      - -

      Manifest Typing and BLOB Support

      - -

      -SQLite version 2.8 will deal with data in various formats internally, -but when writing to the disk or interacting through its API, SQLite 2.8 -always converts data into ASCII text. SQLite 3.0, in contrast, exposes -its internal data representations to the user and stores binary representations -to disk when appropriate. The exposing of non-ASCII representations was -added in order to support BLOBs. -

      - -

      -SQLite version 2.8 had the feature that any type of data could be stored -in any table column regardless of the declared type of that column. This -feature is retained in version 3.0, though in a slightly modified form. -Each table column will store any type of data, though columns have an -affinity for the format of data defined by their declared datatype. -When data is inserted into a column, that column will make at attempt -to convert the data format into the columns declared type. All SQL -database engines do this. The difference is that SQLite 3.0 will -still store the data even if a format conversion is not possible. -

      - -

      -For example, if you have a table column declared to be of type "INTEGER" -and you try to insert a string, the column will look at the text string -and see if it looks like a number. If the string does look like a number -it is converted into a number and into an integer if the number does not -have a fractional part, and stored that way. But if the string is not -a well-formed number it is still stored as a string. A column with a -type of "TEXT" tries to convert numbers into an ASCII-Text representation -before storing them. But BLOBs are stored in TEXT columns as BLOBs because -you cannot in general convert a BLOB into text. -

      - -

      -In most other SQL database engines the datatype is associated with -the table column that holds the data - with the data container. -In SQLite 3.0, the datatype is associated with the data itself, not -with its container. -Paul Graham in his book -ANSI Common Lisp -calls this property "Manifest Typing". -Other writers have other definitions for the term "manifest typing", -so beware of confusion. But by whatever name, that is the datatype -model supported by SQLite 3.0. -

      - -

      -Additional information about datatypes in SQLite version 3.0 is -available -separately. -

      - -

      Support for UTF-8 and UTF-16

      - -

      -The new API for SQLite 3.0 contains routines that accept text as -both UTF-8 and UTF-16 in the native byte order of the host machine. -Each database file manages text as either UTF-8, UTF-16BE (big-endian), -or UTF-16LE (little-endian). Internally and in the disk file, the -same text representation is used everywhere. If the text representation -specified by the database file (in the file header) does not match -the text representation required by the interface routines, then text -is converted on-the-fly. -Constantly converting text from one representation to another can be -computationally expensive, so it is suggested that programmers choose a -single representation and stick with it throughout their application. -

      - -

      -In the current implementation of SQLite, the SQL parser only works -with UTF-8 text. So if you supply UTF-16 text it will be converted. -This is just an implementation issue and there is nothing to prevent -future versions of SQLite from parsing UTF-16 encoded SQL natively. -

      - -

      -When creating new user-defined SQL functions and collating sequences, -each function or collating sequence can specify it if works with -UTF-8, UTF-16be, or UTF-16le. Separate implementations can be registered -for each encoding. If an SQL function or collating sequences is required -but a version for the current text encoding is not available, then -the text is automatically converted. As before, this conversion takes -computation time, so programmers are advised to pick a single -encoding and stick with it in order to minimize the amount of unnecessary -format juggling. -

      - -

      -SQLite is not particular about the text it receives and is more than -happy to process text strings that are not normalized or even -well-formed UTF-8 or UTF-16. Thus, programmers who want to store -IS08859 data can do so using the UTF-8 interfaces. As long as no -attempts are made to use a UTF-16 collating sequence or SQL function, -the byte sequence of the text will not be modified in any way. -

      - -

      User-defined Collating Sequences

      - -

      -A collating sequence is just a defined order for text. When SQLite 3.0 -sorts (or uses a comparison operator like "<" or ">=") the sort order -is first determined by the data type. -

      - -
        -
      • NULLs sort first
      • -
      • Numeric values sort next in numerical order
      • -
      • Text values come after numerics
      • -
      • BLOBs sort last
      • -
      - -

      -Collating sequences are used for comparing two text strings. -The collating sequence does not change the ordering of NULLs, numbers, -or BLOBs, only text. -

      - -

      -A collating sequence is implemented as a function that takes the -two strings being compared as inputs and returns negative, zero, or -positive if the first string is less than, equal to, or greater than -the second. -SQLite 3.0 comes with a single built-in collating sequence named "BINARY" -which is implemented using the memcmp() routine from the standard C library. -The BINARY collating sequence works well for English text. For other -languages or locales, alternative collating sequences may be preferred. -

      - -

      -The decision of which collating sequence to use is controlled by the -COLLATE clause in SQL. A COLLATE clause can occur on a table definition, -to define a default collating sequence to a table column, or on field -of an index, or in the ORDER BY clause of a SELECT statement. -Planned enhancements to SQLite are to include standard CAST() syntax -to allow the collating sequence of an expression to be defined. -

      - -

      64-bit ROWIDs

      - -

      -Every row of a table has a unique rowid. -If the table defines a column with the type "INTEGER PRIMARY KEY" then that -column becomes an alias for the rowid. But with or without an INTEGER PRIMARY -KEY column, every row still has a rowid. -

      - -

      -In SQLite version 3.0, the rowid is a 64-bit signed integer. -This is an expansion of SQLite version 2.8 which only permitted -rowids of 32-bits. -

      - -

      -To minimize storage space, the 64-bit rowid is stored as a variable length -integer. Rowids between 0 and 127 use only a single byte. -Rowids between 0 and 16383 use just 2 bytes. Up to 2097152 uses three -bytes. And so forth. Negative rowids are allowed but they always use -nine bytes of storage and so their use is discouraged. When rowids -are generated automatically by SQLite, they will always be non-negative. -

      - -

      Improved Concurrency

      - -

      -SQLite version 2.8 allowed multiple simultaneous readers or a single -writer but not both. SQLite version 3.0 allows one process to begin -writing the database while other processes continue to read. The -writer must still obtain an exclusive lock on the database for a brief -interval in order to commit its changes, but the exclusive lock is no -longer required for the entire write operation. -A more detailed report on the locking -behavior of SQLite version 3.0 is available separately. -

      - -

      -A limited form of table-level locking is now also available in SQLite. -If each table is stored in a separate database file, those separate -files can be attached to the main database (using the ATTACH command) -and the combined databases will function as one. But locks will only -be acquired on individual files as needed. So if you redefine "database" -to mean two or more database files, then it is entirely possible for -two processes to be writing to the same database at the same time. -To further support this capability, commits of transactions involving -two or more ATTACHed database are now atomic. -

      - -

      Credits

      - -

      -SQLite version 3.0 is made possible in part by AOL developers -supporting and embracing great Open-Source Software. -

      - - -} -footer {$Id: version3.tcl,v 1.6 2006/03/03 21:39:54 drh Exp $} DELETED whentouse.tcl Index: whentouse.tcl ================================================================== --- whentouse.tcl +++ /dev/null @@ -1,254 +0,0 @@ -# -# Run this TCL script to generate HTML for the goals.html file. -# -set rcsid {$Id: whentouse.tcl,v 1.7 2007/04/14 12:04:39 drh Exp $} -source common.tcl -header {Appropriate Uses For SQLite} - -puts { -

      -SQLite is different from most other SQL database engines in that its -primary design goal is to be simple: -

      - -
        -
      • Simple to administer
      • -
      • Simple to operate
      • -
      • Simple to embed in a larger program
      • -
      • Simple to maintain and customize
      • -
      - -

      -Many people like SQLite because it is small and fast. But those -qualities are just happy accidents. -Users also find that SQLite is very reliable. Reliability is -a consequence of simplicity. With less complication, there is -less to go wrong. So, yes, SQLite is small, fast, and reliable, -but first and foremost, SQLite strives to be simple. -

      - -

      -Simplicity in a database engine can be either a strength or a -weakness, depending on what you are trying to do. In order to -achieve simplicity, SQLite has had to sacrifice other characteristics -that some people find useful, such as high concurrency, fine-grained -access control, a rich set of built-in functions, stored procedures, -esoteric SQL language features, XML and/or Java extensions, -tera- or peta-byte scalability, and so forth. If you need some of these -features and do not mind the added complexity that they -bring, then SQLite is probably not the database for you. -SQLite is not intended to be an enterprise database engine. It -not designed to compete with Oracle or PostgreSQL. -

      - -

      -The basic rule of thumb for when it is appropriate to use SQLite is -this: Use SQLite in situations where simplicity of administration, -implementation, and maintenance are more important than the countless -complex features that enterprise database engines provide. -As it turns out, situations where simplicity is the better choice -are more common than many people realize. -

      - -

      Situations Where SQLite Works Well

      - -
        -
      • Websites

        - -

        SQLite usually will work great as the database engine for low to -medium traffic websites (which is to say, 99.9% of all websites). -The amount of web traffic that SQLite can handle depends, of course, -on how heavily the website uses its database. Generally -speaking, any site that gets fewer than a 100000 hits/day should work -fine with SQLite. -The 100000 hits/day figure is a conservative estimate, not a -hard upper bound. -SQLite has been demonstrated to work with 10 times that amount -of traffic.

        -
      • - -
      • Embedded devices and applications

        - -

        Because an SQLite database requires little or no administration, -SQLite is a good choice for devices or services that must work -unattended and without human support. SQLite is a good fit for -use in cellphones, PDAs, set-top boxes, and/or appliances. It also -works well as an embedded database in downloadable consumer applications. -

        -
      • - -
      • Application File Format

        - -

        -SQLite has been used with great success as the on-disk file format -for desktop applications such as financial analysis tools, CAD -packages, record keeping programs, and so forth. The traditional -File/Open operation does an sqlite3_open() and executes a -BEGIN TRANSACTION to get exclusive access to the content. File/Save -does a COMMIT followed by another BEGIN TRANSACTION. The use -of transactions guarantees that updates to the application file are atomic, -durable, isolated, and consistent. -

        - -

        -Temporary triggers can be added to the database to record all -changes into a (temporary) undo/redo log table. These changes can then -be played back when the user presses the Undo and Redo buttons. Using -this technique, a unlimited depth undo/redo implementation can be written -in surprising little code. -

        -
      • - -
      • Replacement for ad hoc disk files

        - -

        Many programs use fopen(), fread(), and fwrite() to create and -manage files of data in home-grown formats. SQLite works -particularly well as a -replacement for these ad hoc data files.

        -
      • - -
      • Internal or temporary databases

        - -

        -For programs that have a lot of data that must be sifted and sorted -in diverse ways, it is often easier and quicker to load the data into -an in-memory SQLite database and use queries with joins and ORDER BY -clauses to extract the data in the form and order needed rather than -to try to code the same operations manually. -Using an SQL database internally in this way also gives the program -greater flexibility since new columns and indices can be added without -having to recode every query. -

        -
      • - -
      • Command-line dataset analysis tool

        - -

        -Experienced SQL users can employ -the command-line sqlite program to analyze miscellaneous -datasets. Raw data can be imported from CSV files, then that -data can be sliced and diced to generate a myriad of summary -reports. Possible uses include website log analysis, sports -statistics analysis, compilation of programming metrics, and -analysis of experimental results. -

        - -

        -You can also do the same thing with a enterprise client/server -database, of course. The advantages to using SQLite in this situation -are that SQLite is much easier to set up and the resulting database -is a single file that you can store on a floppy disk or flash-memory stick -or email to a colleague. -

        -
      • - -
      • Stand-in for an enterprise database during demos or testing

        - -

        -If you are writing a client application for an enterprise database engine, -it makes sense to use a generic database backend that allows you to connect -to many different kinds of SQL database engines. It makes even better -sense to -go ahead and include SQLite in the mix of supported database and to statically -link the SQLite engine in with the client. That way the client program -can be used standalone with an SQLite data file for testing or for -demonstrations. -

        -
      • - -
      • Database Pedagogy

        - -

        -Because it is simple to setup and use (installation is trivial: just -copy the sqlite or sqlite.exe executable to the target machine -and run it) SQLite makes a good database engine for use in teaching SQL. -Students can easily create as many databases as they like and can -email databases to the instructor for comments or grading. For more -advanced students who are interested in studying how an RDBMS is -implemented, the modular and well-commented and documented SQLite code -can serve as a good basis. This is not to say that SQLite is an accurate -model of how other database engines are implemented, but rather a student who -understands how SQLite works can more quickly comprehend the operational -principles of other systems. -

        -
      • - -
      • Experimental SQL language extensions

        - -

        The simple, modular design of SQLite makes it a good platform for -prototyping new, experimental database language features or ideas. -

        -
      • - - -
      - -

      Situations Where Another RDBMS May Work Better

      - -
        -
      • Client/Server Applications

        - -

        If you have many client programs accessing a common database -over a network, you should consider using a client/server database -engine instead of SQLite. SQLite will work over a network filesystem, -but because of the latency associated with most network filesystems, -performance will not be great. Also, the file locking logic of -many network filesystems implementation contains bugs (on both Unix -and windows). If file locking does not work like it should, -it might be possible for two or more client programs to modify the -same part of the same database at the same time, resulting in -database corruption. Because this problem results from bugs in -the underlying filesystem implementation, there is nothing SQLite -can do to prevent it.

        - -

        A good rule of thumb is that you should avoid using SQLite -in situations where the same database will be accessed simultaneously -from many computers over a network filesystem.

        -
      • - -
      • High-volume Websites

        - -

        SQLite will normally work fine as the database backend to a website. -But if you website is so busy that your are thinking of splitting the -database component off onto a separate machine, then you should -definitely consider using an enterprise-class client/server database -engine instead of SQLite.

        -
      • - -
      • Very large datasets

        - -

        When you start a transaction in SQLite (which happens automatically -before any write operation that is not within an explicit BEGIN...COMMIT) -the engine has to allocate a bitmap of dirty pages in the disk file to -help it manage its rollback journal. SQLite needs 256 bytes of RAM for -every 1MiB of database (assuming a 1024-byte page size: less memory is -used with larger page sizes, of course). -For smaller databases, the amount of memory -required is not a problem, but when database begin to grow into the -multi-gigabyte range, the size of the bitmap can get quite large. If -you need to store and modify more than a few dozen GB of data, you should -consider using a different database engine. -

        -
      • - -
      • High Concurrency

        - -

        -SQLite uses reader/writer locks on the entire database file. That means -if any process is reading from any part of the database, all other -processes are prevented from writing any other part of the database. -Similarly, if any one process is writing to the database, -all other processes are prevented from reading any other part of the -database. -For many situations, this is not a problem. Each application -does its database work quickly and moves on, and no lock lasts for more -than a few dozen milliseconds. But there are some applications that require -more concurrency, and those applications may need to seek a different -solution. -

        -
      • - -
      - -} -footer $rcsid ADDED wrap.tcl Index: wrap.tcl ================================================================== --- /dev/null +++ wrap.tcl @@ -0,0 +1,160 @@ +#!/usr/bin/tclsh +# +# This script processes raw page text into its final form for display. +# Invoke this command as follows: +# +# tclsh wrap.tcl $(DOC) $(SRC) $(DEST) source1.in source2.in ... +# +# The $(DOC) and $(SRC) values are the names of directories containing +# the documentation source and program source. $(DEST) is the name of +# of the directory where generated HTML is written. sourceN.in is the +# input file to be processed. The output is sourceN.html in the +# local directory. +# +# Changes made to the source files: +# +# * An appropriate header is prepended to the file. +# * Any ... in the input is moved into the prepended +# header. +# * An appropriate footer is appended. +# * Scripts within ... are evaluated. Output that +# is emitted from these scripts by "puts" appears in place of +# the original script. +# +set DOC [lindex $argv 0] +set SRC [lindex $argv 1] +set DEST [lindex $argv 2] +set HOMEDIR [pwd] +rename puts real_puts +proc puts {text} { + real_puts $::OUT $text + flush $::OUT +} + +# The following proc is used to ensure consistent formatting in the +# HTML generated by lang.tcl and pragma.tcl. +# +proc Syntax {args} { + puts {} + foreach {rule body} $args { + puts "" + regsub -all < $body {%LT} body + regsub -all > $body {%GT} body + regsub -all %LT $body {} body + regsub -all %GT $body {} body + regsub -all {[]|[*?]} $body {&} body + regsub -all "\n" [string trim $body] "
      \n" body + regsub -all "\n *" $body "\n\\ \\ \\ \\ " body + regsub -all {[|,.*()]} $body {&} body + regsub -all { = } $body { = } body + regsub -all {STAR} $body {*} body + ## These metacharacters must be handled to undo being + ## treated as SQL punctuation characters above. + regsub -all {RPPLUS} $body {
      )+} body + regsub -all {LP} $body {(} body + regsub -all {RP} $body {)} body + ## Place the left-hand side of the rule in the 2nd table column. + puts "" + } + puts {
      " + puts "$rule ::=$body
      } +} + +foreach infile [lrange $argv 3 end] { + cd $HOMEDIR + real_puts "Processing $infile" + set fd [open $infile r] + set in [read $fd] + close $fd + set title {No Title} + regexp {([^\n]*)} $in all title + regsub {[^\n]*} $in {} in + set outfile [file root [file tail $infile]].html + set ::OUT [open $::DEST/$outfile w] + puts {} + puts {} + puts "$title" + puts { + + } + puts {} + puts { +
      + + +
      +
      The World's Most Used SQL Database.
      + + + } + regsub -all {} $in "\175; eval \173" in + regsub -all {} $in "\175; puts \173" in + eval "puts \173$in\175" + cd $::HOMEDIR + puts {
      } + set mtime [file mtime $infile] + set date [clock format $mtime -format {%Y/%m/%d %H:%M:%S UTC} -gmt 1] + puts "This page last modified $date" + puts {
      } + close $::OUT +}
    "
    -  set t2 [string map {& & < < > >} $text]
    -  regsub -all "/(\[^\n/\]+)/" $t2 {\1} t3
    -  puts "$t3"
    -  puts "
    " -} -proc IMAGE {name {caption {}}} { - puts "
    " - if {$caption!=""} { - puts "
    $caption" - } - puts "
    " -} -proc PARAGRAPH {text} { - # regsub -all "/(\[a-zA-Z0-9\]+)/" $text {\1} t2 - regsub -all "\\*(\[^\n*\]+)\\*" $text {\1} t3 - puts "

    $t3

    \n" -} -set level(0) 0 -set level(1) 0 -proc HEADING {n name {tag {}}} { - if {$tag!=""} { - puts "