Overview

Comment:      Merge latest trunk changes into the apple-osx branch.
Downloads:    Tarball | ZIP archive
Timelines:    family | ancestors | descendants | both | apple-osx
Files:        files | file ages | folders
SHA1:         c5f7977b899e93bf90bd44572db78c74
User & Date:  dan 2011-08-23 18:06:05.000
Context

2011-08-26
  19:18  Merge the latest trunk changes into the apple-osx branch.
         (check-in: 55af80035f user: drh tags: apple-osx)

2011-08-23
  18:06  Merge latest trunk changes into the apple-osx branch.
         (check-in: c5f7977b89 user: dan tags: apple-osx)

  16:41  Have the "crash" VFS used by the tcl tests (test6.c) handle
         SQLITE_FCNTL_SIZE_HINT internally, instead of passing it directly
         through to the underlying VFS. This is important if the crash VFS
         is simulating non-default device characteristics such as
         SQLITE_DEVCAP_SEQUENTIAL or ATOMIC.
         (check-in: fac8bc8f34 user: dan tags: trunk)
         A sketch of this pattern appears after this timeline.

2011-08-02
  18:25  Merge all the latest trunk changes into the apple-osx branch.
         (check-in: 77376b332b user: drh tags: apple-osx)
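The 16:41 check-in above (fac8bc8f34) changes the test-only "crash" VFS so
that it answers SQLITE_FCNTL_SIZE_HINT itself instead of forwarding the
request to the file handle of the underlying VFS. The following is a minimal
sketch of that pattern for a generic wrapper VFS; it is not the code from
test6.c, and the WrapperFile structure and wrapperFileControl function are
hypothetical names used only for illustration.

    /*
    ** Hypothetical wrapper-VFS file-control handler (illustration only).
    ** SQLITE_FCNTL_SIZE_HINT is serviced locally; every other opcode is
    ** passed through to the real underlying file.
    */
    #include "sqlite3.h"

    typedef struct WrapperFile WrapperFile;
    struct WrapperFile {
      sqlite3_file base;          /* Base class (must be first) */
      sqlite3_file *pReal;        /* Real underlying file handle */
      sqlite3_int64 iSizeHint;    /* Most recent size hint, kept locally */
    };

    static int wrapperFileControl(sqlite3_file *pFile, int op, void *pArg){
      WrapperFile *p = (WrapperFile *)pFile;
      if( op==SQLITE_FCNTL_SIZE_HINT ){
        /* Record the hint but do not forward it, so that the wrapper's
        ** simulated device characteristics are not bypassed. */
        p->iSizeHint = *(sqlite3_int64 *)pArg;
        return SQLITE_OK;
      }
      return p->pReal->pMethods->xFileControl(p->pReal, op, pArg);
    }

In a complete wrapper VFS this function would be installed as the
xFileControl member of the wrapper's sqlite3_io_methods; the crash VFS in
test6.c is built around a similar wrapper arrangement.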
Changes
Changes to Makefile.in.
These changes add the new source file vdbesort.c to the build.

Around lines 173-188, vdbesort.lo is added to the LIBOBJS0 list of core
object files:

    memjournal.lo \
    mutex.lo mutex_noop.lo mutex_os2.lo mutex_unix.lo mutex_w32.lo \
    notify.lo opcodes.lo os.lo os_os2.lo os_unix.lo os_win.lo \
    pager.lo parse.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \
    random.lo resolve.lo rowset.lo rtree.lo select.lo status.lo \
    table.lo tokenize.lo trigger.lo \
    update.lo util.lo vacuum.lo \
    vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo vdbesort.lo \
    vdbetrace.lo wal.lo walker.lo where.lo utf.lo vtab.lo

Around lines 270-284, $(TOP)/src/vdbesort.c is added to the list of library
source files, between vdbemem.c and vdbetrace.c:

    $(TOP)/src/vdbemem.c \
    $(TOP)/src/vdbesort.c \
    $(TOP)/src/vdbetrace.c \

Around lines 751-767, a compile rule for the new object is added after the
vdbemem.lo rule:

    vdbesort.lo: $(TOP)/src/vdbesort.c $(HDR)
        $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbesort.c
Changes to Makefile.msc.
These changes mirror the Makefile.in changes and add two Windows-specific
adjustments.

Around lines 39-57, after the optional -DNDEBUG block, new lines are added to
suppress MSVC warnings about "insecure" C runtime functions (a short
illustration follows this list of hunks):

    #
    # Prevent warnings about "insecure" runtime library functions being used.
    #
    TCC = $(TCC) -D_CRT_SECURE_NO_DEPRECATE -D_CRT_SECURE_NO_WARNINGS

Around lines 158-173, vdbesort.lo is added to the LIBOBJS0 object list:

    vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo vdbesort.lo \
    vdbetrace.lo wal.lo walker.lo where.lo utf.lo vtab.lo

Around lines 258-272, $(TOP)\src\vdbesort.c is added to the list of library
source files, between vdbemem.c and vdbetrace.c:

    $(TOP)\src\vdbemem.c \
    $(TOP)\src\vdbesort.c \
    $(TOP)\src\vdbetrace.c \

Around lines 400-414, $(TOP)\src\vdbesort.c is also added to the second
source-file list (the one that continues with parse.c and the FTS3 sources):

    $(TOP)\src\vdbemem.c \
    $(TOP)\src\vdbesort.c \
    $(TOP)\src\vdbetrace.c \

Around lines 713-729, a compile rule for the new object is added after the
vdbemem.lo rule:

    vdbesort.lo: $(TOP)\src\vdbesort.c $(HDR)
        $(LTCOMPILE) -c $(TOP)\src\vdbesort.c

Around lines 893-904, in the Windows section, the $(NAWK) filter used to
extract sqlite3_* exports from the dumpbin output when generating
sqlite3.def is modified; the new line reads:

    | $(NAWK) "/ 1 _?sqlite3_/ { sub(/^.* _?/,\"\");print }" \
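The _CRT_SECURE_NO_DEPRECATE and _CRT_SECURE_NO_WARNINGS defines added above
turn off MSVC's deprecation warnings (C4996) for classic C runtime functions
such as strcpy and fopen. A minimal stand-alone illustration, assuming the
hypothetical file name warn_demo.c (not part of the SQLite tree):

    /*
    ** warn_demo.c (hypothetical example)
    **
    **   cl /W3 warn_demo.c
    **       -> warning C4996: 'strcpy': This function or variable may be
    **          unsafe. Consider using strcpy_s instead.
    **   cl /W3 -D_CRT_SECURE_NO_WARNINGS warn_demo.c
    **       -> compiles with no warning.
    */
    #include <stdio.h>
    #include <string.h>

    int main(void){
      char buf[32];
      strcpy(buf, "hello");   /* a CRT function that MSVC flags as "insecure" */
      printf("%s\n", buf);
      return 0;
    }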
Changes to configure.
The generated configure script is rebuilt with GNU Autoconf 2.65 for sqlite
3.7.8, so nearly the whole diff consists of regenerated Autoconf shell
boilerplate: the M4sh initialization block, the as_fn_* shell helper
functions (as_fn_unset, as_fn_exit, as_fn_mkdir_p, as_fn_append, as_fn_arith,
as_fn_error, and related helpers), the option-parsing and error-reporting
code, the ac_fn_c_* compile/link/preprocess/run/check helper functions, and
the config.log preamble. The substantive differences visible in the diff are
the version strings (PACKAGE_VERSION='3.7.8', PACKAGE_STRING='sqlite 3.7.8',
the "sqlite configure 3.7.8" banner, and the config.log header "created by
sqlite $as_me 3.7.8, which was generated by GNU Autoconf 2.65") and the
regenerated help text for the influential environment variables (CC, CFLAGS,
LDFLAGS, LIBS, CPPFLAGS, CPP, and TCLLIBDIR, "Where to install tcl plugin").
"./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
##
## -------------------- ##

ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

sqlite_version_sanity_check=`cat $srcdir/VERSION | tr -d '\n'`
if test "$PACKAGE_VERSION" != "$sqlite_version_sanity_check" ; then
as_fn_error "configure script is out of date: configure \$PACKAGE_VERSION = $PACKAGE_VERSION top level VERSION file = $sqlite_version_sanity_check please regen with autoconf" "$LINENO" 5
fi

# The following RCS revision string applies to configure.in
# $Revision: 1.56 $

#########
# Programs needed
#

case `pwd` in
  *\ * | *\ *)
    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
esac

macro_version='2.2.6'
macro_revision='1.3012'
︙ | ︙ | |||
2089 2090 2091 2092 2093 2094 2095 | ltmain="$ac_aux_dir/ltmain.sh" ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do | > | | < < < < < < < < | | | | > < | < < | < | | < | < < | < | < | < | | < | < | < | < | 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 | ltmain="$ac_aux_dir/ltmain.sh" ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do for ac_t in install-sh install.sh shtool; do if test -f "$ac_dir/$ac_t"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/$ac_t -c" break 2 fi done done if test -z "$ac_aux_dir"; then as_fn_error "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if test "${ac_cv_build+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error "invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if test "${ac_cv_host+set}" = set; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 |
︙ | ︙ | |||
2209 2210 2211 2212 2213 2214 2215 | ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 | | | | | | | | | | | | | | | | < < | < < | | | | | | | | | | | | | | | | | | | | | | | | | | | | | < < | < < | | | | < | | < < < < < < < < < < | | | | | < < | < < > | < | < > | | | > | < < < < | | | | | | | | | | | < | | > | | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < | < < | < < < | < < > > | | > < < < < < < < | | | | | | | | | | < < | | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | | | < < < < | | | | | | | | | < < | | | | < < < < < | < < < < < < < < < < < < < < < < < < < | < | | | | | > > > > > > > > > | > > | < | | < < < < < < < < < < < < < < < < < < | | | < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | | | | < < < < | 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 
2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 | ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. 
shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error "no acceptable C compiler found in \$PATH See \`config.log' for more details." "$LINENO" 5; } # Provide some information about the compiler. 
$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { as_fn_set_status 77 as_fn_error "C compiler cannot create executables See \`config.log' for more details." 
"$LINENO" 5; }; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdio.h> int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details." 
"$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if test "${ac_cv_objext+set}" = set; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error "cannot compute suffix of object files: cannot compile See \`config.log' for more details." "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if test "${ac_cv_c_compiler_gnu+set}" = set; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if test "${ac_cv_prog_cc_g+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if test "${ac_cv_prog_cc_c89+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdarg.h> #include <stdio.h> #include <sys/types.h> #include <sys/stat.h> /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; |
︙ | ︙ | |||
3078 3079 3080 3081 3082 3083 3084 | return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" | < | < < < < < < < < < < < < < < < < < < < < < < | | | > > | | | | | | < | < | | | | | | < | < | | | | | | < | < | | | | | | < | < | | 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 | return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... 
" >&6; } if test "${ac_cv_path_SED+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue # Check for GNU ac_path_SED and select it if it is found. # Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if test "${ac_cv_path_GREP+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue # Check for GNU ac_path_GREP and select it if it is found. 
# Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if test "${ac_cv_path_EGREP+set}" = set; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 $as_echo_n "checking for fgrep... 
" >&6; } if test "${ac_cv_path_FGREP+set}" = set; then : $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue # Check for GNU ac_path_FGREP and select it if it is found. # Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then as_fn_error "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep |
︙ | ︙ | |||
3440 3441 3442 3443 3444 3445 3446 | # Check whether --with-gnu-ld was given. | | | | 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 | # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; |
︙ | ︙ | |||
3479 3480 3481 3482 3483 3484 3485 | ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then | | | | | 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 | ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if test "${lt_cv_path_LD+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. |
︙ | ︙ | |||
3516 3517 3518 3519 3520 3521 3522 | else lt_cv_path_LD="$LD" # Let the user override the test with a path. fi fi LD="$lt_cv_path_LD" if test -n "$LD"; then | | | < | < | | | | | | 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 | else lt_cv_path_LD="$LD" # Let the user override the test with a path. fi fi LD="$lt_cv_path_LD" if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if test "${lt_cv_prog_gnu_ld+set}" = set; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) lt_cv_prog_gnu_ld=yes ;; *) lt_cv_prog_gnu_ld=no ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if test "${lt_cv_path_NM+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" |
︙ | ︙ | |||
3601 3602 3603 3604 3605 3606 3607 | fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi fi | | | | | | | | | | | | | | | | | < < | < < | | | | | | | | | | | | 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 | fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$ac_tool_prefix"; then for ac_prog in "dumpbin -symbols" "link -dump -symbols" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DUMPBIN+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in "dumpbin -symbols" "link -dump -symbols" do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_DUMPBIN+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... " >&6; } if test "${lt_cv_nm_interface+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:3891: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:3894: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:3897: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi # find the maximum length of command line arguments { $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if test "${lt_cv_sys_max_cmd_len+set}" = set; then : $as_echo_n "(cached) " >&6 else i=0 teststring="ABCD" case $build_os in msdosdjgpp*) |
︙ | ︙ | |||
3878 3879 3880 3881 3882 3883 3884 | fi ;; esac fi if test -n $lt_cv_sys_max_cmd_len ; then | | | | | | | | 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 | fi ;; esac fi if test -n $lt_cv_sys_max_cmd_len ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 $as_echo "$xsi_shell" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 $as_echo_n "checking whether the shell understands \"+=\"... " >&6; } lt_shell_append=no ( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 $as_echo "$lt_shell_append" >&6; } if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false |
︙ | ︙ | |||
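Besides reporting the maximum command-line length, the hunk above carries the two shell-capability probes libtool relies on: whether the configure shell understands XSI parameter expansion and whether it supports the "+=" append operator. Reduced to a sketch under the same logic (variable names here are illustrative only):

  # XSI constructs: prefix/suffix stripping, arithmetic, and ${#var}.
  xsi_shell=no
  ( _d=a/b/c
    test "${_d##*/},${_d%/*}" = "c,a/b" \
      && eval 'test $(( 1 + 1 )) -eq 2 && test "${#_d}" -eq 5'
  ) >/dev/null 2>&1 && xsi_shell=yes

  # "+=" append, probed through eval so older shells fail gracefully
  # instead of aborting on a parse error.
  shell_append=no
  ( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \
    >/dev/null 2>&1 && shell_append=yes

  echo "XSI constructs: $xsi_shell, += append: $shell_append"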
3950 3951 3952 3953 3954 3955 3956 |
| | | | | 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 | { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... " >&6; } if test "${lt_cv_ld_reload_flag+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' |
︙ | ︙ | |||
3986 3987 3988 3989 3990 3991 3992 | if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 | | | | | | | | | | | | | | | | < < | < < | | | 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 | if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OBJDUMP+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... " >&6; } if test "${lt_cv_deplibs_check_method+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. |
︙ | ︙ | |||
4285 4286 4287 4288 4289 4290 4291 | tpf*) lt_cv_deplibs_check_method=pass_all ;; esac fi | | | | | | | | | | | | | | | | | < < | < < | 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 | tpf*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. set dummy ${ac_tool_prefix}ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AR+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AR="${ac_tool_prefix}ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_AR"; then ac_ct_AR=$AR # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_AR+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_AR="ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi |
︙ | ︙ | |||
4414 4415 4416 4417 4418 4419 4420 | if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 | | | | | | | | | | | | | | | | < < | < < | | | | | | | | | | | | | | | < < | < < | 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 | if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_STRIP+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_RANLIB+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi |
︙ | ︙ | |||
4678 4679 4680 4681 4682 4683 4684 | LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. | | | | 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 | LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then : $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. |
︙ | ︙ | |||
4796 4797 4798 4799 4800 4801 4802 | void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF | | | | | | | | 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 | void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Now try to grab the symbols. nlist=conftest.nm if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\""; } >&5 (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi |
︙ | ︙ | |||
4860 4861 4862 4863 4864 4865 4866 | _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_save_LIBS="$LIBS" lt_save_CFLAGS="$CFLAGS" LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" | | | | | 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 | _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_save_LIBS="$LIBS" lt_save_CFLAGS="$CFLAGS" LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS="$lt_save_LIBS" CFLAGS="$lt_save_CFLAGS" else echo "cannot find nm_test_func in $nlist" >&5 fi |
︙ | ︙ | |||
4898 4899 4900 4901 4902 4903 4904 | fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then | | | | 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 | fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 $as_echo "ok" >&6; } fi |
︙ | ︙ | |||
4928 4929 4930 4931 4932 4933 4934 | # Check whether --enable-libtool-lock was given. | | | | | | | | | | 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 | # Check whether --enable-libtool-lock was given. if test "${enable_libtool_lock+set}" = set; then : enableval=$enable_libtool_lock; fi test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '#line 5103 "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" |
︙ | ︙ | |||
4997 4998 4999 5000 5001 5002 5003 | rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext | | | | | 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 | rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) |
︙ | ︙ | |||
5050 5051 5052 5053 5054 5055 5056 | rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" | | | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < | < < | | | | | | | 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 | rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... " >&6; } if test "${lt_cv_cc_needs_belf+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_cc_needs_belf=yes else lt_cv_cc_needs_belf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; sparc*-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) LD="${LD-ld} -m elf64_sparc" ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" |
︙ | ︙ | |||
5155 5156 5157 5158 5159 5160 5161 | case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 | | | | | | | | | | | | | | | | < < | < < | | | | | | | | | | | | | | | < < | < < | | | | | | | | | | | | | | | < < | < < | | | | | | | | | | | | | | | < < | < < | | | | | | | | | | | | | | | < < | < < | 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 | case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DSYMUTIL+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_NMEDIT+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_LIPO+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_LIPO+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OTOOL+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OTOOL+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OTOOL64+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OTOOL64+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi |
︙ | ︙ | |||
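The five Darwin-specific checks above (dsymutil, nmedit, lipo, otool, otool64) are all instances of one expansion: walk $PATH, take the first executable match, and fall back to ':' (a no-op) with a cross-compile warning when only the unprefixed tool is found. The repeated pattern reduces to roughly the sketch below; the helper name find_tool and its interface are assumptions for illustration, not part of the generated script.

  PATH_SEPARATOR=${PATH_SEPARATOR-:}

  # First executable named $1 on PATH, or $2 when nothing is found.
  find_tool() {
    tool=$1 fallback=$2
    save_IFS=$IFS; IFS=$PATH_SEPARATOR
    for dir in $PATH; do
      IFS=$save_IFS
      test -z "$dir" && dir=.
      if test -f "$dir/$tool" && test -x "$dir/$tool"; then
        echo "$dir/$tool"; return 0
      fi
    done
    IFS=$save_IFS
    echo "$fallback"; return 1
  }

  DSYMUTIL=`find_tool dsymutil :`   # ':' mirrors configure's do-nothing fallback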
5658 5659 5660 5661 5662 5663 5664 |
| | | | 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 | { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if test "${lt_cv_apple_cc_single_mod+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the |
︙ | ︙ | |||
5685 5686 5687 5688 5689 5690 5691 | else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi | | | | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < | < < | | | | 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 | else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } if test "${lt_cv_ld_exported_symbols_list+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_ld_exported_symbols_list=yes else lt_cv_ld_exported_symbols_list=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on |
︙ | ︙ | |||
5788 5789 5790 5791 5792 5793 5794 | esac ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu | | | | < < < < < < < < < < < < < < < < < < < < | | < < < < | < < < < < < < < < < < < < < < < < < < < | < < < < | | | < < < < < < < < < < < < < < < < < < < < | | < < < < | < < < < < < < < < < < < < < < < < < < < | < < < < | | | | | | < | | | < < < < < | < < < < < < < < < < < < < < < < < < < | < | < < < < | | | < < < < | | | | < < < < | 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 | esac ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if test "${ac_cv_prog_CPP+set}" = set; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details." "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if test "${ac_cv_header_stdc+set}" = set; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <float.h> int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <string.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ctype.h> #include <stdlib.h> #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else |
︙ | ︙ | |||
6148 6149 6150 6151 6152 6153 6154 | for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF | < < < < < < < < < < < < < < < < < < < < < | | < < < < < | < | > < | < | < < < < < < < < < < | | < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < | | < | < < < < < < < < < | < < < < < < < < < < < < | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < | | | 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 | for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " eval as_val=\$$as_ac_Header if test "x$as_val" = x""yes; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in dlfcn.h do : ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default " if test "x$ac_cv_header_dlfcn_h" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF fi done # Set options enable_dlopen=no enable_win32_dll=no # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. |
︙ | ︙ | |||
6373 6374 6375 6376 6377 6378 6379 | # Check whether --enable-static was given. | | | 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 | # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. |
︙ | ︙ | |||
6405 6406 6407 6408 6409 6410 6411 | # Check whether --with-pic was given. | | | | 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 | # Check whether --with-pic was given. if test "${with_pic+set}" = set; then : withval=$with_pic; pic_mode="$withval" else pic_mode=default fi test -z "$pic_mode" && pic_mode=default # Check whether --enable-fast-install was given. if test "${enable_fast_install+set}" = set; then : enableval=$enable_fast_install; p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. |
︙ | ︙ | |||
6502 6503 6504 6505 6506 6507 6508 | if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi | | | | | 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 | if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 $as_echo_n "checking for objdir... " >&6; } if test "${lt_cv_objdir+set}" = set; then : $as_echo_n "(cached) " >&6 else rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 $as_echo "$lt_cv_objdir" >&6; } objdir=$lt_cv_objdir |
︙ | ︙ | |||
6610 6611 6612 6613 6614 6615 6616 | # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then | | | | 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 | # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } if test "${lt_cv_path_MAGIC_CMD+set}" = set; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) |
︙ | ︙ | |||
6663 6664 6665 6666 6667 6668 6669 | MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then | | | | | | 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 | MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 $as_echo_n "checking for file... " >&6; } if test "${lt_cv_path_MAGIC_CMD+set}" = set; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) |
︙ | ︙ | |||
6729 6730 6731 6732 6733 6734 6735 | MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then | | | | 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 | MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi |
︙ | ︙ | |||
6809 6810 6811 6812 6813 6814 6815 | if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test "$GCC" = yes; then lt_prog_compiler_no_builtin_flag=' -fno-builtin' | | | | | | | | 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 | if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test "$GCC" = yes; then lt_prog_compiler_no_builtin_flag=' -fno-builtin' { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:6629: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:6633: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if test "$GCC" = yes; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in |
︙ | ︙ | |||
7136 7137 7138 7139 7140 7141 7142 | *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac | | | | | | | | 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 | *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 $as_echo "$lt_prog_compiler_pic" >&6; } # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } if test "${lt_cv_prog_compiler_pic_works+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:6968: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:6972: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test x"$lt_cv_prog_compiler_pic_works" = xyes; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac |
︙ | ︙ | |||
7207 7208 7209 7210 7211 7212 7213 | # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" | | | | 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 | # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if test "${lt_cv_prog_compiler_static_works+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then |
︙ | ︙ | |||
7235 7236 7237 7238 7239 7240 7241 | lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi | | | | | | | 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 | lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test x"$lt_cv_prog_compiler_static_works" = xyes; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if test "${lt_cv_prog_compiler_c_o+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:7073: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:7077: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then |
︙ | ︙ | |||
7297 7298 7299 7300 7301 7302 7303 | test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi | | | | | | | 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 | test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if test "${lt_cv_prog_compiler_c_o+set}" = set; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:7128: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:7132: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then |
︙ | ︙ | |||
7352 7353 7354 7355 7356 7357 7358 | test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi | | | | | | | 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 | test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= |
︙ | ︙ | |||
7827 7828 7829 7830 7831 7832 7833 | always_export_symbols=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. | | | > > > > > > > > > | > > > > > > > > > > > > > > > > > > > > > > > > > > > | < < < < < < < < < < < < < < < < < < < < | < < < < < < < | | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 | always_export_symbols=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' ${wl}-bernotok' allow_undefined_flag=' ${wl}-berok' |
︙ | ︙ | |||
8168 8169 8170 8171 8172 8173 8174 | if test "$GCC" = yes; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" | | > < < < < < < < < < < < < < < < < < < < < | < < < < < < < | | | 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 | if test "$GCC" = yes; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int foo(void) {} _ACEOF if ac_fn_c_try_link "$LINENO"; then : archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' |
︙ | ︙ | |||
8459 8460 8461 8462 8463 8464 8465 | sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='${wl}-Blargedynsym' ;; esac fi fi | | | 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 | sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='${wl}-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test "$ld_shlibs" = no && can_build_shared=no with_gnu_ld=$with_gnu_ld |
︙ | ︙ | |||
8496 8497 8498 8499 8500 8501 8502 | *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. | | | | | | | | | | 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 | *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then archive_cmds_need_lc=no else archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* { $as_echo "$as_me:${as_lineno-$LINENO}: result: $archive_cmds_need_lc" >&5 $as_echo "$archive_cmds_need_lc" >&6; } ;; esac fi ;; esac |
︙ | ︙ | |||
8698 8699 8700 8701 8702 8703 8704 |
| | | 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 | { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac |
︙ | ︙ | |||
9133 9134 9135 9136 9137 9138 9139 | shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" | | < < < < < < < < < < < < < < < < < < < < < < < < | | < < < < < < < < | | | 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 | shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes |
︙ | ︙ | |||
9394 9395 9396 9397 9398 9399 9400 | shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac | | | 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 | shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi |
︙ | ︙ | |||
9496 9497 9498 9499 9500 9501 9502 |
| | | 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 | { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test "X$hardcode_automatic" = "Xyes" ; then # We can hardcode non-existent directories. |
︙ | ︙ | |||
9521 9522 9523 9524 9525 9526 9527 | hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action=unsupported fi | | | 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 | hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test "$hardcode_action" = relink || test "$inherit_rpath" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || |
︙ | ︙ | |||
9566 9567 9568 9569 9570 9571 9572 | cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it | | | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < | < < | | | | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < | < < < < < < < < < | | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < | < < | | | | > > > > | | | | | < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | | < < | < < < | | > | | | | | | | | | < < < < < < < < < < < < < < < < < < < < < < < < | | < < < | < < | | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | | | | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < | < < | | | | | 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 | cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if test "${ac_cv_lib_dl_dlopen+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = x""yes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; *) ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" if test "x$ac_cv_func_shl_load" = x""yes; then : lt_cv_dlopen="shl_load" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... 
" >&6; } if test "${ac_cv_lib_dld_shl_load+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_shl_load=yes else ac_cv_lib_dld_shl_load=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = x""yes; then : lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" else ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" if test "x$ac_cv_func_dlopen" = x""yes; then : lt_cv_dlopen="dlopen" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if test "${ac_cv_lib_dl_dlopen+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = x""yes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if test "${ac_cv_lib_svld_dlopen+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svld_dlopen=yes else ac_cv_lib_svld_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = x""yes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if test "${ac_cv_lib_dld_dld_link+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_dld_link=yes else ac_cv_lib_dld_dld_link=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = x""yes; then : lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" fi fi |
︙ | ︙ | |||
10128 10129 10130 10131 10132 10133 10134 | save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" | | | | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 | save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... " >&6; } if test "${lt_cv_dlopen_self+set}" = set; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line 9508 "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include <dlfcn.h> #endif #include <stdio.h> #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif void fnord() { int i=42;} int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... 
" >&6; } if test "${lt_cv_dlopen_self_static+set}" = set; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line 9604 "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include <dlfcn.h> #endif #include <stdio.h> |
︙ | ︙ | |||
10198 10199 10200 10201 10202 10203 10204 | } else puts (dlerror ()); return status; } _LT_EOF | < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | | | | 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 | } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; |
︙ | ︙ | |||
10354 10355 10356 10357 10358 10359 10360 | striplib= old_striplib= | | | | | | | | | | | | | 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 | striplib= old_striplib= { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report which library types will actually be built { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... " >&6; } test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c |
︙ | ︙ | |||
10476 10477 10478 10479 10480 10481 10482 | # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. | | | | | | | | 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 | # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if test "${ac_cv_path_install+set}" = set; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do |
︙ | ︙ | |||
10528 10529 10530 10531 10532 10533 10534 | fi fi done done ;; esac | | | | | | | | | | | | | | | > > > > > > > > > > > > > > > > > > > > > > > > > > > > | > > > > > > > > > > > > > > | < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < | < < < < < < < < < < < < < < < < < < < < < < | | | | < < < < < | < < < < < < < < < < < < < < < < < < < < < < | < < < < < | < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < | | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < | < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | | < < < < < < < < < < < < < < | < < < < | < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 
10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141 10142 10143 10144 10145 10146 10147 10148 10149 10150 10151 10152 10153 10154 10155 10156 10157 10158 10159 10160 10161 10162 10163 10164 10165 10166 10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178 10179 10180 10181 10182 10183 10184 10185 10186 10187 10188 10189 10190 10191 10192 10193 10194 10195 10196 10197 10198 10199 10200 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10243 10244 10245 10246 10247 10248 10249 10250 10251 10252 10253 10254 10255 10256 10257 10258 10259 10260 10261 10262 10263 10264 10265 10266 10267 10268 10269 10270 10271 10272 10273 10274 10275 10276 10277 10278 10279 10280 10281 10282 10283 10284 10285 10286 10287 10288 10289 10290 10291 10292 10293 10294 10295 | fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AWK+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done ######### # Enable large file support (if special flags are necessary) # # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... " >&6; } if test "${ac_cv_sys_largefile_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : break fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_largefile_CC=' -n32'; break fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if test "${ac_cv_sys_file_offset_bits+set}" = set; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _FILE_OFFSET_BITS 64 #include <sys/types.h> /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. 
*/ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=64; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if test "${ac_cv_sys_large_files+set}" = set; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _LARGE_FILES 1 #include <sys/types.h> /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 
1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=1; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi ######### # Check for needed/wanted data types ac_fn_c_check_type "$LINENO" "int8_t" "ac_cv_type_int8_t" "$ac_includes_default" if test "x$ac_cv_type_int8_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_INT8_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "int16_t" "ac_cv_type_int16_t" "$ac_includes_default" if test "x$ac_cv_type_int16_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_INT16_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "int32_t" "ac_cv_type_int32_t" "$ac_includes_default" if test "x$ac_cv_type_int32_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_INT32_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "int64_t" "ac_cv_type_int64_t" "$ac_includes_default" if test "x$ac_cv_type_int64_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_INT64_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "intptr_t" "ac_cv_type_intptr_t" "$ac_includes_default" if test "x$ac_cv_type_intptr_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_INTPTR_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "uint8_t" "ac_cv_type_uint8_t" "$ac_includes_default" if test "x$ac_cv_type_uint8_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UINT8_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "uint16_t" "ac_cv_type_uint16_t" "$ac_includes_default" if test "x$ac_cv_type_uint16_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UINT16_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "uint32_t" "ac_cv_type_uint32_t" "$ac_includes_default" if test "x$ac_cv_type_uint32_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UINT32_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "uint64_t" "ac_cv_type_uint64_t" "$ac_includes_default" if test "x$ac_cv_type_uint64_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UINT64_T 1 _ACEOF fi ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "$ac_includes_default" if test "x$ac_cv_type_uintptr_t" = x""yes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UINTPTR_T 1 _ACEOF fi ######### # Check for needed/wanted headers for ac_header in sys/types.h stdlib.h stdint.h inttypes.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" eval as_val=\$$as_ac_Header if test "x$as_val" = x""yes; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done ######### # Figure out whether or not we have these functions # for ac_func in usleep fdatasync localtime_r gmtime_r localtime_s utime do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" eval as_val=\$$as_ac_var if test "x$as_val" = x""yes; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done |
︙ | ︙ | |||
12256 12257 12258 12259 12260 12261 12262 | # if not, then we fall back to plain tclsh. # TODO: try other versions before falling back? # for ac_prog in tclsh8.5 tclsh do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 | | | | | | | | | 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10343 10344 10345 10346 10347 10348 | # if not, then we fall back to plain tclsh. # TODO: try other versions before falling back? # for ac_prog in tclsh8.5 tclsh do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_TCLSH_CMD+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$TCLSH_CMD"; then ac_cv_prog_TCLSH_CMD="$TCLSH_CMD" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_TCLSH_CMD="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi TCLSH_CMD=$ac_cv_prog_TCLSH_CMD if test -n "$TCLSH_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TCLSH_CMD" >&5 $as_echo "$TCLSH_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$TCLSH_CMD" && break done test -n "$TCLSH_CMD" || TCLSH_CMD="none" |
︙ | ︙ | |||
12324 12325 12326 12327 12328 12329 12330 | # if test "$program_prefix" = "NONE"; then program_prefix="" fi VERSION=`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'` | | | | | | 10372 10373 10374 10375 10376 10377 10378 10379 10380 10381 10382 10383 10384 10385 10386 10387 10388 10389 10390 10391 10392 10393 10394 10395 10396 10397 10398 10399 10400 10401 10402 10403 10404 10405 10406 10407 10408 10409 | # if test "$program_prefix" = "NONE"; then program_prefix="" fi VERSION=`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'` { $as_echo "$as_me:${as_lineno-$LINENO}: Version set to $VERSION" >&5 $as_echo "$as_me: Version set to $VERSION" >&6;} RELEASE=`cat $srcdir/VERSION` { $as_echo "$as_me:${as_lineno-$LINENO}: Release set to $RELEASE" >&5 $as_echo "$as_me: Release set to $RELEASE" >&6;} VERSION_NUMBER=`cat $srcdir/VERSION \ | sed 's/[^0-9]/ /g' \ | awk '{printf "%d%03d%03d",$1,$2,$3}'` { $as_echo "$as_me:${as_lineno-$LINENO}: Version number set to $VERSION_NUMBER" >&5 $as_echo "$as_me: Version number set to $VERSION_NUMBER" >&6;} ######### # Check to see if the --with-hints=FILE option is used. If there is none, # then check for a files named "$host.hints" and ../$hosts.hints where # $host is the hostname of the build system. If still no hints are # found, try looking in $system.hints and ../$system.hints where # $system is the result of uname -s. # # Check whether --with-hints was given. if test "${with_hints+set}" = set; then : withval=$with_hints; hints=$withval fi if test "$hints" = ""; then host=`hostname | sed 's/\..*//'` if test -r $host.hints; then hints=$host.hints |
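For reference, the VERSION to VERSION_NUMBER pipeline above can be exercised by hand; assuming the VERSION file contains 3.7.8 (the version named later in this script), it yields the packed integer form:

    # same sed/awk pipeline as above, applied to a literal version string
    echo '3.7.8' | sed 's/[^0-9]/ /g' | awk '{printf "%d%03d%03d",$1,$2,$3}'
    # prints 3007008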
︙ | ︙ | |||
12372 12373 12374 12375 12376 12377 12378 | else if test -r ../$sys.hints; then hints=../$sys.hints fi fi fi if test "$hints" != ""; then | | | | | | | | | | | | | | | | < < < < | 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10443 10444 10445 10446 10447 10448 10449 10450 10451 10452 10453 10454 10455 10456 10457 10458 10459 10460 10461 10462 10463 10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478 10479 10480 10481 10482 10483 10484 10485 10486 10487 10488 10489 10490 10491 10492 10493 10494 10495 10496 10497 10498 10499 10500 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 | else if test -r ../$sys.hints; then hints=../$sys.hints fi fi fi if test "$hints" != ""; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: reading hints from $hints" >&5 $as_echo "reading hints from $hints" >&6; } . $hints fi ######### # Locate a compiler for the build machine. This compiler should # generate command-line programs that run on the build machine. # if test x"$cross_compiling" = xno; then BUILD_CC=$CC BUILD_CFLAGS=$CFLAGS else if test "${BUILD_CC+set}" != set; then for ac_prog in gcc cc cl do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_BUILD_CC+set}" = set; then : $as_echo_n "(cached) " >&6 else if test -n "$BUILD_CC"; then ac_cv_prog_BUILD_CC="$BUILD_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_BUILD_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi BUILD_CC=$ac_cv_prog_BUILD_CC if test -n "$BUILD_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $BUILD_CC" >&5 $as_echo "$BUILD_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$BUILD_CC" && break done fi if test "${BUILD_CFLAGS+set}" != set; then BUILD_CFLAGS="-g" fi fi ########## # Do we want to support multithreaded use of sqlite # # Check whether --enable-threadsafe was given. if test "${enable_threadsafe+set}" = set; then : enableval=$enable_threadsafe; else enable_threadsafe=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support threadsafe operation" >&5 $as_echo_n "checking whether to support threadsafe operation... " >&6; } if test "$enable_threadsafe" = "no"; then SQLITE_THREADSAFE=0 { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } else SQLITE_THREADSAFE=1 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi if test "$SQLITE_THREADSAFE" = "1"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 $as_echo_n "checking for library containing pthread_create... " >&6; } if test "${ac_cv_search_pthread_create+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" |
︙ | ︙ | |||
12494 12495 12496 12497 12498 12499 12500 | for ac_lib in '' pthread; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi | < < < < < < < < < < < < < < < < < < < < | < < < < < < < | | | | | | | | | | | | | | | | | | | | | | | | | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 10538 10539 10540 10541 10542 10543 10544 10545 10546 10547 10548 10549 10550 10551 10552 10553 10554 10555 10556 10557 10558 10559 10560 10561 10562 10563 10564 10565 10566 10567 10568 10569 10570 10571 10572 10573 10574 10575 10576 10577 10578 10579 10580 10581 10582 10583 10584 10585 10586 10587 10588 10589 10590 10591 10592 10593 10594 10595 10596 10597 10598 10599 10600 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10643 10644 10645 10646 10647 10648 10649 10650 10651 10652 10653 10654 10655 10656 10657 10658 10659 10660 10661 10662 10663 10664 10665 10666 10667 10668 10669 10670 10671 10672 10673 10674 10675 10676 10677 10678 10679 10680 10681 10682 10683 10684 | for ac_lib in '' pthread; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_pthread_create=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if test "${ac_cv_search_pthread_create+set}" = set; then : break fi done if test "${ac_cv_search_pthread_create+set}" = set; then : else ac_cv_search_pthread_create=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5 $as_echo "$ac_cv_search_pthread_create" >&6; } ac_res=$ac_cv_search_pthread_create if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi fi ########## # Do we want to allow a connection created in one thread to be used # in another thread. This does not work on many Linux systems (ex: RedHat 9) # due to bugs in the threading implementations. This is thus off by default. # # Check whether --enable-cross-thread-connections was given. if test "${enable_cross_thread_connections+set}" = set; then : enableval=$enable_cross_thread_connections; else enable_xthreadconnect=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to allow connections to be shared across threads" >&5 $as_echo_n "checking whether to allow connections to be shared across threads... " >&6; } if test "$enable_xthreadconnect" = "no"; then XTHREADCONNECT='' { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } else XTHREADCONNECT='-DSQLITE_ALLOW_XTHREAD_CONNECT=1' { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi ########## # Do we want to support release # # Check whether --enable-releasemode was given. if test "${enable_releasemode+set}" = set; then : enableval=$enable_releasemode; else enable_releasemode=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support shared library linked as release mode or not" >&5 $as_echo_n "checking whether to support shared library linked as release mode or not... 
" >&6; } if test "$enable_releasemode" = "no"; then ALLOWRELEASE="" { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } else ALLOWRELEASE="-release `cat $srcdir/VERSION`" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi ########## # Do we want temporary databases in memory # # Check whether --enable-tempstore was given. if test "${enable_tempstore+set}" = set; then : enableval=$enable_tempstore; else enable_tempstore=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use an in-ram database for temporary tables" >&5 $as_echo_n "checking whether to use an in-ram database for temporary tables... " >&6; } case "$enable_tempstore" in never ) TEMP_STORE=0 { $as_echo "$as_me:${as_lineno-$LINENO}: result: never" >&5 $as_echo "never" >&6; } ;; no ) TEMP_STORE=1 { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; yes ) TEMP_STORE=2 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ;; always ) TEMP_STORE=3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: always" >&5 $as_echo "always" >&6; } ;; * ) TEMP_STORE=1 { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac ########### # Lots of things are different if we are compiling for Windows using # the CYGWIN environment. So check for that special case and handle # things accordingly. # { $as_echo "$as_me:${as_lineno-$LINENO}: checking if executables have the .exe suffix" >&5 $as_echo_n "checking if executables have the .exe suffix... " >&6; } if test "$config_BUILD_EXEEXT" = ".exe"; then CYGWIN=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: unknown" >&5 $as_echo "unknown" >&6; } fi if test "$CYGWIN" != "yes"; then case $host_os in *cygwin* ) CYGWIN=yes;; * ) CYGWIN=no;; esac fi |
︙ | ︙ | |||
12741 12742 12743 12744 12745 12746 12747 | # # This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG # macros in the in the tcl.m4 file of the standard TCL distribution. # Those macros could not be used directly since we have to make some # minor changes to accomodate systems that do not have TCL installed. # # Check whether --enable-tcl was given. | | | | | < | < | 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 10743 10744 10745 10746 10747 10748 10749 10750 10751 10752 10753 10754 10755 10756 10757 10758 10759 10760 | # # This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG # macros in the in the tcl.m4 file of the standard TCL distribution. # Those macros could not be used directly since we have to make some # minor changes to accomodate systems that do not have TCL installed. # # Check whether --enable-tcl was given. if test "${enable_tcl+set}" = set; then : enableval=$enable_tcl; use_tcl=$enableval else use_tcl=yes fi if test "${use_tcl}" = "yes" ; then # Check whether --with-tcl was given. if test "${with_tcl+set}" = set; then : withval=$with_tcl; with_tclconfig=${withval} fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Tcl configuration" >&5 $as_echo_n "checking for Tcl configuration... " >&6; } if test "${ac_cv_c_tclconfig+set}" = set; then : $as_echo_n "(cached) " >&6 else # First check to see if --with-tcl was specified. if test x"${with_tclconfig}" != x ; then if test -f "${with_tclconfig}/tclConfig.sh" ; then ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` else as_fn_error "${with_tclconfig} directory doesn't contain tclConfig.sh" "$LINENO" 5 fi fi # Start autosearch by asking tclsh if test x"$cross_compiling" = xno; then for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` do |
︙ | ︙ | |||
12840 12841 12842 12843 12844 12845 12846 | fi fi if test x"${ac_cv_c_tclconfig}" = x ; then use_tcl=no | | | | | | | | | 10819 10820 10821 10822 10823 10824 10825 10826 10827 10828 10829 10830 10831 10832 10833 10834 10835 10836 10837 10838 10839 10840 10841 10842 10843 10844 10845 10846 10847 10848 10849 10850 10851 | fi fi if test x"${ac_cv_c_tclconfig}" = x ; then use_tcl=no { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Can't find Tcl configuration definitions" >&5 $as_echo "$as_me: WARNING: Can't find Tcl configuration definitions" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&5 $as_echo "$as_me: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&5 $as_echo "$as_me: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&2;} else TCL_BIN_DIR=${ac_cv_c_tclconfig} { $as_echo "$as_me:${as_lineno-$LINENO}: result: found $TCL_BIN_DIR/tclConfig.sh" >&5 $as_echo "found $TCL_BIN_DIR/tclConfig.sh" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5 $as_echo_n "checking for existence of $TCL_BIN_DIR/tclConfig.sh... " >&6; } if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: loading" >&5 $as_echo "loading" >&6; } . $TCL_BIN_DIR/tclConfig.sh else { $as_echo "$as_me:${as_lineno-$LINENO}: result: file not found" >&5 $as_echo "file not found" >&6; } fi # # If the TCL_BIN_DIR is the build directory (not the install directory), # then set the common variable name to the value of the build variables. # For example, the variable TCL_LIB_SPEC will be set to the value |
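When the automatic search above fails (the "Can't find Tcl configuration definitions" warning), the directory containing tclConfig.sh can be supplied directly; the path here is only an example:

    ./configure --with-tcl=/usr/lib/tcl8.5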
︙ | ︙ | |||
12919 12920 12921 12922 12923 12924 12925 | # Figure out what C libraries are required to compile programs # that use "readline()" library. # TARGET_READLINE_LIBS="" TARGET_READLINE_INC="" TARGET_HAVE_READLINE=0 # Check whether --enable-readline was given. | | | | | | < < < < | 10898 10899 10900 10901 10902 10903 10904 10905 10906 10907 10908 10909 10910 10911 10912 10913 10914 10915 10916 10917 10918 10919 10920 10921 10922 10923 10924 10925 10926 10927 10928 10929 10930 10931 10932 10933 10934 10935 10936 10937 10938 10939 | # Figure out what C libraries are required to compile programs # that use "readline()" library. # TARGET_READLINE_LIBS="" TARGET_READLINE_INC="" TARGET_HAVE_READLINE=0 # Check whether --enable-readline was given. if test "${enable_readline+set}" = set; then : enableval=$enable_readline; with_readline=$enableval else with_readline=auto fi if test x"$with_readline" != xno; then found="yes" # Check whether --with-readline-lib was given. if test "${with_readline_lib+set}" = set; then : withval=$with_readline_lib; with_readline_lib=$withval else with_readline_lib="auto" fi if test "x$with_readline_lib" = xauto; then save_LIBS="$LIBS" LIBS="" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing tgetent" >&5 $as_echo_n "checking for library containing tgetent... " >&6; } if test "${ac_cv_search_tgetent+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" |
︙ | ︙ | |||
12975 12976 12977 12978 12979 12980 12981 | for ac_lib in '' readline ncurses curses termcap; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi | < < < < < < < < < < < < < < < < < < < < | < < < < < < < | | | | | | | | | | < < < < < < < < < < < < < < < < < < < < < < < < | < < < | < < | | | | | < < < < < < < < < < < < < < < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < | | < | < | < | | | | 10950 10951 10952 10953 10954 10955 10956 10957 10958 10959 10960 10961 10962 10963 10964 10965 10966 10967 10968 10969 10970 10971 10972 10973 10974 10975 10976 10977 10978 10979 10980 10981 10982 10983 10984 10985 10986 10987 10988 10989 10990 10991 10992 10993 10994 10995 10996 10997 10998 10999 11000 11001 11002 11003 11004 11005 11006 11007 11008 11009 11010 11011 11012 11013 11014 11015 11016 11017 11018 11019 11020 11021 11022 11023 11024 11025 11026 11027 11028 11029 11030 11031 11032 11033 11034 11035 11036 11037 11038 11039 11040 11041 11042 11043 11044 11045 11046 11047 11048 11049 11050 11051 11052 11053 11054 11055 11056 11057 11058 11059 11060 11061 11062 11063 11064 11065 11066 11067 11068 11069 11070 11071 11072 11073 11074 11075 | for ac_lib in '' readline ncurses curses termcap; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_tgetent=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if test "${ac_cv_search_tgetent+set}" = set; then : break fi done if test "${ac_cv_search_tgetent+set}" = set; then : else ac_cv_search_tgetent=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_tgetent" >&5 $as_echo "$ac_cv_search_tgetent" >&6; } ac_res=$ac_cv_search_tgetent if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" term_LIBS="$LIBS" else term_LIBS="" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for readline in -lreadline" >&5 $as_echo_n "checking for readline in -lreadline... " >&6; } if test "${ac_cv_lib_readline_readline+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lreadline $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char readline (); int main () { return readline (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_readline_readline=yes else ac_cv_lib_readline_readline=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_readline_readline" >&5 $as_echo "$ac_cv_lib_readline_readline" >&6; } if test "x$ac_cv_lib_readline_readline" = x""yes; then : TARGET_READLINE_LIBS="-lreadline" else found="no" fi TARGET_READLINE_LIBS="$TARGET_READLINE_LIBS $term_LIBS" LIBS="$save_LIBS" else TARGET_READLINE_LIBS="$with_readline_lib" fi # Check whether --with-readline-inc was given. 
if test "${with_readline_inc+set}" = set; then : withval=$with_readline_inc; with_readline_inc=$withval else with_readline_inc="auto" fi if test "x$with_readline_inc" = xauto; then ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" if test "x$ac_cv_header_readline_h" = x""yes; then : found="yes" else found="no" if test "$cross_compiling" != yes; then for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do for subdir in include include/readline; do as_ac_File=`$as_echo "ac_cv_file_$dir/$subdir/readline.h" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $dir/$subdir/readline.h" >&5 $as_echo_n "checking for $dir/$subdir/readline.h... " >&6; } if { as_var=$as_ac_File; eval "test \"\${$as_var+set}\" = set"; }; then : $as_echo_n "(cached) " >&6 else test "$cross_compiling" = yes && as_fn_error "cannot check for file existence when cross compiling" "$LINENO" 5 if test -r "$dir/$subdir/readline.h"; then eval "$as_ac_File=yes" else eval "$as_ac_File=no" fi fi eval ac_res=\$$as_ac_File { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval as_val=\$$as_ac_File if test "x$as_val" = x""yes; then : found=yes fi if test "$found" = "yes"; then TARGET_READLINE_INC="-I$dir/$subdir" break fi |
︙ | ︙ | |||
13308 13309 13310 13311 13312 13313 13314 | ########## # Figure out what C libraries are required to compile programs # that use "fdatasync()" function. # | | | | < < < < | 11098 11099 11100 11101 11102 11103 11104 11105 11106 11107 11108 11109 11110 11111 11112 11113 11114 11115 11116 11117 11118 | ########## # Figure out what C libraries are required to compile programs # that use "fdatasync()" function. # { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing fdatasync" >&5 $as_echo_n "checking for library containing fdatasync... " >&6; } if test "${ac_cv_search_fdatasync+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" |
︙ | ︙ | |||
13343 13344 13345 13346 13347 13348 13349 | for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi | < < < < < < < < < < < < < < < < < < < < | < < < < < < < | | | | | | | | | | | | | < < < < | 11129 11130 11131 11132 11133 11134 11135 11136 11137 11138 11139 11140 11141 11142 11143 11144 11145 11146 11147 11148 11149 11150 11151 11152 11153 11154 11155 11156 11157 11158 11159 11160 11161 11162 11163 11164 11165 11166 11167 11168 11169 11170 11171 11172 11173 11174 11175 11176 11177 11178 11179 11180 11181 11182 11183 11184 11185 11186 11187 11188 11189 11190 11191 11192 11193 11194 11195 11196 11197 11198 11199 11200 11201 11202 11203 11204 11205 11206 11207 11208 11209 11210 11211 11212 11213 11214 11215 11216 | for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_fdatasync=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if test "${ac_cv_search_fdatasync+set}" = set; then : break fi done if test "${ac_cv_search_fdatasync+set}" = set; then : else ac_cv_search_fdatasync=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_fdatasync" >&5 $as_echo "$ac_cv_search_fdatasync" >&6; } ac_res=$ac_cv_search_fdatasync if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi ######### # check for debug enabled # Check whether --enable-debug was given. if test "${enable_debug+set}" = set; then : enableval=$enable_debug; use_debug=$enableval else use_debug=no fi if test "${use_debug}" = "yes" ; then TARGET_DEBUG="-DSQLITE_DEBUG=1" else TARGET_DEBUG="-DNDEBUG" fi ######### # See whether we should use the amalgamation to build # Check whether --enable-amalgamation was given. if test "${enable_amalgamation+set}" = set; then : enableval=$enable_amalgamation; use_amalgamation=$enableval else use_amalgamation=yes fi if test "${use_amalgamation}" != "yes" ; then USE_AMALGAMATION=0 fi ######### # See whether we should allow loadable extensions # Check whether --enable-load-extension was given. if test "${enable_load_extension+set}" = set; then : enableval=$enable_load_extension; use_loadextension=$enableval else use_loadextension=no fi if test "${use_loadextension}" = "yes" ; then OPT_FEATURE_FLAGS="" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 $as_echo_n "checking for library containing dlopen... " >&6; } if test "${ac_cv_search_dlopen+set}" = set; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" |
︙ | ︙ | |||
13472 13473 13474 13475 13476 13477 13478 | for ac_lib in '' dl; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi | < < < < < < < < < < < < < < < < < < < < | < < < < < < < | | | | | | | | 11227 11228 11229 11230 11231 11232 11233 11234 11235 11236 11237 11238 11239 11240 11241 11242 11243 11244 11245 11246 11247 11248 11249 11250 11251 11252 11253 11254 11255 11256 11257 11258 11259 11260 11261 | for ac_lib in '' dl; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_dlopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if test "${ac_cv_search_dlopen+set}" = set; then : break fi done if test "${ac_cv_search_dlopen+set}" = set; then : else ac_cv_search_dlopen=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 $as_echo "$ac_cv_search_dlopen" >&6; } ac_res=$ac_cv_search_dlopen if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi else OPT_FEATURE_FLAGS="-DSQLITE_OMIT_LOAD_EXTENSION=1" fi |
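These feature tests map onto configure switches as well; for instance, a debug build of the un-amalgamated sources with loadable extensions enabled (one illustrative combination):

    ./configure --enable-debug --disable-amalgamation --enable-load-extension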
︙ | ︙ | |||
13582 13583 13584 13585 13586 13587 13588 | done BUILD_CFLAGS=$ac_temp_BUILD_CFLAGS ######### # See whether we should use GCOV # Check whether --enable-gcov was given. | | | 11310 11311 11312 11313 11314 11315 11316 11317 11318 11319 11320 11321 11322 11323 11324 | done BUILD_CFLAGS=$ac_temp_BUILD_CFLAGS ######### # See whether we should use GCOV # Check whether --enable-gcov was given. if test "${enable_gcov+set}" = set; then : enableval=$enable_gcov; use_gcov=$enableval else use_gcov=no fi if test "${use_gcov}" = "yes" ; then USE_GCOV=1 |
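The coverage option above follows the same pattern (again, purely an example invocation):

    ./configure --enable-gcov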
︙ | ︙ | |||
13634 13635 13636 13637 13638 13639 13640 | # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( | | | | | | | 11362 11363 11364 11365 11366 11367 11368 11369 11370 11371 11372 11373 11374 11375 11376 11377 11378 11379 11380 11381 11382 11383 11384 11385 11386 11387 11388 11389 11390 11391 | # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" |
︙ | ︙ | |||
13672 13673 13674 13675 13676 13677 13678 | s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then test "x$cache_file" != "x/dev/null" && | | | | | | > | > | | | | | | | | | > > > < < < < < < < < < < < > > > > > > | | | 11400 11401 11402 11403 11404 11405 11406 11407 11408 11409 11410 11411 11412 11413 11414 11415 11416 11417 11418 11419 11420 11421 11422 11423 11424 11425 11426 11427 11428 11429 11430 11431 11432 11433 11434 11435 11436 11437 11438 11439 11440 11441 11442 11443 11444 11445 11446 11447 11448 11449 11450 11451 11452 11453 11454 11455 11456 11457 11458 11459 11460 11461 11462 11463 11464 11465 11466 11467 11468 11469 11470 11471 11472 11473 11474 11475 11476 11477 11478 11479 11480 11481 11482 11483 11484 11485 11486 11487 11488 11489 11490 11491 11492 11493 11494 11495 11496 11497 11498 11499 11500 11501 11502 11503 11504 11505 11506 11507 11508 11509 11510 11511 11512 11513 11514 11515 11516 | s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then test "x$cache_file" != "x/dev/null" && { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} cat confcache >$cache_file else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : ${CONFIG_STATUS=./config.status} ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body |
︙ | ︙ | |||
13797 13798 13799 13800 13801 13802 13803 | PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi | < < < < < < < | | | | > | > > | > | > > | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > < < < | | | < < < < | < < < < < < < < < < < < < < < < < < < < < < < < < < < < | < < < < < < < | < < < < < < | | | > > < < < < < < | 11525 11526 11527 11528 11529 11530 11531 11532 11533 11534 11535 11536 11537 11538 11539 11540 11541 11542 11543 11544 11545 11546 11547 11548 11549 11550 11551 11552 11553 11554 11555 11556 11557 11558 11559 11560 11561 11562 11563 11564 11565 11566 11567 11568 11569 11570 11571 11572 11573 11574 11575 11576 11577 11578 11579 11580 11581 11582 11583 11584 11585 11586 11587 11588 11589 11590 11591 11592 11593 11594 11595 11596 11597 11598 11599 11600 11601 11602 11603 11604 11605 11606 11607 11608 11609 11610 11611 11612 11613 11614 11615 11616 11617 11618 11619 11620 11621 11622 11623 11624 11625 11626 11627 11628 11629 11630 11631 11632 11633 11634 11635 11636 11637 11638 11639 11640 11641 11642 11643 11644 11645 11646 11647 11648 11649 11650 11651 11652 11653 11654 11655 11656 11657 11658 11659 11660 11661 11662 11663 11664 11665 11666 11667 11668 11669 11670 11671 11672 11673 11674 11675 11676 11677 11678 11679 11680 11681 11682 11683 11684 11685 11686 11687 11688 11689 11690 11691 11692 11693 11694 11695 11696 11697 11698 11699 11700 11701 11702 11703 11704 11705 11706 11707 11708 11709 11710 11711 11712 11713 11714 11715 11716 11717 11718 11719 11720 11721 11722 11723 11724 11725 11726 11727 11728 | PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error ERROR [LINENO LOG_FD] # --------------------------------- # Output "`basename $0`: error: ERROR" to stderr. 
If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with status $?, using 1 if that was 0. as_fn_error () { as_status=$?; test $as_status -eq 0 && as_status=1 if test "$3"; then as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3 fi $as_echo "$as_me: error: $1" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null |
︙ | ︙ | |||
13983 13984 13985 13986 13987 13988 13989 13990 | fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | | | > > > > > > | | > > > > > > > > > | | > | > > | | | | > | | | | 11743 11744 11745 11746 11747 11748 11749 11750 11751 11752 11753 11754 11755 11756 11757 11758 11759 11760 11761 11762 11763 11764 11765 11766 11767 11768 11769 11770 11771 11772 11773 11774 11775 11776 11777 11778 11779 11780 11781 11782 11783 11784 11785 11786 11787 11788 11789 11790 11791 11792 11793 11794 11795 11796 11797 11798 11799 11800 11801 11802 11803 11804 11805 11806 11807 11808 11809 11810 11811 11812 11813 11814 11815 11816 11817 11818 11819 11820 11821 11822 11823 11824 11825 11826 11827 11828 11829 11830 11831 11832 11833 11834 11835 11836 11837 11838 11839 11840 11841 11842 11843 11844 11845 11846 11847 11848 11849 11850 11851 11852 11853 11854 11855 11856 11857 11858 11859 11860 11861 11862 11863 11864 11865 11866 11867 11868 11869 11870 11871 11872 11873 11874 11875 11876 11877 11878 11879 11880 11881 11882 11883 11884 11885 11886 11887 11888 11889 11890 11891 11892 11893 11894 11895 11896 11897 11898 11899 11900 11901 11902 11903 11904 11905 11906 11907 11908 11909 11910 11911 11912 11913 11914 11915 11916 11917 11918 11919 11920 11921 11922 11923 11924 | fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in #( -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. 
## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by sqlite $as_me 3.7.8, which was generated by GNU Autoconf 2.65. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ sqlite config.status 3.7.8 configured by $0, generated by GNU Autoconf 2.65, with options \\"\$ac_cs_config\\" Copyright (C) 2009 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' AWK='$AWK' |
︙ | ︙ | |||
14118 14119 14120 14121 14122 14123 14124 14125 14126 14127 14128 14129 14130 14131 | case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac | > > | | | | < | | < | | 11945 11946 11947 11948 11949 11950 11951 11952 11953 11954 11955 11956 11957 11958 11959 11960 11961 11962 11963 11964 11965 11966 11967 11968 11969 11970 11971 11972 11973 11974 11975 11976 11977 11978 11979 11980 11981 11982 11983 11984 11985 11986 11987 11988 11989 11990 11991 | case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= |
︙ | ︙ | |||
14460 14461 14462 14463 14464 14465 14466 | do case $ac_config_target in "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "sqlite3.pc") CONFIG_FILES="$CONFIG_FILES sqlite3.pc" ;; | < | < | 12287 12288 12289 12290 12291 12292 12293 12294 12295 12296 12297 12298 12299 12300 12301 | do case $ac_config_target in "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "sqlite3.pc") CONFIG_FILES="$CONFIG_FILES sqlite3.pc" ;; *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely |
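Once generated, config.status can rebuild individual outputs without re-running all of the checks; the tags it accepts are exactly the targets listed above:

    ./config.status Makefile sqlite3.pc   # regenerate just these files
    ./config.status --recheck             # re-run configure with the original arguments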
︙ | ︙ | |||
14489 14490 14491 14492 14493 14494 14495 | # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= trap 'exit_status=$? { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status ' 0 | | < < | < < | > > > > > > | < | < < | < | > < | < | 12314 12315 12316 12317 12318 12319 12320 12321 12322 12323 12324 12325 12326 12327 12328 12329 12330 12331 12332 12333 12334 12335 12336 12337 12338 12339 12340 12341 12342 12343 12344 12345 12346 12347 12348 12349 12350 12351 12352 12353 12354 12355 12356 12357 12358 12359 12360 12361 12362 12363 12364 12365 12366 12367 12368 12369 12370 12371 12372 12373 12374 12375 12376 12377 12378 12379 12380 12381 | # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= trap 'exit_status=$? { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5 # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 |
︙ | ︙ | |||
14567 14568 14569 14570 14571 14572 14573 | s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h | | | | 12389 12390 12391 12392 12393 12394 12395 12396 12397 12398 12399 12400 12401 12402 12403 12404 12405 12406 12407 12408 12409 12410 12411 12412 12413 12414 12415 12416 12417 | s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p |
︙ | ︙ | |||
14634 14635 14636 14637 14638 14639 14640 | _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ | < | < | 12456 12457 12458 12459 12460 12461 12462 12463 12464 12465 12466 12467 12468 12469 12470 | _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ || as_fn_error "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove $(srcdir), # ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then |
︙ | ︙ | |||
14677 14678 14679 14680 14681 14682 14683 | # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_t=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_t"; then break elif $ac_last_try; then | < | < | 12497 12498 12499 12500 12501 12502 12503 12504 12505 12506 12507 12508 12509 12510 12511 | # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_t=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_t"; then break elif $ac_last_try; then as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash |
︙ | ︙ | |||
14745 14746 14747 14748 14749 14750 14751 14752 14753 | mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] if (D_is_set[macro]) { # Preserve the white space surrounding the "#". | > < | < | < | < < | 12563 12564 12565 12566 12567 12568 12569 12570 12571 12572 12573 12574 12575 12576 12577 12578 12579 12580 12581 12582 12583 12584 12585 12586 12587 12588 12589 12590 12591 12592 12593 12594 12595 12596 12597 12598 12599 12600 12601 12602 12603 12604 12605 12606 12607 12608 12609 | mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS |
︙ | ︙ | |||
14809 14810 14811 14812 14813 14814 14815 | # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || | < | < | | < | < | 12623 12624 12625 12626 12627 12628 12629 12630 12631 12632 12633 12634 12635 12636 12637 12638 12639 12640 12641 12642 12643 12644 12645 12646 12647 12648 12649 12650 12651 12652 12653 12654 12655 12656 12657 12658 12659 12660 12661 12662 12663 12664 | # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$tmp/stdin" \ || as_fn_error "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ |
︙ | ︙ | |||
14868 14869 14870 14871 14872 14873 14874 | q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` | | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 12678 12679 12680 12681 12682 12683 12684 12685 12686 12687 12688 12689 12690 12691 12692 | q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. |
︙ | ︙ | |||
14960 14961 14962 14963 14964 14965 14966 | esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= | < | < | | | 12730 12731 12732 12733 12734 12735 12736 12737 12738 12739 12740 12741 12742 12743 12744 12745 12746 12747 12748 12749 12750 12751 12752 12753 12754 12755 12756 12757 12758 12759 12760 12761 12762 12763 12764 12765 12766 12767 | esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 |
︙ | ︙ | |||
15013 15014 15015 15016 15017 15018 15019 | s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ | < | < | < | < < | < | < | < < | < | | 12781 12782 12783 12784 12785 12786 12787 12788 12789 12790 12791 12792 12793 12794 12795 12796 12797 12798 12799 12800 12801 12802 12803 12804 12805 12806 12807 12808 12809 12810 12811 12812 12813 12814 12815 12816 12817 12818 12819 12820 12821 12822 12823 12824 12825 12826 12827 12828 12829 12830 12831 12832 12833 12834 12835 12836 12837 | s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ || as_fn_error "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&2;} rm -f "$tmp/stdin" case $ac_file in -) cat "$tmp/out" && rm -f "$tmp/out";; *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; esac \ || as_fn_error "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" } >"$tmp/config.h" \ || as_fn_error "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$tmp/config.h" "$ac_file" \ || as_fn_error "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error "could not create -" "$LINENO" 5 fi ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "libtool":C) |
︙ | ︙ | |||
15712 15713 15714 15715 15716 15717 15718 | ;; esac done # for ac_tag | | < < | < | | | | 13470 13471 13472 13473 13474 13475 13476 13477 13478 13479 13480 13481 13482 13483 13484 13485 13486 13487 13488 13489 13490 13491 13492 13493 13494 13495 13496 13497 13498 13499 13500 13501 13502 13503 13504 13505 13506 13507 13508 13509 | ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit $? fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi |
Changes to main.mk.
︙ | ︙ | |||
61 62 63 64 65 66 67 | memjournal.o \ mutex.o mutex_noop.o mutex_os2.o mutex_unix.o mutex_w32.o \ notify.o opcodes.o os.o os_os2.o os_unix.o os_win.o \ pager.o parse.o pcache.o pcache1.o pragma.o prepare.o printf.o \ random.o resolve.o rowset.o rtree.o select.o status.o \ table.o tokenize.o trigger.o \ update.o util.o vacuum.o \ | | | | 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 | memjournal.o \ mutex.o mutex_noop.o mutex_os2.o mutex_unix.o mutex_w32.o \ notify.o opcodes.o os.o os_os2.o os_unix.o os_win.o \ pager.o parse.o pcache.o pcache1.o pragma.o prepare.o printf.o \ random.o resolve.o rowset.o rtree.o select.o status.o \ table.o tokenize.o trigger.o \ update.o util.o vacuum.o \ vdbe.o vdbeapi.o vdbeaux.o vdbeblob.o vdbemem.o vdbesort.o \ vdbetrace.o wal.o walker.o where.o utf.o vtab.o # All of the source code files. # SRC = \ $(TOP)/src/alter.c \ |
︙ | ︙ | |||
151 152 153 154 155 156 157 158 159 160 161 162 163 164 | $(TOP)/src/vacuum.c \ $(TOP)/src/vdbe.c \ $(TOP)/src/vdbe.h \ $(TOP)/src/vdbeapi.c \ $(TOP)/src/vdbeaux.c \ $(TOP)/src/vdbeblob.c \ $(TOP)/src/vdbemem.c \ $(TOP)/src/vdbetrace.c \ $(TOP)/src/vdbeInt.h \ $(TOP)/src/vtab.c \ $(TOP)/src/wal.c \ $(TOP)/src/wal.h \ $(TOP)/src/walker.c \ $(TOP)/src/where.c | > | 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 | $(TOP)/src/vacuum.c \ $(TOP)/src/vdbe.c \ $(TOP)/src/vdbe.h \ $(TOP)/src/vdbeapi.c \ $(TOP)/src/vdbeaux.c \ $(TOP)/src/vdbeblob.c \ $(TOP)/src/vdbemem.c \ $(TOP)/src/vdbesort.c \ $(TOP)/src/vdbetrace.c \ $(TOP)/src/vdbeInt.h \ $(TOP)/src/vtab.c \ $(TOP)/src/wal.c \ $(TOP)/src/wal.h \ $(TOP)/src/walker.c \ $(TOP)/src/where.c |
︙ | ︙ | |||
382 383 384 385 386 387 388 389 390 391 392 393 394 395 | sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl tclsh $(TOP)/tool/mksqlite3c.tcl echo '#ifndef USE_SYSTEM_SQLITE' >tclsqlite3.c cat sqlite3.c >>tclsqlite3.c echo '#endif /* USE_SYSTEM_SQLITE */' >>tclsqlite3.c cat $(TOP)/src/tclsqlite.c >>tclsqlite3.c sqlite3-all.c: sqlite3.c $(TOP)/tool/split-sqlite3c.tcl tclsh $(TOP)/tool/split-sqlite3c.tcl fts2amal.c: target_source $(TOP)/ext/fts2/mkfts2amal.tcl tclsh $(TOP)/ext/fts2/mkfts2amal.tcl | > > > > > > > > | 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 | sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl tclsh $(TOP)/tool/mksqlite3c.tcl echo '#ifndef USE_SYSTEM_SQLITE' >tclsqlite3.c cat sqlite3.c >>tclsqlite3.c echo '#endif /* USE_SYSTEM_SQLITE */' >>tclsqlite3.c cat $(TOP)/src/tclsqlite.c >>tclsqlite3.c sqlite3-debug.c: target_source $(TOP)/tool/mksqlite3c.tcl tclsh $(TOP)/tool/mksqlite3c.tcl --linemacros echo '#ifndef USE_SYSTEM_SQLITE' >tclsqlite3.c cat sqlite3.c >>tclsqlite3.c echo '#endif /* USE_SYSTEM_SQLITE */' >>tclsqlite3.c echo '#line 1 "tclsqlite.c"' >>tclsqlite3.c cat $(TOP)/src/tclsqlite.c >>tclsqlite3.c sqlite3-all.c: sqlite3.c $(TOP)/tool/split-sqlite3c.tcl tclsh $(TOP)/tool/split-sqlite3c.tcl fts2amal.c: target_source $(TOP)/ext/fts2/mkfts2amal.tcl tclsh $(TOP)/ext/fts2/mkfts2amal.tcl |
︙ | ︙ |
Changes to src/backup.c.
︙ | ︙ | |||
414 415 416 417 418 419 420 421 422 423 424 425 426 427 | && (rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1))==SQLITE_OK ){ int nDestTruncate; if( p->pDestDb ){ sqlite3ResetInternalSchema(p->pDestDb, -1); } /* Set nDestTruncate to the final number of pages in the destination ** database. The complication here is that the destination page ** size may be different to the source page size. ** ** If the source page size is smaller than the destination page size, ** round up. In this case the call to sqlite3OsTruncate() below will | > > > > > > > > > | 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 | && (rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1))==SQLITE_OK ){ int nDestTruncate; if( p->pDestDb ){ sqlite3ResetInternalSchema(p->pDestDb, -1); } if( destMode==PAGER_JOURNALMODE_WAL ){ /* This call cannot fail. The success of the BtreeUpdateMeta() ** method above indicates that a write transaction has been opened ** and page 1 is already dirty. Therefore this always succeeds. */ TESTONLY(int rc2 =) sqlite3BtreeSetVersion(p->pDest, 2); assert( rc2==SQLITE_OK ); } /* Set nDestTruncate to the final number of pages in the destination ** database. The complication here is that the destination page ** size may be different to the source page size. ** ** If the source page size is smaller than the destination page size, ** round up. In this case the call to sqlite3OsTruncate() below will |
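The new WAL-mode branch above uses the TESTONLY()/assert() idiom so that the return value of sqlite3BtreeSetVersion() is checked in debug builds without leaving an unused variable in release builds. Below is a minimal sketch of that idiom; the macro definition paraphrases the one in sqliteInt.h and the helper names are made up for illustration.

#include <assert.h>

#ifndef NDEBUG
# define TESTONLY(X)  X      /* debug build: keep the expression */
#else
# define TESTONLY(X)         /* release build: drop it entirely */
#endif

static int mustSucceed(void){ return 0; }

static void example(void){
  /* With NDEBUG undefined this expands to "int rc2 = mustSucceed(); assert(rc2==0);".
  ** With NDEBUG defined both the declaration and the assert() disappear,
  ** so nothing is left unused. */
  TESTONLY( int rc2 = ) mustSucceed();
  assert( rc2==0 );
}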
︙ | ︙ |
Changes to src/btree.c.
︙ | ︙ | |||
7300 7301 7302 7303 7304 7305 7306 7307 7308 | */ zeroPage(pPage, PTF_INTKEY|PTF_LEAF ); releasePage(pPage); } return rc; } int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ int rc; sqlite3BtreeEnter(p); | > > > > > > | > | 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 | */ zeroPage(pPage, PTF_INTKEY|PTF_LEAF ); releasePage(pPage); } return rc; } int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ BtShared *pBt = p->pBt; int rc; sqlite3BtreeEnter(p); if( (pBt->openFlags&BTREE_SINGLE) ){ pBt->nPage = 0; sqlite3PagerTruncateImage(pBt->pPager, 1); rc = newDatabase(pBt); }else{ rc = btreeDropTable(p, iTable, piMoved); } sqlite3BtreeLeave(p); return rc; } /* ** This function may only be called if the b-tree connection already |
︙ | ︙ | |||
8165 8166 8167 8168 8169 8170 8171 | ** "write version" (single byte at byte offset 19) fields in the database ** header to iVersion. */ int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ BtShared *pBt = pBtree->pBt; int rc; /* Return code */ | < | 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 | ** "write version" (single byte at byte offset 19) fields in the database ** header to iVersion. */ int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ BtShared *pBt = pBtree->pBt; int rc; /* Return code */ assert( iVersion==1 || iVersion==2 ); /* If setting the version fields to 1, do not automatically open the ** WAL connection, even if the version fields are currently set to 2. */ pBt->doNotUseWAL = (u8)(iVersion==1); |
︙ | ︙ | |||
8191 8192 8193 8194 8195 8196 8197 | } } } pBt->doNotUseWAL = 0; return rc; } | > > | 8197 8198 8199 8200 8201 8202 8203 8204 8205 | } } } pBt->doNotUseWAL = 0; return rc; } |
Changes to src/build.c.
︙ | ︙ | |||
1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 | int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); destroyRootPage(pParse, iLargest, iDb); iDestroyed = iLargest; } } #endif } /* ** This routine is called to do the work of a DROP TABLE statement. ** pName is the name of the table to be dropped. */ void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, int noErr){ Table *pTab; | > > > > > > > > > > > > > > > > > > > > > > > | 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 | int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); destroyRootPage(pParse, iLargest, iDb); iDestroyed = iLargest; } } #endif } /* ** Remove entries from the sqlite_stat1 and sqlite_stat2 tables ** after a DROP INDEX or DROP TABLE command. */ static void sqlite3ClearStatTables( Parse *pParse, /* The parsing context */ int iDb, /* The database number */ const char *zType, /* "idx" or "tbl" */ const char *zName /* Name of index or table */ ){ static const char *azStatTab[] = { "sqlite_stat1", "sqlite_stat2" }; int i; const char *zDbName = pParse->db->aDb[iDb].zName; for(i=0; i<ArraySize(azStatTab); i++){ if( sqlite3FindTable(pParse->db, azStatTab[i], zDbName) ){ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE %s=%Q", zDbName, azStatTab[i], zType, zName ); } } } /* ** This routine is called to do the work of a DROP TABLE statement. ** pName is the name of the table to be dropped. */ void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, int noErr){ Table *pTab; |
︙ | ︙ | |||
2115 2116 2117 2118 2119 2120 2121 | ** dropped. Triggers are handled seperately because a trigger can be ** created in the temp database that refers to a table in another ** database. */ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'", pDb->zName, SCHEMA_TABLE(iDb), pTab->zName); | | < < < < < < < | 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 | ** dropped. Triggers are handled seperately because a trigger can be ** created in the temp database that refers to a table in another ** database. */ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'", pDb->zName, SCHEMA_TABLE(iDb), pTab->zName); sqlite3ClearStatTables(pParse, iDb, "tbl", pTab->zName); if( !isView && !IsVirtual(pTab) ){ destroyTable(pParse, pTab); } /* Remove the table entry from SQLite's internal schema and modify ** the schema cookie. */ |
︙ | ︙ | |||
2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 | ** the index already exists and must be cleared before being refilled and ** the root page number of the index is taken from pIndex->tnum. */ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ Table *pTab = pIndex->pTable; /* The table that is indexed */ int iTab = pParse->nTab++; /* Btree cursor used for pTab */ int iIdx = pParse->nTab++; /* Btree cursor used for pIndex */ int addr1; /* Address of top of loop */ int tnum; /* Root page of index */ Vdbe *v; /* Generate code into this virtual machine */ KeyInfo *pKey; /* KeyInfo for index */ int regIdxKey; /* Registers containing the index key */ int regRecord; /* Register holding assemblied index record */ sqlite3 *db = pParse->db; /* The database connection */ int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); #ifndef SQLITE_OMIT_AUTHORIZATION if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0, db->aDb[iDb].zName ) ){ return; } #endif | > > > > > > > > > > | 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 | ** the index already exists and must be cleared before being refilled and ** the root page number of the index is taken from pIndex->tnum. */ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ Table *pTab = pIndex->pTable; /* The table that is indexed */ int iTab = pParse->nTab++; /* Btree cursor used for pTab */ int iIdx = pParse->nTab++; /* Btree cursor used for pIndex */ int iSorter = iTab; /* Cursor opened by OpenSorter (if in use) */ int addr1; /* Address of top of loop */ int tnum; /* Root page of index */ Vdbe *v; /* Generate code into this virtual machine */ KeyInfo *pKey; /* KeyInfo for index */ int regIdxKey; /* Registers containing the index key */ int regRecord; /* Register holding assemblied index record */ sqlite3 *db = pParse->db; /* The database connection */ int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); /* Set bUseSorter to use OP_OpenSorter, or clear it to insert directly ** into the index. The sorter is used unless either OMIT_MERGE_SORT is ** defined or the system is configured to store temp files in-memory. */ #ifdef SQLITE_OMIT_MERGE_SORT static const int bUseSorter = 0; #else const int bUseSorter = !sqlite3TempInMemory(pParse->db); #endif #ifndef SQLITE_OMIT_AUTHORIZATION if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0, db->aDb[iDb].zName ) ){ return; } #endif |
︙ | ︙ | |||
2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 | } pKey = sqlite3IndexKeyinfo(pParse, pIndex); sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, tnum, iDb, (char *)pKey, P4_KEYINFO_HANDOFF); if( memRootPage>=0 ){ sqlite3VdbeChangeP5(v, 1); } sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); regRecord = sqlite3GetTempReg(pParse); regIdxKey = sqlite3GenerateIndexKey(pParse, pIndex, iTab, regRecord, 1); if( pIndex->onError!=OE_None ){ const int regRowid = regIdxKey + pIndex->nColumn; const int j2 = sqlite3VdbeCurrentAddr(v) + 2; void * const pRegKey = SQLITE_INT_TO_PTR(regIdxKey); /* The registers accessed by the OP_IsUnique opcode were allocated ** using sqlite3GetTempRange() inside of the sqlite3GenerateIndexKey() ** call above. Just before that function was freed they were released ** (made available to the compiler for reuse) using ** sqlite3ReleaseTempRange(). So in some ways having the OP_IsUnique ** opcode use the values stored within seems dangerous. However, since ** we can be sure that no other temp registers have been allocated ** since sqlite3ReleaseTempRange() was called, it is safe to do so. */ sqlite3VdbeAddOp4(v, OP_IsUnique, iIdx, j2, regRowid, pRegKey, P4_INT32); sqlite3HaltConstraint( pParse, OE_Abort, "indexed columns are not unique", P4_STATIC); } | > > > > > > > > > > > > > > > > > > | | > > | 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 | } pKey = sqlite3IndexKeyinfo(pParse, pIndex); sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, tnum, iDb, (char *)pKey, P4_KEYINFO_HANDOFF); if( memRootPage>=0 ){ sqlite3VdbeChangeP5(v, 1); } /* Open the sorter cursor if we are to use one. */ if( bUseSorter ){ iSorter = pParse->nTab++; sqlite3VdbeAddOp4(v, OP_OpenSorter, iSorter, 0, 0, (char*)pKey, P4_KEYINFO); } /* Open the table. Loop through all rows of the table, inserting index ** records into the sorter. */ sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); regRecord = sqlite3GetTempReg(pParse); regIdxKey = sqlite3GenerateIndexKey(pParse, pIndex, iTab, regRecord, 1); if( bUseSorter ){ sqlite3VdbeAddOp2(v, OP_IdxInsert, iSorter, regRecord); sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1); sqlite3VdbeJumpHere(v, addr1); addr1 = sqlite3VdbeAddOp2(v, OP_Sort, iSorter, 0); sqlite3VdbeAddOp2(v, OP_RowKey, iSorter, regRecord); } if( pIndex->onError!=OE_None ){ const int regRowid = regIdxKey + pIndex->nColumn; const int j2 = sqlite3VdbeCurrentAddr(v) + 2; void * const pRegKey = SQLITE_INT_TO_PTR(regIdxKey); /* The registers accessed by the OP_IsUnique opcode were allocated ** using sqlite3GetTempRange() inside of the sqlite3GenerateIndexKey() ** call above. Just before that function was freed they were released ** (made available to the compiler for reuse) using ** sqlite3ReleaseTempRange(). So in some ways having the OP_IsUnique ** opcode use the values stored within seems dangerous. However, since ** we can be sure that no other temp registers have been allocated ** since sqlite3ReleaseTempRange() was called, it is safe to do so. 
*/ sqlite3VdbeAddOp4(v, OP_IsUnique, iIdx, j2, regRowid, pRegKey, P4_INT32); sqlite3HaltConstraint( pParse, OE_Abort, "indexed columns are not unique", P4_STATIC); } sqlite3VdbeAddOp3(v, OP_IdxInsert, iIdx, regRecord, bUseSorter); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3VdbeAddOp2(v, OP_Next, iSorter, addr1+1); sqlite3VdbeJumpHere(v, addr1); sqlite3VdbeAddOp1(v, OP_Close, iTab); sqlite3VdbeAddOp1(v, OP_Close, iIdx); sqlite3VdbeAddOp1(v, OP_Close, iSorter); } /* ** Create a new index for an SQL table. pName1.pName2 is the name of the index ** and pTblList is the name of the table that is to be indexed. Both will ** be NULL for a primary key or an index that is created to satisfy a ** UNIQUE constraint. If pTable and pIndex are NULL, use pParse->pNewTable |
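When the merge sorter is in use, the program generated by sqlite3RefillIndex() has a two-pass shape: the first loop feeds index keys from the table into the sorter, and the second loop reads them back in sorted order and appends them to the index b-tree. The comment block below is an informal sketch of that structure, reconstructed from the opcodes emitted above; labels and operand names are illustrative, not taken from an actual VDBE listing.

/* Approximate shape of the generated program when bUseSorter!=0:
**
**     OpenWrite   iIdx                  ; the index b-tree being (re)built
**     OpenSorter  iSorter               ; external merge sorter
**     OpenRead    iTab                  ; the table being indexed
**     Rewind      iTab, done1
**   loop1:
**     ...                               ; assemble the index key in regRecord
**     IdxInsert   iSorter, regRecord    ; pass 1: feed keys into the sorter
**     Next        iTab, loop1
**   done1:
**     Sort        iSorter, done2        ; run the merge sort
**   loop2:
**     RowKey      iSorter, regRecord    ; pass 2: read keys back in order
**     IsUnique    ...                   ; UNIQUE indexes only
**     IdxInsert   iIdx, regRecord       ; append to the index b-tree
**     Next        iSorter, loop2
**   done2:
**     Close       iTab
**     Close       iIdx
**     Close       iSorter
*/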
︙ | ︙ | |||
2945 2946 2947 2948 2949 2950 2951 | /* Generate code to remove the index and from the master table */ v = sqlite3GetVdbe(pParse); if( v ){ sqlite3BeginWriteOperation(pParse, 1, iDb); sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE name=%Q AND type='index'", | | < < < < | < < | 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 | /* Generate code to remove the index and from the master table */ v = sqlite3GetVdbe(pParse); if( v ){ sqlite3BeginWriteOperation(pParse, 1, iDb); sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE name=%Q AND type='index'", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pIndex->zName ); sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName); sqlite3ChangeCookie(pParse, iDb); destroyRootPage(pParse, pIndex->tnum, iDb); sqlite3VdbeAddOp4(v, OP_DropIndex, iDb, 0, 0, pIndex->zName, 0); } exit_drop_index: sqlite3SrcListDelete(db, pName); |
︙ | ︙ | |||
3325 3326 3327 3328 3329 3330 3331 | ** A natural cross join B ** ** The operator is "natural cross join". The A and B operands are stored ** in p->a[0] and p->a[1], respectively. The parser initially stores the ** operator with A. This routine shifts that operator over to B. */ void sqlite3SrcListShiftJoinType(SrcList *p){ | | > | 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 | ** A natural cross join B ** ** The operator is "natural cross join". The A and B operands are stored ** in p->a[0] and p->a[1], respectively. The parser initially stores the ** operator with A. This routine shifts that operator over to B. */ void sqlite3SrcListShiftJoinType(SrcList *p){ if( p ){ int i; assert( p->a || p->nSrc==0 ); for(i=p->nSrc-1; i>0; i--){ p->a[i].jointype = p->a[i-1].jointype; } p->a[0].jointype = 0; } } |
︙ | ︙ |
Changes to src/ctime.c.
︙ | ︙ | |||
252 253 254 255 256 257 258 259 260 261 262 263 264 265 | "OMIT_LOCALTIME", #endif #ifdef SQLITE_OMIT_LOOKASIDE "OMIT_LOOKASIDE", #endif #ifdef SQLITE_OMIT_MEMORYDB "OMIT_MEMORYDB", #endif #ifdef SQLITE_OMIT_OR_OPTIMIZATION "OMIT_OR_OPTIMIZATION", #endif #ifdef SQLITE_OMIT_PAGER_PRAGMAS "OMIT_PAGER_PRAGMAS", #endif | > > > | 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 | "OMIT_LOCALTIME", #endif #ifdef SQLITE_OMIT_LOOKASIDE "OMIT_LOOKASIDE", #endif #ifdef SQLITE_OMIT_MEMORYDB "OMIT_MEMORYDB", #endif #ifdef SQLITE_OMIT_MERGE_SORT "OMIT_MERGE_SORT", #endif #ifdef SQLITE_OMIT_OR_OPTIMIZATION "OMIT_OR_OPTIMIZATION", #endif #ifdef SQLITE_OMIT_PAGER_PRAGMAS "OMIT_PAGER_PRAGMAS", #endif |
︙ | ︙ | |||
318 319 320 321 322 323 324 325 326 327 328 329 330 331 | "OMIT_WAL", #endif #ifdef SQLITE_OMIT_WSD "OMIT_WSD", #endif #ifdef SQLITE_OMIT_XFER_OPT "OMIT_XFER_OPT", #endif #ifdef SQLITE_PERFORMANCE_TRACE "PERFORMANCE_TRACE", #endif #ifdef SQLITE_PROXY_DEBUG "PROXY_DEBUG", #endif | > > > | 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 | "OMIT_WAL", #endif #ifdef SQLITE_OMIT_WSD "OMIT_WSD", #endif #ifdef SQLITE_OMIT_XFER_OPT "OMIT_XFER_OPT", #endif #ifdef SQLITE_PAGECACHE_BLOCKALLOC "PAGECACHE_BLOCKALLOC", #endif #ifdef SQLITE_PERFORMANCE_TRACE "PERFORMANCE_TRACE", #endif #ifdef SQLITE_PROXY_DEBUG "PROXY_DEBUG", #endif |
︙ | ︙ |
Changes to src/fkey.c.
︙ | ︙ | |||
730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 | ** early. */ if( pParse->disableTriggers ){ pTo = sqlite3FindTable(db, pFKey->zTo, zDb); }else{ pTo = sqlite3LocateTable(pParse, 0, pFKey->zTo, zDb); } if( !pTo || locateFkeyIndex(pParse, pTo, pFKey, &pIdx, &aiFree) ){ if( !isIgnoreErrors || db->mallocFailed ) return; continue; } assert( pFKey->nCol==1 || (aiFree && pIdx) ); if( aiFree ){ aiCol = aiFree; }else{ | > > > > > > > > > > > > > > > > > | 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 | ** early. */ if( pParse->disableTriggers ){ pTo = sqlite3FindTable(db, pFKey->zTo, zDb); }else{ pTo = sqlite3LocateTable(pParse, 0, pFKey->zTo, zDb); } if( !pTo || locateFkeyIndex(pParse, pTo, pFKey, &pIdx, &aiFree) ){ assert( isIgnoreErrors==0 || (regOld!=0 && regNew==0) ); if( !isIgnoreErrors || db->mallocFailed ) return; if( pTo==0 ){ /* If isIgnoreErrors is true, then a table is being dropped. In this ** case SQLite runs a "DELETE FROM xxx" on the table being dropped ** before actually dropping it in order to check FK constraints. ** If the parent table of an FK constraint on the current table is ** missing, behave as if it is empty. i.e. decrement the relevant ** FK counter for each row of the current table with non-NULL keys. */ Vdbe *v = sqlite3GetVdbe(pParse); int iJump = sqlite3VdbeCurrentAddr(v) + pFKey->nCol + 1; for(i=0; i<pFKey->nCol; i++){ int iReg = pFKey->aCol[i].iFrom + regOld + 1; sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iJump); } sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, -1); } continue; } assert( pFKey->nCol==1 || (aiFree && pIdx) ); if( aiFree ){ aiCol = aiFree; }else{ |
︙ | ︙ |
Changes to src/os_unix.c.
︙ | ︙ | |||
209 210 211 212 213 214 215 | ** VFS implementations. */ typedef struct unixFile unixFile; struct unixFile { sqlite3_io_methods const *pMethod; /* Always the first entry */ unixInodeInfo *pInode; /* Info about locks on this inode */ int h; /* The file descriptor */ | < | 209 210 211 212 213 214 215 216 217 218 219 220 221 222 | ** VFS implementations. */ typedef struct unixFile unixFile; struct unixFile { sqlite3_io_methods const *pMethod; /* Always the first entry */ unixInodeInfo *pInode; /* Info about locks on this inode */ int h; /* The file descriptor */ unsigned char eFileLock; /* The type of lock held on this fd */ unsigned char ctrlFlags; /* Behavioral bits. UNIXFILE_* flags */ int lastErrno; /* The unix errno from last I/O error */ void *lockingContext; /* Locking style specific state */ UnixUnusedFd *pUnused; /* Pre-allocated UnixUnusedFd */ const char *zPath; /* Name of the file */ unixShm *pShm; /* Shared memory segment information */ |
︙ | ︙ | |||
257 258 259 260 261 262 263 264 265 266 267 268 269 270 | /* ** Allowed values for the unixFile.ctrlFlags bitmask: */ #define UNIXFILE_EXCL 0x01 /* Connections from one process only */ #define UNIXFILE_RDONLY 0x02 /* Connection is read only */ #define UNIXFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ /* ** Include code that is common to all os_*.c files */ #include "os_common.h" /* | > | 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 | /* ** Allowed values for the unixFile.ctrlFlags bitmask: */ #define UNIXFILE_EXCL 0x01 /* Connections from one process only */ #define UNIXFILE_RDONLY 0x02 /* Connection is read only */ #define UNIXFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ #define UNIXFILE_DIRSYNC 0x08 /* Directory sync needed */ /* ** Include code that is common to all os_*.c files */ #include "os_common.h" /* |
︙ | ︙ | |||
581 582 583 584 585 586 587 588 589 590 591 592 593 594 | ** The safest way to deal with the problem is to always use this wrapper ** which always has the same well-defined interface. */ static int posixOpen(const char *zFile, int flags, int mode){ return open(zFile, flags, mode); } /* ** Many system calls are accessed through pointer-to-functions so that ** they may be overridden at runtime to facilitate fault injection during ** testing and sandboxing. The following array holds the names and pointers ** to all overrideable system calls. */ static struct unix_syscall { | > > > | 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 | ** The safest way to deal with the problem is to always use this wrapper ** which always has the same well-defined interface. */ static int posixOpen(const char *zFile, int flags, int mode){ return open(zFile, flags, mode); } /* Forward reference */ static int openDirectory(const char*, int*); /* ** Many system calls are accessed through pointer-to-functions so that ** they may be overridden at runtime to facilitate fault injection during ** testing and sandboxing. The following array holds the names and pointers ** to all overrideable system calls. */ static struct unix_syscall { |
︙ | ︙ | |||
677 678 679 680 681 682 683 684 685 686 687 688 689 690 | #if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE { "fallocate", (sqlite3_syscall_ptr)posix_fallocate, 0 }, #else { "fallocate", (sqlite3_syscall_ptr)0, 0 }, #endif #define osFallocate ((int(*)(int,off_t,off_t))aSyscall[15].pCurrent) }; /* End of the overrideable system calls */ /* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the ** "unix" VFSes. Return SQLITE_OK opon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. | > > > > > > | 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 | #if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE { "fallocate", (sqlite3_syscall_ptr)posix_fallocate, 0 }, #else { "fallocate", (sqlite3_syscall_ptr)0, 0 }, #endif #define osFallocate ((int(*)(int,off_t,off_t))aSyscall[15].pCurrent) { "unlink", (sqlite3_syscall_ptr)unlink, 0 }, #define osUnlink ((int(*)(const char*))aSyscall[16].pCurrent) { "openDirectory", (sqlite3_syscall_ptr)openDirectory, 0 }, #define osOpenDirectory ((int(*)(const char*,int*))aSyscall[17].pCurrent) }; /* End of the overrideable system calls */ /* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the ** "unix" VFSes. Return SQLITE_OK opon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. |
︙ | ︙ | |||
2087 2088 2089 2090 2091 2092 2093 | ** It is *not* necessary to hold the mutex when this routine is called, ** even on VxWorks. A mutex will be acquired on VxWorks by the ** vxworksReleaseFileId() routine. */ static int closeUnixFile(sqlite3_file *id){ unixFile *pFile = (unixFile*)id; #if OSCLOSE_CHECK_CLOSE_IOERR | < < < < < < < < < < < < < | | 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 | ** It is *not* necessary to hold the mutex when this routine is called, ** even on VxWorks. A mutex will be acquired on VxWorks by the ** vxworksReleaseFileId() routine. */ static int closeUnixFile(sqlite3_file *id){ unixFile *pFile = (unixFile*)id; #if OSCLOSE_CHECK_CLOSE_IOERR if( pFile->h>=0 ){ int err = close(pFile->h); if( err ){ pFile->lastErrno = errno; return SQLITE_IOERR_CLOSE; }else{ pFile->h=-1; } } #else if( pFile->h>=0 ){ robust_close(pFile, pFile->h, __LINE__); pFile->h = -1; } #endif #if OS_VXWORKS if( pFile->pId ){ if( pFile->isDelete ){ osUnlink(pFile->pId->zCanonicalName); } vxworksReleaseFileId(pFile->pId); pFile->pId = 0; } #endif OSTRACE(("CLOSE %-3d\n", pFile->h)); OpenCounter(-1); |
︙ | ︙ | |||
2374 2375 2376 2377 2378 2379 2380 | if( eFileLock==SHARED_LOCK ){ pFile->eFileLock = SHARED_LOCK; return SQLITE_OK; } /* To fully unlock the database, delete the lock file */ assert( eFileLock==NO_LOCK ); | | | 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 | if( eFileLock==SHARED_LOCK ){ pFile->eFileLock = SHARED_LOCK; return SQLITE_OK; } /* To fully unlock the database, delete the lock file */ assert( eFileLock==NO_LOCK ); if( osUnlink(zLockFile) ){ int rc = 0; int tErrno = errno; if( ENOENT != tErrno ){ #if OSLOCKING_CHECK_BUSY_IOERR rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); #else rc = SQLITE_IOERR_UNLOCK; |
︙ | ︙ | |||
3406 3407 3408 3409 3410 3411 3412 | #endif TIMER_START; #if defined(USE_PREAD) do{ got = osPwrite(id->h, pBuf, cnt, offset); }while( got<0 && errno==EINTR ); #elif defined(USE_PREAD64) do{ got = osPwrite64(id->h, pBuf, cnt, offset);}while( got<0 && errno==EINTR); #else | > | | | | | | | | | | | > | 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 | #endif TIMER_START; #if defined(USE_PREAD) do{ got = osPwrite(id->h, pBuf, cnt, offset); }while( got<0 && errno==EINTR ); #elif defined(USE_PREAD64) do{ got = osPwrite64(id->h, pBuf, cnt, offset);}while( got<0 && errno==EINTR); #else do{ newOffset = lseek(id->h, offset, SEEK_SET); SimulateIOError( newOffset-- ); if( newOffset!=offset ){ if( newOffset == -1 ){ ((unixFile*)id)->lastErrno = errno; }else{ ((unixFile*)id)->lastErrno = 0; } return -1; } got = osWrite(id->h, pBuf, cnt); }while( got<0 && errno==EINTR ); #endif TIMER_END; if( got<0 ){ ((unixFile*)id)->lastErrno = errno; } OSTRACE(("WRITE %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED)); |
︙ | ︙ | |||
3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 | #endif /* ifdef SQLITE_NO_SYNC elif HAVE_FULLFSYNC */ if( OS_VXWORKS && rc!= -1 ){ rc = 0; } return rc; } /* ** Make sure all writes to a particular file are committed to disk. ** ** If dataOnly==0 then both the file itself and its metadata (file ** size, access time, etc) are synced. If dataOnly!=0 then only the ** file data is synced. | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 | #endif /* ifdef SQLITE_NO_SYNC elif HAVE_FULLFSYNC */ if( OS_VXWORKS && rc!= -1 ){ rc = 0; } return rc; } /* ** Open a file descriptor to the directory containing file zFilename. ** If successful, *pFd is set to the opened file descriptor and ** SQLITE_OK is returned. If an error occurs, either SQLITE_NOMEM ** or SQLITE_CANTOPEN is returned and *pFd is set to an undefined ** value. ** ** The directory file descriptor is used for only one thing - to ** fsync() a directory to make sure file creation and deletion events ** are flushed to disk. Such fsyncs are not needed on newer ** journaling filesystems, but are required on older filesystems. ** ** This routine can be overridden using the xSetSysCall interface. ** The ability to override this routine was added in support of the ** chromium sandbox. Opening a directory is a security risk (we are ** told) so making it overrideable allows the chromium sandbox to ** replace this routine with a harmless no-op. To make this routine ** a no-op, replace it with a stub that returns SQLITE_OK but leaves ** *pFd set to a negative number. ** ** If SQLITE_OK is returned, the caller is responsible for closing ** the file descriptor *pFd using close(). */ static int openDirectory(const char *zFilename, int *pFd){ int ii; int fd = -1; char zDirname[MAX_PATHNAME+1]; sqlite3_snprintf(MAX_PATHNAME, zDirname, "%s", zFilename); for(ii=(int)strlen(zDirname); ii>1 && zDirname[ii]!='/'; ii--); if( ii>0 ){ zDirname[ii] = '\0'; fd = robust_open(zDirname, O_RDONLY|O_BINARY, 0); if( fd>=0 ){ #ifdef FD_CLOEXEC osFcntl(fd, F_SETFD, osFcntl(fd, F_GETFD, 0) | FD_CLOEXEC); #endif OSTRACE(("OPENDIR %-3d %s\n", fd, zDirname)); } } *pFd = fd; return (fd>=0?SQLITE_OK:unixLogError(SQLITE_CANTOPEN_BKPT, "open", zDirname)); } /* ** Make sure all writes to a particular file are committed to disk. ** ** If dataOnly==0 then both the file itself and its metadata (file ** size, access time, etc) are synced. If dataOnly!=0 then only the ** file data is synced. |
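The comment above explains that openDirectory() is installed in the overrideable system-call table precisely so that environments such as the chromium sandbox can replace it with a harmless stub. The sketch below (not part of this check-in; the two function names are hypothetical, while sqlite3_vfs_find() and xSetSystemCall are the documented interfaces) shows how an application on a unix build might do that, following the contract stated above: return SQLITE_OK and leave *pFd negative.

#include "sqlite3.h"

/* Stub that pretends no directory handle could be obtained.  Per the
** comment above, this silently disables directory fsyncs. */
static int noopOpenDirectory(const char *zName, int *pFd){
  (void)zName;
  *pFd = -1;                /* negative fd means "no directory was opened" */
  return SQLITE_OK;
}

static int disableDirectoryFsync(void){
  sqlite3_vfs *pVfs = sqlite3_vfs_find("unix");
  if( pVfs==0 || pVfs->iVersion<3 || pVfs->xSetSystemCall==0 ){
    return SQLITE_ERROR;    /* VFS does not support system-call overrides */
  }
  return pVfs->xSetSystemCall(pVfs, "openDirectory",
                              (sqlite3_syscall_ptr)noopOpenDirectory);
}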
︙ | ︙ | |||
3673 3674 3675 3676 3677 3678 3679 | OSTRACE(("SYNC %-3d\n", pFile->h)); rc = full_fsync(pFile->h, isFullsync, isDataOnly); SimulateIOError( rc=1 ); if( rc ){ pFile->lastErrno = errno; return unixLogError(SQLITE_IOERR_FSYNC, "full_fsync", pFile->zPath); } | | < < < | < > | | > > > | < < < < < < | | < | < | < < | | | | < > > > | 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 | OSTRACE(("SYNC %-3d\n", pFile->h)); rc = full_fsync(pFile->h, isFullsync, isDataOnly); SimulateIOError( rc=1 ); if( rc ){ pFile->lastErrno = errno; return unixLogError(SQLITE_IOERR_FSYNC, "full_fsync", pFile->zPath); } /* Also fsync the directory containing the file if the DIRSYNC flag ** is set. This is a one-time occurrance. Many systems (examples: AIX) ** are unable to fsync a directory, so ignore errors on the fsync. */ if( pFile->ctrlFlags & UNIXFILE_DIRSYNC ){ int dirfd; OSTRACE(("DIRSYNC %s (have_fullfsync=%d fullsync=%d)\n", pFile->zPath, HAVE_FULLFSYNC, isFullsync)); rc = osOpenDirectory(pFile->zPath, &dirfd); if( rc==SQLITE_OK && dirfd>=0 ){ full_fsync(dirfd, 0, 0); #if OSCLOSE_CHECK_CLOSE_IOERR if( close(pFile->dirfd) ){ pFile->lastErrno = errno; rc = SQLITE_IOERR_DIR_CLOSE; } #else robust_close(pFile, dirfd, __LINE__); #endif } pFile->ctrlFlags &= ~UNIXFILE_DIRSYNC; } return rc; } /* ** Truncate an open file to a specified size */ |
︙ | ︙ | |||
3869 3870 3871 3872 3873 3874 3875 | return SQLITE_OK; } case SQLITE_FCNTL_CHUNK_SIZE: { pFile->szChunk = *(int *)pArg; return SQLITE_OK; } case SQLITE_FCNTL_SIZE_HINT: { | > > | > > | 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 | return SQLITE_OK; } case SQLITE_FCNTL_CHUNK_SIZE: { pFile->szChunk = *(int *)pArg; return SQLITE_OK; } case SQLITE_FCNTL_SIZE_HINT: { int rc; SimulateIOErrorBenign(1); rc = fcntlSizeHint(pFile, *(i64 *)pArg); SimulateIOErrorBenign(0); return rc; } case SQLITE_FCNTL_PERSIST_WAL: { int bPersist = *(int*)pArg; if( bPersist<0 ){ *(int*)pArg = (pFile->ctrlFlags & UNIXFILE_PERSIST_WAL)!=0; }else if( bPersist==0 ){ pFile->ctrlFlags &= ~UNIXFILE_PERSIST_WAL; |
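The SQLITE_FCNTL_PERSIST_WAL handler added above treats a negative value as a query and 0 or 1 as a new setting. A small usage sketch follows; the helper name is hypothetical, while sqlite3_file_control() and the opcode are the interfaces through which this handler is reached.

#include "sqlite3.h"

/* Ask the VFS for the current persistent-WAL setting of the "main"
** database, then switch it on.  A negative input queries; 0 or 1 sets. */
static int enablePersistentWal(sqlite3 *db){
  int bPersist = -1;
  int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_PERSIST_WAL, &bPersist);
  if( rc!=SQLITE_OK ) return rc;
  /* bPersist now holds the previous setting. */
  bPersist = 1;
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_PERSIST_WAL, &bPersist);
}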
︙ | ︙ | |||
4848 4849 4850 4851 4852 4853 4854 | /* If pShmNode->nRef has reached 0, then close the underlying ** shared-memory file, too */ unixEnterMutex(); assert( pShmNode->nRef>0 ); pShmNode->nRef--; if( pShmNode->nRef==0 ){ | | | 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 | /* If pShmNode->nRef has reached 0, then close the underlying ** shared-memory file, too */ unixEnterMutex(); assert( pShmNode->nRef>0 ); pShmNode->nRef--; if( pShmNode->nRef==0 ){ if( deleteFlag && pShmNode->h>=0 ) osUnlink(pShmNode->zFilename); unixShmPurge(pDbFd); } unixLeaveMutex(); return SQLITE_OK; } |
︙ | ︙ | |||
5161 5162 5163 5164 5165 5166 5167 | /* ** Initialize the contents of the unixFile structure pointed to by pId. */ static int fillInUnixFile( sqlite3_vfs *pVfs, /* Pointer to vfs object */ int h, /* Open file descriptor of file being opened */ | | | 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 | /* ** Initialize the contents of the unixFile structure pointed to by pId. */ static int fillInUnixFile( sqlite3_vfs *pVfs, /* Pointer to vfs object */ int h, /* Open file descriptor of file being opened */ int syncDir, /* True to sync directory on first sync */ sqlite3_file *pId, /* Write to the unixFile structure here */ const char *zFilename, /* Name of the file being opened */ int noLock, /* Omit locking if true */ int isDelete, /* Delete on close if true */ int isReadOnly /* True if the file is opened read-only */ ){ const sqlite3_io_methods *pLockingStyle; |
︙ | ︙ | |||
5192 5193 5194 5195 5196 5197 5198 | || pVfs->pAppData==(void*)&autolockIoFinder ); #else assert( zFilename==0 || zFilename[0]=='/' ); #endif OSTRACE(("OPEN %-3d %s\n", h, zFilename)); pNew->h = h; | < > > > | 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 | || pVfs->pAppData==(void*)&autolockIoFinder ); #else assert( zFilename==0 || zFilename[0]=='/' ); #endif OSTRACE(("OPEN %-3d %s\n", h, zFilename)); pNew->h = h; pNew->zPath = zFilename; if( memcmp(pVfs->zName,"unix-excl",10)==0 ){ pNew->ctrlFlags = UNIXFILE_EXCL; }else{ pNew->ctrlFlags = 0; } if( isReadOnly ){ pNew->ctrlFlags |= UNIXFILE_RDONLY; } if( syncDir ){ pNew->ctrlFlags |= UNIXFILE_DIRSYNC; } #if OS_VXWORKS pNew->pId = vxworksFindFileId(zFilename); if( pNew->pId==0 ){ noLock = 1; rc = SQLITE_NOMEM; } |
︙ | ︙ | |||
5328 5329 5330 5331 5332 5333 5334 | #endif pNew->lastErrno = 0; #if OS_VXWORKS if( rc!=SQLITE_OK ){ if( h>=0 ) robust_close(pNew, h, __LINE__); h = -1; | | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 | #endif pNew->lastErrno = 0; #if OS_VXWORKS if( rc!=SQLITE_OK ){ if( h>=0 ) robust_close(pNew, h, __LINE__); h = -1; osUnlink(zFilename); isDelete = 0; } pNew->isDelete = isDelete; #endif if( rc!=SQLITE_OK ){ if( h>=0 ) robust_close(pNew, h, __LINE__); }else{ pNew->pMethod = pLockingStyle; OpenCounter(+1); } return rc; } /* ** Return the name of a directory in which to put temporary files. ** If no suitable temporary file directory can be found, return NULL. */ static const char *unixTempFileDir(void){ static const char *azDirs[] = { 0, |
︙ | ︙ | |||
5610 5611 5612 5613 5614 5615 5616 | const char *zPath, /* Pathname of file to be opened */ sqlite3_file *pFile, /* The file descriptor to be filled in */ int flags, /* Input flags to control the opening */ int *pOutFlags /* Output flags returned to SQLite core */ ){ unixFile *p = (unixFile *)pFile; int fd = -1; /* File descriptor returned by open() */ | < | 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 | const char *zPath, /* Pathname of file to be opened */ sqlite3_file *pFile, /* The file descriptor to be filled in */ int flags, /* Input flags to control the opening */ int *pOutFlags /* Output flags returned to SQLite core */ ){ unixFile *p = (unixFile *)pFile; int fd = -1; /* File descriptor returned by open() */ int openFlags = 0; /* Flags to pass to open() */ #if SQLITE_ENABLE_DATA_PROTECTION int eType = flags&0xFF0FFF00; /* Type of file to open */ #else int eType = flags&0xFFFFFF00; /* Type of file to open */ #endif int noLock; /* True to omit locking primitives */ |
︙ | ︙ | |||
5633 5634 5635 5636 5637 5638 5639 | int isAutoProxy = (flags & SQLITE_OPEN_AUTOPROXY); #endif /* If creating a master or main-file journal, this function will open ** a file-descriptor on the directory too. The first time unixSync() ** is called the directory file descriptor will be fsync()ed and close()d. */ | | | 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 | int isAutoProxy = (flags & SQLITE_OPEN_AUTOPROXY); #endif /* If creating a master or main-file journal, this function will open ** a file-descriptor on the directory too. The first time unixSync() ** is called the directory file descriptor will be fsync()ed and close()d. */ int syncDir = (isCreate && ( eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_WAL )); /* If argument zPath is a NULL pointer, this function is required to open ** a temporary file. Use this buffer to store the file name in. |
︙ | ︙ | |||
5687 5688 5689 5690 5691 5692 5693 | if( !pUnused ){ return SQLITE_NOMEM; } } p->pUnused = pUnused; }else if( !zName ){ /* If zName is NULL, the upper layer is requesting a temp file. */ | | | 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 | if( !pUnused ){ return SQLITE_NOMEM; } } p->pUnused = pUnused; }else if( !zName ){ /* If zName is NULL, the upper layer is requesting a temp file. */ assert(isDelete && !syncDir); rc = unixGetTempname(MAX_PATHNAME+1, zTmpname); if( rc!=SQLITE_OK ){ return rc; } zName = zTmpname; } |
︙ | ︙ | |||
5759 5760 5761 5762 5763 5764 5765 | p->pUnused->flags = flags; } if( isDelete ){ #if OS_VXWORKS zPath = zName; #else | | < < < < < < < < < < < < < < | 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 | p->pUnused->flags = flags; } if( isDelete ){ #if OS_VXWORKS zPath = zName; #else osUnlink(zName); #endif } #if SQLITE_ENABLE_LOCKING_STYLE else{ p->openFlags = openFlags; } #endif #ifdef FD_CLOEXEC osFcntl(fd, F_SETFD, osFcntl(fd, F_GETFD, 0) | FD_CLOEXEC); #endif noLock = eType!=SQLITE_OPEN_MAIN_DB; #if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE struct statfs fsInfo; if( fstatfs(fd, &fsInfo) == -1 ){ ((unixFile*)pFile)->lastErrno = errno; robust_close(p, fd, __LINE__); return SQLITE_IOERR_ACCESS; } if (0 == strncmp("msdos", fsInfo.f_fstypename, 5)) { ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS; } if (0 == strncmp("exfat", fsInfo.f_fstypename, 5)) { |
︙ | ︙ | |||
5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 | int useProxy = 0; /* SQLITE_FORCE_PROXY_LOCKING==1 means force always use proxy, 0 means ** never use proxy, NULL means use proxy for non-local files only. */ if( envforce!=NULL ){ useProxy = atoi(envforce)>0; }else{ useProxy = !(fsInfo.f_flags&MNT_LOCAL); } if( useProxy ){ | > > > > > > > > > > > > > > | | 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 | int useProxy = 0; /* SQLITE_FORCE_PROXY_LOCKING==1 means force always use proxy, 0 means ** never use proxy, NULL means use proxy for non-local files only. */ if( envforce!=NULL ){ useProxy = atoi(envforce)>0; }else{ struct statfs fsInfo; if( statfs(zPath, &fsInfo) == -1 ){ /* In theory, the close(fd) call is sub-optimal. If the file opened ** with fd is a database file, and there are other connections open ** on that file that are currently holding advisory locks on it, ** then the call to close() will cancel those locks. In practice, ** we're assuming that statfs() doesn't fail very often. At least ** not while other file descriptors opened by the same process on ** the same file are working. */ p->lastErrno = errno; robust_close(p, fd, __LINE__); rc = SQLITE_IOERR_ACCESS; goto open_finished; } useProxy = !(fsInfo.f_flags&MNT_LOCAL); } if( useProxy ){ rc = fillInUnixFile(pVfs, fd, syncDir, pFile, zPath, noLock, isDelete, isReadonly); if( rc==SQLITE_OK ){ /* cache the pMethod in case the transform fails */ const struct sqlite3_io_methods *pMethod = pFile->pMethods; rc = proxyTransformUnixFile((unixFile*)pFile, ":auto:"); if( rc!=SQLITE_OK ){ /* Use unixClose to clean up the resources added in fillInUnixFile |
︙ | ︙ | |||
5844 5845 5846 5847 5848 5849 5850 | } } goto open_finished; } } #endif | | | 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 | } } goto open_finished; } } #endif rc = fillInUnixFile(pVfs, fd, syncDir, pFile, zPath, noLock, isDelete, isReadonly); open_finished: if( rc!=SQLITE_OK ){ sqlite3_free(p->pUnused); } return rc; } |
︙ | ︙ | |||
5866 5867 5868 5869 5870 5871 5872 | sqlite3_vfs *NotUsed, /* VFS containing this as the xDelete method */ const char *zPath, /* Name of file to be deleted */ int dirSync /* If true, fsync() directory after deleting file */ ){ int rc = SQLITE_OK; UNUSED_PARAMETER(NotUsed); SimulateIOError(return SQLITE_IOERR_DELETE); | | | | 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 | sqlite3_vfs *NotUsed, /* VFS containing this as the xDelete method */ const char *zPath, /* Name of file to be deleted */ int dirSync /* If true, fsync() directory after deleting file */ ){ int rc = SQLITE_OK; UNUSED_PARAMETER(NotUsed); SimulateIOError(return SQLITE_IOERR_DELETE); if( osUnlink(zPath)==(-1) && errno!=ENOENT ){ return unixLogError(SQLITE_IOERR_DELETE, "unlink", zPath); } #ifndef SQLITE_DISABLE_DIRSYNC if( dirSync ){ int fd; rc = osOpenDirectory(zPath, &fd); if( rc==SQLITE_OK ){ #if OS_VXWORKS if( fsync(fd)==-1 ) #else if( fsync(fd) ) #endif { |
︙ | ︙ | |||
6473 6474 6475 6476 6477 6478 6479 | */ static int proxyCreateUnixFile( const char *path, /* path for the new unixFile */ unixFile **ppFile, /* unixFile created and returned by ref */ int islockfile /* if non zero missing dirs will be created */ ) { int fd = -1; | < | 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 | */ static int proxyCreateUnixFile( const char *path, /* path for the new unixFile */ unixFile **ppFile, /* unixFile created and returned by ref */ int islockfile /* if non zero missing dirs will be created */ ) { int fd = -1; unixFile *pNew; int rc = SQLITE_OK; int openFlags = O_RDWR | O_CREAT; sqlite3_vfs dummyVfs; int terrno = 0; UnixUnusedFd *pUnused = NULL; |
︙ | ︙ | |||
6539 6540 6541 6542 6543 6544 6545 | memset(&dummyVfs, 0, sizeof(dummyVfs)); dummyVfs.pAppData = (void*)&autolockIoFinder; dummyVfs.zName = "dummy"; pUnused->fd = fd; pUnused->flags = openFlags; pNew->pUnused = pUnused; | | | 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 | memset(&dummyVfs, 0, sizeof(dummyVfs)); dummyVfs.pAppData = (void*)&autolockIoFinder; dummyVfs.zName = "dummy"; pUnused->fd = fd; pUnused->flags = openFlags; pNew->pUnused = pUnused; rc = fillInUnixFile(&dummyVfs, fd, 0, (sqlite3_file*)pNew, path, 0, 0, 0); if( rc==SQLITE_OK ){ *ppFile = pNew; return SQLITE_OK; } end_create_proxy: robust_close(pNew, fd, __LINE__); sqlite3_free(pNew); |
︙ | ︙ | |||
6653 6654 6655 6656 6657 6658 6659 | robust_close(pFile, conchFile->h, __LINE__); conchFile->h = fd; conchFile->openFlags = O_RDWR | O_CREAT; end_breaklock: if( rc ){ if( fd>=0 ){ | | | 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 | robust_close(pFile, conchFile->h, __LINE__); conchFile->h = fd; conchFile->openFlags = O_RDWR | O_CREAT; end_breaklock: if( rc ){ if( fd>=0 ){ osUnlink(tPath); robust_close(pFile, fd, __LINE__); } fprintf(stderr, "failed to break stale lock on %s, %s\n", cPath, errmsg); } return rc; } |
︙ | ︙ | |||
7503 7504 7505 7506 7507 7508 7509 | UNIXVFS("unix-proxy", proxyIoFinder ), #endif }; unsigned int i; /* Loop counter */ /* Double-check that the aSyscall[] array has been constructed ** correctly. See ticket [bb3a86e890c8e96ab] */ | | | 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 | UNIXVFS("unix-proxy", proxyIoFinder ), #endif }; unsigned int i; /* Loop counter */ /* Double-check that the aSyscall[] array has been constructed ** correctly. See ticket [bb3a86e890c8e96ab] */ assert( ArraySize(aSyscall)==18 ); /* Register all VFSes defined in the aVfs[] array */ for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){ sqlite3_vfs_register(&aVfs[i], i==0); } return SQLITE_OK; } |
︙ | ︙ |
Changes to src/os_win.c.
︙ | ︙ | |||
2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 | DWORD dwFlagsAndAttributes = 0; #if SQLITE_OS_WINCE int isTemp = 0; #endif winFile *pFile = (winFile*)id; void *zConverted; /* Filename in OS encoding */ const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */ /* If argument zPath is a NULL pointer, this function is required to open ** a temporary file. Use this buffer to store the file name in. */ char zTmpname[MAX_PATH+1]; /* Buffer used to create temp filename */ int rc = SQLITE_OK; /* Function Return Code */ | > | 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 | DWORD dwFlagsAndAttributes = 0; #if SQLITE_OS_WINCE int isTemp = 0; #endif winFile *pFile = (winFile*)id; void *zConverted; /* Filename in OS encoding */ const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */ int cnt = 0; /* If argument zPath is a NULL pointer, this function is required to open ** a temporary file. Use this buffer to store the file name in. */ char zTmpname[MAX_PATH+1]; /* Buffer used to create temp filename */ int rc = SQLITE_OK; /* Function Return Code */ |
︙ | ︙ | |||
2306 2307 2308 2309 2310 2311 2312 | /* Reports from the internet are that performance is always ** better if FILE_FLAG_RANDOM_ACCESS is used. Ticket #2699. */ #if SQLITE_OS_WINCE dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS; #endif if( isNT() ){ | | | | < | | | < > | | | < | | | < > > > | 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 | /* Reports from the internet are that performance is always ** better if FILE_FLAG_RANDOM_ACCESS is used. Ticket #2699. */ #if SQLITE_OS_WINCE dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS; #endif if( isNT() ){ while( (h = CreateFileW((WCHAR*)zConverted, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL))==INVALID_HANDLE_VALUE && retryIoerr(&cnt) ){} /* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. ** Since the ASCII version of these Windows API do not exist for WINCE, ** it's important to not reference them for WINCE builds. */ #if SQLITE_OS_WINCE==0 }else{ while( (h = CreateFileA((char*)zConverted, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL))==INVALID_HANDLE_VALUE && retryIoerr(&cnt) ){} #endif } logIoerr(cnt); OSTRACE(("OPEN %d %s 0x%lx %s\n", h, zName, dwDesiredAccess, h==INVALID_HANDLE_VALUE ? "failed" : "ok")); if( h==INVALID_HANDLE_VALUE ){ pFile->lastErrno = GetLastError(); |
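The loops above keep calling CreateFileW/CreateFileA for as long as retryIoerr() allows, and logIoerr() then records how many attempts were needed. Neither helper is shown in this diff; the sketch below only illustrates the general bounded-retry pattern such a helper implements, and the retry limit, delay and error-code tests are assumptions made for the example, not the actual os_win.c implementation.

#include <windows.h>

#define MAX_IOERR_RETRIES  10   /* assumed retry budget, illustration only */
#define IOERR_RETRY_DELAY  25   /* assumed delay between attempts, in ms */

/* Return non-zero (after a short sleep) if the failed operation should be
** attempted again; return 0 once the budget is used up or the error does
** not look transient.  *pnRetry counts attempts made so far. */
static int exampleRetryIoerr(int *pnRetry){
  DWORD e = GetLastError();
  if( *pnRetry>=MAX_IOERR_RETRIES ) return 0;
  if( e!=ERROR_ACCESS_DENIED && e!=ERROR_SHARING_VIOLATION
   && e!=ERROR_LOCK_VIOLATION ){
    return 0;
  }
  Sleep(IOERR_RETRY_DELAY);
  (*pnRetry)++;
  return 1;
}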
︙ | ︙ |
Changes to src/pcache1.c.
︙ | ︙ | |||
20 21 22 23 24 25 26 27 28 29 30 31 32 33 | #include "sqliteInt.h" typedef struct PCache1 PCache1; typedef struct PgHdr1 PgHdr1; typedef struct PgFreeslot PgFreeslot; typedef struct PGroup PGroup; /* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set ** of one or more PCaches that are able to recycle each others unpinned ** pages when they are under memory pressure. A PGroup is an instance of ** the following object. ** ** This page cache implementation works in one of two modes: ** | > > > | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 | #include "sqliteInt.h" typedef struct PCache1 PCache1; typedef struct PgHdr1 PgHdr1; typedef struct PgFreeslot PgFreeslot; typedef struct PGroup PGroup; typedef struct PGroupBlock PGroupBlock; typedef struct PGroupBlockList PGroupBlockList; /* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set ** of one or more PCaches that are able to recycle each others unpinned ** pages when they are under memory pressure. A PGroup is an instance of ** the following object. ** ** This page cache implementation works in one of two modes: ** |
︙ | ︙ | |||
49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 | struct PGroup { sqlite3_mutex *mutex; /* MUTEX_STATIC_LRU or NULL */ int nMaxPage; /* Sum of nMax for purgeable caches */ int nMinPage; /* Sum of nMin for purgeable caches */ int mxPinned; /* nMaxpage + 10 - nMinPage */ int nCurrentPage; /* Number of purgeable pages allocated */ PgHdr1 *pLruHead, *pLruTail; /* LRU list of unpinned pages */ }; /* Each page cache is an instance of the following object. Every ** open database file (including each in-memory database and each ** temporary or transient database) has a single page cache which ** is an instance of this object. ** ** Pointers to structures of this type are cast and returned as | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 | struct PGroup { sqlite3_mutex *mutex; /* MUTEX_STATIC_LRU or NULL */ int nMaxPage; /* Sum of nMax for purgeable caches */ int nMinPage; /* Sum of nMin for purgeable caches */ int mxPinned; /* nMaxpage + 10 - nMinPage */ int nCurrentPage; /* Number of purgeable pages allocated */ PgHdr1 *pLruHead, *pLruTail; /* LRU list of unpinned pages */ PGroupBlockList *pBlockList; /* List of block-lists for this group */ }; /* ** If SQLITE_PAGECACHE_BLOCKALLOC is defined when the library is built, ** each PGroup structure has a linked list of the the following starting ** at PGroup.pBlockList. There is one entry for each distinct page-size ** currently used by members of the PGroup (i.e. 1024 bytes, 4096 bytes ** etc.). Variable PGroupBlockList.nByte is set to the actual allocation ** size requested by each pcache, which is the database page-size plus ** the various header structures used by the pcache, pager and btree layers. ** Usually around (pgsz+200) bytes. ** ** This size (pgsz+200) bytes is not allocated efficiently by some ** implementations of malloc. In particular, some implementations are only ** able to allocate blocks of memory chunks of 2^N bytes, where N is some ** integer value. Since the page-size is a power of 2, this means we ** end up wasting (pgsz-200) bytes in each allocation. ** ** If SQLITE_PAGECACHE_BLOCKALLOC is defined, the (pgsz+200) byte blocks ** are not allocated directly. Instead, blocks of roughly M*(pgsz+200) bytes ** are requested from malloc allocator. After a block is returned, ** sqlite3MallocSize() is used to determine how many (pgsz+200) byte ** allocations can fit in the space returned by malloc(). This value may ** be more than M. ** ** The blocks are stored in a doubly-linked list. Variable PGroupBlock.nEntry ** contains the number of allocations that will fit in the aData[] space. ** nEntry is limited to the number of bits in bitmask mUsed. If a slot ** within aData is in use, the corresponding bit in mUsed is set. Thus ** when (mUsed+1==(1 << nEntry)) the block is completely full. ** ** Each time a slot within a block is freed, the block is moved to the start ** of the linked-list. And if a block becomes completely full, then it is ** moved to the end of the list. As a result, when searching for a free ** slot, only the first block in the list need be examined. If it is full, ** then it is guaranteed that all blocks are full. 
*/ struct PGroupBlockList { int nByte; /* Size of each allocation in bytes */ PGroupBlock *pFirst; /* First PGroupBlock in list */ PGroupBlock *pLast; /* Last PGroupBlock in list */ PGroupBlockList *pNext; /* Next block-list attached to group */ }; struct PGroupBlock { Bitmask mUsed; /* Mask of used slots */ int nEntry; /* Maximum number of allocations in aData[] */ u8 *aData; /* Pointer to data block */ PGroupBlock *pNext; /* Next PGroupBlock in list */ PGroupBlock *pPrev; /* Previous PGroupBlock in list */ PGroupBlockList *pList; /* Owner list */ }; /* Minimum value for PGroupBlock.nEntry */ #define PAGECACHE_BLOCKALLOC_MINENTRY 15 /* Each page cache is an instance of the following object. Every ** open database file (including each in-memory database and each ** temporary or transient database) has a single page cache which ** is an instance of this object. ** ** Pointers to structures of this type are cast and returned as |
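Editor's note: the mUsed bookkeeping described in the comment above can be made concrete with a small sketch. This is not part of the check-in; it assumes the Bitmask type and the BMS constant from sqliteInt.h and mirrors the slot search that pcache1AllocPage() performs further down.

/* Find the index of a free slot within a block, or -1 if the block is full.
** Editor's sketch only.  nEntry is always less than BMS, so each slot owns
** exactly one bit of mUsed.
*/
static int findFreeSlot(Bitmask mUsed, int nEntry){
  int i;
  for(i=0; i<nEntry; i++){
    if( (mUsed & ((Bitmask)1<<i))==0 ) return i;   /* slot i is unused */
  }
  /* Reaching this point means mUsed==(((Bitmask)1<<nEntry)-1): block full */
  return -1;
}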
︙ | ︙ | |||
153 154 155 156 157 158 159 160 161 162 163 164 165 166 | ** a pointer to a block of szPage bytes of data and the return value is ** a pointer to the associated PgHdr1 structure. ** ** assert( PGHDR1_TO_PAGE(PAGE_TO_PGHDR1(pCache, X))==X ); */ #define PGHDR1_TO_PAGE(p) (void*)(((char*)p) - p->pCache->szPage) #define PAGE_TO_PGHDR1(c, p) (PgHdr1*)(((char*)p) + c->szPage) /* ** Macros to enter and leave the PCache LRU mutex. */ #define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex) #define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex) | > > > > > > > > > > > | 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 | ** a pointer to a block of szPage bytes of data and the return value is ** a pointer to the associated PgHdr1 structure. ** ** assert( PGHDR1_TO_PAGE(PAGE_TO_PGHDR1(pCache, X))==X ); */ #define PGHDR1_TO_PAGE(p) (void*)(((char*)p) - p->pCache->szPage) #define PAGE_TO_PGHDR1(c, p) (PgHdr1*)(((char*)p) + c->szPage) /* ** Blocks used by the SQLITE_PAGECACHE_BLOCKALLOC blocks to store/retrieve ** a PGroupBlock pointer based on a pointer to a page buffer. */ #define PAGE_SET_BLOCKPTR(pCache, pPg, pBlock) \ ( *(PGroupBlock **)&(((u8*)pPg)[sizeof(PgHdr1) + pCache->szPage]) = pBlock ) #define PAGE_GET_BLOCKPTR(pCache, pPg) \ ( *(PGroupBlock **)&(((u8*)pPg)[sizeof(PgHdr1) + pCache->szPage]) ) /* ** Macros to enter and leave the PCache LRU mutex. */ #define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex) #define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex) |
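Editor's note on the two macros added above (the layout is inferred from PGHDR1_TO_PAGE and the block allocator, not spelled out in the check-in): each nByte slot inside PGroupBlock.aData[] holds the page data first, the PgHdr1 next, and the back-pointer last, so the owning block can be recovered from a page pointer in constant time when the page is freed.

/*    pPg (what the pager layer sees)
**     |
**     v
**     +---------------------+---------+-----------------------+-------+
**     |  szPage bytes data  |  PgHdr1 |  PGroupBlock *pBlock  |  pad  |
**     +---------------------+---------+-----------------------+-------+
**                                      ^
**                                      PAGE_GET_BLOCKPTR() reads here
*/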
︙ | ︙ | |||
279 280 281 282 283 284 285 286 287 288 289 290 | iSize = sqlite3MallocSize(p); sqlite3MemdebugSetType(p, MEMTYPE_PCACHE); return iSize; } } #endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */ /* ** Allocate a new page object initially associated with cache pCache. */ static PgHdr1 *pcache1AllocPage(PCache1 *pCache){ int nByte = sizeof(PgHdr1) + pCache->szPage; | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 | iSize = sqlite3MallocSize(p); sqlite3MemdebugSetType(p, MEMTYPE_PCACHE); return iSize; } } #endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */ #ifdef SQLITE_PAGECACHE_BLOCKALLOC /* ** The block pBlock belongs to list pList but is not currently linked in. ** Insert it into the start of the list. */ static void addBlockToList(PGroupBlockList *pList, PGroupBlock *pBlock){ pBlock->pPrev = 0; pBlock->pNext = pList->pFirst; pList->pFirst = pBlock; if( pBlock->pNext ){ pBlock->pNext->pPrev = pBlock; }else{ assert( pList->pLast==0 ); pList->pLast = pBlock; } } /* ** If there are no blocks in the list headed by pList, remove pList ** from the pGroup->pBlockList list and free it with sqlite3_free(). */ static void freeListIfEmpty(PGroup *pGroup, PGroupBlockList *pList){ assert( sqlite3_mutex_held(pGroup->mutex) ); if( pList->pFirst==0 ){ PGroupBlockList **pp; for(pp=&pGroup->pBlockList; *pp!=pList; pp=&(*pp)->pNext); *pp = (*pp)->pNext; sqlite3_free(pList); } } #endif /* SQLITE_PAGECACHE_BLOCKALLOC */ /* ** Allocate a new page object initially associated with cache pCache. */ static PgHdr1 *pcache1AllocPage(PCache1 *pCache){ int nByte = sizeof(PgHdr1) + pCache->szPage; void *pPg = 0; PgHdr1 *p; #ifdef SQLITE_PAGECACHE_BLOCKALLOC PGroup *pGroup = pCache->pGroup; PGroupBlockList *pList; PGroupBlock *pBlock; int i; nByte += sizeof(PGroupBlockList *); nByte = ROUND8(nByte); do{ for(pList=pGroup->pBlockList; pList; pList=pList->pNext){ if( pList->nByte==nByte ) break; } if( pList==0 ){ PGroupBlockList *pNew; pcache1LeaveMutex(pCache->pGroup); pNew = (PGroupBlockList *)sqlite3MallocZero(sizeof(PGroupBlockList)); pcache1EnterMutex(pCache->pGroup); if( pNew==0 ){ /* malloc() failure. Return early. */ return 0; } for(pList=pGroup->pBlockList; pList; pList=pList->pNext){ if( pList->nByte==nByte ) break; } if( pList ){ sqlite3_free(pNew); }else{ pNew->nByte = nByte; pNew->pNext = pGroup->pBlockList; pGroup->pBlockList = pNew; pList = pNew; } } }while( pList==0 ); pBlock = pList->pFirst; if( pBlock==0 || pBlock->mUsed==(((Bitmask)1<<pBlock->nEntry)-1) ){ int sz; /* Allocate a new block. Try to allocate enough space for the PGroupBlock ** structure and MINENTRY allocations of nByte bytes each. If the ** allocator returns more memory than requested, then more than MINENTRY ** allocations may fit in it. 
*/ pcache1LeaveMutex(pCache->pGroup); sz = sizeof(PGroupBlock) + PAGECACHE_BLOCKALLOC_MINENTRY * nByte; pBlock = (PGroupBlock *)sqlite3Malloc(sz); pcache1EnterMutex(pCache->pGroup); if( !pBlock ){ freeListIfEmpty(pGroup, pList); return 0; } pBlock->nEntry = (sqlite3MallocSize(pBlock) - sizeof(PGroupBlock)) / nByte; if( pBlock->nEntry>=BMS ){ pBlock->nEntry = BMS-1; } pBlock->pList = pList; pBlock->mUsed = 0; pBlock->aData = (u8 *)&pBlock[1]; addBlockToList(pList, pBlock); sz = sqlite3MallocSize(pBlock); sqlite3_mutex_enter(pcache1.mutex); sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz); sqlite3_mutex_leave(pcache1.mutex); } for(i=0; pPg==0 && ALWAYS(i<pBlock->nEntry); i++){ if( 0==(pBlock->mUsed & ((Bitmask)1<<i)) ){ pBlock->mUsed |= ((Bitmask)1<<i); pPg = (void *)&pBlock->aData[pList->nByte * i]; } } assert( pPg ); PAGE_SET_BLOCKPTR(pCache, pPg, pBlock); /* If the block is now full, shift it to the end of the list */ if( pBlock->mUsed==(((Bitmask)1<<pBlock->nEntry)-1) && pList->pLast!=pBlock ){ assert( pList->pFirst==pBlock ); assert( pBlock->pPrev==0 ); assert( pList->pLast->pNext==0 ); pList->pFirst = pBlock->pNext; pList->pFirst->pPrev = 0; pBlock->pPrev = pList->pLast; pBlock->pNext = 0; pList->pLast->pNext = pBlock; pList->pLast = pBlock; } #else /* The group mutex must be released before pcache1Alloc() is called. This ** is because it may call sqlite3_release_memory(), which assumes that ** this mutex is not held. */ assert( sqlite3_mutex_held(pCache->pGroup->mutex) ); pcache1LeaveMutex(pCache->pGroup); pPg = pcache1Alloc(nByte); pcache1EnterMutex(pCache->pGroup); #endif if( pPg ){ p = PAGE_TO_PGHDR1(pCache, pPg); if( pCache->bPurgeable ){ pCache->pGroup->nCurrentPage++; } }else{ p = 0; |
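The nEntry computation in the hunk above exploits allocators that round requests up. An editor's worked example, using assumed rather than measured sizes:

/* Suppose nByte==1232 and sizeof(PGroupBlock)==48 (illustrative values).
** The request made for a new block is
**     sz = 48 + 15*1232 = 18528 bytes.
** If the underlying allocator rounds this up to 32768 bytes, then
**     nEntry = (32768 - 48) / 1232 = 26
** slots fit in the block, comfortably below the BMS cap, so the surplus
** memory returned by malloc() is put to use rather than wasted.
*/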
︙ | ︙ | |||
307 308 309 310 311 312 313 314 315 316 | ** The pointer is allowed to be NULL, which is prudent. But it turns out ** that the current implementation happens to never call this routine ** with a NULL pointer, so we mark the NULL test with ALWAYS(). */ static void pcache1FreePage(PgHdr1 *p){ if( ALWAYS(p) ){ PCache1 *pCache = p->pCache; if( pCache->bPurgeable ){ pCache->pGroup->nCurrentPage--; } | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > < | 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 | ** The pointer is allowed to be NULL, which is prudent. But it turns out ** that the current implementation happens to never call this routine ** with a NULL pointer, so we mark the NULL test with ALWAYS(). */ static void pcache1FreePage(PgHdr1 *p){ if( ALWAYS(p) ){ PCache1 *pCache = p->pCache; void *pPg = PGHDR1_TO_PAGE(p); #ifdef SQLITE_PAGECACHE_BLOCKALLOC PGroupBlock *pBlock = PAGE_GET_BLOCKPTR(pCache, pPg); PGroupBlockList *pList = pBlock->pList; int i = ((u8 *)pPg - pBlock->aData) / pList->nByte; assert( pPg==(void *)&pBlock->aData[i*pList->nByte] ); assert( pBlock->mUsed & ((Bitmask)1<<i) ); pBlock->mUsed &= ~((Bitmask)1<<i); /* Remove the block from the list. If it is completely empty, free it. ** Or if it is not completely empty, re-insert it at the start of the ** list. */ if( pList->pFirst==pBlock ){ pList->pFirst = pBlock->pNext; if( pList->pFirst ) pList->pFirst->pPrev = 0; }else{ pBlock->pPrev->pNext = pBlock->pNext; } if( pList->pLast==pBlock ){ pList->pLast = pBlock->pPrev; if( pList->pLast ) pList->pLast->pNext = 0; }else{ pBlock->pNext->pPrev = pBlock->pPrev; } if( pBlock->mUsed==0 ){ PGroup *pGroup = p->pCache->pGroup; int sz = sqlite3MallocSize(pBlock); sqlite3_mutex_enter(pcache1.mutex); sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, -sz); sqlite3_mutex_leave(pcache1.mutex); freeListIfEmpty(pGroup, pList); sqlite3_free(pBlock); }else{ addBlockToList(pList, pBlock); } #else assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) ); pcache1Free(pPg); #endif if( pCache->bPurgeable ){ pCache->pGroup->nCurrentPage--; } } } /* ** Malloc function used by SQLite to obtain space from the buffer configured ** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer ** exists, this function falls back to sqlite3Malloc(). |
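The slot index recovered in pcache1FreePage() above is simple pointer arithmetic. An editor's worked example, reusing the same illustrative nByte as before:

/* If pList->nByte==1232 and pPg sits at pBlock->aData+2464, then
**     i = 2464 / 1232 = 2,
** so bit ((Bitmask)1<<2) is cleared in pBlock->mUsed.  If the block is then
** completely empty (mUsed==0) it is handed back to the allocator; otherwise
** it is re-inserted at the front of its list so that the next call to
** pcache1AllocPage() finds its free slot without scanning.
*/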
︙ | ︙ | |||
748 749 750 751 752 753 754 | } /* Step 5. If a usable page buffer has still not been found, ** attempt to allocate a new one. */ if( !pPage ){ if( createFlag==1 ) sqlite3BeginBenignMalloc(); | < < | 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 | } /* Step 5. If a usable page buffer has still not been found, ** attempt to allocate a new one. */ if( !pPage ){ if( createFlag==1 ) sqlite3BeginBenignMalloc(); pPage = pcache1AllocPage(pCache); if( createFlag==1 ) sqlite3EndBenignMalloc(); } if( pPage ){ unsigned int h = iKey % pCache->nHash; pCache->nPage++; pPage->iKey = iKey; |
︙ | ︙ |
Changes to src/tclsqlite.c.
︙ | ︙ | |||
2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 | } dbEvalInit(&sEval, pDb, objv[2], 0); rc = dbEvalStep(&sEval); if( choice==DB_ONECOLUMN ){ if( rc==TCL_OK ){ Tcl_SetObjResult(interp, dbEvalColumnValue(&sEval, 0)); } }else if( rc==TCL_BREAK || rc==TCL_OK ){ Tcl_SetObjResult(interp, Tcl_NewBooleanObj(rc==TCL_OK)); } dbEvalFinalize(&sEval); if( rc==TCL_BREAK ){ | > > | 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 | } dbEvalInit(&sEval, pDb, objv[2], 0); rc = dbEvalStep(&sEval); if( choice==DB_ONECOLUMN ){ if( rc==TCL_OK ){ Tcl_SetObjResult(interp, dbEvalColumnValue(&sEval, 0)); }else if( rc==TCL_BREAK ){ Tcl_ResetResult(interp); } }else if( rc==TCL_BREAK || rc==TCL_OK ){ Tcl_SetObjResult(interp, Tcl_NewBooleanObj(rc==TCL_OK)); } dbEvalFinalize(&sEval); if( rc==TCL_BREAK ){ |
︙ | ︙ |
Changes to src/test1.c.
︙ | ︙ | |||
5082 5083 5084 5085 5086 5087 5088 | } return TCL_OK; } /* ** tclcmd: file_control_sizehint_test DB DBNAME SIZE ** | | < | | 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 | } return TCL_OK; } /* ** tclcmd: file_control_sizehint_test DB DBNAME SIZE ** ** This TCL command runs the sqlite3_file_control interface ** with SQLITE_FCNTL_SIZE_HINT */ static int file_control_sizehint_test( ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ int objc, /* Number of arguments */ Tcl_Obj *CONST objv[] /* Command arguments */ ){ |
︙ | ︙ | |||
5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 | #if SQLITE_OS_WIN /* ** Information passed from the main thread into the windows file locker ** background thread. */ struct win32FileLocker { HANDLE h; /* Handle of the file to be locked */ int delay1; /* Delay before locking */ int delay2; /* Delay before unlocking */ int ok; /* Finished ok */ int err; /* True if an error occurs */ }; #endif #if SQLITE_OS_WIN /* ** The background thread that does file locking. */ static void win32_file_locker(void *pAppData){ struct win32FileLocker *p = (struct win32FileLocker*)pAppData; if( p->delay1 ) Sleep(p->delay1); if( LockFile(p->h, 0, 0, 100000000, 0) ){ Sleep(p->delay2); UnlockFile(p->h, 0, 0, 100000000, 0); p->ok = 1; }else{ p->err = 1; | > > > > > > > > | 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 | #if SQLITE_OS_WIN /* ** Information passed from the main thread into the windows file locker ** background thread. */ struct win32FileLocker { char *evName; /* Name of event to signal thread startup */ HANDLE h; /* Handle of the file to be locked */ int delay1; /* Delay before locking */ int delay2; /* Delay before unlocking */ int ok; /* Finished ok */ int err; /* True if an error occurs */ }; #endif #if SQLITE_OS_WIN /* ** The background thread that does file locking. */ static void win32_file_locker(void *pAppData){ struct win32FileLocker *p = (struct win32FileLocker*)pAppData; if( p->evName ){ HANDLE ev = OpenEvent(EVENT_MODIFY_STATE, FALSE, p->evName); if ( ev ){ SetEvent(ev); CloseHandle(ev); } } if( p->delay1 ) Sleep(p->delay1); if( LockFile(p->h, 0, 0, 100000000, 0) ){ Sleep(p->delay2); UnlockFile(p->h, 0, 0, 100000000, 0); p->ok = 1; }else{ p->err = 1; |
︙ | ︙ | |||
5870 5871 5872 5873 5874 5875 5876 | */ static int win32_file_lock( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ | | > > > < | 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 | */ static int win32_file_lock( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ static struct win32FileLocker x = { "win32_file_lock", 0, 0, 0, 0, 0 }; const char *zFilename; char zBuf[200]; int retry = 0; HANDLE ev; DWORD wResult; if( objc!=4 && objc!=1 ){ Tcl_WrongNumArgs(interp, 1, objv, "FILENAME DELAY1 DELAY2"); return TCL_ERROR; } if( objc==1 ){ sqlite3_snprintf(sizeof(zBuf), zBuf, "%d %d %d %d %d", x.ok, x.err, x.delay1, x.delay2, x.h); Tcl_AppendResult(interp, zBuf, (char*)0); return TCL_OK; } while( x.h && retry<30 ){ retry++; |
︙ | ︙ | |||
5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 | zFilename = Tcl_GetString(objv[1]); x.h = CreateFile(zFilename, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, 0, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0); if( !x.h ){ Tcl_AppendResult(interp, "cannot open file: ", zFilename, (char*)0); return TCL_ERROR; } _beginthread(win32_file_locker, 0, (void*)&x); Sleep(0); return TCL_OK; } #endif /* ** optimization_control DB OPT BOOLEAN | > > > > > > > > > > > > | 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 | zFilename = Tcl_GetString(objv[1]); x.h = CreateFile(zFilename, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, 0, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0); if( !x.h ){ Tcl_AppendResult(interp, "cannot open file: ", zFilename, (char*)0); return TCL_ERROR; } ev = CreateEvent(NULL, TRUE, FALSE, x.evName); if ( !ev ){ Tcl_AppendResult(interp, "cannot create event: ", x.evName, (char*)0); return TCL_ERROR; } _beginthread(win32_file_locker, 0, (void*)&x); Sleep(0); if ( (wResult = WaitForSingleObject(ev, 10000))!=WAIT_OBJECT_0 ){ sqlite3_snprintf(sizeof(zBuf), zBuf, "0x%x", wResult); Tcl_AppendResult(interp, "wait failed: ", zBuf, (char*)0); CloseHandle(ev); return TCL_ERROR; } CloseHandle(ev); return TCL_OK; } #endif /* ** optimization_control DB OPT BOOLEAN |
︙ | ︙ |
Changes to src/test6.c.
︙ | ︙ | |||
501 502 503 504 505 506 507 508 509 510 511 512 513 514 | static int cfUnlock(sqlite3_file *pFile, int eLock){ return sqlite3OsUnlock(((CrashFile *)pFile)->pRealFile, eLock); } static int cfCheckReservedLock(sqlite3_file *pFile, int *pResOut){ return sqlite3OsCheckReservedLock(((CrashFile *)pFile)->pRealFile, pResOut); } static int cfFileControl(sqlite3_file *pFile, int op, void *pArg){ return sqlite3OsFileControl(((CrashFile *)pFile)->pRealFile, op, pArg); } /* ** The xSectorSize() and xDeviceCharacteristics() functions return ** the global values configured by the [sqlite_crashparams] tcl * interface. | > > > > > > > | 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 | static int cfUnlock(sqlite3_file *pFile, int eLock){ return sqlite3OsUnlock(((CrashFile *)pFile)->pRealFile, eLock); } static int cfCheckReservedLock(sqlite3_file *pFile, int *pResOut){ return sqlite3OsCheckReservedLock(((CrashFile *)pFile)->pRealFile, pResOut); } static int cfFileControl(sqlite3_file *pFile, int op, void *pArg){ if( op==SQLITE_FCNTL_SIZE_HINT ){ CrashFile *pCrash = (CrashFile *)pFile; i64 nByte = *(i64 *)pArg; if( nByte>pCrash->iSize ){ return cfWrite(pFile, "", 1, nByte-1); } } return sqlite3OsFileControl(((CrashFile *)pFile)->pRealFile, op, pArg); } /* ** The xSectorSize() and xDeviceCharacteristics() functions return ** the global values configured by the [sqlite_crashparams] tcl * interface. |
︙ | ︙ |
Changes to src/test_config.c.
︙ | ︙ | |||
358 359 360 361 362 363 364 365 366 367 368 369 370 371 | #endif #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT Tcl_SetVar2(interp, "sqlite_options", "memorymanage", "1", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "memorymanage", "0", TCL_GLOBAL_ONLY); #endif #ifdef SQLITE_OMIT_OR_OPTIMIZATION Tcl_SetVar2(interp, "sqlite_options", "or_opt", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "or_opt", "1", TCL_GLOBAL_ONLY); #endif | > > > > > > | 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 | #endif #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT Tcl_SetVar2(interp, "sqlite_options", "memorymanage", "1", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "memorymanage", "0", TCL_GLOBAL_ONLY); #endif #ifdef SQLITE_OMIT_MERGE_SORT Tcl_SetVar2(interp, "sqlite_options", "mergesort", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "mergesort", "1", TCL_GLOBAL_ONLY); #endif #ifdef SQLITE_OMIT_OR_OPTIMIZATION Tcl_SetVar2(interp, "sqlite_options", "or_opt", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "or_opt", "1", TCL_GLOBAL_ONLY); #endif |
︙ | ︙ | |||
554 555 556 557 558 559 560 561 562 563 564 565 566 567 | #endif #ifdef YYTRACKMAXSTACKDEPTH Tcl_SetVar2(interp, "sqlite_options", "yytrackmaxstackdepth", "1", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "yytrackmaxstackdepth", "0", TCL_GLOBAL_ONLY); #endif #ifdef __APPLE__ # if defined(__ppc__) Tcl_SetVar2(interp, "os_options", "arch", "ppc", TCL_GLOBAL_ONLY); # elif defined(__i386__) Tcl_SetVar2(interp, "os_options", "arch", "i386", TCL_GLOBAL_ONLY); # elif defined(__x86_64__) | > > > > > > | 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 | #endif #ifdef YYTRACKMAXSTACKDEPTH Tcl_SetVar2(interp, "sqlite_options", "yytrackmaxstackdepth", "1", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "yytrackmaxstackdepth", "0", TCL_GLOBAL_ONLY); #endif #ifdef SQLITE_PAGECACHE_BLOCKALLOC Tcl_SetVar2(interp, "sqlite_options", "blockalloc", "1", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "blockalloc", "0", TCL_GLOBAL_ONLY); #endif #ifdef __APPLE__ # if defined(__ppc__) Tcl_SetVar2(interp, "os_options", "arch", "ppc", TCL_GLOBAL_ONLY); # elif defined(__i386__) Tcl_SetVar2(interp, "os_options", "arch", "i386", TCL_GLOBAL_ONLY); # elif defined(__x86_64__) |
︙ | ︙ |
Changes to src/test_multiplex.c.
︙ | ︙ | |||
35 36 37 38 39 40 41 | ** If the makeDefault parameter is TRUE then multiplex becomes the new ** default VFS. Otherwise, you can use the multiplex VFS by specifying ** "multiplex" as the 4th parameter to sqlite3_open_v2() or by employing ** URI filenames and adding "vfs=multiplex" as a parameter to the filename ** URI. ** ** The multiplex VFS allows databases up to 32 GiB in size. But it splits | | | > > > > > | 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 | ** If the makeDefault parameter is TRUE then multiplex becomes the new ** default VFS. Otherwise, you can use the multiplex VFS by specifying ** "multiplex" as the 4th parameter to sqlite3_open_v2() or by employing ** URI filenames and adding "vfs=multiplex" as a parameter to the filename ** URI. ** ** The multiplex VFS allows databases up to 32 GiB in size. But it splits ** the files up into smaller pieces, so that they will work even on ** filesystems that do not support large files. The default chunk size ** is 2147418112 bytes (which is 64KiB less than 2GiB) but this can be ** changed at compile-time by defining the SQLITE_MULTIPLEX_CHUNK_SIZE ** macro. Use the "chunksize=NNNN" query parameter with a URI filename ** in order to select an alternative chunk size for individual connections ** at run-time. */ #include "sqlite3.h" #include <string.h> #include <assert.h> #include <stdlib.h> #include "test_multiplex.h" |
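Editor's usage sketch for the "chunksize=NNNN" query parameter described above. This is not part of the check-in: it assumes the multiplex shim has already been registered (via the sqlite3_multiplex_initialize() entry point declared in test_multiplex.h) and that URI filenames are enabled for this connection through the SQLITE_OPEN_URI flag; the 1 MiB value is illustrative only and the shim may adjust or reject it.

sqlite3 *db = 0;
int rc = sqlite3_open_v2(
    "file:test.db?vfs=multiplex&chunksize=1048576",   /* 1 MiB chunks */
    &db,
    SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_URI,
    0                /* VFS named in the URI, so no 4th-argument override */
);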
︙ | ︙ |
Changes to src/test_syscall.c.
︙ | ︙ | |||
321 322 323 324 325 326 327 328 329 330 331 332 333 334 | } /* ** A wrapper around write(). */ static int ts_write(int fd, const void *aBuf, size_t nBuf){ if( tsIsFailErrno("write") ){ return -1; } return orig_write(fd, aBuf, nBuf); } /* ** A wrapper around pwrite(). | > | 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 | } /* ** A wrapper around write(). */ static int ts_write(int fd, const void *aBuf, size_t nBuf){ if( tsIsFailErrno("write") ){ if( tsErrno("write")==EINTR ) orig_write(fd, aBuf, nBuf/2); return -1; } return orig_write(fd, aBuf, nBuf); } /* ** A wrapper around pwrite(). |
︙ | ︙ | |||
667 668 669 670 671 672 673 | return TCL_OK; } #else int SqlitetestSyscall_Init(Tcl_Interp *interp){ return TCL_OK; } #endif | < | 668 669 670 671 672 673 674 | return TCL_OK; } #else int SqlitetestSyscall_Init(Tcl_Interp *interp){ return TCL_OK; } #endif |
Changes to src/test_vfs.c.
︙ | ︙ | |||
119 120 121 122 123 124 125 126 127 128 129 130 131 132 | #define TESTVFS_SYNC_MASK 0x00000200 #define TESTVFS_DELETE_MASK 0x00000400 #define TESTVFS_CLOSE_MASK 0x00000800 #define TESTVFS_WRITE_MASK 0x00001000 #define TESTVFS_TRUNCATE_MASK 0x00002000 #define TESTVFS_ACCESS_MASK 0x00004000 #define TESTVFS_FULLPATHNAME_MASK 0x00008000 #define TESTVFS_ALL_MASK 0x0001FFFF #define TESTVFS_MAX_PAGES 1024 /* ** A shared-memory buffer. There is one of these objects for each shared | > > | 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 | #define TESTVFS_SYNC_MASK 0x00000200 #define TESTVFS_DELETE_MASK 0x00000400 #define TESTVFS_CLOSE_MASK 0x00000800 #define TESTVFS_WRITE_MASK 0x00001000 #define TESTVFS_TRUNCATE_MASK 0x00002000 #define TESTVFS_ACCESS_MASK 0x00004000 #define TESTVFS_FULLPATHNAME_MASK 0x00008000 #define TESTVFS_READ_MASK 0x00010000 #define TESTVFS_ALL_MASK 0x0001FFFF #define TESTVFS_MAX_PAGES 1024 /* ** A shared-memory buffer. There is one of these objects for each shared |
︙ | ︙ | |||
321 322 323 324 325 326 327 | */ static int tvfsRead( sqlite3_file *pFile, void *zBuf, int iAmt, sqlite_int64 iOfst ){ | > | > > > > > > > > > > > | > > | 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 | */ static int tvfsRead( sqlite3_file *pFile, void *zBuf, int iAmt, sqlite_int64 iOfst ){ int rc = SQLITE_OK; TestvfsFd *pFd = tvfsGetFd(pFile); Testvfs *p = (Testvfs *)pFd->pVfs->pAppData; if( p->pScript && p->mask&TESTVFS_READ_MASK ){ tvfsExecTcl(p, "xRead", Tcl_NewStringObj(pFd->zFilename, -1), pFd->pShmId, 0 ); tvfsResultCode(p, &rc); } if( rc==SQLITE_OK && p->mask&TESTVFS_READ_MASK && tvfsInjectIoerr(p) ){ rc = SQLITE_IOERR; } if( rc==SQLITE_OK ){ rc = sqlite3OsRead(pFd->pReal, zBuf, iAmt, iOfst); } return rc; } /* ** Write data to an tvfs-file. */ static int tvfsWrite( sqlite3_file *pFile, |
︙ | ︙ | |||
1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 | { "xShmLock", TESTVFS_SHMLOCK_MASK }, { "xShmBarrier", TESTVFS_SHMBARRIER_MASK }, { "xShmUnmap", TESTVFS_SHMCLOSE_MASK }, { "xShmMap", TESTVFS_SHMMAP_MASK }, { "xSync", TESTVFS_SYNC_MASK }, { "xDelete", TESTVFS_DELETE_MASK }, { "xWrite", TESTVFS_WRITE_MASK }, { "xTruncate", TESTVFS_TRUNCATE_MASK }, { "xOpen", TESTVFS_OPEN_MASK }, { "xClose", TESTVFS_CLOSE_MASK }, { "xAccess", TESTVFS_ACCESS_MASK }, { "xFullPathname", TESTVFS_FULLPATHNAME_MASK }, }; Tcl_Obj **apElem = 0; | > | 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 | { "xShmLock", TESTVFS_SHMLOCK_MASK }, { "xShmBarrier", TESTVFS_SHMBARRIER_MASK }, { "xShmUnmap", TESTVFS_SHMCLOSE_MASK }, { "xShmMap", TESTVFS_SHMMAP_MASK }, { "xSync", TESTVFS_SYNC_MASK }, { "xDelete", TESTVFS_DELETE_MASK }, { "xWrite", TESTVFS_WRITE_MASK }, { "xRead", TESTVFS_READ_MASK }, { "xTruncate", TESTVFS_TRUNCATE_MASK }, { "xOpen", TESTVFS_OPEN_MASK }, { "xClose", TESTVFS_CLOSE_MASK }, { "xAccess", TESTVFS_ACCESS_MASK }, { "xFullPathname", TESTVFS_FULLPATHNAME_MASK }, }; Tcl_Obj **apElem = 0; |
︙ | ︙ |
Changes to src/vdbe.c.
︙ | ︙ | |||
153 154 155 156 157 158 159 160 161 162 163 164 165 166 | /* ** Call sqlite3VdbeMemExpandBlob() on the supplied value (type Mem*) ** P if required. */ #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0) /* ** Argument pMem points at a register that will be passed to a ** user-defined function or returned to the user as the result of a query. ** This routine sets the pMem->type variable used by the sqlite3_value_*() ** routines. */ void sqlite3VdbeMemStoreType(Mem *pMem){ | > > > > > > > | 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 | /* ** Call sqlite3VdbeMemExpandBlob() on the supplied value (type Mem*) ** P if required. */ #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0) /* Return true if the cursor was opened using the OP_OpenSorter opcode. */ #ifdef SQLITE_OMIT_MERGE_SORT # define isSorter(x) 0 #else # define isSorter(x) ((x)->pSorter!=0) #endif /* ** Argument pMem points at a register that will be passed to a ** user-defined function or returned to the user as the result of a query. ** This routine sets the pMem->type variable used by the sqlite3_value_*() ** routines. */ void sqlite3VdbeMemStoreType(Mem *pMem){ |
︙ | ︙ | |||
1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 | assert( pOut<=&aMem[p->nMem] ); assert( pIn1<=&aMem[p->nMem] ); assert( memIsValid(pIn1) ); memAboutToChange(p, pOut); zMalloc = pOut->zMalloc; pOut->zMalloc = 0; sqlite3VdbeMemMove(pOut, pIn1); pIn1->zMalloc = zMalloc; REGISTER_TRACE(p2++, pOut); pIn1++; pOut++; } break; } | > > > > > | 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 | assert( pOut<=&aMem[p->nMem] ); assert( pIn1<=&aMem[p->nMem] ); assert( memIsValid(pIn1) ); memAboutToChange(p, pOut); zMalloc = pOut->zMalloc; pOut->zMalloc = 0; sqlite3VdbeMemMove(pOut, pIn1); #ifdef SQLITE_DEBUG if( pOut->pScopyFrom>=&aMem[p1] && pOut->pScopyFrom<&aMem[p1+pOp->p3] ){ pOut->pScopyFrom += p1 - pOp->p2; } #endif pIn1->zMalloc = zMalloc; REGISTER_TRACE(p2++, pOut); pIn1++; pOut++; } break; } |
︙ | ︙ | |||
3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 | /* Opcode: OpenAutoindex P1 P2 * P4 * ** ** This opcode works the same as OP_OpenEphemeral. It has a ** different name to distinguish its use. Tables created using ** by this opcode will be used for automatically created transient ** indices in joins. */ case OP_OpenAutoindex: case OP_OpenEphemeral: { VdbeCursor *pCx; static const int vfsFlags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | | > | 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 | /* Opcode: OpenAutoindex P1 P2 * P4 * ** ** This opcode works the same as OP_OpenEphemeral. It has a ** different name to distinguish its use. Tables created using ** by this opcode will be used for automatically created transient ** indices in joins. */ case OP_OpenSorter: case OP_OpenAutoindex: case OP_OpenEphemeral: { VdbeCursor *pCx; static const int vfsFlags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | |
︙ | ︙ | |||
3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 | }else{ rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, 1, 0, pCx->pCursor); pCx->isTable = 1; } } pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); pCx->isIndex = !pCx->isTable; break; } /* Opcode: OpenPseudo P1 P2 P3 * * ** ** Open a new cursor that points to a fake table that contains a single ** row of data. The content of that one row in the content of memory | > > > > > | 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 | }else{ rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, 1, 0, pCx->pCursor); pCx->isTable = 1; } } pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); pCx->isIndex = !pCx->isTable; #ifndef SQLITE_OMIT_MERGE_SORT if( rc==SQLITE_OK && pOp->opcode==OP_OpenSorter ){ rc = sqlite3VdbeSorterInit(db, pCx); } #endif break; } /* Opcode: OpenPseudo P1 P2 P3 * * ** ** Open a new cursor that points to a fake table that contains a single ** row of data. The content of that one row in the content of memory |
︙ | ︙ | |||
4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 | assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1]; assert( pC->isTable || pOp->opcode==OP_RowKey ); assert( pC->isIndex || pOp->opcode==OP_RowData ); assert( pC!=0 ); assert( pC->nullRow==0 ); assert( pC->pseudoTableReg==0 ); assert( pC->pCursor!=0 ); pCrsr = pC->pCursor; assert( sqlite3BtreeCursorIsValid(pCrsr) ); /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or ** OP_Rewind/Op_Next with no intervening instructions that might invalidate ** the cursor. Hence the following sqlite3VdbeCursorMoveto() call is always | > > > > > > > | 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 | assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1]; assert( pC->isTable || pOp->opcode==OP_RowKey ); assert( pC->isIndex || pOp->opcode==OP_RowData ); assert( pC!=0 ); assert( pC->nullRow==0 ); assert( pC->pseudoTableReg==0 ); if( isSorter(pC) ){ assert( pOp->opcode==OP_RowKey ); rc = sqlite3VdbeSorterRowkey(pC, pOut); break; } assert( pC->pCursor!=0 ); pCrsr = pC->pCursor; assert( sqlite3BtreeCursorIsValid(pCrsr) ); /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or ** OP_Rewind/Op_Next with no intervening instructions that might invalidate ** the cursor. Hence the following sqlite3VdbeCursorMoveto() call is always |
︙ | ︙ | |||
4253 4254 4255 4256 4257 4258 4259 | BtCursor *pCrsr; int res; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); res = 1; | > > | | 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 | BtCursor *pCrsr; int res; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); res = 1; if( isSorter(pC) ){ rc = sqlite3VdbeSorterRewind(db, pC, &res); }else if( (pCrsr = pC->pCursor)!=0 ){ rc = sqlite3BtreeFirst(pCrsr, &res); pC->atFirst = res==0 ?1:0; pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; pC->rowidIsValid = 0; } pC->nullRow = (u8)res; |
︙ | ︙ | |||
4307 4308 4309 4310 4311 4312 4313 | CHECK_FOR_INTERRUPT; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); assert( pOp->p5<=ArraySize(p->aCounter) ); pC = p->apCsr[pOp->p1]; if( pC==0 ){ break; /* See ticket #2273 */ } | > > > > | | | | | | | | | > | 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 | CHECK_FOR_INTERRUPT; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); assert( pOp->p5<=ArraySize(p->aCounter) ); pC = p->apCsr[pOp->p1]; if( pC==0 ){ break; /* See ticket #2273 */ } if( isSorter(pC) ){ assert( pOp->opcode==OP_Next ); rc = sqlite3VdbeSorterNext(db, pC, &res); }else{ pCrsr = pC->pCursor; if( pCrsr==0 ){ pC->nullRow = 1; break; } res = 1; assert( pC->deferredMoveto==0 ); rc = pOp->opcode==OP_Next ? sqlite3BtreeNext(pCrsr, &res) : sqlite3BtreePrevious(pCrsr, &res); } pC->nullRow = (u8)res; pC->cacheStatus = CACHE_STALE; if( res==0 ){ pc = pOp->p2 - 1; if( pOp->p5 ) p->aCounter[pOp->p5-1]++; #ifdef SQLITE_TEST sqlite3_search_count++; |
︙ | ︙ | |||
4359 4360 4361 4362 4363 4364 4365 | pCrsr = pC->pCursor; if( ALWAYS(pCrsr!=0) ){ assert( pC->isTable==0 ); rc = ExpandBlob(pIn2); if( rc==SQLITE_OK ){ nKey = pIn2->n; zKey = pIn2->z; | > > | | | | > | 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 | pCrsr = pC->pCursor; if( ALWAYS(pCrsr!=0) ){ assert( pC->isTable==0 ); rc = ExpandBlob(pIn2); if( rc==SQLITE_OK ){ nKey = pIn2->n; zKey = pIn2->z; rc = sqlite3VdbeSorterWrite(db, pC, nKey); if( rc==SQLITE_OK ){ rc = sqlite3BtreeInsert(pCrsr, zKey, nKey, "", 0, 0, pOp->p3, ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) ); assert( pC->deferredMoveto==0 ); } pC->cacheStatus = CACHE_STALE; } } break; } /* Opcode: IdxDelete P1 P2 P3 * * |
︙ | ︙ |
Changes to src/vdbeInt.h.
︙ | ︙ | |||
26 27 28 29 30 31 32 33 34 35 36 37 38 39 | typedef struct VdbeOp Op; /* ** Boolean values */ typedef unsigned char Bool; /* ** A cursor is a pointer into a single BTree within a database file. ** The cursor can seek to a BTree entry with a particular key, or ** loop over all entries of the Btree. You can also insert new BTree ** entries or retrieve the key or data from the entry that the cursor ** is currently pointing to. ** | > > > | 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 | typedef struct VdbeOp Op; /* ** Boolean values */ typedef unsigned char Bool; /* Opaque type used by code in vdbesort.c */ typedef struct VdbeSorter VdbeSorter; /* ** A cursor is a pointer into a single BTree within a database file. ** The cursor can seek to a BTree entry with a particular key, or ** loop over all entries of the Btree. You can also insert new BTree ** entries or retrieve the key or data from the entry that the cursor ** is currently pointing to. ** |
︙ | ︙ | |||
57 58 59 60 61 62 63 64 65 66 67 68 69 70 | Bool isIndex; /* True if an index containing keys only - no data */ Bool isOrdered; /* True if the underlying table is BTREE_UNORDERED */ sqlite3_vtab_cursor *pVtabCursor; /* The cursor for a virtual table */ const sqlite3_module *pModule; /* Module for cursor pVtabCursor */ i64 seqCount; /* Sequence counter */ i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ i64 lastRowid; /* Last rowid from a Next or NextIdx operation */ /* Result of last sqlite3BtreeMoveto() done by an OP_NotExists or ** OP_IsUnique opcode on this cursor. */ int seekResult; /* Cached information about the header for the data record that the ** cursor is currently pointing to. Only valid if cacheStatus matches | > | 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 | Bool isIndex; /* True if an index containing keys only - no data */ Bool isOrdered; /* True if the underlying table is BTREE_UNORDERED */ sqlite3_vtab_cursor *pVtabCursor; /* The cursor for a virtual table */ const sqlite3_module *pModule; /* Module for cursor pVtabCursor */ i64 seqCount; /* Sequence counter */ i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ i64 lastRowid; /* Last rowid from a Next or NextIdx operation */ VdbeSorter *pSorter; /* Sorter object for OP_OpenSorter cursors */ /* Result of last sqlite3BtreeMoveto() done by an OP_NotExists or ** OP_IsUnique opcode on this cursor. */ int seekResult; /* Cached information about the header for the data record that the ** cursor is currently pointing to. Only valid if cacheStatus matches |
︙ | ︙ | |||
383 384 385 386 387 388 389 390 391 392 393 394 395 396 | int sqlite3VdbeMemFinalize(Mem*, FuncDef*); const char *sqlite3OpcodeName(int); int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve); int sqlite3VdbeCloseStatement(Vdbe *, int); void sqlite3VdbeFrameDelete(VdbeFrame*); int sqlite3VdbeFrameRestore(VdbeFrame *); void sqlite3VdbeMemStoreType(Mem *pMem); #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 void sqlite3VdbeEnter(Vdbe*); void sqlite3VdbeLeave(Vdbe*); #else # define sqlite3VdbeEnter(X) # define sqlite3VdbeLeave(X) | > > > > > > > > > > > > > > > > | 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 | int sqlite3VdbeMemFinalize(Mem*, FuncDef*); const char *sqlite3OpcodeName(int); int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve); int sqlite3VdbeCloseStatement(Vdbe *, int); void sqlite3VdbeFrameDelete(VdbeFrame*); int sqlite3VdbeFrameRestore(VdbeFrame *); void sqlite3VdbeMemStoreType(Mem *pMem); #ifdef SQLITE_OMIT_MERGE_SORT # define sqlite3VdbeSorterInit(Y,Z) SQLITE_OK # define sqlite3VdbeSorterWrite(X,Y,Z) SQLITE_OK # define sqlite3VdbeSorterClose(Y,Z) # define sqlite3VdbeSorterRowkey(Y,Z) SQLITE_OK # define sqlite3VdbeSorterRewind(X,Y,Z) SQLITE_OK # define sqlite3VdbeSorterNext(X,Y,Z) SQLITE_OK #else int sqlite3VdbeSorterInit(sqlite3 *, VdbeCursor *); int sqlite3VdbeSorterWrite(sqlite3 *, VdbeCursor *, int); void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *); int sqlite3VdbeSorterRowkey(VdbeCursor *, Mem *); int sqlite3VdbeSorterRewind(sqlite3 *, VdbeCursor *, int *); int sqlite3VdbeSorterNext(sqlite3 *, VdbeCursor *, int *); #endif #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 void sqlite3VdbeEnter(Vdbe*); void sqlite3VdbeLeave(Vdbe*); #else # define sqlite3VdbeEnter(X) # define sqlite3VdbeLeave(X) |
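Editor's summary of how the declarations above are driven by the vdbe.c changes earlier in this check-in. The sketch below is illustrative only: error handling, the b-tree insert of each key, and the surrounding opcode dispatch are omitted, and the variable names are placeholders.

rc = sqlite3VdbeSorterInit(db, pCsr);             /* OP_OpenSorter          */
while( /* more keys to add */ 0 ){
  rc = sqlite3VdbeSorterWrite(db, pCsr, nKey);    /* OP_IdxInsert, before   */
                                                  /* sqlite3BtreeInsert()   */
}
rc = sqlite3VdbeSorterRewind(db, pCsr, &res);     /* OP_Rewind              */
while( res==0 ){                                  /* res!=0 means EOF       */
  rc = sqlite3VdbeSorterRowkey(pCsr, pOut);       /* OP_RowKey              */
  rc = sqlite3VdbeSorterNext(db, pCsr, &res);     /* OP_Next                */
}
sqlite3VdbeSorterClose(db, pCsr);                 /* sqlite3VdbeFreeCursor  */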
︙ | ︙ |
Changes to src/vdbeaux.c.
︙ | ︙ | |||
1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 | ** Close a VDBE cursor and release all the resources that cursor ** happens to hold. */ void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx==0 ){ return; } if( pCx->pBt ){ sqlite3BtreeClose(pCx->pBt); /* The pCx->pCursor will be close automatically, if it exists, by ** the call above. */ }else if( pCx->pCursor ){ sqlite3BtreeCloseCursor(pCx->pCursor); } | > | 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 | ** Close a VDBE cursor and release all the resources that cursor ** happens to hold. */ void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx==0 ){ return; } sqlite3VdbeSorterClose(p->db, pCx); if( pCx->pBt ){ sqlite3BtreeClose(pCx->pBt); /* The pCx->pCursor will be close automatically, if it exists, by ** the call above. */ }else if( pCx->pCursor ){ sqlite3BtreeCloseCursor(pCx->pCursor); } |
︙ | ︙ |
Added src/vdbesort.c.
563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 | /* ** 2011 July 9 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code for the VdbeSorter object, used in concert with ** a VdbeCursor to sort large numbers of keys (as may be required, for ** example, by CREATE INDEX statements on tables too large to fit in main ** memory). */ #include "sqliteInt.h" #include "vdbeInt.h" #ifndef SQLITE_OMIT_MERGE_SORT typedef struct VdbeSorterIter VdbeSorterIter; /* ** NOTES ON DATA STRUCTURE USED FOR N-WAY MERGES: ** ** As keys are added to the sorter, they are written to disk in a series ** of sorted packed-memory-arrays (PMAs). The size of each PMA is roughly ** the same as the cache-size allowed for temporary databases. In order ** to allow the caller to extract keys from the sorter in sorted order, ** all PMAs currently stored on disk must be merged together. This comment ** describes the data structure used to do so. The structure supports ** merging any number of arrays in a single pass with no redundant comparison ** operations. ** ** The aIter[] array contains an iterator for each of the PMAs being merged. ** An aIter[] iterator either points to a valid key or else is at EOF. For ** the purposes of the paragraphs below, we assume that the array is actually ** N elements in size, where N is the smallest power of 2 greater to or equal ** to the number of iterators being merged. The extra aIter[] elements are ** treated as if they are empty (always at EOF). ** ** The aTree[] array is also N elements in size. The value of N is stored in ** the VdbeSorter.nTree variable. ** ** The final (N/2) elements of aTree[] contain the results of comparing ** pairs of iterator keys together. Element i contains the result of ** comparing aIter[2*i-N] and aIter[2*i-N+1]. Whichever key is smaller, the ** aTree element is set to the index of it. ** ** For the purposes of this comparison, EOF is considered greater than any ** other key value. If the keys are equal (only possible with two EOF ** values), it doesn't matter which index is stored. ** ** The (N/4) elements of aTree[] that preceed the final (N/2) described ** above contains the index of the smallest of each block of 4 iterators. ** And so on. So that aTree[1] contains the index of the iterator that ** currently points to the smallest key value. aTree[0] is unused. ** ** Example: ** ** aIter[0] -> Banana ** aIter[1] -> Feijoa ** aIter[2] -> Elderberry ** aIter[3] -> Currant ** aIter[4] -> Grapefruit ** aIter[5] -> Apple ** aIter[6] -> Durian ** aIter[7] -> EOF ** ** aTree[] = { X, 5 0, 5 0, 3, 5, 6 } ** ** The current element is "Apple" (the value of the key indicated by ** iterator 5). 
When the Next() operation is invoked, iterator 5 will ** be advanced to the next key in its segment. Say the next key is ** "Eggplant": ** ** aIter[5] -> Eggplant ** ** The contents of aTree[] are updated first by comparing the new iterator ** 5 key to the current key of iterator 4 (still "Grapefruit"). The iterator ** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree. ** The value of iterator 6 - "Durian" - is now smaller than that of iterator ** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (Banana<Durian), ** so the value written into element 1 of the array is 0. As follows: ** ** aTree[] = { X, 0 0, 6 0, 3, 5, 6 } ** ** In other words, each time we advance to the next sorter element, log2(N) ** key comparison operations are required, where N is the number of segments ** being merged (rounded up to the next power of 2). */ struct VdbeSorter { int nWorking; /* Start a new b-tree after this many pages */ int nBtree; /* Current size of b-tree contents as PMA */ int nTree; /* Used size of aTree/aIter (power of 2) */ VdbeSorterIter *aIter; /* Array of iterators to merge */ int *aTree; /* Current state of incremental merge */ i64 iWriteOff; /* Current write offset within file pTemp1 */ i64 iReadOff; /* Current read offset within file pTemp1 */ sqlite3_file *pTemp1; /* PMA file 1 */ int nPMA; /* Number of PMAs stored in pTemp1 */ }; /* ** The following type is an iterator for a PMA. It caches the current key in ** variables nKey/aKey. If the iterator is at EOF, pFile==0. */ struct VdbeSorterIter { i64 iReadOff; /* Current read offset */ i64 iEof; /* 1 byte past EOF for this iterator */ sqlite3_file *pFile; /* File iterator is reading from */ int nAlloc; /* Bytes of space at aAlloc */ u8 *aAlloc; /* Allocated space */ int nKey; /* Number of bytes in key */ u8 *aKey; /* Pointer to current key */ }; /* Minimum allowable value for the VdbeSorter.nWorking variable */ #define SORTER_MIN_WORKING 10 /* Maximum number of segments to merge in a single pass. */ #define SORTER_MAX_MERGE_COUNT 16 /* ** Free all memory belonging to the VdbeSorterIter object passed as the second ** argument. All structure fields are set to zero before returning. */ static void vdbeSorterIterZero(sqlite3 *db, VdbeSorterIter *pIter){ sqlite3DbFree(db, pIter->aAlloc); memset(pIter, 0, sizeof(VdbeSorterIter)); } /* ** Advance iterator pIter to the next key in its PMA. Return SQLITE_OK if ** no error occurs, or an SQLite error code if one does. 
*/ static int vdbeSorterIterNext( sqlite3 *db, /* Database handle (for sqlite3DbMalloc() ) */ VdbeSorterIter *pIter /* Iterator to advance */ ){ int rc; /* Return Code */ int nRead; /* Number of bytes read */ int nRec; /* Size of record in bytes */ int iOff; /* Size of serialized size varint in bytes */ nRead = pIter->iEof - pIter->iReadOff; if( nRead>5 ) nRead = 5; if( nRead<=0 ){ /* This is an EOF condition */ vdbeSorterIterZero(db, pIter); return SQLITE_OK; } rc = sqlite3OsRead(pIter->pFile, pIter->aAlloc, nRead, pIter->iReadOff); iOff = getVarint32(pIter->aAlloc, nRec); if( rc==SQLITE_OK && (iOff+nRec)>nRead ){ int nRead2; /* Number of extra bytes to read */ if( (iOff+nRec)>pIter->nAlloc ){ int nNew = pIter->nAlloc*2; while( (iOff+nRec)>nNew ) nNew = nNew*2; pIter->aAlloc = sqlite3DbReallocOrFree(db, pIter->aAlloc, nNew); if( !pIter->aAlloc ) return SQLITE_NOMEM; pIter->nAlloc = nNew; } nRead2 = iOff + nRec - nRead; rc = sqlite3OsRead( pIter->pFile, &pIter->aAlloc[nRead], nRead2, pIter->iReadOff+nRead ); } assert( nRec>0 || rc!=SQLITE_OK ); pIter->iReadOff += iOff+nRec; pIter->nKey = nRec; pIter->aKey = &pIter->aAlloc[iOff]; return rc; } /* ** Write a single varint, value iVal, to file-descriptor pFile. Return ** SQLITE_OK if successful, or an SQLite error code if some error occurs. ** ** The value of *piOffset when this function is called is used as the byte ** offset in file pFile to write to. Before returning, *piOffset is ** incremented by the number of bytes written. */ static int vdbeSorterWriteVarint( sqlite3_file *pFile, /* File to write to */ i64 iVal, /* Value to write as a varint */ i64 *piOffset /* IN/OUT: Write offset in file pFile */ ){ u8 aVarint[9]; /* Buffer large enough for a varint */ int nVarint; /* Number of used bytes in varint */ int rc; /* Result of write() call */ nVarint = sqlite3PutVarint(aVarint, iVal); rc = sqlite3OsWrite(pFile, aVarint, nVarint, *piOffset); *piOffset += nVarint; return rc; } /* ** Read a single varint from file-descriptor pFile. Return SQLITE_OK if ** successful, or an SQLite error code if some error occurs. ** ** The value of *piOffset when this function is called is used as the ** byte offset in file pFile from whence to read the varint. If successful ** (i.e. if no IO error occurs), then *piOffset is set to the offset of ** the first byte past the end of the varint before returning. *piVal is ** set to the integer value read. If an error occurs, the final values of ** both *piOffset and *piVal are undefined. */ static int vdbeSorterReadVarint( sqlite3_file *pFile, /* File to read from */ i64 iEof, /* Total number of bytes in file */ i64 *piOffset, /* IN/OUT: Read offset in pFile */ i64 *piVal /* OUT: Value read from file */ ){ u8 aVarint[9]; /* Buffer large enough for a varint */ i64 iOff = *piOffset; /* Offset in file to read from */ int nRead = 9; /* Number of bytes to read from file */ int rc; /* Return code */ assert( iEof>iOff ); if( (iEof-iOff)<nRead ){ nRead = iEof-iOff; } rc = sqlite3OsRead(pFile, aVarint, nRead, iOff); if( rc==SQLITE_OK ){ *piOffset += getVarint(aVarint, (u64 *)piVal); } return rc; } /* ** Initialize iterator pIter to scan through the PMA stored in file pFile ** starting at offset iStart and ending at offset iEof-1. This function ** leaves the iterator pointing to the first key in the PMA (or EOF if the ** PMA is empty). 
*/ static int vdbeSorterIterInit( sqlite3 *db, /* Database handle */ VdbeSorter *pSorter, /* Sorter object */ i64 iStart, /* Start offset in pFile */ VdbeSorterIter *pIter, /* Iterator to populate */ i64 *pnByte /* IN/OUT: Increment this value by PMA size */ ){ int rc; assert( pSorter->iWriteOff>iStart ); assert( pIter->aAlloc==0 ); pIter->pFile = pSorter->pTemp1; pIter->iReadOff = iStart; pIter->nAlloc = 128; pIter->aAlloc = (u8 *)sqlite3DbMallocRaw(db, pIter->nAlloc); if( !pIter->aAlloc ){ rc = SQLITE_NOMEM; }else{ i64 iEof = pSorter->iWriteOff; /* EOF of file pSorter->pTemp1 */ i64 nByte; /* Total size of PMA in bytes */ rc = vdbeSorterReadVarint(pSorter->pTemp1, iEof, &pIter->iReadOff, &nByte); *pnByte += nByte; pIter->iEof = pIter->iReadOff + nByte; } if( rc==SQLITE_OK ){ rc = vdbeSorterIterNext(db, pIter); } return rc; } /* ** This function is called to compare two iterator keys when merging ** multiple b-tree segments. Parameter iOut is the index of the aTree[] ** value to recalculate. */ static int vdbeSorterDoCompare(VdbeCursor *pCsr, int iOut){ VdbeSorter *pSorter = pCsr->pSorter; int i1; int i2; int iRes; VdbeSorterIter *p1; VdbeSorterIter *p2; assert( iOut<pSorter->nTree && iOut>0 ); if( iOut>=(pSorter->nTree/2) ){ i1 = (iOut - pSorter->nTree/2) * 2; i2 = i1 + 1; }else{ i1 = pSorter->aTree[iOut*2]; i2 = pSorter->aTree[iOut*2+1]; } p1 = &pSorter->aIter[i1]; p2 = &pSorter->aIter[i2]; if( p1->pFile==0 ){ iRes = i2; }else if( p2->pFile==0 ){ iRes = i1; }else{ char aSpace[150]; UnpackedRecord *r1; r1 = sqlite3VdbeRecordUnpack( pCsr->pKeyInfo, p1->nKey, p1->aKey, aSpace, sizeof(aSpace) ); if( r1==0 ) return SQLITE_NOMEM; if( sqlite3VdbeRecordCompare(p2->nKey, p2->aKey, r1)>=0 ){ iRes = i1; }else{ iRes = i2; } sqlite3VdbeDeleteUnpackedRecord(r1); } pSorter->aTree[iOut] = iRes; return SQLITE_OK; } /* ** Initialize the temporary index cursor just opened as a sorter cursor. */ int sqlite3VdbeSorterInit(sqlite3 *db, VdbeCursor *pCsr){ assert( pCsr->pKeyInfo && pCsr->pBt ); pCsr->pSorter = sqlite3DbMallocZero(db, sizeof(VdbeSorter)); return (pCsr->pSorter ? SQLITE_OK : SQLITE_NOMEM); } /* ** Free any cursor components allocated by sqlite3VdbeSorterXXX routines. */ void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){ VdbeSorter *pSorter = pCsr->pSorter; if( pSorter ){ if( pSorter->aIter ){ int i; for(i=0; i<pSorter->nTree; i++){ vdbeSorterIterZero(db, &pSorter->aIter[i]); } sqlite3DbFree(db, pSorter->aIter); } if( pSorter->pTemp1 ){ sqlite3OsCloseFree(pSorter->pTemp1); } sqlite3DbFree(db, pSorter); pCsr->pSorter = 0; } } /* ** Allocate space for a file-handle and open a temporary file. If successful, ** set *ppFile to point to the malloc'd file-handle and return SQLITE_OK. ** Otherwise, set *ppFile to 0 and return an SQLite error code. */ static int vdbeSorterOpenTempFile(sqlite3 *db, sqlite3_file **ppFile){ int dummy; return sqlite3OsOpenMalloc(db->pVfs, 0, ppFile, SQLITE_OPEN_TEMP_JOURNAL | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &dummy ); } /* ** Write the current contents of the b-tree to a PMA. Return SQLITE_OK ** if successful, or an SQLite error code otherwise. ** ** The format of a PMA is: ** ** * A varint. This varint contains the total number of bytes of content ** in the PMA (not including the varint itself). ** ** * One or more records packed end-to-end in order of ascending keys. ** Each record consists of a varint followed by a blob of data (the ** key). The varint is the number of bytes in the blob of data. 
*/ static int vdbeSorterBtreeToPMA(sqlite3 *db, VdbeCursor *pCsr){ int rc = SQLITE_OK; /* Return code */ VdbeSorter *pSorter = pCsr->pSorter; int res = 0; rc = sqlite3BtreeFirst(pCsr->pCursor, &res); if( rc!=SQLITE_OK || res ) return rc; assert( pSorter->nBtree>0 ); /* If the first temporary PMA file has not been opened, open it now. */ if( pSorter->pTemp1==0 ){ rc = vdbeSorterOpenTempFile(db, &pSorter->pTemp1); assert( rc!=SQLITE_OK || pSorter->pTemp1 ); assert( pSorter->iWriteOff==0 ); assert( pSorter->nPMA==0 ); } if( rc==SQLITE_OK ){ i64 iWriteOff = pSorter->iWriteOff; void *aMalloc = 0; /* Array used to hold a single record */ int nMalloc = 0; /* Allocated size of aMalloc[] in bytes */ pSorter->nPMA++; for( rc = vdbeSorterWriteVarint(pSorter->pTemp1, pSorter->nBtree, &iWriteOff); rc==SQLITE_OK && res==0; rc = sqlite3BtreeNext(pCsr->pCursor, &res) ){ i64 nKey; /* Size of this key in bytes */ /* Write the size of the record in bytes to the output file */ (void)sqlite3BtreeKeySize(pCsr->pCursor, &nKey); rc = vdbeSorterWriteVarint(pSorter->pTemp1, nKey, &iWriteOff); /* Make sure the aMalloc[] buffer is large enough for the record */ if( rc==SQLITE_OK && nKey>nMalloc ){ aMalloc = sqlite3DbReallocOrFree(db, aMalloc, nKey); if( !aMalloc ){ rc = SQLITE_NOMEM; }else{ nMalloc = nKey; } } /* Write the record itself to the output file */ if( rc==SQLITE_OK ){ rc = sqlite3BtreeKey(pCsr->pCursor, 0, nKey, aMalloc); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pSorter->pTemp1, aMalloc, nKey, iWriteOff); iWriteOff += nKey; } } if( rc!=SQLITE_OK ) break; } /* This assert verifies that unless an error has occurred, the size of ** the PMA on disk is the same as the expected size stored in ** pSorter->nBtree. */ assert( rc!=SQLITE_OK || pSorter->nBtree==( iWriteOff-pSorter->iWriteOff-sqlite3VarintLen(pSorter->nBtree) )); pSorter->iWriteOff = iWriteOff; sqlite3DbFree(db, aMalloc); } pSorter->nBtree = 0; return rc; } /* ** This function is called on a sorter cursor by the VDBE before each row ** is inserted into VdbeCursor.pCsr. Argument nKey is the size of the key, in ** bytes, about to be inserted. ** ** If it is determined that the temporary b-tree accessed via VdbeCursor.pCsr ** is large enough, its contents are written to a sorted PMA on disk and the ** tree emptied. This prevents the b-tree (which must be small enough to ** fit entirely in the cache in order to support efficient inserts) from ** growing too large. ** ** An SQLite error code is returned if an error occurs. Otherwise, SQLITE_OK. */ int sqlite3VdbeSorterWrite(sqlite3 *db, VdbeCursor *pCsr, int nKey){ int rc = SQLITE_OK; /* Return code */ VdbeSorter *pSorter = pCsr->pSorter; if( pSorter ){ Pager *pPager = sqlite3BtreePager(pCsr->pBt); int nPage; /* Current size of temporary file in pages */ /* Determine how many pages the temporary b-tree has grown to */ sqlite3PagerPagecount(pPager, &nPage); /* If pSorter->nWorking is still zero, but the temporary file has been ** created in the file-system, then the most recent insert into the ** current b-tree segment probably caused the cache to overflow (it is ** also possible that sqlite3_release_memory() was called). So set the ** size of the working set to a little less than the current size of the ** file in pages. 
    */
    if( pSorter->nWorking==0 && sqlite3PagerFile(pPager)->pMethods ){
      pSorter->nWorking = nPage-5;
      if( pSorter->nWorking<SORTER_MIN_WORKING ){
        pSorter->nWorking = SORTER_MIN_WORKING;
      }
    }

    /* If the number of pages used by the current b-tree segment is greater
    ** than the size of the working set (VdbeSorter.nWorking), start a new
    ** segment b-tree. */
    if( pSorter->nWorking && nPage>=pSorter->nWorking ){
      BtCursor *p = pCsr->pCursor;/* Cursor structure to close and reopen */
      int iRoot;                  /* Root page of new tree */

      /* Copy the current contents of the b-tree into a PMA in sorted order.
      ** Close the currently open b-tree cursor. */
      rc = vdbeSorterBtreeToPMA(db, pCsr);
      sqlite3BtreeCloseCursor(p);

      if( rc==SQLITE_OK ){
        rc = sqlite3BtreeDropTable(pCsr->pBt, 2, 0);
#ifdef SQLITE_DEBUG
        sqlite3PagerPagecount(pPager, &nPage);
        assert( rc!=SQLITE_OK || nPage==1 );
#endif
      }

      if( rc==SQLITE_OK ){
        rc = sqlite3BtreeCreateTable(pCsr->pBt, &iRoot, BTREE_BLOBKEY);
      }
      if( rc==SQLITE_OK ){
        assert( iRoot==2 );
        rc = sqlite3BtreeCursor(pCsr->pBt, iRoot, 1, pCsr->pKeyInfo, p);
      }
    }

    pSorter->nBtree += sqlite3VarintLen(nKey) + nKey;
  }
  return rc;
}

/*
** Helper function for sqlite3VdbeSorterRewind().
*/
static int vdbeSorterInitMerge(
  sqlite3 *db,                    /* Database handle */
  VdbeCursor *pCsr,               /* Cursor handle for this sorter */
  i64 *pnByte                     /* Sum of bytes in all opened PMAs */
){
  VdbeSorter *pSorter = pCsr->pSorter;
  int rc = SQLITE_OK;             /* Return code */
  int i;                          /* Used to iterator through aIter[] */
  i64 nByte = 0;                  /* Total bytes in all opened PMAs */

  /* Initialize the iterators. */
  for(i=0; rc==SQLITE_OK && i<SORTER_MAX_MERGE_COUNT; i++){
    VdbeSorterIter *pIter = &pSorter->aIter[i];
    rc = vdbeSorterIterInit(db, pSorter, pSorter->iReadOff, pIter, &nByte);
    pSorter->iReadOff = pIter->iEof;
    assert( pSorter->iReadOff<=pSorter->iWriteOff || rc!=SQLITE_OK );
    if( pSorter->iReadOff>=pSorter->iWriteOff ) break;
  }

  /* Initialize the aTree[] array. */
  for(i=pSorter->nTree-1; rc==SQLITE_OK && i>0; i--){
    rc = vdbeSorterDoCompare(pCsr, i);
  }

  *pnByte = nByte;
  return rc;
}

/*
** Once the sorter has been populated, this function is called to prepare
** for iterating through its contents in sorted order.
*/
int sqlite3VdbeSorterRewind(sqlite3 *db, VdbeCursor *pCsr, int *pbEof){
  VdbeSorter *pSorter = pCsr->pSorter;
  int rc;                         /* Return code */
  sqlite3_file *pTemp2 = 0;       /* Second temp file to use */
  i64 iWrite2 = 0;                /* Write offset for pTemp2 */
  int nIter;                      /* Number of iterators used */
  int nByte;                      /* Bytes of space required for aIter/aTree */
  int N = 2;                      /* Power of 2 >= nIter */

  assert( pSorter );

  /* Write the current b-tree to a PMA. Close the b-tree cursor. */
  rc = vdbeSorterBtreeToPMA(db, pCsr);
  sqlite3BtreeCloseCursor(pCsr->pCursor);
  if( rc!=SQLITE_OK ) return rc;
  if( pSorter->nPMA==0 ){
    *pbEof = 1;
    return SQLITE_OK;
  }

  /* Allocate space for aIter[] and aTree[]. */
  nIter = pSorter->nPMA;
  if( nIter>SORTER_MAX_MERGE_COUNT ) nIter = SORTER_MAX_MERGE_COUNT;
  assert( nIter>0 );
  while( N<nIter ) N += N;
  nByte = N * (sizeof(int) + sizeof(VdbeSorterIter));
  pSorter->aIter = (VdbeSorterIter *)sqlite3DbMallocZero(db, nByte);
  if( !pSorter->aIter ) return SQLITE_NOMEM;
  pSorter->aTree = (int *)&pSorter->aIter[N];
  pSorter->nTree = N;

  do {
    int iNew;                     /* Index of new, merged, PMA */

    for(iNew=0;
        rc==SQLITE_OK && iNew*SORTER_MAX_MERGE_COUNT<pSorter->nPMA;
        iNew++
    ){
      i64 nWrite;                 /* Number of bytes in new PMA */

      /* If there are SORTER_MAX_MERGE_COUNT or less PMAs in file pTemp1,
      ** initialize an iterator for each of them and break out of the loop.
      ** These iterators will be incrementally merged as the VDBE layer calls
      ** sqlite3VdbeSorterNext().
      **
      ** Otherwise, if pTemp1 contains more than SORTER_MAX_MERGE_COUNT PMAs,
      ** initialize interators for SORTER_MAX_MERGE_COUNT of them. These PMAs
      ** are merged into a single PMA that is written to file pTemp2.
      */
      rc = vdbeSorterInitMerge(db, pCsr, &nWrite);
      assert( rc!=SQLITE_OK || pSorter->aIter[ pSorter->aTree[1] ].pFile );
      if( rc!=SQLITE_OK || pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){
        break;
      }

      /* Open the second temp file, if it is not already open. */
      if( pTemp2==0 ){
        assert( iWrite2==0 );
        rc = vdbeSorterOpenTempFile(db, &pTemp2);
      }

      if( rc==SQLITE_OK ){
        rc = vdbeSorterWriteVarint(pTemp2, nWrite, &iWrite2);
      }

      if( rc==SQLITE_OK ){
        int bEof = 0;
        while( rc==SQLITE_OK && bEof==0 ){
          int nByte;
          VdbeSorterIter *pIter = &pSorter->aIter[ pSorter->aTree[1] ];
          assert( pIter->pFile );

          nByte = pIter->nKey + sqlite3VarintLen(pIter->nKey);
          rc = sqlite3OsWrite(pTemp2, pIter->aAlloc, nByte, iWrite2);
          iWrite2 += nByte;
          if( rc==SQLITE_OK ){
            rc = sqlite3VdbeSorterNext(db, pCsr, &bEof);
          }
        }
      }
    }

    if( pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){
      break;
    }else{
      sqlite3_file *pTmp = pSorter->pTemp1;
      pSorter->nPMA = iNew;
      pSorter->pTemp1 = pTemp2;
      pTemp2 = pTmp;
      pSorter->iWriteOff = iWrite2;
      pSorter->iReadOff = 0;
      iWrite2 = 0;
    }
  }while( rc==SQLITE_OK );

  if( pTemp2 ){
    sqlite3OsCloseFree(pTemp2);
  }

  *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0);
  return rc;
}

/*
** Advance to the next element in the sorter.
*/
int sqlite3VdbeSorterNext(sqlite3 *db, VdbeCursor *pCsr, int *pbEof){
  VdbeSorter *pSorter = pCsr->pSorter;
  int iPrev = pSorter->aTree[1];  /* Index of iterator to advance */
  int i;                          /* Index of aTree[] to recalculate */
  int rc;                         /* Return code */

  rc = vdbeSorterIterNext(db, &pSorter->aIter[iPrev]);
  for(i=(pSorter->nTree+iPrev)/2; rc==SQLITE_OK && i>0; i=i/2){
    rc = vdbeSorterDoCompare(pCsr, i);
  }

  *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0);
  return rc;
}

/*
** Copy the current sorter key into the memory cell pOut.
*/
int sqlite3VdbeSorterRowkey(VdbeCursor *pCsr, Mem *pOut){
  VdbeSorter *pSorter = pCsr->pSorter;
  VdbeSorterIter *pIter;

  pIter = &pSorter->aIter[ pSorter->aTree[1] ];

  /* Coverage testing note: As things are currently, this call will always
  ** succeed. This is because the memory cell passed by the VDBE layer
  ** happens to be the same one as was used to assemble the keys before they
  ** were passed to the sorter - meaning it is always large enough for the
  ** largest key. But this could change very easily, so we leave the call
  ** to sqlite3VdbeMemGrow() in. */
  if( NEVER(sqlite3VdbeMemGrow(pOut, pIter->nKey, 0)) ){
    return SQLITE_NOMEM;
  }
  pOut->n = pIter->nKey;
  MemSetTypeFlag(pOut, MEM_Blob);
  memcpy(pOut->z, pIter->aKey, pIter->nKey);

  return SQLITE_OK;
}

#endif /* #ifndef SQLITE_OMIT_MERGE_SORT */
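Editor's note: the PMA layout described in the new sorter code above (a leading varint giving the total content size, followed by varint-prefixed keys packed end-to-end) can be illustrated with the small stand-alone sketch below. The helper names varintLen() and pmaSizeOnDisk() are hypothetical and are not part of this check-in; the varint length computation assumes SQLite's usual 7-bits-per-byte, maximum 9-byte encoding, and the function simply mirrors the size accounting that the assert() in vdbeSorterBtreeToPMA() performs.

  /* Sketch only: length in bytes of a varint, assuming the usual SQLite
  ** encoding of 7 bits per byte with a 9-byte maximum. */
  static int varintLen(unsigned long long v){
    int n = 1;
    while( v>=0x80 && n<9 ){ v >>= 7; n++; }
    return n;
  }

  /* Sketch only: size in bytes of a PMA holding nRec keys whose sizes are
  ** given in aKeySize[]. The content is one varint(key size) plus the key
  ** blob per record; the PMA itself is one varint(total content size)
  ** followed by that content. */
  static long long pmaSizeOnDisk(int nRec, const int *aKeySize){
    long long nContent = 0;
    int i;
    for(i=0; i<nRec; i++){
      nContent += varintLen(aKeySize[i]) + aKeySize[i];
    }
    return varintLen(nContent) + nContent;
  }

For example, two keys of 3 and 200 bytes give a content size of (1+3)+(2+200)=206 bytes, and the PMA occupies 2+206=208 bytes on disk, which is exactly the quantity tracked in pSorter->nBtree and checked against iWriteOff after the PMA is written.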
Changes to test/8_3_names.test.
︙ | ︙ | |||
63 64 65 66 67 68 69 | } file exists test.db-journal } 0 do_test 8_3_names-2.1 { file exists test.nal } 1 forcedelete test2.db test2.nal test2.db-journal | | | | 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 | } file exists test.db-journal } 0 do_test 8_3_names-2.1 { file exists test.nal } 1 forcedelete test2.db test2.nal test2.db-journal copy_file test.db test2.db copy_file test.nal test2.nal do_test 8_3_names-2.2 { db eval { COMMIT; SELECT length(x) FROM t1 } } 15000 do_test 8_3_names-2.3 { |
︙ | ︙ | |||
97 98 99 100 101 102 103 | } file exists test.db-journal } 1 do_test 8_3_names-3.1 { file exists test.nal } 0 forcedelete test2.db test2.nal test2.db-journal | | | | 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 | } file exists test.db-journal } 1 do_test 8_3_names-3.1 { file exists test.nal } 0 forcedelete test2.db test2.nal test2.db-journal copy_file test.db test2.db copy_file test.db-journal test2.db-journal do_test 8_3_names-3.2 { db eval { COMMIT; SELECT length(x) FROM t1 } } 15000 do_test 8_3_names-3.3 { |
︙ | ︙ |
Changes to test/alter.test.
︙ | ︙ | |||
217 218 219 220 221 222 223 | index {sqlite_autoindex_<t2>_2} <t2> \ ] # Check that ALTER TABLE works on attached databases. # ifcapable attach { do_test alter-1.8.1 { | | | | 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 | index {sqlite_autoindex_<t2>_2} <t2> \ ] # Check that ALTER TABLE works on attached databases. # ifcapable attach { do_test alter-1.8.1 { forcedelete test2.db forcedelete test2.db-journal execsql { ATTACH 'test2.db' AS aux; } } {} do_test alter-1.8.2 { execsql { CREATE TABLE t4(a PRIMARY KEY, b, c); |
︙ | ︙ | |||
408 409 410 411 412 413 414 | INSERT INTO t9 VALUES(4, 5, 6); } set ::TRIGGER } {trig3 4 5 6} # Make sure "ON" cannot be used as a database, table or column name without # quoting. Otherwise the sqlite_alter_trigger() function might not work. | | | | 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 | INSERT INTO t9 VALUES(4, 5, 6); } set ::TRIGGER } {trig3 4 5 6} # Make sure "ON" cannot be used as a database, table or column name without # quoting. Otherwise the sqlite_alter_trigger() function might not work. forcedelete test3.db forcedelete test3.db-journal ifcapable attach { do_test alter-3.2.1 { catchsql { ATTACH 'test3.db' AS ON; } } {1 {near "ON": syntax error}} do_test alter-3.2.2 { |
︙ | ︙ |
Changes to test/alter2.test.
︙ | ︙ | |||
311 312 313 314 315 316 317 | db close set_file_format 2 sqlite3 db test.db get_file_format } {2} ifcapable attach { do_test alter2-6.2 { | | | | 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 | db close set_file_format 2 sqlite3 db test.db get_file_format } {2} ifcapable attach { do_test alter2-6.2 { forcedelete test2.db-journal forcedelete test2.db execsql { ATTACH 'test2.db' AS aux; CREATE TABLE aux.t1(a, b); } get_file_format test2.db } $default_file_format } |
︙ | ︙ |
Changes to test/alter3.test.
︙ | ︙ | |||
192 193 194 195 196 197 198 | PRAGMA schema_version; } } {11} } do_test alter3-4.1 { db close | | | 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 | PRAGMA schema_version; } } {11} } do_test alter3-4.1 { db close forcedelete test.db set ::DB [sqlite3 db test.db] execsql { PRAGMA legacy_file_format=ON; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 100); INSERT INTO t1 VALUES(2, 300); SELECT * FROM t1; |
︙ | ︙ | |||
233 234 235 236 237 238 239 | execsql { DROP TABLE t1; } } {} ifcapable attach { do_test alter3-5.1 { | | | | 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 | execsql { DROP TABLE t1; } } {} ifcapable attach { do_test alter3-5.1 { forcedelete test2.db forcedelete test2.db-journal execsql { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 'one'); INSERT INTO t1 VALUES(2, 'two'); ATTACH 'test2.db' AS aux; CREATE TABLE aux.t1 AS SELECT * FROM t1; PRAGMA aux.schema_version = 30; |
︙ | ︙ |
Changes to test/alter4.test.
︙ | ︙ | |||
174 175 176 177 178 179 180 | PRAGMA schema_version; } } {10} } do_test alter4-4.1 { db close | | | 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 | PRAGMA schema_version; } } {10} } do_test alter4-4.1 { db close forcedelete test.db set ::DB [sqlite3 db test.db] execsql { CREATE TEMP TABLE t1(a, b); INSERT INTO t1 VALUES(1, 100); INSERT INTO t1 VALUES(2, 300); SELECT * FROM t1; } |
︙ | ︙ | |||
209 210 211 212 213 214 215 | execsql { DROP TABLE t1; } } {} ifcapable attach { do_test alter4-5.1 { | | | | 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 | execsql { DROP TABLE t1; } } {} ifcapable attach { do_test alter4-5.1 { forcedelete test2.db forcedelete test2.db-journal execsql { CREATE TEMP TABLE t1(a, b); INSERT INTO t1 VALUES(1, 'one'); INSERT INTO t1 VALUES(2, 'two'); ATTACH 'test2.db' AS aux; CREATE TABLE aux.t1 AS SELECT * FROM t1; PRAGMA aux.schema_version = 30; |
︙ | ︙ |
Changes to test/analyze.test.
︙ | ︙ | |||
282 283 284 285 286 287 288 289 290 291 292 293 294 295 | } db close sqlite3 db test.db execsql { SELECT * FROM t4 WHERE x=1234; } } {} # This test corrupts the database file so it must be the last test # in the series. # do_test analyze-99.1 { execsql { PRAGMA writable_schema=on; | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 | } db close sqlite3 db test.db execsql { SELECT * FROM t4 WHERE x=1234; } } {} # Verify that DROP TABLE and DROP INDEX remove entries from the # sqlite_stat1 and sqlite_stat2 tables. # do_test analyze-5.0 { execsql { DELETE FROM t3; DELETE FROM t4; INSERT INTO t3 VALUES(1,2,3,4); INSERT INTO t3 VALUES(5,6,7,8); INSERT INTO t3 SELECT a+8, b+8, c+8, d+8 FROM t3; INSERT INTO t3 SELECT a+16, b+16, c+16, d+16 FROM t3; INSERT INTO t3 SELECT a+32, b+32, c+32, d+32 FROM t3; INSERT INTO t3 SELECT a+64, b+64, c+64, d+64 FROM t3; INSERT INTO t4 SELECT a, b, c FROM t3; ANALYZE; SELECT DISTINCT idx FROM sqlite_stat1 ORDER BY 1; SELECT DISTINCT tbl FROM sqlite_stat1 ORDER BY 1; } } {t3i1 t3i2 t3i3 t4i1 t4i2 t3 t4} ifcapable stat2 { do_test analyze-5.1 { execsql { SELECT DISTINCT idx FROM sqlite_stat2 ORDER BY 1; SELECT DISTINCT tbl FROM sqlite_stat2 ORDER BY 1; } } {t3i1 t3i2 t3i3 t4i1 t4i2 t3 t4} } do_test analyze-5.2 { execsql { DROP INDEX t3i2; SELECT DISTINCT idx FROM sqlite_stat1 ORDER BY 1; SELECT DISTINCT tbl FROM sqlite_stat1 ORDER BY 1; } } {t3i1 t3i3 t4i1 t4i2 t3 t4} ifcapable stat2 { do_test analyze-5.3 { execsql { SELECT DISTINCT idx FROM sqlite_stat2 ORDER BY 1; SELECT DISTINCT tbl FROM sqlite_stat2 ORDER BY 1; } } {t3i1 t3i3 t4i1 t4i2 t3 t4} } do_test analyze-5.4 { execsql { DROP TABLE t3; SELECT DISTINCT idx FROM sqlite_stat1 ORDER BY 1; SELECT DISTINCT tbl FROM sqlite_stat1 ORDER BY 1; } } {t4i1 t4i2 t4} ifcapable stat2 { do_test analyze-5.5 { execsql { SELECT DISTINCT idx FROM sqlite_stat2 ORDER BY 1; SELECT DISTINCT tbl FROM sqlite_stat2 ORDER BY 1; } } {t4i1 t4i2 t4} } # This test corrupts the database file so it must be the last test # in the series. # do_test analyze-99.1 { execsql { PRAGMA writable_schema=on; |
︙ | ︙ |
Changes to test/async.test.
︙ | ︙ | |||
64 65 66 67 68 69 70 | foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] if {[lsearch -exact $ASYNC_INCLUDE $tail]<0} continue source $testfile # Make sure everything is flushed through. This is because [source]ing # the next test file will delete the database file on disk (using | | | 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 | foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] if {[lsearch -exact $ASYNC_INCLUDE $tail]<0} continue source $testfile # Make sure everything is flushed through. This is because [source]ing # the next test file will delete the database file on disk (using # [delete_file]). If the asynchronous backend still has the file # open, it will become confused. # flush_async_queue } # Flush the write-queue and disable asynchronous IO. This should ensure # all allocated memory is cleaned up. |
︙ | ︙ |
Changes to test/async2.test.
︙ | ︙ | |||
47 48 49 50 51 52 53 | db close foreach err [list ioerr malloc-transient malloc-persistent] { set ::go 10 for {set n 1} {$::go} {incr n} { set ::sqlite_io_error_pending 0 sqlite3_memdebug_fail -1 | | | 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 | db close foreach err [list ioerr malloc-transient malloc-persistent] { set ::go 10 for {set n 1} {$::go} {incr n} { set ::sqlite_io_error_pending 0 sqlite3_memdebug_fail -1 forcedelete test.db test.db-journal sqlite3 db test.db execsql $::setup_script db close sqlite3async_initialize "" 1 sqlite3 db test.db sqlite3_db_config_lookaside db 0 0 0 |
︙ | ︙ |
Changes to test/async3.test.
︙ | ︙ | |||
37 38 39 40 41 42 43 | chocolate/banana/./vanilla/file.db chocolate/banana/../banana/vanilla/file.db chocolate/banana/./vanilla/extra_bit/../file.db } do_test async3-1.0 { file mkdir [file join chocolate banana vanilla] | | | | 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 | chocolate/banana/./vanilla/file.db chocolate/banana/../banana/vanilla/file.db chocolate/banana/./vanilla/extra_bit/../file.db } do_test async3-1.0 { file mkdir [file join chocolate banana vanilla] forcedelete chocolate/banana/vanilla/file.db forcedelete chocolate/banana/vanilla/file.db-journal } {} do_test async3-1.1 { sqlite3 db chocolate/banana/vanilla/file.db execsql { CREATE TABLE abc(a, b, c); BEGIN; |
︙ | ︙ |
Changes to test/async5.test.
︙ | ︙ | |||
16 17 18 19 20 21 22 | if {[info commands sqlite3async_initialize] eq ""} { # The async logic is not built into this system finish_test return } db close | | | 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | if {[info commands sqlite3async_initialize] eq ""} { # The async logic is not built into this system finish_test return } db close forcedelete test2.db sqlite3async_initialize "" 1 sqlite3async_control halt never sqlite3 db test.db do_test async5-1.1 { execsql { ATTACH 'test2.db' AS next; |
︙ | ︙ |
Changes to test/attach.test.
︙ | ︙ | |||
20 21 22 23 24 25 26 | ifcapable !attach { finish_test return } for {set i 2} {$i<=15} {incr i} { | | | | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 | ifcapable !attach { finish_test return } for {set i 2} {$i<=15} {incr i} { forcedelete test$i.db forcedelete test$i.db-journal } do_test attach-1.1 { execsql { CREATE TABLE t1(a,b); INSERT INTO t1 VALUES(1,2); INSERT INTO t1 VALUES(3,4); |
︙ | ︙ | |||
624 625 626 627 628 629 630 | # Tests for the sqliteFix...() routines in attach.c # ifcapable {trigger} { do_test attach-5.1 { db close sqlite3 db test.db db2 close | | | 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 | # Tests for the sqliteFix...() routines in attach.c # ifcapable {trigger} { do_test attach-5.1 { db close sqlite3 db test.db db2 close forcedelete test2.db sqlite3 db2 test2.db catchsql { ATTACH DATABASE 'test.db' AS orig; CREATE TRIGGER r1 AFTER INSERT ON orig.t1 BEGIN SELECT 'no-op'; END; } db2 |
︙ | ︙ | |||
720 721 722 723 724 725 726 | if {$tcl_platform(platform)=="unix"} { sqlite3 dbx cannot-read dbx eval {CREATE TABLE t1(a,b,c)} dbx close file attributes cannot-read -permission 0000 if {[file writable cannot-read]} { #puts "\n**** Tests do not work when run as root ****" | | | | | | | 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 | if {$tcl_platform(platform)=="unix"} { sqlite3 dbx cannot-read dbx eval {CREATE TABLE t1(a,b,c)} dbx close file attributes cannot-read -permission 0000 if {[file writable cannot-read]} { #puts "\n**** Tests do not work when run as root ****" forcedelete cannot-read #exit 1 } else { do_test attach-6.2 { catchsql { ATTACH DATABASE 'cannot-read' AS noread; } } {1 {unable to open database: cannot-read}} do_test attach-6.2.2 { db errorcode } {14} } forcedelete cannot-read } # Check the error message if we try to access a database that has # not been attached. do_test attach-6.3 { catchsql { CREATE TABLE no_such_db.t1(a, b, c); } } {1 {unknown database no_such_db}} for {set i 2} {$i<=15} {incr i} { catch {db$i close} } db close forcedelete test2.db forcedelete no-such-file ifcapable subquery { do_test attach-7.1 { forcedelete test.db test.db-journal sqlite3 db test.db catchsql { DETACH RAISE ( IGNORE ) IN ( SELECT "AAAAAA" . * ORDER BY REGISTER LIMIT "AAAAAA" . "AAAAAA" OFFSET RAISE ( IGNORE ) NOT NULL ) } } {1 {no such table: AAAAAA}} } |
︙ | ︙ | |||
774 775 776 777 778 779 780 | catchsql { ATTACH 'test2.db' AS t2; } } {1 {file is encrypted or is not a database}} do_test attach-8.2 { db errorcode } {26} | | | | | 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 | catchsql { ATTACH 'test2.db' AS t2; } } {1 {file is encrypted or is not a database}} do_test attach-8.2 { db errorcode } {26} forcedelete test2.db do_test attach-8.3 { sqlite3 db2 test2.db db2 eval {CREATE TABLE t1(x); BEGIN EXCLUSIVE} catchsql { ATTACH 'test2.db' AS t2; } } {1 {database is locked}} do_test attach-8.4 { db errorcode } {5} db2 close forcedelete test2.db # Test that it is possible to attach the same database more than # once when not in shared-cache mode. That this is not possible in # shared-cache mode is tested in shared7.test. do_test attach-9.1 { forcedelete test4.db execsql { ATTACH 'test4.db' AS aux1; CREATE TABLE aux1.t1(a, b); INSERT INTO aux1.t1 VALUES(1, 2); ATTACH 'test4.db' AS aux2; SELECT * FROM aux2.t1; } |
︙ | ︙ |
Changes to test/attach2.test.
︙ | ︙ | |||
29 30 31 32 33 34 35 | # sure we can attach test2.db from test.db. # do_test attach2-1.1 { db eval { CREATE TABLE t1(a,b); CREATE INDEX x1 ON t1(a); } | | | | 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | # sure we can attach test2.db from test.db. # do_test attach2-1.1 { db eval { CREATE TABLE t1(a,b); CREATE INDEX x1 ON t1(a); } forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db db2 eval { CREATE TABLE t1(a,b); CREATE INDEX x1 ON t1(a); } catchsql { ATTACH 'test2.db' AS t2; |
︙ | ︙ | |||
323 324 325 326 327 328 329 | } {} do_test attach2-4.15 { execsql {SELECT * FROM t1} db2 } {1 2 1 2} db close db2 close | | | | 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 | } {} do_test attach2-4.15 { execsql {SELECT * FROM t1} db2 } {1 2 1 2} db close db2 close forcedelete test2.db sqlite3_soft_heap_limit $soft_limit # These tests - attach2-5.* - check that the master journal file is deleted # correctly when a multi-file transaction is committed or rolled back. # # Update: It's not actually created if a rollback occurs, so that test # doesn't really prove too much. foreach f [glob test.db*] {forcedelete $f} do_test attach2-5.1 { sqlite3 db test.db execsql { ATTACH 'test.db2' AS aux; } } {} do_test attach2-5.2 { |
︙ | ︙ |
Changes to test/attach3.test.
︙ | ︙ | |||
31 32 33 34 35 36 37 | # Create tables t1 and t2 in the main database execsql { CREATE TABLE t1(a, b); CREATE TABLE t2(c, d); } # Create tables t1 and t2 in database file test2.db | | | | 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 | # Create tables t1 and t2 in the main database execsql { CREATE TABLE t1(a, b); CREATE TABLE t2(c, d); } # Create tables t1 and t2 in database file test2.db forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db execsql { CREATE TABLE t1(a, b); CREATE TABLE t2(c, d); } db2 db2 close |
︙ | ︙ |
Changes to test/attachmalloc.test.
︙ | ︙ | |||
25 26 27 28 29 30 31 | source $testdir/malloc_common.tcl do_malloc_test attachmalloc-1 -tclprep { catch { db close } for {set i 2} {$i<=4} {incr i} { catch { db$i close } | | | | | | 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 | source $testdir/malloc_common.tcl do_malloc_test attachmalloc-1 -tclprep { catch { db close } for {set i 2} {$i<=4} {incr i} { catch { db$i close } forcedelete test$i.db forcedelete test$i.db-journal } } -tclbody { if {[catch {sqlite3 db test.db}]} { error "out of memory" } sqlite3_db_config_lookaside db 0 0 0 sqlite3_extended_result_codes db 1 } -sqlbody { ATTACH 'test2.db' AS two; CREATE TABLE two.t1(x); ATTACH 'test3.db' AS three; CREATE TABLE three.t1(x); ATTACH 'test4.db' AS four; CREATE TABLE four.t1(x); } do_malloc_test attachmalloc-2 -tclprep { forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db db2 eval { CREATE TABLE t1(a, b, c); CREATE INDEX i1 ON t1(a, b); } db2 close } -sqlbody { |
︙ | ︙ |
Changes to test/autoinc.test.
︙ | ︙ | |||
423 424 425 426 427 428 429 | } {} } # Make sure AUTOINCREMENT works on ATTACH-ed tables. # ifcapable tempdb&&attach { do_test autoinc-5.1 { | | | | 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 | } {} } # Make sure AUTOINCREMENT works on ATTACH-ed tables. # ifcapable tempdb&&attach { do_test autoinc-5.1 { forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db execsql { CREATE TABLE t4(m INTEGER PRIMARY KEY AUTOINCREMENT, n); CREATE TABLE t5(o, p INTEGER PRIMARY KEY AUTOINCREMENT); } db2; execsql { ATTACH 'test2.db' as aux; |
︙ | ︙ | |||
516 517 518 519 520 521 522 | # Ticket #1283. Make sure that preparing but never running a statement # that creates the sqlite_sequence table does not mess up the database. # do_test autoinc-8.1 { catch {db2 close} catch {db close} | | | 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 | # Ticket #1283. Make sure that preparing but never running a statement # that creates the sqlite_sequence table does not mess up the database. # do_test autoinc-8.1 { catch {db2 close} catch {db close} forcedelete test.db sqlite3 db test.db set DB [sqlite3_connection_pointer db] set STMT [sqlite3_prepare $DB { CREATE TABLE t1( x INTEGER PRIMARY KEY AUTOINCREMENT ) } -1 TAIL] |
︙ | ︙ |
Changes to test/autovacuum.test.
︙ | ︙ | |||
463 464 465 466 467 468 469 | PRAGMA auto_vacuum = 0; PRAGMA auto_vacuum; } } {1} do_test autovacuum-3.4 { db close | | | 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 | PRAGMA auto_vacuum = 0; PRAGMA auto_vacuum; } } {1} do_test autovacuum-3.4 { db close forcedelete test.db sqlite3 db test.db execsql { PRAGMA auto_vacuum; } } $AUTOVACUUM do_test autovacuum-3.5 { execsql { |
︙ | ︙ | |||
498 499 500 501 502 503 504 | # rolled back no corruption occurs. # do_test autovacuum-4.0 { # The last round of tests may have left the db in non-autovacuum mode. # Reset everything just in case. # db close | | | 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 | # rolled back no corruption occurs. # do_test autovacuum-4.0 { # The last round of tests may have left the db in non-autovacuum mode. # Reset everything just in case. # db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA auto_vacuum = 1; PRAGMA auto_vacuum; } } {1} do_test autovacuum-4.1 { |
︙ | ︙ | |||
595 596 597 598 599 600 601 | #--------------------------------------------------------------------- # Test cases autovacuum-7.X test the case where a page must be moved # and the destination location collides with at least one other # entry in the page hash-table (internal to the pager.c module. # do_test autovacuum-7.1 { db close | | | | 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 | #--------------------------------------------------------------------- # Test cases autovacuum-7.X test the case where a page must be moved # and the destination location collides with at least one other # entry in the page hash-table (internal to the pager.c module. # do_test autovacuum-7.1 { db close forcedelete test.db forcedelete test.db-journal sqlite3 db test.db execsql { PRAGMA auto_vacuum=1; CREATE TABLE t1(a, b, PRIMARY KEY(a, b)); INSERT INTO t1 VALUES(randstr(400,400),randstr(400,400)); INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2 |
︙ | ︙ |
Changes to test/autovacuum_ioerr2.test.
︙ | ︙ | |||
71 72 73 74 75 76 77 | BEGIN; INSERT INTO abc2 VALUES(10); DROP TABLE abc; COMMIT; DROP TABLE abc2; } | | | 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 | BEGIN; INSERT INTO abc2 VALUES(10); DROP TABLE abc; COMMIT; DROP TABLE abc2; } forcedelete backup.db ifcapable subquery { do_ioerr_test autovacuum-ioerr2-4 -tclprep { if {![file exists backup.db]} { sqlite3 dbb backup.db execsql { PRAGMA auto_vacuum = 1; BEGIN; |
︙ | ︙ | |||
95 96 97 98 99 100 101 | execsql { COMMIT; PRAGMA cache_size = 10; } dbb dbb close } db close | | | | | 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 | execsql { COMMIT; PRAGMA cache_size = 10; } dbb dbb close } db close forcedelete test.db forcedelete test.db-journal forcecopy backup.db test.db set ::DB [sqlite3 db test.db] execsql { PRAGMA cache_size = 10; } } -sqlbody { BEGIN; DELETE FROM abc WHERE oid < 3; |
︙ | ︙ |
Changes to test/backcompat.test.
︙ | ︙ | |||
57 58 59 60 61 62 63 | puts -nonewline "Testing against $bin - " flush stdout puts "version [get_version $bin]" } proc do_backcompat_test {rv bin1 bin2 script} { | | | 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 | puts -nonewline "Testing against $bin - " flush stdout puts "version [get_version $bin]" } proc do_backcompat_test {rv bin1 bin2 script} { forcedelete test.db if {$bin1 != ""} { set ::bc_chan1 [launch_testfixture $bin1] } set ::bc_chan2 [launch_testfixture $bin2] if { $rv } { proc code2 {tcl} { uplevel #0 $tcl } if {$bin1 != ""} { proc code2 {tcl} { testfixture $::bc_chan1 $tcl } } |
︙ | ︙ | |||
148 149 150 151 152 153 154 | set ret [list] foreach f {test.db test.db-journal test.db-wal} { lappend ret [read_file $f] } set ret } proc write_file_system {data} { foreach f {test.db test.db-journal test.db-wal} d $data { if {[string length $d] == 0} { | | | 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 | set ret [list] foreach f {test.db test.db-journal test.db-wal} { lappend ret [read_file $f] } set ret } proc write_file_system {data} { foreach f {test.db test.db-journal test.db-wal} d $data { if {[string length $d] == 0} { forcedelete $f } else { write_file $f $d } } } #------------------------------------------------------------------------- |
︙ | ︙ |
Changes to test/backup.test.
︙ | ︙ | |||
69 70 71 72 73 74 75 | # Sanity check to verify that the [test_contents] proc works. # test_contents backup-1.2 db main db main # Check that it is possible to create and finish backup operations. # do_test backup-1.3.1 { | | | 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 | # Sanity check to verify that the [test_contents] proc works. # test_contents backup-1.2 db main db main # Check that it is possible to create and finish backup operations. # do_test backup-1.3.1 { delete_file test2.db sqlite3 db2 test2.db sqlite3_backup B db2 main db main } {B} do_test backup-1.3.2 { B finish } {SQLITE_OK} do_test backup-1.3.3 { |
︙ | ︙ | |||
164 165 166 167 168 169 170 | set file_dest temp }] { foreach rows_dest {0 3 10} { foreach pgsz_dest {512 1024 2048} { foreach nPagePerStep {1 200} { # Open the databases. | | | | 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 | set file_dest temp }] { foreach rows_dest {0 3 10} { foreach pgsz_dest {512 1024 2048} { foreach nPagePerStep {1 200} { # Open the databases. catch { delete_file test.db } catch { delete_file test2.db } eval $zOpenScript # Set to true if copying to an in-memory destination. Copying to an # in-memory destination is only possible if the initial destination # page size is the same as the source page size (in this case 1024 bytes). # set isMemDest [expr { |
︙ | ︙ | |||
275 276 277 278 279 280 281 | # * Target database page-size is smaller than the source. # set iTest 1 foreach nSrcPg {10 64 65 66 100} { foreach nDestRow {10 100} { foreach nDestPgsz {512 1024 2048 4096} { | | | | 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 | # * Target database page-size is smaller than the source. # set iTest 1 foreach nSrcPg {10 64 65 66 100} { foreach nDestRow {10 100} { foreach nDestPgsz {512 1024 2048 4096} { catch { delete_file test.db } catch { delete_file test2.db } sqlite3 db test.db sqlite3 db2 test2.db # Set up the content of the two databases. # execsql { PRAGMA page_size = 1024 } execsql "PRAGMA page_size = $nDestPgsz" db2 |
︙ | ︙ | |||
323 324 325 326 327 328 329 | incr iTest } } } #-------------------------------------------------------------------- do_test backup-3.$iTest.1 { | | | | 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 | incr iTest } } } #-------------------------------------------------------------------- do_test backup-3.$iTest.1 { catch { forcedelete test.db } catch { forcedelete test2.db } sqlite3 db test.db set iTab 1 db eval { PRAGMA page_size = 512 } while {[file size test.db] <= $::sqlite_pending_byte} { db eval "CREATE TABLE t${iTab}(a, b, c)" incr iTab |
︙ | ︙ | |||
388 389 390 391 392 393 394 | catch { sqlite3_backup B db main db2 aux } } {1} do_test backup-4.1.4 { sqlite3_errmsg db } {unknown database aux} do_test backup-4.2.1 { | | | | 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 | catch { sqlite3_backup B db main db2 aux } } {1} do_test backup-4.1.4 { sqlite3_errmsg db } {unknown database aux} do_test backup-4.2.1 { catch { forcedelete test3.db } catch { forcedelete test4.db } execsql { ATTACH 'test3.db' AS aux1; CREATE TABLE aux1.t1(a, b); } execsql { ATTACH 'test4.db' AS aux2; CREATE TABLE aux2.t2(a, b); |
︙ | ︙ | |||
435 436 437 438 439 440 441 | set rc [catch {sqlite3_backup B db main db aux1}] list $rc [sqlite3_errcode db] [sqlite3_errmsg db] } {1 SQLITE_ERROR {source and destination must be distinct}} db close db2 close do_test backup-4.5.1 { | | | 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 | set rc [catch {sqlite3_backup B db main db aux1}] list $rc [sqlite3_errcode db] [sqlite3_errmsg db] } {1 SQLITE_ERROR {source and destination must be distinct}} db close db2 close do_test backup-4.5.1 { catch { forcedelete test.db } sqlite3 db test.db sqlite3 db2 :memory: execsql { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); } execsql { |
︙ | ︙ | |||
487 488 489 490 491 492 493 | # # 1) Backing up file-to-file. The writer writes via an external pager. # 2) Backing up file-to-file. The writer writes via the same pager as # is used by the backup operation. # 3) Backing up memory-to-file. # set iTest 0 | | | | | 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 | # # 1) Backing up file-to-file. The writer writes via an external pager. # 2) Backing up file-to-file. The writer writes via the same pager as # is used by the backup operation. # 3) Backing up memory-to-file. # set iTest 0 forcedelete bak.db-wal foreach {writer file} {db test.db db3 test.db db :memory:} { incr iTest catch { delete_file bak.db } sqlite3 db2 bak.db catch { delete_file $file } sqlite3 db $file sqlite3 db3 $file do_test backup-5.$iTest.1.1 { execsql { BEGIN; CREATE TABLE t1(a, b); |
︙ | ︙ | |||
593 594 595 596 597 598 599 | } {SQLITE_OK} integrity_check backup-5.$iTest.4.5 db2 test_contents backup-5.$iTest.4.6 db main db2 main catch {db close} catch {db2 close} catch {db3 close} | | | | 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 | } {SQLITE_OK} integrity_check backup-5.$iTest.4.5 db2 test_contents backup-5.$iTest.4.6 db main db2 main catch {db close} catch {db2 close} catch {db3 close} catch { delete_file bak.db } sqlite3 db2 bak.db catch { delete_file $file } sqlite3 db $file sqlite3 db3 $file do_test backup-5.$iTest.5.1 { execsql { PRAGMA auto_vacuum = incremental; BEGIN; CREATE TABLE t1(a, b); |
︙ | ︙ | |||
640 641 642 643 644 645 646 | # End of backup-5.* tests. #--------------------------------------------------------------------- #--------------------------------------------------------------------- # Test the sqlite3_backup_remaining() and backup_pagecount() APIs. # do_test backup-6.1 { | | | | 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 | # End of backup-5.* tests. #--------------------------------------------------------------------- #--------------------------------------------------------------------- # Test the sqlite3_backup_remaining() and backup_pagecount() APIs. # do_test backup-6.1 { catch { forcedelete test.db } catch { forcedelete test2.db } sqlite3 db test.db sqlite3 db2 test2.db execsql { BEGIN; CREATE TABLE t1(a, b); CREATE INDEX i1 ON t1(a, b); INSERT INTO t1 VALUES(1, randstr(1000,1000)); |
︙ | ︙ | |||
697 698 699 700 701 702 703 | # backup-7.2.*: Attempt to step the backup process while a # write-transaction is underway on the source pager (return # SQLITE_LOCKED). # # backup-7.3.*: Destination database is externally locked (return SQLITE_BUSY). # do_test backup-7.0 { | | | | 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 | # backup-7.2.*: Attempt to step the backup process while a # write-transaction is underway on the source pager (return # SQLITE_LOCKED). # # backup-7.3.*: Destination database is externally locked (return SQLITE_BUSY). # do_test backup-7.0 { catch { forcedelete test.db } catch { forcedelete test2.db } sqlite3 db2 test2.db sqlite3 db test.db execsql { CREATE TABLE t1(a, b); CREATE INDEX i1 ON t1(a, b); INSERT INTO t1 VALUES(1, randstr(1000,1000)); INSERT INTO t1 SELECT a+ 1, randstr(1000,1000) FROM t1; |
︙ | ︙ | |||
750 751 752 753 754 755 756 | } {SQLITE_OK} test_contents backup-7.2.5 db main db2 main integrity_check backup-7.3.6 db2 do_test backup-7.3.1 { db2 close db3 close | | | 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 | } {SQLITE_OK} test_contents backup-7.2.5 db main db2 main integrity_check backup-7.3.6 db2 do_test backup-7.3.1 { db2 close db3 close forcedelete test2.db sqlite3 db2 test2.db sqlite3 db3 test2.db sqlite3_backup B db2 main db main execsql { BEGIN ; CREATE TABLE t2(a, b); } db3 B step 5 |
︙ | ︙ | |||
779 780 781 782 783 784 785 | # The following tests, backup-8.*, test attaching multiple backup # processes to the same source database. Also, reading from the source # database while a read transaction is active. # # These tests reuse the database "test.db" left over from backup-7.*. # do_test backup-8.1 { | | | | 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 | # The following tests, backup-8.*, test attaching multiple backup # processes to the same source database. Also, reading from the source # database while a read transaction is active. # # These tests reuse the database "test.db" left over from backup-7.*. # do_test backup-8.1 { catch { forcedelete test2.db } catch { forcedelete test3.db } sqlite3 db2 test2.db sqlite3 db3 test3.db sqlite3_backup B2 db2 main db main sqlite3_backup B3 db3 main db main list [B2 finish] [B3 finish] } {SQLITE_OK SQLITE_OK} |
︙ | ︙ | |||
861 862 863 864 865 866 867 | do_test backup-9.2.3 { B finish } {SQLITE_OK} catch {db2 close} ifcapable memorymanage { db close | | | | 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 | do_test backup-9.2.3 { B finish } {SQLITE_OK} catch {db2 close} ifcapable memorymanage { db close forcedelete test.db forcedelete bak.db sqlite3 db test.db sqlite3 db2 test.db sqlite3 db3 bak.db do_test backup-10.1.1 { execsql { |
︙ | ︙ | |||
914 915 916 917 918 919 920 | # Test that if the database is written to via the same database handle being # used as the source by a backup operation: # # 10.1.*: If the db is in-memory, the backup is restarted. # 10.2.*: If the db is a file, the backup is not restarted. # db close | | | 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 | # Test that if the database is written to via the same database handle being # used as the source by a backup operation: # # 10.1.*: If the db is in-memory, the backup is restarted. # 10.2.*: If the db is a file, the backup is not restarted. # db close forcedelete test.db test.db-journal foreach {tn file rc} { 1 test.db SQLITE_DONE 2 :memory: SQLITE_OK } { do_test backup-10.$tn.1 { sqlite3 db $file execsql { |
︙ | ︙ | |||
944 945 946 947 948 949 950 | do_test backup-10.$tn.2 { set pgs [execsql {pragma page_count}] expr {$pgs > 50 && $pgs < 75} } {1} do_test backup-10.$tn.3 { | | | 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 | do_test backup-10.$tn.2 { set pgs [execsql {pragma page_count}] expr {$pgs > 50 && $pgs < 75} } {1} do_test backup-10.$tn.3 { forcedelete bak.db bak.db-journal sqlite3 db2 bak.db sqlite3_backup B db2 main db main B step 50 } {SQLITE_OK} do_test backup-10.$tn.4 { execsql { UPDATE t1 SET b = randomblob(200) WHERE a IN (1, 250) } |
︙ | ︙ |
Changes to test/backup2.test.
︙ | ︙ | |||
57 58 59 60 61 62 63 | unset -nocomplain cksum set cksum [dbcksum db main] # Make a backup of the test data. Verify that the backup copy # is identical to the original. # do_test backup2-2 { | | | | | | 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 | unset -nocomplain cksum set cksum [dbcksum db main] # Make a backup of the test data. Verify that the backup copy # is identical to the original. # do_test backup2-2 { forcedelete bu1.db db backup bu1.db sqlite3 db2 bu1.db dbcksum db2 main } $cksum # Delete the original. Restore from backup. Verify the content is # unchanged. # do_test backup2-3.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db db2 eval {BEGIN EXCLUSIVE} set rc [catch {db restore bu1.db} res] lappend rc $res db2 eval {ROLLBACK} set rc } {1 {restore failed: source database busy}} do_test backup2-3.2 { db close forcedelete test.db test.db-journal sqlite3 db test.db db restore bu1.db dbcksum db main } $cksum # Use alternative databases - other than "main". # do_test backup2-4 { db restore temp bu1.db dbcksum db temp } $cksum do_test backup2-5 { db2 close forcedelete bu1.db bu2.db db backup temp bu2.db sqlite3 db2 bu2.db dbcksum db2 main } $cksum # Try to backup to a readonly file. # |
︙ | ︙ | |||
123 124 125 126 127 128 129 | set rc [catch {db backup temp bu2.db} res] lappend rc $res } {1 {backup failed: file is encrypted or is not a database}} # Try to backup database that does not exist # do_test backup2-8 { | | | | | | | 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 | set rc [catch {db backup temp bu2.db} res] lappend rc $res } {1 {backup failed: file is encrypted or is not a database}} # Try to backup database that does not exist # do_test backup2-8 { forcedelete bu1.db set rc [catch {db backup aux1 bu1.db} res] lappend rc $res } {1 {backup failed: unknown database aux1}} # Invalid syntax on the backup method # do_test backup2-9 { set rc [catch {db backup} res] lappend rc $res } {1 {wrong # args: should be "db backup ?DATABASE? FILENAME"}} # Try to restore from an unreadable file. # if {$tcl_platform(platform)=="windows"} { do_test backup2-10 { forcedelete bu3.db file mkdir bu3.db set rc [catch {db restore temp bu3.db} res] lappend rc $res } {1 {cannot open source database: unable to open database file}} } if {$tcl_platform(platform)!="windows"} { do_test backup2-10 { forcedelete bu3.db file mkdir bu3.db set rc [catch {db restore temp bu3.db} res] lappend rc $res } {1 {cannot open source database: disk I/O error}} } # Try to restore from something that is not a database file. # do_test backup2-11 { set rc [catch {db restore temp bu2.db} res] lappend rc $res } {1 {restore failed: file is encrypted or is not a database}} # Try to restore a database that does not exist # do_test backup2-12 { set rc [catch {db restore aux1 bu2.db} res] lappend rc $res } {1 {restore failed: unknown database aux1}} do_test backup2-13 { forcedelete bu4.db set rc [catch {db restore bu4.db} res] lappend rc $res } {1 {cannot open source database: unable to open database file}} # Invalid syntax on the restore method # do_test backup2-14 { set rc [catch {db restore} res] lappend rc $res } {1 {wrong # args: should be "db restore ?DATABASE? FILENAME"}} forcedelete bu1.db bu2.db bu3.db bu4.db finish_test |
Changes to test/backup_ioerr.test.
︙ | ︙ | |||
56 57 58 59 60 61 62 | expr {$nPage>130 && $nPage<160} } {1} do_test backup_ioerr-1.2 { expr {[file size test.db] > $sqlite_pending_byte} } {1} do_test backup_ioerr-1.3 { db close | | | 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 | expr {$nPage>130 && $nPage<160} } {1} do_test backup_ioerr-1.2 { expr {[file size test.db] > $sqlite_pending_byte} } {1} do_test backup_ioerr-1.3 { db close forcedelete test.db } {} # Turn off IO error simulation. # proc clear_ioerr_simulation {} { set ::sqlite_io_error_hit 0 set ::sqlite_io_error_hardhit 0 |
︙ | ︙ | |||
151 152 153 154 155 156 157 | set bStop 0 for {set iError 1} {$bStop == 0} {incr iError} { # Disable IO error simulation. clear_ioerr_simulation catch { ddb close } catch { sdb close } | | | | 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 | set bStop 0 for {set iError 1} {$bStop == 0} {incr iError} { # Disable IO error simulation. clear_ioerr_simulation catch { ddb close } catch { sdb close } catch { forcedelete test.db } catch { forcedelete bak.db } # Open the source and destination databases. sqlite3 sdb test.db sqlite3 ddb bak.db # Step 1: Populate the source and destination databases. populate_database sdb |
︙ | ︙ |
Changes to test/capi3.test.
︙ | ︙ | |||
693 694 695 696 697 698 699 | db close } if {![sqlite3 -has-codec]} { # Now test that the library correctly handles bogus entries in the # sqlite_master table (schema corruption). do_test capi3-8.1 { | | | 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 | db close } if {![sqlite3 -has-codec]} { # Now test that the library correctly handles bogus entries in the # sqlite_master table (schema corruption). do_test capi3-8.1 { forcedelete test.db test.db-journal sqlite3 db test.db execsql { CREATE TABLE t1(a); } db close } {} do_test capi3-8.2 { |
︙ | ︙ | |||
718 719 720 721 722 723 724 | SELECT * FROM sqlite_master; } } {1 {malformed database schema (?)}} do_test capi3-8.4 { # Build a 5-field row record. The first field is a string 'table', and # subsequent fields are all NULL. db close | | | | | 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 | SELECT * FROM sqlite_master; } } {1 {malformed database schema (?)}} do_test capi3-8.4 { # Build a 5-field row record. The first field is a string 'table', and # subsequent fields are all NULL. db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { CREATE TABLE t1(a); PRAGMA writable_schema=ON; INSERT INTO sqlite_master VALUES('table',NULL,NULL,NULL,NULL); } db close } {}; do_test capi3-8.5 { catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } } {1 {malformed database schema (?)}} db close } forcedelete test.db forcedelete test.db-journal # Test the english language string equivalents for sqlite error codes set code2english [list \ SQLITE_OK {not an error} \ SQLITE_ERROR {SQL logic error or missing database} \ SQLITE_PERM {access permission denied} \ |
︙ | ︙ |
Changes to test/capi3c.test.
︙ | ︙ | |||
666 667 668 669 670 671 672 | db close } if {![sqlite3 -has-codec]} { # Now test that the library correctly handles bogus entries in the # sqlite_master table (schema corruption). do_test capi3c-8.1 { | | | 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 | db close } if {![sqlite3 -has-codec]} { # Now test that the library correctly handles bogus entries in the # sqlite_master table (schema corruption). do_test capi3c-8.1 { forcedelete test.db test.db-journal sqlite3 db test.db execsql { CREATE TABLE t1(a); } db close } {} do_test capi3c-8.2 { |
︙ | ︙ | |||
691 692 693 694 695 696 697 | SELECT * FROM sqlite_master; } } {1 {malformed database schema (?)}} do_test capi3c-8.4 { # Build a 5-field row record. The first field is a string 'table', and # subsequent fields are all NULL. db close | | | | | 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 | SELECT * FROM sqlite_master; } } {1 {malformed database schema (?)}} do_test capi3c-8.4 { # Build a 5-field row record. The first field is a string 'table', and # subsequent fields are all NULL. db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { CREATE TABLE t1(a); PRAGMA writable_schema=ON; INSERT INTO sqlite_master VALUES('table',NULL,NULL,NULL,NULL); } db close } {}; do_test capi3c-8.5 { catch { sqlite3 db test.db } catchsql { SELECT * FROM sqlite_master; } } {1 {malformed database schema (?)}} db close } forcedelete test.db forcedelete test.db-journal # Test the english language string equivalents for sqlite error codes set code2english [list \ SQLITE_OK {not an error} \ SQLITE_ERROR {SQL logic error or missing database} \ SQLITE_PERM {access permission denied} \ |
︙ | ︙ |
Changes to test/collate7.test.
︙ | ︙ | |||
40 41 42 43 44 45 46 | do_test collate7-1.4 { sqlite3_create_collation_v2 db CASELESS caseless_cmp {incr ::caseless_del} db close set ::caseless_del } {2} do_test collate7-2.1 { | | | 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 | do_test collate7-1.4 { sqlite3_create_collation_v2 db CASELESS caseless_cmp {incr ::caseless_del} db close set ::caseless_del } {2} do_test collate7-2.1 { forcedelete test.db test.db-journal sqlite3 db test.db sqlite3_create_collation_v2 db CASELESS caseless_cmp {incr ::caseless_del} execsql { PRAGMA encoding='utf-16'; CREATE TABLE abc16(a COLLATE CASELESS, b, c); } db set ::caseless_del |
︙ | ︙ |
Changes to test/corrupt.test.
︙ | ︙ | |||
11 12 13 14 15 16 17 | # This file implements regression tests for SQLite library. # # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. # # $Id: corrupt.test,v 1.12 2009/07/13 09:41:45 danielk1977 Exp $ | | | 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 | # This file implements regression tests for SQLite library. # # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. # # $Id: corrupt.test,v 1.12 2009/07/13 09:41:45 danielk1977 Exp $ catch {forcedelete test.db test.db-journal test.bu} set testdir [file dirname $argv0] source $testdir/tester.tcl # Do not use a codec for tests in this file, as the database file is # manipulated directly using tcl scripts (using the [hexio_write] command). # |
︙ | ︙ | |||
47 48 49 50 51 52 53 | CREATE TABLE t2 AS SELECT * FROM t1; DELETE FROM t2 WHERE rowid%5!=0; COMMIT; } } {} integrity_check corrupt-1.2 | < < < < < < < < < < < < | | | 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 | CREATE TABLE t2 AS SELECT * FROM t1; DELETE FROM t2 WHERE rowid%5!=0; COMMIT; } } {} integrity_check corrupt-1.2 # Setup for the tests. Make a backup copy of the good database in test.bu. # Create a string of garbage data that is 256 bytes long. # forcecopy test.db test.bu set fsize [file size test.db] set junk "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" while {[string length $junk]<256} {append junk $junk} set junk [string range $junk 0 255] # Go through the database and write garbage data into each 256 segment # of the file. Then do various operations on the file to make sure that # the database engine can recover gracefully from the corruption. # for {set i [expr {1*256}]} {$i<$fsize-256} {incr i 256} { set tn [expr {$i/256}] db close forcecopy test.bu test.db set fd [open test.db r+] fconfigure $fd -translation binary seek $fd $i puts -nonewline $fd $junk close $fd do_test corrupt-2.$tn.1 { sqlite3 db test.db |
︙ | ︙ | |||
128 129 130 131 132 133 134 | #------------------------------------------------------------------------ # For these tests, swap the rootpage entries of t1 (a table) and t1i1 (an # index on t1) in sqlite_master. Then perform a few different queries # and make sure this is detected as corruption. # do_test corrupt-3.1 { db close | | | 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 | #------------------------------------------------------------------------ # For these tests, swap the rootpage entries of t1 (a table) and t1i1 (an # index on t1) in sqlite_master. Then perform a few different queries # and make sure this is detected as corruption. # do_test corrupt-3.1 { db close forcecopy test.bu test.db sqlite3 db test.db list } {} do_test corrupt-3.2 { set t1_r [execsql {SELECT rootpage FROM sqlite_master WHERE name = 't1i1'}] set t1i1_r [execsql {SELECT rootpage FROM sqlite_master WHERE name = 't1'}] set cookie [expr [execsql {PRAGMA schema_version}] + 1] |
︙ | ︙ | |||
177 178 179 180 181 182 183 | catchsql { SELECT * FROM t1 WHERE x = 'abcde'; } } {1 {database disk image is malformed}} do_test corrupt-4.1 { db close | | | 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 | catchsql { SELECT * FROM t1 WHERE x = 'abcde'; } } {1 {database disk image is malformed}} do_test corrupt-4.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA page_size = 1024; CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT); } for {set i 0} {$i < 10} {incr i} { set text [string repeat $i 220] |
︙ | ︙ | |||
209 210 211 212 213 214 215 | # index b-tree as expected. At one point this was causing an assert() # to fail. catchsql { DELETE FROM t1 WHERE rowid = 3 } } {1 {database disk image is malformed}} do_test corrupt-5.1 { db close | | | 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 | # index b-tree as expected. At one point this was causing an assert() # to fail. catchsql { DELETE FROM t1 WHERE rowid = 3 } } {1 {database disk image is malformed}} do_test corrupt-5.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA page_size = 1024 } set ct "CREATE TABLE t1(c0 " set i 0 while {[string length $ct] < 950} { append ct ", c[incr i]" } append ct ")" |
︙ | ︙ | |||
232 233 234 235 236 237 238 | } {1 {database disk image is malformed}} # At one point, the specific corruption caused by this test case was # causing a buffer overwrite. Although a crash was never demonstrated, # running this testcase under valgrind revealed the problem. do_test corrupt-6.1 { db close | | | 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 | } {1 {database disk image is malformed}} # At one point, the specific corruption caused by this test case was # causing a buffer overwrite. Although a crash was never demonstrated, # running this testcase under valgrind revealed the problem. do_test corrupt-6.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA page_size = 1024; CREATE TABLE t1(x); } # The root page of t1 is 1024 bytes in size. The header is 8 bytes, and # each of the cells inserted by the following INSERT statements consume |
︙ | ︙ | |||
260 261 262 263 264 265 266 | sqlite3 db test.db catchsql { INSERT INTO t1 VALUES( randomblob(10) ) } } {1 {database disk image is malformed}} ifcapable oversize_cell_check { db close | | | 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 | sqlite3 db test.db catchsql { INSERT INTO t1 VALUES( randomblob(10) ) } } {1 {database disk image is malformed}} ifcapable oversize_cell_check { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA page_size = 1024; CREATE TABLE t1(x); } do_test corrupt-7.1 { for {set i 0} {$i < 39} {incr i} { |
︙ | ︙ | |||
313 314 315 316 317 318 319 | catchsql { INSERT INTO t1 VALUES(X'000100020003000400050006000700080009000A'); } } {1 {database disk image is malformed}} } db close | | | | 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 | catchsql { INSERT INTO t1 VALUES(X'000100020003000400050006000700080009000A'); } } {1 {database disk image is malformed}} } db close forcedelete test.db test.db-journal do_test corrupt-8.1 { sqlite3 db test.db execsql { PRAGMA page_size = 1024; PRAGMA secure_delete = on; PRAGMA auto_vacuum = 0; CREATE TABLE t1(x INTEGER PRIMARY KEY, y); INSERT INTO t1 VALUES(5, randomblob(1900)); } hexio_write test.db 2044 [hexio_render_int32 2] hexio_write test.db 24 [hexio_render_int32 45] catchsql { INSERT OR REPLACE INTO t1 VALUES(5, randomblob(1900)) } } {1 {database disk image is malformed}} db close forcedelete test.db test.db-journal do_test corrupt-8.2 { sqlite3 db test.db execsql { PRAGMA page_size = 1024; PRAGMA secure_delete = on; PRAGMA auto_vacuum = 0; CREATE TABLE t1(x INTEGER PRIMARY KEY, y); |
︙ | ︙ |
Changes to test/corrupt2.test.
︙ | ︙ | |||
37 38 39 40 41 42 43 | CREATE TABLE abc(a, b, c); } } {} do_test corrupt2-1.2 { # Corrupt the 16 byte magic string at the start of the file | | | | | | | | | | | | | | | | | 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 | CREATE TABLE abc(a, b, c); } } {} do_test corrupt2-1.2 { # Corrupt the 16 byte magic string at the start of the file forcedelete corrupt.db forcedelete corrupt.db-journal forcecopy test.db corrupt.db set f [open corrupt.db RDWR] seek $f 8 start puts $f blah close $f sqlite3 db2 corrupt.db catchsql " $::presql SELECT * FROM sqlite_master; " db2 } {1 {file is encrypted or is not a database}} do_test corrupt2-1.3 { db2 close # Corrupt the page-size (bytes 16 and 17 of page 1). forcedelete corrupt.db forcedelete corrupt.db-journal forcecopy test.db corrupt.db set f [open corrupt.db RDWR] fconfigure $f -encoding binary seek $f 16 start puts -nonewline $f "\x00\xFF" close $f sqlite3 db2 corrupt.db catchsql " $::presql SELECT * FROM sqlite_master; " db2 } {1 {file is encrypted or is not a database}} do_test corrupt2-1.4 { db2 close # Corrupt the free-block list on page 1. forcedelete corrupt.db forcedelete corrupt.db-journal forcecopy test.db corrupt.db set f [open corrupt.db RDWR] fconfigure $f -encoding binary seek $f 101 start puts -nonewline $f "\xFF\xFF" close $f sqlite3 db2 corrupt.db catchsql " $::presql SELECT * FROM sqlite_master; " db2 } {1 {database disk image is malformed}} do_test corrupt2-1.5 { db2 close # Corrupt the free-block list on page 1. forcedelete corrupt.db forcedelete corrupt.db-journal forcecopy test.db corrupt.db set f [open corrupt.db RDWR] fconfigure $f -encoding binary seek $f 101 start puts -nonewline $f "\x00\xC8" seek $f 200 start puts -nonewline $f "\x00\x00" puts -nonewline $f "\x10\x00" close $f sqlite3 db2 corrupt.db catchsql " $::presql SELECT * FROM sqlite_master; " db2 } {1 {database disk image is malformed}} db2 close # Corrupt a database by having 2 indices of the same name: do_test corrupt2-2.1 { forcedelete corrupt.db forcedelete corrupt.db-journal forcecopy test.db corrupt.db sqlite3 db2 corrupt.db execsql " $::presql CREATE INDEX a1 ON abc(a); CREATE INDEX a2 ON abc(b); PRAGMA writable_schema = 1; |
︙ | ︙ | |||
146 147 148 149 150 151 152 | SELECT * FROM sqlite_master; " db2 } {1 {malformed database schema (a3) - index a3 already exists}} db2 close do_test corrupt2-3.1 { | | | | 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 | SELECT * FROM sqlite_master; " db2 } {1 {malformed database schema (a3) - index a3 already exists}} db2 close do_test corrupt2-3.1 { forcedelete corrupt.db forcedelete corrupt.db-journal sqlite3 db2 corrupt.db execsql " $::presql PRAGMA auto_vacuum = 1; PRAGMA page_size = 1024; CREATE TABLE t1(a, b, c); |
︙ | ︙ | |||
196 197 198 199 200 201 202 | } db2 } {1 {database disk image is malformed}} db2 close unset -nocomplain result do_test corrupt2-5.1 { | | | | 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 | } db2 } {1 {database disk image is malformed}} db2 close unset -nocomplain result do_test corrupt2-5.1 { forcedelete corrupt.db forcedelete corrupt.db-journal sqlite3 db2 corrupt.db execsql " $::presql PRAGMA auto_vacuum = 0; PRAGMA page_size = 1024; CREATE TABLE t1(a, b, c); |
︙ | ︙ | |||
253 254 255 256 257 258 259 | proc corruption_test {args} { set A(-corrupt) {} set A(-sqlprep) {} set A(-tclprep) {} array set A $args catch {db close} | | | | 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 | proc corruption_test {args} { set A(-corrupt) {} set A(-sqlprep) {} set A(-tclprep) {} array set A $args catch {db close} forcedelete corrupt.db forcedelete corrupt.db-journal sqlite3 db corrupt.db db eval $::presql eval $A(-tclprep) db eval $A(-sqlprep) db close |
︙ | ︙ |
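The corrupt2 hunks above patch fixed byte offsets in the database file: the 16-byte magic string at offset 0, the big-endian page size at offsets 16-17, and page 1's b-tree page header, which starts at offset 100 immediately after the 100-byte file header (so offset 101 falls inside the two-byte "first freeblock" field). A hedged sketch of the same kind of byte patch, using the hexio helpers instead of raw [open]/[seek]/[puts]:

    # Illustrative only: corrupt the page-size field (offsets 16-17).
    # 0x00FF is not a valid power-of-two page size, so the file should be
    # rejected when it is next opened.
    forcecopy test.db corrupt.db
    hexio_write corrupt.db 16 00FF
    sqlite3 db2 corrupt.db
    catchsql { SELECT * FROM sqlite_master } db2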
Changes to test/corrupt9.test.
︙ | ︙ | |||
82 83 84 85 86 87 88 | } {1} integrity_check corrupt9-1.2 # Corrupt the freelist by adding duplicate entries to the freelist. # Make sure the corruption is detected. # db close | | | | | 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 | } {1} integrity_check corrupt9-1.2 # Corrupt the freelist by adding duplicate entries to the freelist. # Make sure the corruption is detected. # db close forcecopy test.db test.db-template corrupt_freelist test.db 1 sqlite3 db test.db do_test corrupt9-2.1 { set x [db eval {PRAGMA integrity_check}] expr {$x!="ok"} } {1} do_test corrupt9-2.2 { catchsql { CREATE INDEX i2 ON t2(b,a); REINDEX; } } {1 {database disk image is malformed}} db close forcecopy test.db-template test.db corrupt_freelist test.db 2 sqlite3 db test.db do_test corrupt9-3.1 { set x [db eval {PRAGMA integrity_check}] expr {$x!="ok"} } {1} do_test corrupt9-3.2 { catchsql { CREATE INDEX i2 ON t2(b,a); REINDEX; } } {1 {database disk image is malformed}} db close forcecopy test.db-template test.db corrupt_freelist test.db 3 sqlite3 db test.db do_test corrupt9-4.1 { set x [db eval {PRAGMA integrity_check}] expr {$x!="ok"} } {1} do_test corrupt9-4.2 { |
︙ | ︙ |
Changes to test/corruptA.test.
︙ | ︙ | |||
36 37 38 39 40 41 42 | } {1} integrity_check corruptA-1.2 # Corrupt the file header in various ways and make sure the corruption # is detected when opening the database file. # db close | | | | | | | 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 | } {1} integrity_check corruptA-1.2 # Corrupt the file header in various ways and make sure the corruption # is detected when opening the database file. # db close forcecopy test.db test.db-template set unreadable_version 02 ifcapable wal { set unreadable_version 03 } do_test corruptA-2.1 { forcecopy test.db-template test.db hexio_write test.db 19 $unreadable_version ;# the read format number sqlite3 db test.db catchsql {SELECT * FROM t1} } {1 {file is encrypted or is not a database}} do_test corruptA-2.2 { db close forcecopy test.db-template test.db hexio_write test.db 21 41 ;# max embedded payload fraction sqlite3 db test.db catchsql {SELECT * FROM t1} } {1 {file is encrypted or is not a database}} do_test corruptA-2.3 { db close forcecopy test.db-template test.db hexio_write test.db 22 1f ;# min embedded payload fraction sqlite3 db test.db catchsql {SELECT * FROM t1} } {1 {file is encrypted or is not a database}} do_test corruptA-2.4 { db close forcecopy test.db-template test.db hexio_write test.db 23 21 ;# min leaf payload fraction sqlite3 db test.db catchsql {SELECT * FROM t1} } {1 {file is encrypted or is not a database}} finish_test |
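corruptA targets single header bytes that have exactly one legal value in a well-formed database: offset 19 is the read-format version, offset 21 the maximum embedded payload fraction (must be 64), offset 22 the minimum embedded payload fraction (must be 32), and offset 23 the leaf payload fraction (must be 32). Each test writes an off-by-one value and expects the file to be refused. A small illustrative check using the same hexio helpers:

    # Illustrative: a healthy database holds 64, 32, 32 at offsets 21..23.
    hexio_read test.db 21 3
    # Writing 0x41 (65) at offset 21, as corruptA-2.2 does, makes the
    # max embedded payload fraction illegal and the file unreadable.
    hexio_write test.db 21 41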
Changes to test/corruptB.test.
︙ | ︙ | |||
42 43 44 45 46 47 48 | INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; } expr {[file size test.db] > (1024*9)} } {1} integrity_check corruptB-1.2 | | | | | | | 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 | INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; } expr {[file size test.db] > (1024*9)} } {1} integrity_check corruptB-1.2 forcecopy test.db bak.db # Set the right-child of a B-Tree rootpage to refer to the root-page itself. # do_test corruptB-1.3.1 { set ::root [execsql {SELECT rootpage FROM sqlite_master}] set ::offset [expr {($::root-1)*1024}] hexio_write test.db [expr $offset+8] [hexio_render_int32 $::root] } {4} do_test corruptB-1.3.2 { sqlite3 db test.db catchsql { SELECT * FROM t1 } } {1 {database disk image is malformed}} # Set the left-child of a cell in a B-Tree rootpage to refer to the # root-page itself. # do_test corruptB-1.4.1 { db close forcecopy bak.db test.db set cell_offset [hexio_get_int [hexio_read test.db [expr $offset+12] 2]] hexio_write test.db [expr $offset+$cell_offset] [hexio_render_int32 $::root] } {4} do_test corruptB-1.4.2 { sqlite3 db test.db catchsql { SELECT * FROM t1 } } {1 {database disk image is malformed}} # Now grow the table B-Tree so that it is more than 2 levels high. # do_test corruptB-1.5.1 { db close forcecopy bak.db test.db sqlite3 db test.db execsql { INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; INSERT INTO t1 SELECT randomblob(200) FROM t1; } } {} forcecopy test.db bak.db # Set the right-child pointer of the right-child of the root page to point # back to the root page. # do_test corruptB-1.6.1 { db close set iRightChild [hexio_get_int [hexio_read test.db [expr $offset+8] 4]] set c_offset [expr ($iRightChild-1)*1024] hexio_write test.db [expr $c_offset+8] [hexio_render_int32 $::root] } {4} do_test corruptB-1.6.2 { sqlite3 db test.db catchsql { SELECT * FROM t1 } } {1 {database disk image is malformed}} # Set the left-child pointer of a cell of the right-child of the root page to # point back to the root page. # do_test corruptB-1.7.1 { db close forcecopy bak.db test.db set cell_offset [hexio_get_int [hexio_read test.db [expr $c_offset+12] 2]] hexio_write test.db [expr $c_offset+$cell_offset] [hexio_render_int32 $::root] } {4} do_test corruptB-1.7.2 { sqlite3 db test.db catchsql { SELECT * FROM t1 } } {1 {database disk image is malformed}} |
︙ | ︙ | |||
136 137 138 139 140 141 142 | } {1 {database disk image is malformed}} # Set the left-child pointer of a cell of the right-child of the root page to # point back to the root page. # do_test corruptB-1.9.1 { db close | | | | | 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 | } {1 {database disk image is malformed}} # Set the left-child pointer of a cell of the right-child of the root page to # point back to the root page. # do_test corruptB-1.9.1 { db close forcecopy bak.db test.db set cell_offset [hexio_get_int [hexio_read test.db [expr $c_offset+12] 2]] hexio_write test.db [expr $c_offset+$cell_offset] [hexio_render_int32 $::root] } {4} do_test corruptB-1.9.2 { sqlite3 db test.db catchsql { SELECT * FROM t1 } } {1 {database disk image is malformed}} #--------------------------------------------------------------------------- do_test corruptB-2.1.1 { db close forcecopy bak.db test.db hexio_write test.db [expr $offset+8] [hexio_render_int32 0x6FFFFFFF] } {4} do_test corruptB-2.1.2 { sqlite3 db test.db catchsql { SELECT * FROM t1 } } {1 {database disk image is malformed}} #--------------------------------------------------------------------------- # Corrupt the header-size field of a database record. # do_test corruptB-3.1.1 { db close forcecopy bak.db test.db sqlite3 db test.db set v [string repeat abcdefghij 200] execsql { CREATE TABLE t2(a); INSERT INTO t2 VALUES($v); } set t2_root [execsql {SELECT rootpage FROM sqlite_master WHERE name = 't2'}] |
︙ | ︙ |
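corruptB builds pointer loops in the table b-tree by hand. With a 1024-byte page size, page N begins at file offset (N-1)*1024; on an interior b-tree page, bytes 8..11 of the page header hold the right-most child page number and the cell pointer array starts at byte 12, so the two bytes there give the offset of the first cell, whose own first four bytes are its left-child pointer. A worked example of the arithmetic, assuming root page 3 as in the hunk above:

    # Hypothetical worked example for root page 3 and a 1024-byte page size.
    set root   3
    set offset [expr {($root-1)*1024}]    ;# 2048: file offset of page 3
    # Overwrite the right-child pointer (page-header bytes 8..11) with the
    # page's own number, creating a cycle the b-tree code must detect.
    hexio_write test.db [expr {$offset+8}] [hexio_render_int32 $root]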
Changes to test/corruptC.test.
︙ | ︙ | |||
13 14 15 16 17 18 19 | # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. It creates a base # data base file, then tests that single byte corruptions in # increasingly larger quantities are handled gracefully. # # $Id: corruptC.test,v 1.14 2009/07/11 06:55:34 danielk1977 Exp $ | | | 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 | # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. It creates a base # data base file, then tests that single byte corruptions in # increasingly larger quantities are handled gracefully. # # $Id: corruptC.test,v 1.14 2009/07/11 06:55:34 danielk1977 Exp $ catch {forcedelete test.db test.db-journal test.bu} set testdir [file dirname $argv0] source $testdir/tester.tcl # Do not use a codec for tests in this file, as the database file is # manipulated directly using tcl scripts (using the [hexio_write] command). # |
︙ | ︙ | |||
54 55 56 57 58 59 60 | # Generate random integer # proc random {range} { return [expr {round(rand()*$range)}] } | < < < < < < | | 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 | # Generate random integer # proc random {range} { return [expr {round(rand()*$range)}] } # Setup for the tests. Make a backup copy of the good database in test.bu. # db close forcecopy test.db test.bu sqlite3 db test.db set fsize [file size test.db] # Set a quasi-random random seed. if {[info exists ::G(issoak)]} { # If we are doing SOAK tests, we want a different # random seed for each run. Ideally we would like |
︙ | ︙ | |||
88 89 90 91 92 93 94 | # First test some specific corruption tests found from earlier runs # with specific seeds. # # test that a corrupt content offset size is handled (seed 5577) do_test corruptC-2.1 { db close | | | | | | | | | | | | | | | | | | 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 | # First test some specific corruption tests found from earlier runs # with specific seeds. # # test that a corrupt content offset size is handled (seed 5577) do_test corruptC-2.1 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 2053 [format %02x 0x04] sqlite3 db test.db catchsql {PRAGMA integrity_check} } {1 {database disk image is malformed}} # test that a corrupt content offset size is handled (seed 5649) do_test corruptC-2.2 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 27 [format %02x 0x08] hexio_write test.db 233 [format %02x 0x6a] hexio_write test.db 328 [format %02x 0x67] hexio_write test.db 750 [format %02x 0x1f] hexio_write test.db 1132 [format %02x 0x52] hexio_write test.db 1133 [format %02x 0x84] hexio_write test.db 1220 [format %02x 0x01] hexio_write test.db 3688 [format %02x 0xc1] hexio_write test.db 3714 [format %02x 0x58] hexio_write test.db 3746 [format %02x 0x9a] sqlite3 db test.db catchsql {UPDATE t1 SET y=1} } {1 {database disk image is malformed}} # test that a corrupt free cell size is handled (seed 13329) do_test corruptC-2.3 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 1094 [format %02x 0x76] sqlite3 db test.db catchsql {UPDATE t1 SET y=1} } {1 {database disk image is malformed}} # test that a corrupt free cell size is handled (seed 169571) do_test corruptC-2.4 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 3119 [format %02x 0xdf] sqlite3 db test.db catchsql {UPDATE t2 SET y='abcdef-uvwxyz'} } {1 {database disk image is malformed}} # test that a corrupt free cell size is handled (seed 169571) do_test corruptC-2.5 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 3119 [format %02x 0xdf] hexio_write test.db 4073 [format %02x 0xbf] sqlite3 db test.db catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} catchsql {PRAGMA integrity_check} } {0 {{*** in database main *** Page 4: btreeInitPage() returns error code 11}}} # {0 {{*** in database main *** # Corruption detected in cell 710 on page 4 # Multiple uses for byte 661 of page 4 # Fragmented space is 249 byte reported as 21 on page 4}}} # test that a corrupt free cell size is handled (seed 169595) do_test corruptC-2.6 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 619 [format %02x 0xe2] hexio_write test.db 
3150 [format %02x 0xa8] sqlite3 db test.db catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} } {1 {database disk image is malformed}} # corruption (seed 178692) do_test corruptC-2.7 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 3074 [format %02x 0xa0] sqlite3 db test.db catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} } {1 {database disk image is malformed}} # corruption (seed 179069) do_test corruptC-2.8 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 1393 [format %02x 0x7d] hexio_write test.db 84 [format %02x 0x19] hexio_write test.db 3287 [format %02x 0x3b] hexio_write test.db 2564 [format %02x 0xed] hexio_write test.db 2139 [format %02x 0x55] sqlite3 db test.db catchsql {BEGIN; DELETE FROM t1 WHERE x>13; ROLLBACK;} } {1 {database disk image is malformed}} # corruption (seed 170434) do_test corruptC-2.9 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 2095 [format %02x 0xd6] sqlite3 db test.db catchsql {BEGIN; DELETE FROM t1 WHERE x>13; ROLLBACK;} } {1 {database disk image is malformed}} # corruption (seed 186504) do_test corruptC-2.10 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 3130 [format %02x 0x02] sqlite3 db test.db catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} } {1 {database disk image is malformed}} # corruption (seed 1589) do_test corruptC-2.11 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 55 [format %02x 0xa7] sqlite3 db test.db catchsql {BEGIN; CREATE TABLE t3 AS SELECT x,3 as y FROM t2 WHERE rowid%5!=0; ROLLBACK;} } {1 {database disk image is malformed}} # corruption (seed 14166) do_test corruptC-2.12 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 974 [format %02x 0x2e] sqlite3 db test.db catchsql {SELECT count(*) FROM sqlite_master;} } {1 {malformed database schema (t1i1) - corrupt database}} # corruption (seed 218803) do_test corruptC-2.13 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 102 [format %02x 0x12] sqlite3 db test.db catchsql {BEGIN; CREATE TABLE t3 AS SELECT x,3 as y FROM t2 WHERE rowid%5!=0; ROLLBACK;} } {1 {database disk image is malformed}} do_test corruptC-2.14 { db close forcecopy test.bu test.db sqlite3 db test.db set blob [string repeat abcdefghij 10000] execsql { INSERT INTO t1 VALUES (1, $blob) } sqlite3 db test.db set filesize [file size test.db] hexio_write test.db [expr $filesize-2048] 00000001 catchsql {DELETE FROM t1 WHERE rowid = (SELECT max(rowid) FROM t1)} } {1 {database disk image is malformed}} # At one point this particular corrupt database was causing a buffer # overread. Which caused a crash in a run of all.test once. # do_test corruptC-2.15 { db close forcecopy test.bu test.db hexio_write test.db 986 b9 sqlite3 db test.db catchsql {SELECT count(*) FROM sqlite_master;} } {1 {malformed database schema (t1i1) - no such table: main.t1}} # # Now test for a series of quasi-random seeds. # We loop over the entire file size and touch # each byte at least once. for {set tn 0} {$tn<$fsize} {incr tn 1} { # setup for test db close forcecopy test.bu test.db sqlite3 db test.db # Seek to a random location in the file, and write a random single byte # value. Then do various operations on the file to make sure that # the database engine can handle the corruption gracefully. # set last 0 |
︙ | ︙ |
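Beyond the fixed-seed regressions above, corruptC keeps a pristine copy of the database in test.bu and then walks the whole file, damaging one byte at a time and checking that every statement either succeeds or fails cleanly with a "database disk image is malformed" style error. Roughly, the pattern is the following (a sketch of the idea, not the exact loop in the file):

    # Sketch of the corruptC fuzzing pattern: restore a pristine copy,
    # damage one byte, then make sure SQLite fails gracefully.
    for {set i 0} {$i < $fsize} {incr i} {
      db close
      forcecopy test.bu test.db
      hexio_write test.db $i [format %02x [expr {int(rand()*256)}]]
      sqlite3 db test.db
      catchsql { PRAGMA integrity_check }
    }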
Changes to test/corruptD.test.
︙ | ︙ | |||
81 82 83 84 85 86 87 | } execsql { DELETE FROM t1 WHERE a = 10; DELETE FROM t1 WHERE a = 20; DELETE FROM t1 WHERE a = 30; DELETE FROM t1 WHERE a = 40; } | | | | 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 | } execsql { DELETE FROM t1 WHERE a = 10; DELETE FROM t1 WHERE a = 20; DELETE FROM t1 WHERE a = 30; DELETE FROM t1 WHERE a = 40; } forcecopy test.db test.bu } {} proc incr_change_counter {} { hexio_write test.db 24 [ hexio_render_int32 [expr [hexio_get_int [hexio_read test.db 24 4]] + 1] ] } proc restore_file {} { db close forcecopy test.bu test.db sqlite3 db test.db } #------------------------------------------------------------------------- # The following tests, corruptD-1.1.*, focus on the page header field # containing the offset of the first free block in a page. # |
︙ | ︙ |
Changes to test/corruptE.test.
︙ | ︙ | |||
12 13 14 15 16 17 18 | # # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. It specifcally # focuses on rowid order corruption. # # $Id: corruptE.test,v 1.14 2009/07/11 06:55:34 danielk1977 Exp $ | | | 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 | # # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. It specifcally # focuses on rowid order corruption. # # $Id: corruptE.test,v 1.14 2009/07/11 06:55:34 danielk1977 Exp $ catch {forcedelete test.db test.db-journal test.bu} set testdir [file dirname $argv0] source $testdir/tester.tcl # Do not use a codec for tests in this file, as the database file is # manipulated directly using tcl scripts (using the [hexio_write] command). # |
︙ | ︙ | |||
56 57 58 59 60 61 62 | } } {} ifcapable {integrityck} { integrity_check corruptE-1.2 } | < < < < < < | | | | | | 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 | } } {} ifcapable {integrityck} { integrity_check corruptE-1.2 } # Setup for the tests. Make a backup copy of the good database in test.bu. # db close forcecopy test.db test.bu sqlite3 db test.db set fsize [file size test.db] do_test corruptE-2.1 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 2041 [format %02x 0x2e] sqlite3 db test.db set res [ catchsql {PRAGMA integrity_check} ] set ans [lindex $res 1] list [regexp {out of order.*previous was} $ans] \ [regexp {out of order.*max larger than parent max} $ans] } {1 1} do_test corruptE-2.2 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 2047 [format %02x 0x84] sqlite3 db test.db set res [ catchsql {PRAGMA integrity_check} ] set ans [lindex $res 1] list [regexp {out of order.*previous was} $ans] \ [regexp {out of order.*min less than parent min} $ans] } {1 1} do_test corruptE-2.3 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 7420 [format %02x 0xa8] hexio_write test.db 10459 [format %02x 0x8d] sqlite3 db test.db set res [ catchsql {PRAGMA integrity_check} ] set ans [lindex $res 1] list [regexp {out of order.*max larger than parent min} $ans] } {1} do_test corruptE-2.4 { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db 10233 [format %02x 0xd0] sqlite3 db test.db set res [ catchsql {PRAGMA integrity_check} ] |
︙ | ︙ | |||
161 162 163 164 165 166 167 | {12297 0xd7} \ {13303 0x53} ] set tc 1 foreach test $tests { do_test corruptE-3.$tc { db close | | | 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 | {12297 0xd7} \ {13303 0x53} ] set tc 1 foreach test $tests { do_test corruptE-3.$tc { db close forcecopy test.bu test.db # insert corrupt byte(s) hexio_write test.db [lindex $test 0] [format %02x [lindex $test 1]] sqlite3 db test.db set res [ catchsql {PRAGMA integrity_check} ] |
︙ | ︙ |
Changes to test/crash.test.
︙ | ︙ | |||
211 212 213 214 215 216 217 | # crash-4.2.*: Test recovery when crash occurs during sync() of an # attached database journal file. # crash-4.3.*: Test recovery when crash occurs during sync() of the master # journal file. # ifcapable attach { do_test crash-4.0 { | | | | 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 | # crash-4.2.*: Test recovery when crash occurs during sync() of an # attached database journal file. # crash-4.3.*: Test recovery when crash occurs during sync() of the master # journal file. # ifcapable attach { do_test crash-4.0 { forcedelete test2.db forcedelete test2.db-journal execsql { ATTACH 'test2.db' AS aux; PRAGMA aux.default_cache_size = 10; CREATE TABLE aux.abc2 AS SELECT 2*a as a, 2*b as b, 2*c as c FROM abc; } expr ([file size test2.db] / 1024) > 450 } {1} |
︙ | ︙ | |||
314 315 316 317 318 319 320 | #-------------------------------------------------------------------------- # The following test cases - crash-5.* - exposes a bug that existed in the # sqlite3pager_movepage() API used by auto-vacuum databases. # database when a crash occurs during a multi-file transaction. See comments # in test crash-5.3 for details. # db close | | | 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 | #-------------------------------------------------------------------------- # The following test cases - crash-5.* - exposes a bug that existed in the # sqlite3pager_movepage() API used by auto-vacuum databases. # database when a crash occurs during a multi-file transaction. See comments # in test crash-5.3 for details. # db close forcedelete test.db sqlite3 db test.db do_test crash-5.1 { execsql { CREATE TABLE abc(a, b, c); -- Root page 3 INSERT INTO abc VALUES(randstr(1500,1500), 0, 0); -- Overflow page 4 INSERT INTO abc SELECT * FROM abc; INSERT INTO abc SELECT * FROM abc; |
︙ | ︙ |
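These tests drive the crashsql harness, which runs the supplied SQL in a child process and simulates a power failure while the named file is being synced; -delay appears to select which sync triggers the simulated crash and -file which file's sync is watched, with -seed and -tclbody variants used elsewhere in the file. A hedged usage sketch (the test name is a placeholder):

    # Sketch: simulate a crash during the second sync of the journal file,
    # then reopen and verify that hot-journal rollback left the db intact.
    crashsql -delay 2 -file test.db-journal {
      BEGIN;
      INSERT INTO abc VALUES(1, 2, 3);
      COMMIT;
    }
    sqlite3 db test.db
    integrity_check crash-x.1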
Changes to test/crash3.test.
︙ | ︙ | |||
69 70 71 72 73 74 75 | {UPDATE abc SET a = 2} {2 2 3} \ {INSERT INTO abc VALUES(4, 5, randstr(1000,1000))} {n/a} \ {CREATE TABLE def(d, e, f)} {n/a} \ ] { for {set ii 0} {$ii < 10} {incr ii} { db close | | | 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 | {UPDATE abc SET a = 2} {2 2 3} \ {INSERT INTO abc VALUES(4, 5, randstr(1000,1000))} {n/a} \ {CREATE TABLE def(d, e, f)} {n/a} \ ] { for {set ii 0} {$ii < 10} {incr ii} { db close forcedelete test.db test.db-journal sqlite3 db test.db do_test crash3-1.$tn.1 { execsql { PRAGMA page_size = 1024; BEGIN; CREATE TABLE abc(a, b, c); INSERT INTO abc VALUES(1, 2, 3); |
︙ | ︙ | |||
102 103 104 105 106 107 108 | incr tn } } # This block tests both the IOCAP_SEQUENTIAL and IOCAP_SAFE_APPEND flags. # db close | | | 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 | incr tn } } # This block tests both the IOCAP_SEQUENTIAL and IOCAP_SAFE_APPEND flags. # db close forcedelete test.db test.db-journal sqlite3 db test.db do_test crash3-2.0 { execsql { BEGIN; CREATE TABLE abc(a PRIMARY KEY, b, c); CREATE TABLE def(d PRIMARY KEY, e, f); PRAGMA default_cache_size = 10; |
︙ | ︙ | |||
173 174 175 176 177 178 179 | # IOCAP_SEQUENTIAL. At one point, if both flags were set, small # journal files that contained only a single page, but were required # for some other reason (i.e. nTrunk) were not being written to # disk. # for {set ii 0} {$ii < 10} {incr ii} { db close | | | 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 | # IOCAP_SEQUENTIAL. At one point, if both flags were set, small # journal files that contained only a single page, but were required # for some other reason (i.e. nTrunk) were not being written to # disk. # for {set ii 0} {$ii < 10} {incr ii} { db close forcedelete test.db test.db-journal crashsql -file test.db -char {sequential atomic} { CREATE TABLE abc(a, b, c); } sqlite3 db test.db do_test crash3-3.$ii { execsql {PRAGMA integrity_check} } {ok} } finish_test |
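crash3 exercises the simulated device-characteristic flags: passing -char {sequential atomic} to crashsql makes the test VFS report SQLITE_IOCAP_SEQUENTIAL and SQLITE_IOCAP_ATOMIC, the combination that once caused small but still-required journal files to be skipped. A short example in the same idiom as the loop above (the test name is a placeholder):

    # Sketch: crash while writing to a "sequential + atomic" device and
    # confirm the database still passes an integrity check afterwards.
    crashsql -file test.db -char {sequential atomic} {
      CREATE TABLE abc(a, b, c);
    }
    sqlite3 db test.db
    do_test crash3-x.1 { execsql {PRAGMA integrity_check} } {ok}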
Changes to test/crash4.test.
︙ | ︙ | |||
59 60 61 62 63 64 65 | # # Slowly increase the delay before the crash, repeating the test # over and over. Stop testing when the entire sequence of SQL # statements runs to completing without hitting the crash. # for {set cnt 1; set fin 0} {!$fin} {incr cnt} { db close | | | 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 | # # Slowly increase the delay before the crash, repeating the test # over and over. Stop testing when the entire sequence of SQL # statements runs to completing without hitting the crash. # for {set cnt 1; set fin 0} {!$fin} {incr cnt} { db close forcedelete test.db test.db-journal do_test crash4-1.$cnt.1 { set seed [expr {int(abs(rand()*10000))}] set delay [expr {int($cnt/50)+1}] set file [expr {($cnt&1)?"test.db":"test.db-journal"}] set c [crashsql -delay $delay -file $file -seed $seed -tclbody { db eval {CREATE TABLE a(id INTEGER, name CHAR(50))} db eval {INSERT INTO a(id,name) VALUES(1,'one')} |
︙ | ︙ |
Changes to test/crash5.test.
︙ | ︙ | |||
30 31 32 33 34 35 36 | for {set ii 0} {$ii < 10} {incr ii} { for {set jj 50} {$jj < 100} {incr jj} { # Set up the database so that it is an auto-vacuum database # containing a single table (root page 3) with a single row. # The row has an overflow page (page 4). | | | 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | for {set ii 0} {$ii < 10} {incr ii} { for {set jj 50} {$jj < 100} {incr jj} { # Set up the database so that it is an auto-vacuum database # containing a single table (root page 3) with a single row. # The row has an overflow page (page 4). forcedelete test.db test.db-journal sqlite3 db test.db set c [string repeat 3 1500] db eval { pragma auto_vacuum = 1; CREATE TABLE t1(a, b, c); INSERT INTO t1 VALUES('1111111111', '2222222222', $c); } |
︙ | ︙ |
Changes to test/crash6.test.
︙ | ︙ | |||
19 20 21 22 23 24 25 | ifcapable !crashtest { finish_test return } for {set ii 0} {$ii < 10} {incr ii} { catch {db close} | | | | 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 | ifcapable !crashtest { finish_test return } for {set ii 0} {$ii < 10} {incr ii} { catch {db close} forcedelete test.db test.db-journal crashsql -delay 2 -file test.db { PRAGMA auto_vacuum=OFF; PRAGMA page_size=4096; BEGIN; CREATE TABLE abc AS SELECT 1 AS a, 2 AS b, 3 AS c; COMMIT; BEGIN; CREATE TABLE def AS SELECT 1 AS d, 2 AS e, 3 AS f; COMMIT; } sqlite3 db test.db integrity_check crash6-1.$ii } for {set ii 0} {$ii < 10} {incr ii} { catch {db close} forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA auto_vacuum=OFF; PRAGMA page_size=2048; BEGIN; CREATE TABLE abc AS SELECT 1 AS a, 2 AS b, 3 AS c; COMMIT; |
︙ | ︙ | |||
62 63 64 65 66 67 68 | } # Test case for crashing during database sync with page-size values # from 1024 to 8192. # for {set ii 0} {$ii < 30} {incr ii} { db close | | | 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 | } # Test case for crashing during database sync with page-size values # from 1024 to 8192. # for {set ii 0} {$ii < 30} {incr ii} { db close forcedelete test.db sqlite3 db test.db set pagesize [expr 1024 << ($ii % 4)] if {$pagesize>$::SQLITE_MAX_PAGE_SIZE} { set pagesize $::SQLITE_MAX_PAGE_SIZE } do_test crash6-3.$ii.0 { |
︙ | ︙ |
Changes to test/crash7.test.
︙ | ︙ | |||
22 23 24 25 26 27 28 | proc signature {} { return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc}] } foreach f [list test.db test.db-journal] { for {set ii 1} {$ii < 64} {incr ii} { db close | | | 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 | proc signature {} { return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc}] } foreach f [list test.db test.db-journal] { for {set ii 1} {$ii < 64} {incr ii} { db close delete_file test.db sqlite3 db test.db set from_size [expr 1024 << ($ii&3)] set to_size [expr 1024 << (($ii>>2)&3)] execsql " PRAGMA page_size = $from_size; |
︙ | ︙ |
Changes to test/crash8.test.
︙ | ︙ | |||
239 240 241 242 243 244 245 | # # This block of tests test that SQLite correctly truncates such # journal files, and that the results behave correctly if a hot-journal # rollback occurs. # ifcapable pragma { reset_db | | | 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 | # # This block of tests test that SQLite correctly truncates such # journal files, and that the results behave correctly if a hot-journal # rollback occurs. # ifcapable pragma { reset_db forcedelete test2.db do_test crash8-4.1 { execsql { PRAGMA journal_mode = persist; CREATE TABLE ab(a, b); INSERT INTO ab VALUES(0, 'abc'); INSERT INTO ab VALUES(1, NULL); |
︙ | ︙ | |||
332 333 334 335 336 337 338 | } {1} do_test crash8-4.9 { execsql { SELECT b FROM aux.ab WHERE a = 0 } } {def} do_test crash8-4.10 { | | | | 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 | } {1} do_test crash8-4.9 { execsql { SELECT b FROM aux.ab WHERE a = 0 } } {def} do_test crash8-4.10 { delete_file $zMasterJournal execsql { SELECT b FROM main.ab WHERE a = 0 } } {jkl} } for {set i 1} {$i < 10} {incr i} { catch { db close } forcedelete test.db test.db-journal sqlite3 db test.db do_test crash8-5.$i.1 { execsql { CREATE TABLE t1(x PRIMARY KEY); INSERT INTO t1 VALUES(randomblob(900)); INSERT INTO t1 SELECT randomblob(900) FROM t1; INSERT INTO t1 SELECT randomblob(900) FROM t1; |
︙ | ︙ | |||
363 364 365 366 367 368 369 | ROLLBACK; INSERT INTO t1 VALUES(randomblob(900)); } execsql { PRAGMA integrity_check } } {ok} catch { db close } | | | | | | 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 | ROLLBACK; INSERT INTO t1 VALUES(randomblob(900)); } execsql { PRAGMA integrity_check } } {ok} catch { db close } forcedelete test.db test.db-journal sqlite3 db test.db do_test crash8-5.$i.2 { execsql { PRAGMA cache_size = 10; CREATE TABLE t1(x PRIMARY KEY); INSERT INTO t1 VALUES(randomblob(900)); INSERT INTO t1 SELECT randomblob(900) FROM t1; INSERT INTO t1 SELECT randomblob(900) FROM t1; INSERT INTO t1 SELECT randomblob(900) FROM t1; INSERT INTO t1 SELECT randomblob(900) FROM t1; INSERT INTO t1 SELECT randomblob(900) FROM t1; INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 64 rows */ BEGIN; UPDATE t1 SET x = randomblob(900); } forcedelete testX.db testX.db-journal testX.db-wal forcecopy test.db testX.db forcecopy test.db-journal testX.db-journal db close crashsql -file test.db -delay [expr ($::i%2) + 1] { SELECT * FROM sqlite_master; INSERT INTO t1 VALUES(randomblob(900)); } |
︙ | ︙ |
Changes to test/createtab.test.
︙ | ︙ | |||
23 24 25 26 27 28 29 | set upperBound 0 } # Run these tests for all possible values of autovacuum. # for {set av 0} {$av<=$upperBound} {incr av} { db close | | | 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 | set upperBound 0 } # Run these tests for all possible values of autovacuum. # for {set av 0} {$av<=$upperBound} {incr av} { db close forcedelete test.db test.db-journal sqlite3 db test.db # Create a table that spans multiple pages. It is important # that part of the database be in pages beyond the root page. # do_test createtab-$av.1 { execsql "PRAGMA auto_vacuum=$av" |
︙ | ︙ |
Changes to test/ctime.test.
︙ | ︙ | |||
218 219 220 221 222 223 224 225 226 227 | # test 1 before array bounds (N=-1) do_test ctime-2.5.$tc { set N -1 set ans [ catchsql { SELECT sqlite_compileoption_get($N); } ] } {0 {{}}} finish_test | > > > > > > > > > > > | 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 | # test 1 before array bounds (N=-1) do_test ctime-2.5.$tc { set N -1 set ans [ catchsql { SELECT sqlite_compileoption_get($N); } ] } {0 {{}}} ifcapable blockalloc { do_test ctime-3.1a { db eval {SELECT sqlite_compileoption_used('PAGECACHE_BLOCKALLOC')} } {1} } else { do_test ctime-3.1b { db eval {SELECT sqlite_compileoption_used('PAGECACHE_BLOCKALLOC')} } {0} } finish_test |
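The new ctime-3.1 cases check at runtime whether the library was built with SQLITE_ENABLE_... style options, here PAGECACHE_BLOCKALLOC, using the sqlite_compileoption_used() SQL function (the "SQLITE_" prefix may be omitted). The same check works from any connection, for example:

    # Illustrative: ask the library itself how it was compiled.
    db eval { SELECT sqlite_compileoption_used('PAGECACHE_BLOCKALLOC') }
    db eval { SELECT sqlite_compileoption_get(0) }   ;# first option in the list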
Changes to test/dbstatus.test.
︙ | ︙ | |||
151 152 153 154 155 156 157 | CREATE VIRTUAL TABLE t2 USING echo(t1); } } { set tn "$::lookaside_buffer_size-$tn" # Step 1. db close | | | 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 | CREATE VIRTUAL TABLE t2 USING echo(t1); } } { set tn "$::lookaside_buffer_size-$tn" # Step 1. db close forcedelete test.db sqlite3 db test.db sqlite3_db_config_lookaside db 0 $::lookaside_buffer_size 500 db cache size 0 catch { register_echo_module db } ifcapable !vtab { if {[string match *x $tn]} continue } |
︙ | ︙ | |||
283 284 285 286 287 288 289 | SELECT * FROM t2 WHERE b='abcdefg'; } } { set tn "$::lookaside_buffer_size-$tn" # Step 1. db close | | | 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 | SELECT * FROM t2 WHERE b='abcdefg'; } } { set tn "$::lookaside_buffer_size-$tn" # Step 1. db close forcedelete test.db sqlite3 db test.db sqlite3_db_config_lookaside db 0 $::lookaside_buffer_size 500 db cache size 1000 catch { register_echo_module db } ifcapable !vtab { if {[string match *x $tn]} continue } |
︙ | ︙ |
Changes to test/delete.test.
︙ | ︙ | |||
271 272 273 274 275 276 277 | execsql { PRAGMA count_changes=OFF; INSERT INTO t3 VALUES(123); SELECT * FROM t3; } } {123} db close | | | 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 | execsql { PRAGMA count_changes=OFF; INSERT INTO t3 VALUES(123); SELECT * FROM t3; } } {123} db close catch {forcedelete test.db-journal} catch {file attributes test.db -permissions 0444} catch {file attributes test.db -readonly 1} sqlite3 db test.db set ::DB [sqlite3_connection_pointer db] do_test delete-8.1 { catchsql { DELETE FROM t3; |
︙ | ︙ | |||
308 309 310 311 312 313 314 | } {1 {attempt to write a readonly database}} do_test delete-8.6 { execsql {SELECT * FROM t3} } {123} integrity_check delete-8.7 # Need to do the following for tcl 8.5 on mac. On that configuration, the | | | | 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 | } {1 {attempt to write a readonly database}} do_test delete-8.6 { execsql {SELECT * FROM t3} } {123} integrity_check delete-8.7 # Need to do the following for tcl 8.5 on mac. On that configuration, the # -readonly flag is taken so seriously that a subsequent [forcedelete] # (required before the next test file can be executed) will fail. # catch {file attributes test.db -readonly 0} db close forcedelete test.db test.db-journal # The following tests verify that SQLite correctly handles the case # where an index B-Tree is being scanned, the rowid column being read # from each index entry and another statement deletes some rows from # the index B-Tree. At one point this (obscure) scenario was causing # SQLite to return spurious SQLITE_CORRUPT errors and arguably incorrect # query results. |
︙ | ︙ |
Changes to test/descidx1.test.
︙ | ︙ | |||
293 294 295 296 297 298 299 | # Test the legacy_file_format pragma here because we have access to # the get_file_format command. # ifcapable legacyformat { do_test descidx1-6.1 { db close | | | | 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 | # Test the legacy_file_format pragma here because we have access to # the get_file_format command. # ifcapable legacyformat { do_test descidx1-6.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql {PRAGMA legacy_file_format} } {1} } else { do_test descidx1-6.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql {PRAGMA legacy_file_format} } {0} } do_test descidx1-6.2 { execsql {PRAGMA legacy_file_format=YES} execsql {PRAGMA legacy_file_format} |
︙ | ︙ | |||
324 325 326 327 328 329 330 | do_test descidx1-6.3.1 { execsql {VACUUM} get_file_format } {1} } do_test descidx1-6.4 { db close | | | 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 | do_test descidx1-6.3.1 { execsql {VACUUM} get_file_format } {1} } do_test descidx1-6.4 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql {PRAGMA legacy_file_format=NO} execsql {PRAGMA legacy_file_format} } {0} do_test descidx1-6.5 { execsql { CREATE TABLE t1(a,b,c); |
︙ | ︙ |
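descidx1 flips PRAGMA legacy_file_format and then inspects the result with the get_file_format helper. The number being checked corresponds to the schema format stored as a 4-byte big-endian integer at offset 44 of the file header: format 1 is the legacy layout readable by all SQLite versions, while descending indexes require format 4. Assuming the hexio helpers, the value can also be read directly:

    # Illustrative: read the schema format number (header offset 44, 4 bytes).
    # 1 = legacy format, 4 = format required for DESC indexes.
    hexio_read test.db 44 4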
Changes to test/diskfull.test.
︙ | ︙ | |||
78 79 80 81 82 83 84 | integrity_check ${prefix}.$::i.2 } } do_diskfull_test diskfull-2 VACUUM # db close | | | | 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 | integrity_check ${prefix}.$::i.2 } } do_diskfull_test diskfull-2 VACUUM # db close # forcedelete test.db # forcedelete test.db-journal # sqlite3 db test.db # # do_test diskfull-3.1 { # execsql { # PRAGMA default_cache_size = 10; # CREATE TABLE t3(a, b, UNIQUE(a, b)); # INSERT INTO t3 VALUES( randstr(100, 100), randstr(100, 100) ); |
︙ | ︙ |
Changes to test/e_expr.test.
︙ | ︙ | |||
653 654 655 656 657 658 659 | do_execsql_test e_expr-12.2.6 {SELECT CURRENT_TIME} {00:00:01} do_execsql_test e_expr-12.2.7 {SELECT CURRENT_DATE} {1970-01-01} do_execsql_test e_expr-12.2.8 {SELECT CURRENT_TIMESTAMP} {{1970-01-01 00:00:01}} set sqlite_current_time 0 # EVIDENCE-OF: R-57598-59332 -- syntax diagram expr # | | | 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 | do_execsql_test e_expr-12.2.6 {SELECT CURRENT_TIME} {00:00:01} do_execsql_test e_expr-12.2.7 {SELECT CURRENT_DATE} {1970-01-01} do_execsql_test e_expr-12.2.8 {SELECT CURRENT_TIMESTAMP} {{1970-01-01 00:00:01}} set sqlite_current_time 0 # EVIDENCE-OF: R-57598-59332 -- syntax diagram expr # forcedelete test.db2 execsql { ATTACH 'test.db2' AS dbname; CREATE TABLE dbname.tblname(cname); } proc glob {args} {return 1} db function glob glob |
︙ | ︙ | |||
1671 1672 1673 1674 1675 1676 1677 | db3 close } #------------------------------------------------------------------------- # Test statements related to the EXISTS and NOT EXISTS operators. # catch { db close } | | | 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 | db3 close } #------------------------------------------------------------------------- # Test statements related to the EXISTS and NOT EXISTS operators. # catch { db close } forcedelete test.db sqlite3 db test.db do_execsql_test e_expr-34.1 { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(NULL, 2); INSERT INTO t1 VALUES(1, NULL); |
︙ | ︙ | |||
1752 1753 1754 1755 1756 1757 1758 | } #------------------------------------------------------------------------- # Test statements related to scalar sub-queries. # catch { db close } | | | 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 | } #------------------------------------------------------------------------- # Test statements related to scalar sub-queries. # catch { db close } forcedelete test.db sqlite3 db test.db do_test e_expr-35.0 { execsql { CREATE TABLE t2(a, b); INSERT INTO t2 VALUES('one', 'two'); INSERT INTO t2 VALUES('three', NULL); INSERT INTO t2 VALUES(4, 5.0); |
︙ | ︙ |
Changes to test/e_fts3.test.
︙ | ︙ | |||
56 57 58 59 60 61 62 | #if {$DO_MALLOC_TEST} break # Reset the database and database connection. If this iteration of the # [foreach] loop is testing with OOM errors, disable the lookaside buffer. # db close | | | 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 | #if {$DO_MALLOC_TEST} break # Reset the database and database connection. If this iteration of the # [foreach] loop is testing with OOM errors, disable the lookaside buffer. # db close forcedelete test.db test.db-journal sqlite3 db test.db if {$DO_MALLOC_TEST} { sqlite3_db_config_lookaside db 0 0 0 } db eval "PRAGMA encoding = '$enc'" proc mit {blob} { set scan(littleEndian) i* set scan(bigEndian) I* |
︙ | ︙ | |||
633 634 635 636 637 638 639 | #------------------------------------------------------------------------- # Test that FTS3 tables can be renamed using the ALTER RENAME command. # OOM errors are tested during ALTER RENAME commands also. # foreach DO_MALLOC_TEST {0 1 2} { db close | | | 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 | #------------------------------------------------------------------------- # Test that FTS3 tables can be renamed using the ALTER RENAME command. # OOM errors are tested during ALTER RENAME commands also. # foreach DO_MALLOC_TEST {0 1 2} { db close forcedelete test.db test.db-journal sqlite3 db test.db if {$DO_MALLOC_TEST} { sqlite3_db_config_lookaside db 0 0 0 } ddl_test 9.1.1 { CREATE VIRTUAL TABLE t10 USING fts3(x) } write_test 9.1.2 t10_content { INSERT INTO t10 VALUES('fts3 tables') } write_test 9.1.3 t10_content { INSERT INTO t10 VALUES('are renameable') } |
︙ | ︙ |
Changes to test/enc2.test.
︙ | ︙ | |||
138 139 140 141 142 143 144 | # The three unicode encodings understood by SQLite. set encodings [list UTF-8 UTF-16le UTF-16be] set sqlite_os_trace 0 set i 1 foreach enc $encodings { | | | 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 | # The three unicode encodings understood by SQLite. set encodings [list UTF-8 UTF-16le UTF-16be] set sqlite_os_trace 0 set i 1 foreach enc $encodings { forcedelete test.db sqlite3 db test.db db eval "PRAGMA encoding = \"$enc\"" execsql $dbcontents do_test enc2-$i.0.1 { db eval {PRAGMA encoding} } $enc do_test enc2-$i.0.2 { |
︙ | ︙ | |||
168 169 170 171 172 173 174 | incr i } # Test that it is an error to try to attach a database with a different # encoding to the main database. ifcapable attach { do_test enc2-4.1 { | | | | 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 | incr i } # Test that it is an error to try to attach a database with a different # encoding to the main database. ifcapable attach { do_test enc2-4.1 { forcedelete test.db sqlite3 db test.db db eval "PRAGMA encoding = 'UTF-8'" db eval "CREATE TABLE abc(a, b, c);" } {} do_test enc2-4.2 { forcedelete test2.db sqlite3 db2 test2.db db2 eval "PRAGMA encoding = 'UTF-16'" db2 eval "CREATE TABLE abc(a, b, c);" } {} do_test enc2-4.3 { catchsql { ATTACH 'test2.db' as aux; |
︙ | ︙ | |||
202 203 204 205 206 207 208 | set l [lsearch -exact $::values $lhs] set r [lsearch -exact $::values $rhs] set res [expr $l - $r] # puts "enc=$enc lhs=$lhs/$l rhs=$rhs/$r res=$res" return $res } | | | 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 | set l [lsearch -exact $::values $lhs] set r [lsearch -exact $::values $rhs] set res [expr $l - $r] # puts "enc=$enc lhs=$lhs/$l rhs=$rhs/$r res=$res" return $res } forcedelete test.db sqlite3 db test.db; set DB [sqlite3_connection_pointer db] do_test enc2-5.0 { execsql { CREATE TABLE t5(a); INSERT INTO t5 VALUES('one'); INSERT INTO t5 VALUES('two'); INSERT INTO t5 VALUES('five'); |
︙ | ︙ | |||
231 232 233 234 235 236 237 | do_test enc2-5.3 { add_test_collate $DB 0 0 1 set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] lappend res $::test_collate_enc } {one two three four five UTF-16BE} db close | | | 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 | do_test enc2-5.3 { add_test_collate $DB 0 0 1 set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] lappend res $::test_collate_enc } {one two three four five UTF-16BE} db close forcedelete test.db sqlite3 db test.db; set DB [sqlite3_connection_pointer db] execsql {pragma encoding = 'UTF-16LE'} do_test enc2-5.4 { execsql { CREATE TABLE t5(a); INSERT INTO t5 VALUES('one'); INSERT INTO t5 VALUES('two'); |
︙ | ︙ | |||
261 262 263 264 265 266 267 | do_test enc2-5.7 { add_test_collate $DB 1 0 0 set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] lappend res $::test_collate_enc } {one two three four five UTF-8} db close | | | 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 | do_test enc2-5.7 { add_test_collate $DB 1 0 0 set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] lappend res $::test_collate_enc } {one two three four five UTF-8} db close forcedelete test.db sqlite3 db test.db; set DB [sqlite3_connection_pointer db] execsql {pragma encoding = 'UTF-16BE'} do_test enc2-5.8 { execsql { CREATE TABLE t5(a); INSERT INTO t5 VALUES('one'); INSERT INTO t5 VALUES('two'); |
︙ | ︙ | |||
307 308 309 310 311 312 313 | lappend res $::test_collate_enc } {one two three four five UTF-16BE} do_test enc2-5.14 { set ::sqlite_last_needed_collation } test_collate db close | | | 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 | lappend res $::test_collate_enc } {one two three four five UTF-16BE} do_test enc2-5.14 { set ::sqlite_last_needed_collation } test_collate db close forcedelete test.db do_test enc2-5.15 { sqlite3 db test.db; set ::DB [sqlite3_connection_pointer db] add_test_collate_needed $::DB set ::sqlite_last_needed_collation } {} do_test enc2-5.16 { |
︙ | ︙ | |||
329 330 331 332 333 334 335 | # user function when more than one is available. proc test_function {enc arg} { return "$enc $arg" } db close | | | 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 | # user function when more than one is available. proc test_function {enc arg} { return "$enc $arg" } db close forcedelete test.db sqlite3 db test.db; set DB [sqlite3_connection_pointer db] execsql {pragma encoding = 'UTF-8'} do_test enc2-6.0 { execsql { CREATE TABLE t5(a); INSERT INTO t5 VALUES('one'); } |
︙ | ︙ | |||
362 363 364 365 366 367 368 | add_test_function $DB 0 0 1 execsql { SELECT test_function('sqlite') } } {{UTF-16BE sqlite}} db close | | | 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 | add_test_function $DB 0 0 1 execsql { SELECT test_function('sqlite') } } {{UTF-16BE sqlite}} db close forcedelete test.db sqlite3 db test.db; set DB [sqlite3_connection_pointer db] execsql {pragma encoding = 'UTF-16LE'} do_test enc2-6.3 { execsql { CREATE TABLE t5(a); INSERT INTO t5 VALUES('sqlite'); } |
︙ | ︙ | |||
395 396 397 398 399 400 401 | add_test_function $DB 0 0 1 execsql { SELECT test_function('sqlite') } } {{UTF-16BE sqlite}} db close | | | 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 | add_test_function $DB 0 0 1 execsql { SELECT test_function('sqlite') } } {{UTF-16BE sqlite}} db close forcedelete test.db sqlite3 db test.db; set DB [sqlite3_connection_pointer db] execsql {pragma encoding = 'UTF-16BE'} do_test enc2-6.7 { execsql { CREATE TABLE t5(a); INSERT INTO t5 VALUES('sqlite'); } |
︙ | ︙ | |||
429 430 431 432 433 434 435 | execsql { SELECT test_function('sqlite') } } {{UTF-16BE sqlite}} db close | | | 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 | execsql { SELECT test_function('sqlite') } } {{UTF-16BE sqlite}} db close forcedelete test.db # The following tests - enc2-7.* - function as follows: # # 1: Open an empty database file assuming UTF-16 encoding. # 2: Open the same database with a different handle assuming UTF-8. Create # a table using this handle. # 3: Read the sqlite_master table from the first handle. |
︙ | ︙ | |||
489 490 491 492 493 494 495 | do_test enc2-8.2 { sqlite3_complete16 [utf16 "SELECT * FROM"] } {0} } # Test that the encoding of an empty database may still be set after the # (empty) schema has been initialized. | | | 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 | do_test enc2-8.2 { sqlite3_complete16 [utf16 "SELECT * FROM"] } {0} } # Test that the encoding of an empty database may still be set after the # (empty) schema has been initialized. forcedelete test.db do_test enc2-9.1 { sqlite3 db test.db execsql { PRAGMA encoding = 'UTF-8'; PRAGMA encoding; } } {UTF-8} |
︙ | ︙ | |||
533 534 535 536 537 538 539 | } {UTF-16le} # Ticket #1987. # Disallow encoding changes once the encoding has been set. # do_test enc2-10.1 { db close | | | 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 | } {UTF-16le} # Ticket #1987. # Disallow encoding changes once the encoding has been set. # do_test enc2-10.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db db eval { PRAGMA encoding=UTF16; CREATE TABLE t1(a); PRAGMA encoding=UTF8; CREATE TABLE t2(b); } |
︙ | ︙ |
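The enc2 tests repeat the same schema under each of the three encodings SQLite understands: UTF-8, UTF-16le and UTF-16be. The encoding must be chosen with PRAGMA encoding before the first table is created, after which it is fixed for the life of the database (and recorded at header offset 56 as 1, 2 or 3 respectively). A minimal sketch of the idiom used throughout the file:

    # Illustrative: the encoding pragma only takes effect on an empty database.
    forcedelete test.db
    sqlite3 db test.db
    db eval { PRAGMA encoding = 'UTF-16le' }
    db eval { CREATE TABLE t(x) }
    db eval { PRAGMA encoding }    ;# reports UTF-16le from now on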
Changes to test/enc3.test.
︙ | ︙ | |||
77 78 79 80 81 82 83 | } {1} } # Try to attach a database with a different encoding. # ifcapable {utf16 && shared_cache} { db close | | | | 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 | } {1} } # Try to attach a database with a different encoding. # ifcapable {utf16 && shared_cache} { db close forcedelete test8.db test8.db-journal set ::enable_shared_cache [sqlite3_enable_shared_cache 1] sqlite3 dbaux test8.db sqlite3 db test.db db eval {SELECT 1 FROM sqlite_master LIMIT 1} do_test enc3-3.1 { dbaux eval { PRAGMA encoding='utf8'; CREATE TABLE t1(x); PRAGMA encoding } } {UTF-8} do_test enc3-3.2 { catchsql { ATTACH 'test.db' AS utf16; SELECT 1 FROM utf16.sqlite_master LIMIT 1; } dbaux } {1 {attached databases must use the same text encoding as main database}} dbaux close forcedelete test8.db test8.db-journal sqlite3_enable_shared_cache $::enable_shared_cache } finish_test |
Changes to test/enc4.test.
︙ | ︙ | |||
39 40 41 42 43 44 45 | "100000000000000000000000000000000000000000000000000000000"\ "1.0000000000000000000000000000000000000000000000000000000"\ ] set i 1 foreach enc $encodings { | | | 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 | "100000000000000000000000000000000000000000000000000000000"\ "1.0000000000000000000000000000000000000000000000000000000"\ ] set i 1 foreach enc $encodings { forcedelete test.db sqlite3 db test.db db eval "PRAGMA encoding = \"$enc\"" do_test enc4-$i.1 { db eval {PRAGMA encoding} } $enc |
︙ | ︙ | |||
89 90 91 92 93 94 95 | incr j } db close incr i } | | | 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 | incr j } db close incr i } forcedelete test.db sqlite3 db test.db do_test enc4-4.1 { db eval "select 1+1." } {2.0} do_test enc4-4.2.1 { |
︙ | ︙ |
Changes to test/exclusive.test.
︙ | ︙ | |||
18 19 20 21 22 23 24 | source $testdir/tester.tcl ifcapable {!pager_pragmas} { finish_test return } | | | | | | | | 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 | source $testdir/tester.tcl ifcapable {!pager_pragmas} { finish_test return } forcedelete test2.db-journal forcedelete test2.db forcedelete test3.db-journal forcedelete test3.db forcedelete test4.db-journal forcedelete test4.db #---------------------------------------------------------------------- # Test cases exclusive-1.X test the PRAGMA logic. # do_test exclusive-1.0 { execsql { pragma locking_mode; |
︙ | ︙ | |||
473 474 475 476 477 478 479 | BEGIN; INSERT INTO t4 VALUES('Macmillan', 1957); INSERT INTO t4 VALUES('Douglas-Home', 1963); INSERT INTO t4 VALUES('Wilson', 1964); } do_test exclusive-6.2 { forcedelete test2.db test2.db-journal | | | | 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 | BEGIN; INSERT INTO t4 VALUES('Macmillan', 1957); INSERT INTO t4 VALUES('Douglas-Home', 1963); INSERT INTO t4 VALUES('Wilson', 1964); } do_test exclusive-6.2 { forcedelete test2.db test2.db-journal copy_file test.db test2.db copy_file test.db-journal test2.db-journal sqlite3 db test2.db } {} do_execsql_test exclusive-6.3 { PRAGMA locking_mode = EXCLUSIVE; SELECT * FROM t4; } {exclusive Eden 1955} |
︙ | ︙ |
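exclusive.test toggles PRAGMA locking_mode between NORMAL and EXCLUSIVE. In exclusive mode the file lock is retained across transactions, which is why test exclusive-6.2 above copies test.db-journal alongside test.db before reopening. A brief example of the pragma itself (t4 is the table created in the hunk above):

    # Illustrative: switch the connection into exclusive locking mode.  The
    # exclusive lock is only actually taken on the next read or write, and
    # after switching back to NORMAL it is not dropped until the next access.
    db eval { PRAGMA locking_mode = EXCLUSIVE }
    db eval { SELECT * FROM t4 LIMIT 1 }
    db eval { PRAGMA locking_mode = NORMAL }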
Changes to test/exclusive2.test.
︙ | ︙ | |||
255 256 257 258 259 260 261 | # is only incremented by the first change when in exclusive access # mode. In normal mode, the change-counter is incremented once # per write-transaction. # db close catch {close $::fd} | | | | 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 | # is only incremented by the first change when in exclusive access # mode. In normal mode, the change-counter is incremented once # per write-transaction. # db close catch {close $::fd} forcedelete test.db forcedelete test.db-journal do_test exclusive2-3.0 { sqlite3 db test.db execsql { BEGIN; CREATE TABLE t1(a UNIQUE); INSERT INTO t1 VALUES(randstr(200, 200)); |
︙ | ︙ |
Changes to test/fallocate.test.
︙ | ︙ | |||
76 77 78 79 80 81 82 | [permutation]=="journaltest" || [permutation]=="inmemory_journal" }] ifcapable !wal { set skipwaltests 1 } if {![wal_is_ok]} { set skipwaltests 1 } if {!$skipwaltests} { db close | | | 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 | [permutation]=="journaltest" || [permutation]=="inmemory_journal" }] ifcapable !wal { set skipwaltests 1 } if {![wal_is_ok]} { set skipwaltests 1 } if {!$skipwaltests} { db close forcedelete test.db if {[forced_proxy_locking]} { file delete -force .test.db-conch } sqlite3 db test.db file_control_chunksize_test db main [expr 32*1024] do_test fallocate-2.1 { execsql { PRAGMA page_size = 1024; |
︙ | ︙ |
Changes to test/filectrl.test.
︙ | ︙ | |||
33 34 35 36 37 38 39 | } {} do_test filectrl-1.5 { db close sqlite3 db test_control_lockproxy.db file_control_lockproxy_test db [pwd] } {} db close | | | 33 34 35 36 37 38 39 40 41 | } {} do_test filectrl-1.5 { db close sqlite3 db test_control_lockproxy.db file_control_lockproxy_test db [pwd] } {} db close forcedelete .test_control_lockproxy.db-conch test.proxy finish_test |
Changes to test/filefmt.test.
︙ | ︙ | |||
19 20 21 22 23 24 25 | # Do not use a codec for tests in this file, as the database file is # manipulated directly using tcl scripts (using the [hexio_write] command). # do_not_use_codec db close | | | 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 | # Do not use a codec for tests in this file, as the database file is # manipulated directly using tcl scripts (using the [hexio_write] command). # do_not_use_codec db close forcedelete test.db test.db-journal # Database begins with valid 16-byte header string. # do_test filefmt-1.1 { sqlite3 db test.db db eval {CREATE TABLE t1(x)} db close |
︙ | ︙ | |||
59 60 61 62 63 64 65 | # ifcapable pager_pragmas { foreach pagesize {512 1024 2048 4096 8192 16384 32768} { if {[info exists SQLITE_MAX_PAGE_SIZE] && $pagesize>$SQLITE_MAX_PAGE_SIZE} continue do_test filefmt-1.5.$pagesize.1 { db close | | | 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 | # ifcapable pager_pragmas { foreach pagesize {512 1024 2048 4096 8192 16384 32768} { if {[info exists SQLITE_MAX_PAGE_SIZE] && $pagesize>$SQLITE_MAX_PAGE_SIZE} continue do_test filefmt-1.5.$pagesize.1 { db close forcedelete test.db sqlite3 db test.db db eval "PRAGMA auto_vacuum=OFF" db eval "PRAGMA page_size=$pagesize" db eval {CREATE TABLE t1(x)} file size test.db } [expr $pagesize*2] do_test filefmt-1.5.$pagesize.2 { |
︙ | ︙ | |||
101 102 103 104 105 106 107 | # Usable space per page (page-size minus unused space per page) # must be at least 480 bytes # ifcapable pager_pragmas { do_test filefmt-1.8 { db close | | | | 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 | # Usable space per page (page-size minus unused space per page) # must be at least 480 bytes # ifcapable pager_pragmas { do_test filefmt-1.8 { db close forcedelete test.db sqlite3 db test.db db eval {PRAGMA page_size=512; CREATE TABLE t1(x)} db close hexio_write test.db 20 21 sqlite3 db test.db catchsql { SELECT count(*) FROM sqlite_master } } {1 {file is encrypted or is not a database}} } #------------------------------------------------------------------------- # The following block of tests - filefmt-2.* - test that versions 3.7.0 # and later can read and write databases that have been modified or created # by 3.6.23.1 and earlier. The difference difference is that 3.7.0 stores # the size of the database in the database file header, whereas 3.6.23.1 # always derives this from the size of the file. # db close forcedelete test.db set a_string_counter 1 proc a_string {n} { incr ::a_string_counter string range [string repeat "${::a_string_counter}." $n] 1 $n } sqlite3 db test.db |
︙ | ︙ | |||
153 154 155 156 157 158 159 | } {} do_execsql_test filefmt-2.1.4 { INSERT INTO t2 VALUES(2) } {} integrity_check filefmt-2.1.5 do_test filefmt-2.1.6 { hexio_read test.db 28 4 } {00000010} db close | | | 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 | } {} do_execsql_test filefmt-2.1.4 { INSERT INTO t2 VALUES(2) } {} integrity_check filefmt-2.1.5 do_test filefmt-2.1.6 { hexio_read test.db 28 4 } {00000010} db close forcedelete test.db sqlite3 db test.db db func a_string a_string do_execsql_test filefmt-2.2.1 { PRAGMA page_size = 1024; PRAGMA auto_vacuum = 0; CREATE TABLE t1(a); |
︙ | ︙ |
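The filefmt-2.1.6 check above reads bytes 28..31 of the file header, the field that newer versions keep up to date with the database size in pages, stored big-endian. A small decoding sketch, assuming that header layout (proc name is illustrative):

    # Illustrative only: decode the hex string returned by
    # [hexio_read test.db 28 4] into a page count.
    proc decode_page_count {hex} {
      scan $hex %x n
      return $n
    }
    puts [decode_page_count 00000010]   ;# => 16 pages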
Changes to test/fts1j.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 | # If SQLITE_ENABLE_FTS1 is not defined, omit this file. ifcapable !fts1 { finish_test return } # Clean up anything left over from a previous pass. | | | | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | # If SQLITE_ENABLE_FTS1 is not defined, omit this file. ifcapable !fts1 { finish_test return } # Clean up anything left over from a previous pass. forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db db eval { CREATE VIRTUAL TABLE t3 USING fts1(content); INSERT INTO t3 (rowid, content) VALUES(1, "hello world"); }
︙ | ︙ | |||
80 81 82 83 84 85 86 | DETACH DATABASE two; } db2 } {2} catch {db eval {DETACH DATABASE two}} catch {db2 close} | | | 80 81 82 83 84 85 86 87 88 89 | DETACH DATABASE two; } db2 } {2} catch {db eval {DETACH DATABASE two}} catch {db2 close} forcedelete test2.db finish_test |
Changes to test/fts1o.test.
︙ | ︙ | |||
91 92 93 94 95 96 97 | execsql { SELECT a, b, c FROM fts_t1 WHERE c MATCH 'four'; } } {{one three four} {one four} {one four two}} #--------------------------------------------------------------------- # Test that it is possible to rename an fts1 table in an attached # database. # | | | 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 | execsql { SELECT a, b, c FROM fts_t1 WHERE c MATCH 'four'; } } {{one three four} {one four} {one four two}} #--------------------------------------------------------------------- # Test that it is possible to rename an fts1 table in an attached # database. # forcedelete test2.db test2.db-journal do_test fts1o-4.1 { execsql { DROP TABLE t1_term; ALTER TABLE fts_t1 RENAME to t1; SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } |
︙ | ︙ |
Changes to test/fts2j.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 | # If SQLITE_ENABLE_FTS2 is not defined, omit this file. ifcapable !fts2 { finish_test return } # Clean up anything left over from a previous pass. | | | | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | # If SQLITE_ENABLE_FTS2 is not defined, omit this file. ifcapable !fts2 { finish_test return } # Clean up anything left over from a previous pass. forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db db eval { CREATE VIRTUAL TABLE t3 USING fts2(content); INSERT INTO t3 (rowid, content) VALUES(1, "hello world"); }
︙ | ︙ | |||
80 81 82 83 84 85 86 | DETACH DATABASE two; } db2 } {2} catch {db eval {DETACH DATABASE two}} catch {db2 close} | | | 80 81 82 83 84 85 86 87 88 89 | DETACH DATABASE two; } db2 } {2} catch {db eval {DETACH DATABASE two}} catch {db2 close} forcedelete test2.db finish_test |
Changes to test/fts2o.test.
︙ | ︙ | |||
105 106 107 108 109 110 111 | } {{one three four} {one four} {one four two}} #------------------------------------------------------------------- # Close, delete and reopen the database. The following test should # be run on an initially empty db. # db close | | | 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 | } {{one three four} {one four} {one four two}} #------------------------------------------------------------------- # Close, delete and reopen the database. The following test should # be run on an initially empty db. # db close forcedelete test.db test.db-journal sqlite3 db test.db do_test fts2o-3.1 { execsql { CREATE VIRTUAL TABLE t1 USING fts2(a, b, c); INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one two'); SELECT a, b, c FROM t1 WHERE c MATCH 'two'; |
︙ | ︙ | |||
130 131 132 133 134 135 136 | } } {{one three four} {one four} {one two} {one three four} {one four} {one two}} #--------------------------------------------------------------------- # Test that it is possible to rename an fts2 table in an attached # database. # | | | 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 | } } {{one three four} {one four} {one two} {one three four} {one four} {one two}} #--------------------------------------------------------------------- # Test that it is possible to rename an fts2 table in an attached # database. # forcedelete test2.db test2.db-journal do_test fts2o-3.1 { execsql { ATTACH 'test2.db' AS aux; CREATE VIRTUAL TABLE aux.t1 USING fts2(a, b, c); INSERT INTO aux.t1(a, b, c) VALUES( 'neung song sahm', 'neung see', 'neung see song' |
︙ | ︙ |
Changes to test/fts3aj.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 | # If SQLITE_ENABLE_FTS3 is not defined, omit this file. ifcapable !fts3 { finish_test return } # Clean up anything left over from a previous pass. | | | | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | # If SQLITE_ENABLE_FTS3 is not defined, omit this file. ifcapable !fts3 { finish_test return } # Clean up anything left over from a previous pass. forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db db eval { CREATE VIRTUAL TABLE t3 USING fts3(content); INSERT INTO t3 (rowid, content) VALUES(1, "hello world"); }
︙ | ︙ | |||
80 81 82 83 84 85 86 | DETACH DATABASE two; } db2 } {2} catch {db eval {DETACH DATABASE two}} catch {db2 close} | | | 80 81 82 83 84 85 86 87 88 89 | DETACH DATABASE two; } db2 } {2} catch {db eval {DETACH DATABASE two}} catch {db2 close} forcedelete test2.db finish_test |
Changes to test/fts3ao.test.
︙ | ︙ | |||
107 108 109 110 111 112 113 | } {{one three four} {one four} {one four two}} #------------------------------------------------------------------- # Close, delete and reopen the database. The following test should # be run on an initially empty db. # db close | | | 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 | } {{one three four} {one four} {one four two}} #------------------------------------------------------------------- # Close, delete and reopen the database. The following test should # be run on an initially empty db. # db close forcedelete test.db test.db-journal sqlite3 db test.db do_test fts3ao-3.1 { execsql { CREATE VIRTUAL TABLE t1 USING fts3(a, b, c); INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one two'); SELECT a, b, c FROM t1 WHERE c MATCH 'two'; |
︙ | ︙ | |||
132 133 134 135 136 137 138 | } } {{one three four} {one four} {one two} {one three four} {one four} {one two}} #--------------------------------------------------------------------- # Test that it is possible to rename an fts3 table in an attached # database. # | | | 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 | } } {{one three four} {one four} {one two} {one three four} {one four} {one two}} #--------------------------------------------------------------------- # Test that it is possible to rename an fts3 table in an attached # database. # forcedelete test2.db test2.db-journal do_test fts3ao-3.1 { execsql { ATTACH 'test2.db' AS aux; CREATE VIRTUAL TABLE aux.t1 USING fts3(a, b, c); INSERT INTO aux.t1(a, b, c) VALUES( 'neung song sahm', 'neung see', 'neung see song' |
︙ | ︙ |
Changes to test/fts3snippet.test.
︙ | ︙ | |||
127 128 129 130 131 132 133 | foreach {DO_MALLOC_TEST enc} { 0 utf8 1 utf8 1 utf16 } { db close | | | 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 | foreach {DO_MALLOC_TEST enc} { 0 utf8 1 utf8 1 utf16 } { db close forcedelete test.db sqlite3 db test.db sqlite3_db_config_lookaside db 0 0 0 db eval "PRAGMA encoding = \"$enc\"" # Set variable $T to the test name prefix for this iteration of the loop. # set T "fts3snippet-$enc" |
︙ | ︙ |
Changes to test/fts4aa.test.
︙ | ︙ | |||
1682 1683 1684 1685 1686 1687 1688 | } $r } # Should get the same search results when the page size is very large # do_test fts4aa-3.0 { db close | | | 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 | } $r } # Should get the same search results when the page size is very large # do_test fts4aa-3.0 { db close forcedelete test.db sqlite3 db test.db db eval { PRAGMA page_size=65536; CREATE VIRTUAL TABLE t1 USING fts4(words, tokenize porter); } fts4aa_fill_table } {} |
︙ | ︙ |
Changes to test/fuzz_malloc.test.
︙ | ︙ | |||
43 44 45 46 47 48 49 | proc do_fuzzy_malloc_test {testname args} { set ::fuzzyopts(-repeats) $::REPEATS set ::fuzzyopts(-sqlprep) {} array set ::fuzzyopts $args sqlite3_memdebug_fail -1 db close | | | 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 | proc do_fuzzy_malloc_test {testname args} { set ::fuzzyopts(-repeats) $::REPEATS set ::fuzzyopts(-sqlprep) {} array set ::fuzzyopts $args sqlite3_memdebug_fail -1 db close delete_file test.db test.db-journal sqlite3 db test.db set ::prep $::fuzzyopts(-sqlprep) execsql $::prep set jj 0 for {set ii 0} {$ii < $::fuzzyopts(-repeats)} {incr ii} { expr srand($jj) incr jj |
︙ | ︙ |
Changes to test/hook.test.
︙ | ︙ | |||
91 92 93 94 95 96 97 | } set ::commit_cnt } {} # Ticket #3564. # do_test hook-3.10 { | | | 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 | } set ::commit_cnt } {} # Ticket #3564. # do_test hook-3.10 { forcedelete test2.db test2.db-journal sqlite3 db2 test2.db proc commit_hook {} { set y [db2 one {SELECT y FROM t3 WHERE y>10}] return [expr {$y>10}] } db2 eval {CREATE TABLE t3(x,y)} db2 commit_hook commit_hook |
︙ | ︙ | |||
213 214 215 216 217 218 219 | } } # Update-hook + ATTACH set ::update_hook {} ifcapable attach { do_test hook-4.2.3 { | | | 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 | } } # Update-hook + ATTACH set ::update_hook {} ifcapable attach { do_test hook-4.2.3 { forcedelete test2.db execsql { ATTACH 'test2.db' AS aux; CREATE TABLE aux.t3(a INTEGER PRIMARY KEY, b); INSERT INTO aux.t3 SELECT * FROM t1; UPDATE t3 SET b = 'two or so' WHERE a = 2; DELETE FROM t3 WHERE 1; -- Avoid the truncate optimization (for now) } |
︙ | ︙ |
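The ticket #3564 case above leans on the rule that a commit hook returning non-zero turns the pending COMMIT into a rollback. A minimal sketch of that contract (table name hypothetical; the exact error text can vary between versions):

    db commit_hook {expr {1}}                    ;# veto every commit
    set rc [catch {db eval {CREATE TABLE vetoed(x)}} msg]
    puts [list $rc $msg]                         ;# 1 {constraint failed} (or similar)
    db commit_hook {}                            ;# remove the hook again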
Changes to test/incrblob.test.
︙ | ︙ | |||
116 117 118 119 120 121 122 | if {$AutoVacuumMode>0} { ifcapable !autovacuum { break } } db close | | | 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 | if {$AutoVacuumMode>0} { ifcapable !autovacuum { break } } db close forcedelete test.db test.db-journal sqlite3 db test.db execsql "PRAGMA auto_vacuum = $AutoVacuumMode" do_test incrblob-2.$AutoVacuumMode.1 { set ::str [string repeat abcdefghij 2900] execsql { |
︙ | ︙ | |||
375 376 377 378 379 380 381 | #------------------------------------------------------------------------ # incrblob-5.*: # # Test that opening a blob in an attached database works. # ifcapable attach { do_test incrblob-5.1 { | | | 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 | #------------------------------------------------------------------------ # incrblob-5.*: # # Test that opening a blob in an attached database works. # ifcapable attach { do_test incrblob-5.1 { forcedelete test2.db test2.db-journal set ::size [expr [file size [info script]]] execsql { ATTACH 'test2.db' AS aux; CREATE TABLE aux.files(name, text); INSERT INTO aux.files VALUES('this one', zeroblob($::size)); } set fd [db incrblob aux files text 1] |
︙ | ︙ | |||
580 581 582 583 584 585 586 | set fd [open [info script]] fconfigure $fd -translation binary set ::data [read $fd 14000] close $fd db close | | | 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 | set fd [open [info script]] fconfigure $fd -translation binary set ::data [read $fd 14000] close $fd db close forcedelete test.db test.db-journal sqlite3 db test.db do_test incrblob-7.2.1 { execsql { PRAGMA auto_vacuum = "incremental"; CREATE TABLE t1(a INTEGER PRIMARY KEY, b); -- root@page3 INSERT INTO t1 VALUES(123, $::data); |
︙ | ︙ |
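The incrblob tests exercise the sqlite3_blob API through the Tcl [db incrblob] method, which returns a channel positioned on the stored blob. A short, self-contained usage sketch (table and values hypothetical):

    db eval { CREATE TABLE b(k INTEGER PRIMARY KEY, v BLOB) }
    db eval { INSERT INTO b VALUES(1, zeroblob(10)) }
    set ch [db incrblob b v 1]          ;# open row 1, column v for read/write
    puts -nonewline $ch "0123456789"    ;# overwrite in place; blobs cannot be resized
    close $ch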
Changes to test/incrvacuum.test.
︙ | ︙ | |||
327 328 329 330 331 332 333 | }] $control } } set ::str1 [string repeat abcdefghij 130] set ::str2 [string repeat 1234567890 105] | | | 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 | }] $control } } set ::str1 [string repeat abcdefghij 130] set ::str2 [string repeat 1234567890 105] forcedelete test1.db test1.db-journal test2.db test2.db-journal sqlite3 db1 test1.db sqlite3 db2 test2.db execsql { PRAGMA auto_vacuum = 'none' } db1 execsql { PRAGMA auto_vacuum = 'incremental' } db2 set tn 1 foreach sql $::TestScriptList { |
︙ | ︙ | |||
471 472 473 474 475 476 477 | } {} #--------------------------------------------------------------------- # At one point this test case was causing an assert() to fail. # do_test incrvacuum-9.1 { db close | | | 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 | } {} #--------------------------------------------------------------------- # At one point this test case was causing an assert() to fail. # do_test incrvacuum-9.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA auto_vacuum = 'incremental'; CREATE TABLE t1(a, b, c); CREATE TABLE t2(a, b, c); INSERT INTO t2 VALUES(randstr(500,500),randstr(500,500),randstr(500,500)); |
︙ | ︙ | |||
589 590 591 592 593 594 595 | #---------------------------------------------------------------- # Test that if we set the auto_vacuum mode to 'incremental', then # create a database, thereafter that database defaults to incremental # vacuum mode. # db close | | | 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 | #---------------------------------------------------------------- # Test that if we set the auto_vacuum mode to 'incremental', then # create a database, thereafter that database defaults to incremental # vacuum mode. # db close forcedelete test.db test.db-journal sqlite3 db test.db ifcapable default_autovacuum { do_test incrvacuum-11.1-av-dflt-on { execsql { PRAGMA auto_vacuum; } |
︙ | ︙ | |||
647 648 649 650 651 652 653 | } {1} #---------------------------------------------------------------------- # Special case: What happens if the database is locked when a "PRAGMA # auto_vacuum = XXX" statement is executed. # db close | | | 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 | } {1} #---------------------------------------------------------------------- # Special case: What happens if the database is locked when a "PRAGMA # auto_vacuum = XXX" statement is executed. # db close forcedelete test.db test.db-journal sqlite3 db test.db do_test incrvacuum-12.1 { execsql { PRAGMA auto_vacuum = 1; } expr {[file size test.db]>0} |
︙ | ︙ | |||
688 689 690 691 692 693 694 | #---------------------------------------------------------------------- # Special case #2: What if one process prepares a "PRAGMA auto_vacuum = XXX" # statement when the database is empty, but doesn't execute it until # after some other process has created the database. # db2 close db close | | | 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 | #---------------------------------------------------------------------- # Special case #2: What if one process prepares a "PRAGMA auto_vacuum = XXX" # statement when the database is empty, but doesn't execute it until # after some other process has created the database. # db2 close db close forcedelete test.db test.db-journal sqlite3 db test.db ; set ::DB [sqlite3_connection_pointer db] sqlite3 db2 test.db do_test incrvacuum-13.1 { # File size is sometimes 1 instead of 0 due to the hack we put in # to work around ticket #3260. Search for comments on #3260 in # os_unix.c. |
︙ | ︙ | |||
739 740 741 742 743 744 745 | } {1 {file is encrypted or is not a database}} db3 close } do_test incrvacuum-15.1 { db close db2 close | | | 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 | } {1 {file is encrypted or is not a database}} db3 close } do_test incrvacuum-15.1 { db close db2 close forcedelete test.db sqlite3 db test.db set str [string repeat "abcdefghij" 500] execsql { PRAGMA cache_size = 10; PRAGMA auto_vacuum = incremental; |
︙ | ︙ |
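Several incrvacuum cases above depend on auto_vacuum being chosen before any pages are written; once the file exists, the mode recorded in its header is what counts. A compact illustration of that rule (file and connection names hypothetical):

    forcedelete av.db
    sqlite3 db2 av.db
    db2 eval {
      PRAGMA auto_vacuum = incremental;   -- effective only because the file is still empty
      CREATE TABLE t(x);
    }
    db2 close
    sqlite3 db2 av.db
    puts [db2 eval {PRAGMA auto_vacuum}]  ;# 2 == incremental, persisted in the header
    db2 close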
Changes to test/incrvacuum2.test.
︙ | ︙ | |||
67 68 69 70 71 72 73 | file size test.db } {3072} # Make sure incremental vacuum works on attached databases. # ifcapable attach { do_test incrvacuum2-2.1 { | | | 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 | file size test.db } {3072} # Make sure incremental vacuum works on attached databases. # ifcapable attach { do_test incrvacuum2-2.1 { forcedelete test2.db test2.db-journal execsql { ATTACH DATABASE 'test2.db' AS aux; PRAGMA aux.auto_vacuum=incremental; CREATE TABLE aux.t2(x); INSERT INTO t2 VALUES(zeroblob(30000)); INSERT INTO t1 SELECT * FROM t2; DELETE FROM t2; |
︙ | ︙ |
Changes to test/incrvacuum_ioerr.test.
︙ | ︙ | |||
104 105 106 107 108 109 110 | db close } ifcapable shared_cache { catch { db close } | | | 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 | db close } ifcapable shared_cache { catch { db close } forcedelete test.db set ::enable_shared_cache [sqlite3_enable_shared_cache 1] # Create two connections to a single shared-cache: # sqlite3 db1 test.db sqlite3 db2 test.db |
︙ | ︙ |
Added test/index4.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 | # 2011 July 9 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing the CREATE INDEX statement. # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix index4 do_execsql_test 1.1 { BEGIN; CREATE TABLE t1(x); INSERT INTO t1 VALUES(randomblob(102)); INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 2 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 4 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 8 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 16 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 32 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 64 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 128 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 256 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 512 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 1024 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 2048 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 4096 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 8192 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 16384 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 32768 INSERT INTO t1 SELECT randomblob(102) FROM t1; -- 65536 COMMIT; } do_execsql_test 1.2 { CREATE INDEX i1 ON t1(x); } do_execsql_test 1.3 { PRAGMA integrity_check } {ok} # The same test again - this time with limited memory. # ifcapable memorymanage { set soft_limit [sqlite3_soft_heap_limit 50000] db close sqlite3 db test.db do_execsql_test 1.4 { PRAGMA cache_size = 10; CREATE INDEX i2 ON t1(x); } do_execsql_test 1.5 { PRAGMA integrity_check } {ok} sqlite3_soft_heap_limit $soft_limit } do_execsql_test 1.6 { BEGIN; DROP TABLE t1; CREATE TABLE t1(x); INSERT INTO t1 VALUES('a'); INSERT INTO t1 VALUES('b'); INSERT INTO t1 VALUES('c'); INSERT INTO t1 VALUES('d'); INSERT INTO t1 VALUES('e'); INSERT INTO t1 VALUES('f'); INSERT INTO t1 VALUES('g'); INSERT INTO t1 VALUES(NULL); INSERT INTO t1 SELECT randomblob(1202) FROM t1; -- 16 INSERT INTO t1 SELECT randomblob(2202) FROM t1; -- 32 INSERT INTO t1 SELECT randomblob(3202) FROM t1; -- 64 INSERT INTO t1 SELECT randomblob(4202) FROM t1; -- 128 INSERT INTO t1 SELECT randomblob(5202) FROM t1; -- 256 COMMIT; CREATE INDEX i1 ON t1(x); PRAGMA integrity_check } {ok} do_execsql_test 1.7 { BEGIN; DROP TABLE t1; CREATE TABLE t1(x); INSERT INTO t1 VALUES('a'); COMMIT; CREATE INDEX i1 ON t1(x); PRAGMA integrity_check } {ok} do_execsql_test 1.8 { BEGIN; DROP TABLE t1; CREATE TABLE t1(x); COMMIT; CREATE INDEX i1 ON t1(x); PRAGMA integrity_check } {ok} finish_test |
Added test/indexfault.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 | # 2011 August 08 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/lock_common.tcl source $testdir/malloc_common.tcl ifcapable !mergesort { finish_test return } set testprefix indexfault # Set up the custom fault-injector. This is further configured by using # different values for $::custom_filter and different implementations # of Tcl proc [xCustom] for each test case. 
# proc install_custom_faultsim {} { set ::FAULTSIM(custom) [list \ -injectinstall custom_injectinstall \ -injectstart custom_injectstart \ -injectstop custom_injectstop \ -injecterrlist {{1 {disk I/O error}}} \ -injectuninstall custom_injectuninstall \ ] proc custom_injectinstall {} { testvfs shmfault -default true shmfault filter $::custom_filter shmfault script xCustom } proc custom_injectuninstall {} { catch {db close} catch {db2 close} shmfault delete } set ::custom_ifail -1 set ::custom_nfail -1 proc custom_injectstart {iFail} { set ::custom_ifail $iFail set ::custom_nfail 0 } proc custom_injectstop {} { set ::custom_ifail -1 return $::custom_nfail } } proc uninstall_custom_faultsim {} { unset -nocomplain ::FAULTSIM(custom) } #------------------------------------------------------------------------- # These tests - indexfault-1.* - Build an index on a smallish table with # all different kinds of fault-injection. The CREATE INDEX is run once # with default options and once with a 50KB soft-heap-limit. # do_execsql_test 1.0 { BEGIN; CREATE TABLE t1(x); INSERT INTO t1 VALUES(randomblob(202)); INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 2 INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 4 INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 8 INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 16 INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 32 INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 64 INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 128 INSERT INTO t1 SELECT randomblob(202) FROM t1; -- 256 COMMIT; } faultsim_save_and_close do_faultsim_test 1.1 -prep { faultsim_restore_and_reopen } -body { execsql { CREATE INDEX i1 ON t1(x) } faultsim_test_result {0 {}} faultsim_integrity_check } ifcapable memorymanage { set soft_limit [sqlite3_soft_heap_limit 50000] do_faultsim_test 2.1 -prep { faultsim_restore_and_reopen } -body { execsql { CREATE INDEX i1 ON t1(x) } faultsim_test_result {0 {}} } sqlite3_soft_heap_limit $soft_limit } #------------------------------------------------------------------------- # These are similar to the indexfault-1.* tests, except they create an # index with more than one column. 
# sqlite3 db test.db do_execsql_test 2.0 { BEGIN; DROP TABLE IF EXISTS t1; CREATE TABLE t1(t,u,v,w,x,y,z); INSERT INTO t1 VALUES( randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) ); INSERT INTO t1 SELECT randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) FROM t1; -- 2 INSERT INTO t1 SELECT randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) FROM t1; -- 4 INSERT INTO t1 SELECT randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) FROM t1; -- 8 INSERT INTO t1 SELECT randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) FROM t1; -- 16 INSERT INTO t1 SELECT randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) FROM t1; -- 32 INSERT INTO t1 SELECT randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) FROM t1; -- 64 INSERT INTO t1 SELECT randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30), randomblob(30) FROM t1; -- 128 COMMIT; } faultsim_save_and_close do_faultsim_test 2.1 -prep { faultsim_restore_and_reopen } -body { execsql { CREATE INDEX i1 ON t1(t,u,v,w,x,y,z) } faultsim_test_result {0 {}} faultsim_integrity_check } ifcapable memorymanage { set soft_limit [sqlite3_soft_heap_limit 50000] do_faultsim_test 2.2 -prep { faultsim_restore_and_reopen } -body { execsql { CREATE INDEX i1 ON t1(t,u,v,w,x,y,z) } faultsim_test_result {0 {}} } sqlite3_soft_heap_limit $soft_limit } #------------------------------------------------------------------------- # The following tests - indexfault-2.* - all attempt to build a index # on table t1 in the main database with injected IO errors. Individual # test cases work as follows: # # 3.1: IO errors injected into xOpen() calls. # 3.2: As 7.1, but with a low (50KB) soft-heap-limit. # # 3.3: IO errors injected into the first 200 write() calls made on the # second temporary file. # 3.4: As 7.3, but with a low (50KB) soft-heap-limit. # # 3.5: After a certain amount of data has been read from the main database # file (and written into the temporary b-tree), sqlite3_release_memory() # is called to free as much memory as possible. This causes the temp # b-tree to be flushed to disk. So that before its contents can be # transfered to a PMA they must be read back from disk - creating extra # opportunities for IO errors. # install_custom_faultsim # Set up a table to build indexes on. Save the setup using the # [faultsim_save_and_close] mechanism. 
# sqlite3 db test.db do_execsql_test 3.0 { BEGIN; DROP TABLE IF EXISTS t1; CREATE TABLE t1(x); INSERT INTO t1 VALUES(randomblob(11000)); INSERT INTO t1 SELECT randomblob(11001) FROM t1; -- 2 INSERT INTO t1 SELECT randomblob(11002) FROM t1; -- 4 INSERT INTO t1 SELECT randomblob(11003) FROM t1; -- 8 INSERT INTO t1 SELECT randomblob(11004) FROM t1; -- 16 INSERT INTO t1 SELECT randomblob(11005) FROM t1; -- 32 INSERT INTO t1 SELECT randomblob(11006) FROM t1; -- 64 INSERT INTO t1 SELECT randomblob(11007) FROM t1; -- 128 INSERT INTO t1 SELECT randomblob(11008) FROM t1; -- 256 INSERT INTO t1 SELECT randomblob(11009) FROM t1; -- 512 COMMIT; } faultsim_save_and_close set ::custom_filter xOpen proc xCustom {args} { incr ::custom_ifail -1 if {$::custom_ifail==0} { incr ::custom_nfail return "SQLITE_IOERR" } return "SQLITE_OK" } do_faultsim_test 3.1 -faults custom -prep { faultsim_restore_and_reopen } -body { execsql { CREATE INDEX i1 ON t1(x) } faultsim_test_result {0 {}} } ifcapable memorymanage { set soft_limit [sqlite3_soft_heap_limit 50000] do_faultsim_test 3.2 -faults custom -prep { faultsim_restore_and_reopen } -body { execsql { CREATE INDEX i1 ON t1(x) } faultsim_test_result {0 {}} } sqlite3_soft_heap_limit $soft_limit } set ::custom_filter {xOpen xWrite} proc xCustom {method args} { if {$method == "xOpen"} { if {[lindex $args 0] == ""} { incr ::nTmpOpen 1 if {$::nTmpOpen == 3} { return "failme" } } return "SQLITE_OK" } if {$::custom_ifail<200 && [lindex $args 1] == "failme"} { incr ::custom_ifail -1 if {$::custom_ifail==0} { incr ::custom_nfail return "SQLITE_IOERR" } } return "SQLITE_OK" } do_faultsim_test 3.3 -faults custom -prep { faultsim_restore_and_reopen set ::nTmpOpen 0 } -body { execsql { CREATE INDEX i1 ON t1(x) } faultsim_test_result {0 {}} } ifcapable memorymanage { set soft_limit [sqlite3_soft_heap_limit 50000] do_faultsim_test 3.4 -faults custom -prep { faultsim_restore_and_reopen set ::nTmpOpen 0 } -body { execsql { CREATE INDEX i1 ON t1(x) } faultsim_test_result {0 {}} } sqlite3_soft_heap_limit $soft_limit } uninstall_custom_faultsim #------------------------------------------------------------------------- # Test 4: After a certain amount of data has been read from the main database # file (and written into the temporary b-tree), sqlite3_release_memory() is # called to free as much memory as possible. This causes the temp b-tree to be # flushed to disk. So that before its contents can be transfered to a PMA they # must be read back from disk - creating extra opportunities for IO errors. 
# install_custom_faultsim catch { db close } forcedelete test.db sqlite3 db test.db do_execsql_test 4.0 { BEGIN; DROP TABLE IF EXISTS t1; CREATE TABLE t1(x); INSERT INTO t1 VALUES(randomblob(11000)); INSERT INTO t1 SELECT randomblob(11001) FROM t1; -- 2 INSERT INTO t1 SELECT randomblob(11002) FROM t1; -- 4 INSERT INTO t1 SELECT randomblob(11003) FROM t1; -- 8 INSERT INTO t1 SELECT randomblob(11004) FROM t1; -- 16 INSERT INTO t1 SELECT randomblob(11005) FROM t1; -- 32 INSERT INTO t1 SELECT randomblob(11005) FROM t1; -- 64 COMMIT; } faultsim_save_and_close testvfs tvfs tvfs script xRead tvfs filter xRead set ::nRead 0 proc xRead {method file args} { if {[file tail $file] == "test.db"} { incr ::nRead } } do_test 4.1 { sqlite3 db test.db -vfs tvfs execsql { CREATE INDEX i1 ON t1(x) } } {} db close tvfs delete set ::custom_filter xRead proc xCustom {method file args} { incr ::nReadCall if {$::nReadCall >= ($::nRead/5)} { if {$::nReadCall == ($::nRead/5)} { set nByte [sqlite3_release_memory [expr 64*1024*1024]] sqlite3_soft_heap_limit 20000 } if {$file == ""} { incr ::custom_ifail -1 if {$::custom_ifail==0} { incr ::custom_nfail return "SQLITE_IOERR" } } } return "SQLITE_OK" } do_faultsim_test 4.2 -faults custom -prep { faultsim_restore_and_reopen set ::nReadCall 0 sqlite3_soft_heap_limit 0 } -body { execsql { CREATE INDEX i1 ON t1(x) } faultsim_test_result {0 {}} } uninstall_custom_faultsim finish_test |
Changes to test/insert5.test.
︙ | ︙ | |||
30 31 32 33 34 35 36 | proc uses_temp_table {sql} { return [expr {[lsearch [execsql "EXPLAIN $sql"] OpenEphemeral]>=0}] } # Construct the sample database. # do_test insert5-1.0 { | | | 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | proc uses_temp_table {sql} { return [expr {[lsearch [execsql "EXPLAIN $sql"] OpenEphemeral]>=0}] } # Construct the sample database. # do_test insert5-1.0 { forcedelete test2.db test2.db-journal execsql { CREATE TABLE MAIN(Id INTEGER, Id1 INTEGER); CREATE TABLE B(Id INTEGER, Id1 INTEGER); CREATE VIEW v1 AS SELECT * FROM B; CREATE VIEW v2 AS SELECT * FROM MAIN; INSERT INTO MAIN(Id,Id1) VALUES(2,3); INSERT INTO B(Id,Id1) VALUES(2,3); |
︙ | ︙ |
Changes to test/io.test.
︙ | ︙ | |||
221 222 223 224 225 226 227 | file mkdir test.db-journal catchsql { INSERT INTO abc VALUES(9, randstr(1000,1000)); COMMIT } } {1 {unable to open database file}} do_test io-2.6.3 { | | | | 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 | file mkdir test.db-journal catchsql { INSERT INTO abc VALUES(9, randstr(1000,1000)); COMMIT } } {1 {unable to open database file}} do_test io-2.6.3 { forcedelete test.db-journal catchsql { COMMIT } } {0 {}} do_test io-2.6.4 { execsql { SELECT * FROM abc } } {1 2 3 4 5 6 7 8} # Test that if the database modification is part of multi-file commit, # the journal file is always created. In this case, the journal file # is created during execution of the COMMIT statement, so we have to # use the same technique to check that it is created as in the above # block. forcedelete test2.db test2.db-journal ifcapable attach { do_test io-2.7.1 { execsql { ATTACH 'test2.db' AS aux; PRAGMA aux.page_size = 1024; CREATE TABLE aux.abc2(a, b); BEGIN; |
︙ | ︙ | |||
257 258 259 260 261 262 263 | execsql { SELECT * FROM abc UNION ALL SELECT * FROM abc2 } } {1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10} do_test io-2.7.4 { file mkdir test2.db-journal catchsql { COMMIT } } {1 {unable to open database file}} do_test io-2.7.5 { | | | 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 | execsql { SELECT * FROM abc UNION ALL SELECT * FROM abc2 } } {1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10} do_test io-2.7.4 { file mkdir test2.db-journal catchsql { COMMIT } } {1 {unable to open database file}} do_test io-2.7.5 { forcedelete test2.db-journal catchsql { COMMIT } } {1 {cannot commit - no transaction is active}} do_test io-2.7.6 { execsql { SELECT * FROM abc UNION ALL SELECT * FROM abc2 } } {1 2 3 4 5 6 7 8} } |
︙ | ︙ | |||
300 301 302 303 304 305 306 | INSERT INTO abc VALUES(9, 10); } file exists test.db-journal } {1} do_test io-2.9.2 { execsql { ROLLBACK; } db close | | | 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 | INSERT INTO abc VALUES(9, 10); } file exists test.db-journal } {1} do_test io-2.9.2 { execsql { ROLLBACK; } db close forcedelete test.db test.db-journal sqlite3 db test.db -vfs devsym execsql { PRAGMA auto_vacuum = OFF; PRAGMA page_size = 2048; CREATE TABLE abc(a, b); } execsql { |
︙ | ︙ | |||
371 372 373 374 375 376 377 | #---------------------------------------------------------------------- # Test cases io-3.* test the IOCAP_SEQUENTIAL optimization. # sqlite3_simulate_device -char sequential -sectorsize 0 ifcapable pager_pragmas { do_test io-3.1 { db close | | | 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 | #---------------------------------------------------------------------- # Test cases io-3.* test the IOCAP_SEQUENTIAL optimization. # sqlite3_simulate_device -char sequential -sectorsize 0 ifcapable pager_pragmas { do_test io-3.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db -vfs devsym db eval { PRAGMA auto_vacuum=OFF; } # File size might be 1 due to the hack to work around ticket #3260. # Search for #3260 in os_unix.c for additional information. expr {[file size test.db]>1} |
︙ | ︙ | |||
540 541 542 543 544 545 546 | {atomic2K} 4096 4096 {atomic2K atomic} 512 8192 {atomic64K} 512 1024 } { incr tn if {$pgsize>$::SQLITE_MAX_PAGE_SIZE} continue db close | | | 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 | {atomic2K} 4096 4096 {atomic2K atomic} 512 8192 {atomic64K} 512 1024 } { incr tn if {$pgsize>$::SQLITE_MAX_PAGE_SIZE} continue db close forcedelete test.db test.db-journal sqlite3_simulate_device -char $char -sectorsize $sectorsize sqlite3 db test.db -vfs devsym db eval { PRAGMA auto_vacuum=OFF; } ifcapable !atomicwrite { if {[regexp {^atomic} $char]} continue |
︙ | ︙ |
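The io-2.7.* cases above hinge on the fact that a transaction touching two database files cannot take the atomic-write shortcut: at COMMIT time a rollback journal is needed for each file, normally tied together by a master journal. A bare-bones sketch of such a multi-file commit (database names hypothetical):

    sqlite3 db2 one.db
    db2 eval {
      ATTACH 'two.db' AS aux;
      CREATE TABLE main.t1(x);
      CREATE TABLE aux.t2(x);
      BEGIN;
        INSERT INTO main.t1 VALUES(1);
        INSERT INTO aux.t2 VALUES(2);
      COMMIT;   -- journals for both files (and typically a master journal) are created here
    }
    db2 close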
Changes to test/ioerr.test.
︙ | ︙ | |||
179 180 181 182 183 184 185 | db2 eval { PRAGMA synchronous = 0; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); BEGIN; INSERT INTO t1 VALUES(3, 4); } | | | | 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 | db2 eval { PRAGMA synchronous = 0; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); BEGIN; INSERT INTO t1 VALUES(3, 4); } forcecopy test2.db test.db forcecopy test2.db-journal test.db-journal db2 close } -tclbody { sqlite3 db test.db db eval { SELECT * FROM t1; } } -exclude 1 |
︙ | ︙ | |||
215 216 217 218 219 220 221 | do_ioerr_test ioerr-9 -ckrefcount true -tclprep { execsql { CREATE TABLE t1(a,b,c); INSERT INTO t1 VALUES(randstr(200,200), randstr(1000,1000), 2); BEGIN; INSERT INTO t1 VALUES(randstr(200,200), randstr(1000,1000), 2); } | | | | 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 | do_ioerr_test ioerr-9 -ckrefcount true -tclprep { execsql { CREATE TABLE t1(a,b,c); INSERT INTO t1 VALUES(randstr(200,200), randstr(1000,1000), 2); BEGIN; INSERT INTO t1 VALUES(randstr(200,200), randstr(1000,1000), 2); } forcecopy test.db-journal test2.db-journal execsql { COMMIT; } forcecopy test2.db-journal test.db-journal set f [open test.db-journal a] fconfigure $f -encoding binary puts -nonewline $f "hello" puts -nonewline $f "\x00\x00\x00\x05\x01\x02\x03\x04" puts -nonewline $f "\xd9\xd5\x05\xf9\x20\xa1\x63\xd7" close $f } -sqlbody { |
︙ | ︙ |
Changes to test/ioerr4.test.
︙ | ︙ | |||
27 28 29 30 31 32 33 | # Enable shared cache mode and incremental vacuum. # do_test ioerr4-1.1 { db close set ::enable_shared_cache [sqlite3_enable_shared_cache 1] } {0} do_test ioerr4-1.2 { | | | 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 | # Enable shared cache mode and incremental vacuum. # do_test ioerr4-1.1 { db close set ::enable_shared_cache [sqlite3_enable_shared_cache 1] } {0} do_test ioerr4-1.2 { forcedelete test.db test.db-journal sqlite3 db test.db sqlite3 db2 test.db db eval { PRAGMA auto_vacuum=INCREMENTAL; CREATE TABLE a(i INTEGER, b BLOB); } db2 eval { |
︙ | ︙ | |||
75 76 77 78 79 80 81 | } {64} # Set up for an I/O error on incremental vacuum # with two connections on shared cache. # db close db2 close | | | | | | 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 | } {64} # Set up for an I/O error on incremental vacuum # with two connections on shared cache. # db close db2 close forcecopy test.db test.db-bu do_ioerr_test ioerr4-2 -tclprep { catch {db2 close} db close forcedelete test.db test.db-journal forcecopy test.db-bu test.db sqlite3_enable_shared_cache 1 set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db] db eval {PRAGMA auto_vacuum=INCREMENTAL} sqlite3 db2 test.db } -tclbody { db eval {PRAGMA incremental_vacuum(5)} } db2 close forcedelete test.db-bu sqlite3_enable_shared_cache $::enable_shared_cache finish_test |
Changes to test/journal1.test.
︙ | ︙ | |||
16 17 18 19 20 21 22 | # $Id: journal1.test,v 1.2 2005/03/20 22:54:56 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # These tests will not work on windows because windows uses | | | 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | # $Id: journal1.test,v 1.2 2005/03/20 22:54:56 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # These tests will not work on windows because windows uses # mandatory file locking which breaks the copy_file command. # if {$tcl_platform(platform)=="windows"} { finish_test return } # Create a sample database
︙ | ︙ | |||
47 48 49 50 51 52 53 | # database. # do_test journal1-1.2 { execsql { BEGIN; DELETE FROM t1; } | | | | | 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 | # database. # do_test journal1-1.2 { execsql { BEGIN; DELETE FROM t1; } forcecopy test.db-journal test.db-journal-bu execsql { ROLLBACK; } db close delete_file test.db copy_file test.db-journal-bu test.db-journal sqlite3 db test.db catchsql { SELECT * FROM sqlite_master } } {0 {}} finish_test |
Changes to test/journal2.test.
︙ | ︙ | |||
163 164 165 166 167 168 169 | do_test journal2-1.13 { tvfs filter {xOpen xClose xDelete xWrite xTruncate} set ::tvfs_error_on_write 1 catchsql { COMMIT } db2 } {1 {disk I/O error}} db2 close unset ::tvfs_error_on_write | | | 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 | do_test journal2-1.13 { tvfs filter {xOpen xClose xDelete xWrite xTruncate} set ::tvfs_error_on_write 1 catchsql { COMMIT } db2 } {1 {disk I/O error}} db2 close unset ::tvfs_error_on_write forcecopy test.db testX.db do_test journal2-1.14 { file exists test.db-journal } 1 do_test journal2-1.15 { execsql { SELECT count(*) FROM t2; PRAGMA integrity_check; } |
︙ | ︙ |
Changes to test/journal3.test.
︙ | ︙ | |||
31 32 33 34 35 36 37 | 2 00666 3 00600 4 00755 } { db close set effective [format %.5o [expr $permissions & ~$umask]] do_test journal3-1.2.$tn.1 { | | | 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 | 2 00666 3 00600 4 00755 } { db close set effective [format %.5o [expr $permissions & ~$umask]] do_test journal3-1.2.$tn.1 { catch { forcedelete test.db-journal } file attributes test.db -permissions $permissions file attributes test.db -permissions } $permissions do_test journal3-1.2.$tn.2 { file exists test.db-journal } {0} do_test journal3-1.2.$tn.3 { sqlite3 db test.db execsql { |
︙ | ︙ |
Changes to test/jrnlmode.test.
︙ | ︙ | |||
196 197 198 199 200 201 202 | DETACH aux2; DETACH aux3; } } {} } ifcapable attach { | | | 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 | DETACH aux2; DETACH aux3; } } {} } ifcapable attach { forcedelete test2.db do_test jrnlmode-2.1 { execsql { ATTACH 'test2.db' AS aux; PRAGMA main.journal_mode = persist; PRAGMA aux.journal_mode = persist; CREATE TABLE abc(a, b, c); CREATE TABLE aux.def(d, e, f); |
︙ | ︙ | |||
238 239 240 241 242 243 244 | } } {4 5 6} #---------------------------------------------------------------------- # Test cases jrnlmode-3.X verify that ticket #3127 has been fixed. # db close | | | | 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 | } } {4 5 6} #---------------------------------------------------------------------- # Test cases jrnlmode-3.X verify that ticket #3127 has been fixed. # db close forcedelete test2.db forcedelete test.db sqlite3 db test.db do_test jrnlmode-3.1 { execsql { CREATE TABLE x(n INTEGER); ATTACH 'test2.db' AS a; create table a.x ( n integer );
︙ | ︙ | |||
266 267 268 269 270 271 272 | COMMIT; } } {} } ifcapable autovacuum&&pragma { db close | | | 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 | COMMIT; } } {} } ifcapable autovacuum&&pragma { db close forcedelete test.db sqlite3 db test.db do_test jrnlmode-4.1 { execsql { PRAGMA cache_size = 1; PRAGMA auto_vacuum = 1; CREATE TABLE abc(a, b, c); } |
︙ | ︙ | |||
299 300 301 302 303 304 305 | } #------------------------------------------------------------------------ # The following test cases, jrnlmode-5.*, test the journal_size_limit # pragma. ifcapable pragma { db close | | | 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 | } #------------------------------------------------------------------------ # The following test cases, jrnlmode-5.*, test the journal_size_limit # pragma. ifcapable pragma { db close forcedelete test.db test2.db test3.db sqlite3 db test.db do_test jrnlmode-5.1 { execsql {pragma page_size=1024} execsql {pragma journal_mode=persist} } {persist}
︙ | ︙ | |||
507 508 509 510 511 512 513 | } {0} } } ifcapable pragma { catch { db close } do_test jrnlmode-7.1 { | | | 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 | } {0} } } ifcapable pragma { catch { db close } do_test jrnlmode-7.1 { foreach f [glob -nocomplain test.db*] { forcedelete $f } sqlite3 db test.db execsql { PRAGMA journal_mode = memory; PRAGMA auto_vacuum = 0; PRAGMA page_size = 1024; PRAGMA user_version = 5; PRAGMA user_version; |
︙ | ︙ |
Changes to test/jrnlmode2.test.
︙ | ︙ | |||
108 109 110 111 112 113 114 | do_test jrnlmode2-2.4 { sqlite3 db2 test.db -readonly 1 catchsql { SELECT * FROM t1 } db2 } {0 {1 2 3 4 5 6}} do_test jrnlmode2-2.5 { db close | | | 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 | do_test jrnlmode2-2.4 { sqlite3 db2 test.db -readonly 1 catchsql { SELECT * FROM t1 } db2 } {0 {1 2 3 4 5 6}} do_test jrnlmode2-2.5 { db close delete_file test.db-journal } {} do_test jrnlmode2-2.6 { sqlite3 db2 test.db -readonly 1 catchsql { SELECT * FROM t1 } db2 } {0 {1 2 3 4 5 6}} catch { db2 close } |
︙ | ︙ |
Changes to test/jrnlmode3.test.
︙ | ︙ | |||
44 45 46 47 48 49 50 | INSERT INTO t1 VALUES(2); ROLLBACK; SELECT * FROM t1; } } {1} db close | | | 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 | INSERT INTO t1 VALUES(2); ROLLBACK; SELECT * FROM t1; } } {1} db close forcedelete test.db test.db-journal sqlite3 db test.db do_test jrnlmode3-2.1 { db eval { PRAGMA locking_mode=EXCLUSIVE; PRAGMA journal_mode=OFF; CREATE TABLE t1(x); |
︙ | ︙ | |||
81 82 83 84 85 86 87 | # Skip the no-change cases if {$fromjmode==$tojmode} continue incr cnt # Start with a fresh database connection and an empty database file. # db close | | | 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 | # Skip the no-change cases if {$fromjmode==$tojmode} continue incr cnt # Start with a fresh database connection and an empty database file. # db close forcedelete test.db test.db-journal sqlite3 db test.db # Initialize the journal mode. # do_test jrnlmode3-3.$cnt.1-($fromjmode-to-$tojmode) { db eval "PRAGMA journal_mode = $fromjmode;" } $fromjmode
︙ | ︙ |
Changes to test/like.test.
︙ | ︙ | |||
304 305 306 307 308 309 310 311 | set sqlite_like_count } 12 # GLOB is optimized regardless of the case_sensitive_like setting. # do_test like-3.19 { set sqlite_like_count 0 queryplan { | > < | 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 | set sqlite_like_count } 12 # GLOB is optimized regardless of the case_sensitive_like setting. # do_test like-3.19 { set sqlite_like_count 0 db eval {CREATE INDEX i1 ON t1(x);} queryplan { SELECT x FROM t1 WHERE x GLOB 'abc*' ORDER BY 1; } } {abc abcd nosort {} i1} do_test like-3.20 { set sqlite_like_count } 0 do_test like-3.21 { |
︙ | ︙ | |||
518 519 520 521 522 523 524 | } {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} do_test like-5.24 { queryplan { SELECT x FROM t2 WHERE x LIKE 'ZZ%'; } } {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} do_test like-5.25 { | | > > | 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 | } {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} do_test like-5.24 { queryplan { SELECT x FROM t2 WHERE x LIKE 'ZZ%'; } } {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} do_test like-5.25 { db eval { PRAGMA case_sensitive_like=on; CREATE TABLE t3(x TEXT); CREATE INDEX i3 ON t3(x); INSERT INTO t3 VALUES('ZZ-upper-upper'); INSERT INTO t3 VALUES('zZ-lower-upper'); INSERT INTO t3 VALUES('Zz-upper-lower'); INSERT INTO t3 VALUES('zz-lower-lower'); } queryplan { SELECT x FROM t3 WHERE x LIKE 'zz%'; } } {zz-lower-lower nosort {} i3} do_test like-5.26 { queryplan { SELECT x FROM t3 WHERE x LIKE 'zZ%'; } |
︙ | ︙ |
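The queryplan expectations above reduce to this: with the default case-insensitive LIKE, a prefix pattern cannot use a BINARY-collated index, whereas GLOB, which is always case-sensitive, can; enabling case_sensitive_like restores the LIKE optimization. A sketch of observing the same thing with EXPLAIN QUERY PLAN against the t1/i1 schema from the hunks above:

    db eval { PRAGMA case_sensitive_like = off }
    puts [db eval {EXPLAIN QUERY PLAN SELECT x FROM t1 WHERE x LIKE 'abc%'}]  ;# expected: full scan
    puts [db eval {EXPLAIN QUERY PLAN SELECT x FROM t1 WHERE x GLOB 'abc*'}]  ;# expected: uses index i1
    db eval { PRAGMA case_sensitive_like = on }
    puts [db eval {EXPLAIN QUERY PLAN SELECT x FROM t1 WHERE x LIKE 'abc%'}]  ;# expected: uses index i1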
Changes to test/loadext.test.
︙ | ︙ | |||
130 131 132 133 134 135 136 | db2 close sqlite3 db test.db sqlite3_enable_load_extension db 1 # Try to load an extension for which the file does not exist. # do_test loadext-2.1 { | | | 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 | db2 close sqlite3 db test.db sqlite3_enable_load_extension db 1 # Try to load an extension for which the file does not exist. # do_test loadext-2.1 { forcedelete ${testextension}xx set rc [catch { sqlite3_load_extension db "${testextension}xx" } msg] list $rc $msg } [list 1 [format $dlerror_nosuchfile ${testextension}xx]] # Try to load an extension for which the file is not a shared object |
︙ | ︙ |
Changes to test/lock4.test.
︙ | ︙ | |||
22 23 24 25 26 27 28 | # Initialize the test.db database so that it is non-empty # do_test lock4-1.1 { db eval { PRAGMA auto_vacuum=OFF; CREATE TABLE t1(x); } | | | 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 | # Initialize the test.db database so that it is non-empty # do_test lock4-1.1 { db eval { PRAGMA auto_vacuum=OFF; CREATE TABLE t1(x); } forcedelete test2.db test2.db-journal sqlite3 db2 test2.db db2 eval { PRAGMA auto_vacuum=OFF; CREATE TABLE t2(x) } db2 close list [file size test.db] [file size test2.db] |
︙ | ︙ |
Changes to test/lock5.test.
︙ | ︙ | |||
20 21 22 23 24 25 26 | # SQLITE_ENABLE_LOCKING_STYLE macro. db close if {[catch {sqlite3 db test.db -vfs unix-none} msg]} { finish_test return } db close | | | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 | # SQLITE_ENABLE_LOCKING_STYLE macro. db close if {[catch {sqlite3 db test.db -vfs unix-none} msg]} { finish_test return } db close forcedelete test.db.lock ifcapable lock_proxy_pragmas { set ::using_proxy 0 foreach {name value} [array get env SQLITE_FORCE_PROXY_LOCKING] { set ::using_proxy $value } # Disable the proxy locking for these tests |
︙ | ︙ | |||
96 97 98 99 100 101 102 | execsql {BEGIN EXCLUSIVE} db close file exists test.db.lock } {0} ##################################################################### | | | 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 | execsql {BEGIN EXCLUSIVE} db close file exists test.db.lock } {0} ##################################################################### forcedelete test.db if {[catch {sqlite3 db test.db -vfs unix-flock} msg]} { finish_test return } # Only run the flock tests on a local file system if { [path_is_local "."] } { |
︙ | ︙ |
Changes to test/main.test.
︙ | ︙ | |||
297 298 299 300 301 302 303 | # Try to open a database with a corrupt database file. # if {[permutation] == ""} { do_test main-2.0 { catch {db close} | | | | | | | 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 | # Try to open a database with a corrupt database file. # if {[permutation] == ""} { do_test main-2.0 { catch {db close} forcedelete test.db set fd [open test.db w] puts $fd hi! close $fd set v [catch {sqlite3 db test.db} msg] if {$v} {lappend v $msg} {lappend v {}} } {0 {}} } # Here are some tests for tokenize.c. # do_test main-3.1 { catch {db close} catch {foreach f [glob -nocomplain testdb/*] {forcedelete $f}} forcedelete testdb sqlite3 db testdb set v [catch {execsql {SELECT * from T1 where x!!5}} msg] lappend v $msg } {1 {unrecognized token: "!!"}} do_test main-3.2 { catch {db close} catch {foreach f [glob -nocomplain testdb/*] {forcedelete $f}} forcedelete testdb sqlite3 db testdb set v [catch {execsql {SELECT * from T1 where ^x}} msg] lappend v $msg } {1 {unrecognized token: "^"}} do_test main-3.2.2 { catchsql {select 'abc} } {1 {unrecognized token: "'abc"}} |
︙ | ︙ | |||
438 439 440 441 442 443 444 | do_test main-3.2.30 { catchsql {select 123--5} } {0 123} do_test main-3.3 { catch {db close} | | | | 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 | do_test main-3.2.30 { catchsql {select 123--5} } {0 123} do_test main-3.3 { catch {db close} catch {foreach f [glob -nocomplain testdb/*] {forcedelete $f}} forcedelete testdb sqlite3 db testdb execsql { create table T1(X REAL); /* C-style comments allowed */ insert into T1 values(0.5); insert into T1 values(0.5e2); insert into T1 values(0.5e-002); insert into T1 values(5e-002); |
︙ | ︙ |
Changes to test/malloc.test.
︙ | ︙ | |||
266 267 268 269 270 271 272 | } # This block tests malloc() failures that occur while opening a # connection to a database. do_malloc_test 10 -tclprep { catch {db2 close} db close | | | 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 | } # This block tests malloc() failures that occur while opening a # connection to a database. do_malloc_test 10 -tclprep { catch {db2 close} db close forcedelete test.db test.db-journal sqlite3 db test.db sqlite3_extended_result_codes db 1 db eval {CREATE TABLE abc(a, b, c)} } -tclbody { db close sqlite3 db2 test.db sqlite3_extended_result_codes db2 1 |
︙ | ︙ | |||
337 338 339 340 341 342 343 | PRAGMA journal_mode = DELETE; /* For inmemory_journal permutation */ PRAGMA synchronous = 0; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); BEGIN; INSERT INTO t1 VALUES(3, 4); } | | | | 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 | PRAGMA journal_mode = DELETE; /* For inmemory_journal permutation */ PRAGMA synchronous = 0; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); BEGIN; INSERT INTO t1 VALUES(3, 4); } forcecopy test2.db test.db forcecopy test2.db-journal test.db-journal db2 close } -tclbody { sqlite3 db test.db sqlite3_extended_result_codes db 1 # If an out-of-memory occurs within a call to a VFS layer function during # hot-journal rollback, sqlite will report SQLITE_CORRUPT. See commit |
︙ | ︙ | |||
495 496 497 498 499 500 501 | # Make sure SQLITE_NOMEM is reported out on an ATTACH failure even # when the malloc failure occurs within the nested parse. # ifcapable attach { do_malloc_test 20 -tclprep { db close | | | 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 | # Make sure SQLITE_NOMEM is reported out on an ATTACH failure even # when the malloc failure occurs within the nested parse. # ifcapable attach { do_malloc_test 20 -tclprep { db close forcedelete test2.db test2.db-journal sqlite3 db test2.db sqlite3_extended_result_codes db 1 db eval {CREATE TABLE t1(x);} db close } -tclbody { if {[catch {sqlite3 db test.db}]} { error "out of memory" |
︙ | ︙ |
Changes to test/malloc3.test.
︙ | ︙ | |||
459 460 461 462 463 464 465 | SELECT * FROM v1 WHERE d = g; } } {a b c a b c 1 2 3 1 2 3} } # Test a simple multi-file transaction # | | | 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 | SELECT * FROM v1 WHERE d = g; } } {a b c a b c 1 2 3 1 2 3} } # Test a simple multi-file transaction # forcedelete test2.db ifcapable attach { SQL {ATTACH 'test2.db' AS aux;} SQL {BEGIN} SQL {CREATE TABLE aux.tbl2(x, y, z)} SQL {INSERT INTO tbl2 VALUES(1, 2, 3)} SQL {INSERT INTO def VALUES(4, 5, 6)} TEST 30 { |
︙ | ︙ | |||
646 647 648 649 650 651 652 | # run the tests with "persistent" malloc failures. sqlite3_extended_result_codes db 1 db cache size 0 run_test $::run_test_script 1 # Close and reopen the db. db close | | | 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 | # run the tests with "persistent" malloc failures. sqlite3_extended_result_codes db 1 db cache size 0 run_test $::run_test_script 1 # Close and reopen the db. db close forcedelete test.db test.db-journal test2.db test2.db-journal sqlite3 db test.db sqlite3_extended_result_codes db 1 set ::DB [sqlite3_connection_pointer db] # Turn off the Tcl interface's prepared statement caching facility in # the new connection. Then run the tests with "transient" malloc failures. db cache size 0 run_test $::run_test_script 0 sqlite3_memdebug_fail -1 finish_test
Changes to test/malloc5.test.
︙ | ︙ | |||
271 272 273 274 275 276 277 | # proc nPage {db} { set bt [btree_from_db $db] array set stats [btree_pager_stats $bt] set stats(page) } db close | | | 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 | # proc nPage {db} { set bt [btree_from_db $db] array set stats [btree_pager_stats $bt] set stats(page) } db close forcedelete test.db test.db-journal test2.db test2.db-journal # This block of test-cases (malloc5-6.1.*) prepares two database files # for the subsequent tests. do_test malloc5-6.1.1 { sqlite3 db test.db execsql { PRAGMA page_size=1024; |
︙ | ︙ | |||
300 301 302 303 304 305 306 | SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; INSERT INTO abc SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; INSERT INTO abc SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; COMMIT; } | | | 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 | SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; INSERT INTO abc SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; INSERT INTO abc SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; COMMIT; } forcecopy test.db test2.db sqlite3 db2 test2.db list \ [expr ([file size test.db]/1024)>20] [expr ([file size test2.db]/1024)>20] } {1 1} do_test malloc5-6.1.2 { list [execsql {PRAGMA cache_size}] [execsql {PRAGMA cache_size} db2] } {10 10} |
︙ | ︙ |
Changes to test/mallocA.test.
︙ | ︙ | |||
23 24 25 26 27 28 29 | finish_test return } # Construct a test database # | | | | 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 | finish_test return } # Construct a test database # forcedelete test.db.bu db eval { CREATE TABLE t1(a COLLATE NOCASE,b,c); INSERT INTO t1 VALUES(1,2,3); INSERT INTO t1 VALUES(1,2,4); INSERT INTO t1 VALUES(2,3,4); CREATE INDEX t1i1 ON t1(a); CREATE INDEX t1i2 ON t1(b,c); CREATE TABLE t2(x,y,z); } db close copy_file test.db test.db.bu do_malloc_test mallocA-1 -testdb test.db.bu -sqlbody { ANALYZE } do_malloc_test mallocA-1.1 -testdb test.db.bu -sqlbody { ANALYZE t1 |
︙ | ︙ | |||
70 71 72 73 74 75 76 | # Ensure that no file descriptors were leaked. do_test malloc-99.X { catch {db close} set sqlite_open_file_count } {0} | | | 70 71 72 73 74 75 76 77 78 | # Ensure that no file descriptors were leaked. do_test malloc-99.X { catch {db close} set sqlite_open_file_count } {0} forcedelete test.db.bu finish_test |
Changes to test/malloc_common.tcl.
︙ | ︙ | |||
404 405 406 407 408 409 410 | # fails and then subsequent calls succeed. If $::iRepeat is 1, # then the failure is persistent - once malloc() fails it keeps # failing. # set zRepeat "transient" if {$::iRepeat} {set zRepeat "persistent"} restore_prng_state | | | | 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 | # fails and then subsequent calls succeed. If $::iRepeat is 1, # then the failure is persistent - once malloc() fails it keeps # failing. # set zRepeat "transient" if {$::iRepeat} {set zRepeat "persistent"} restore_prng_state catch {foreach file [glob -nocomplain test.db-mj*] {forcedelete $file}} do_test ${tn}.${zRepeat}.${::n} { # Remove all traces of database files test.db and test2.db # from the file-system. Then open (empty database) "test.db" # with the handle [db]. # catch {db close} catch {db2 close} forcedelete test.db forcedelete test.db-journal forcedelete test.db-wal forcedelete test2.db forcedelete test2.db-journal forcedelete test2.db-wal if {[info exists ::mallocopts(-testdb)]} { copy_file $::mallocopts(-testdb) test.db } catch { sqlite3 db test.db } if {[info commands db] ne ""} { sqlite3_extended_result_codes db 1 } sqlite3_db_config_lookaside db 0 0 0 |
︙ | ︙ |
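Most of the test-script hunks in this check-in, including the malloc_common.tcl change above, switch file clean-up and copying over to the forcedelete, delete_file, forcecopy and copy_file wrapper procs supplied by the test harness. The wrapper definitions themselves are not part of this diff, so the sketch below is only an illustration, under the assumption that such a helper clears any read-only attribute and retries the delete a few times (useful on Windows, where a stale read-only bit or a briefly held file handle can make a single delete attempt fail). The proc name, retry count and delay are illustrative, not the harness code.

  # Hypothetical sketch of a forcedelete-style helper; the real helper is
  # defined by the test harness (tester.tcl) and may differ in detail.
  proc forcedelete_sketch {args} {
    foreach filename $args {
      # Clear a read-only attribute where the platform supports one ...
      catch {file attributes $filename -readonly 0}
      # ... then retry the delete a few times in case the file is
      # transiently locked by another process.
      for {set i 0} {$i < 5} {incr i} {
        if {![catch {file delete -force $filename}]} break
        after 10   ;# wait 10 ms before trying again
      }
    }
  }

A forcecopy-style helper can be read the same way: remove the destination with a helper like the one above, then "file copy" the source into place.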
Changes to test/manydb.test.
︙ | ︙ | |||
36 37 38 39 40 41 42 | lappend filehandles [open testfile.1 w] } } foreach fd $filehandles { close $fd } catch { | | | 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 | lappend filehandles [open testfile.1 w] } } foreach fd $filehandles { close $fd } catch { forcedelete testfile.1 } set N [expr $i / $num_fd_per_openwrite_db] # Create a bunch of random database names # unset -nocomplain dbname unset -nocomplain used |
︙ | ︙ | |||
84 85 86 87 88 89 90 | # Close the databases and erase the files. # for {set i 0} {$i<$N} {incr i} { do_test manydb-3.$i { db$i close | | | 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 | # Close the databases and erase the files. # for {set i 0} {$i<$N} {incr i} { do_test manydb-3.$i { db$i close forcedelete $dbname($i) } {} } finish_test |
Changes to test/memdb.test.
︙ | ︙ | |||
405 406 407 408 409 410 411 | } } 0 # Test that auto-vacuum works with in-memory databases. # set msize [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] if {[lindex $msize 2]!=0} { | | | 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 | } } 0 # Test that auto-vacuum works with in-memory databases. # set msize [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] if {[lindex $msize 2]!=0} { ifcapable autovacuum&&!blockalloc { do_test memdb-9.1 { db close sqlite3 db test.db db cache size 0 execsql { PRAGMA auto_vacuum = full; CREATE TABLE t1(a); |
︙ | ︙ |
Changes to test/memsubsys1.test.
︙ | ︙ | |||
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 | # by default when a new database connection is opened. As a result, it # will not work with the "memsubsys1" permutation. # if {[permutation] == "memsubsys1"} { finish_test return } # This procedure constructs a new database in test.db. It fills # this database with many small records (enough to force multiple # rebalance operations in the btree-layer and to require a large # page cache), verifies correct results, then returns. # proc build_test_db {testname pragmas} { catch {db close} | > > > > > > > | | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 | # by default when a new database connection is opened. As a result, it # will not work with the "memsubsys1" permutation. # if {[permutation] == "memsubsys1"} { finish_test return } # Nor will it work if the pager is allocating memory in blocks. # ifcapable blockalloc { finish_test return } # This procedure constructs a new database in test.db. It fills # this database with many small records (enough to force multiple # rebalance operations in the btree-layer and to require a large # page cache), verifies correct results, then returns. # proc build_test_db {testname pragmas} { catch {db close} forcedelete test.db test.db-journal sqlite3 db test.db sqlite3_db_config_lookaside db 0 0 0 db eval $pragmas db eval { CREATE TABLE t1(x, y); CREATE TABLE t2(a, b); CREATE INDEX i1 ON t1(x,y); |
︙ | ︙ |
Changes to test/memsubsys2.test.
︙ | ︙ | |||
20 21 22 23 24 25 26 | # This procedure constructs a new database in test.db. It fills # this database with many small records (enough to force multiple # rebalance operations in the btree-layer and to require a large # page cache), verifies correct results, then returns. # proc build_test_db {testname pragmas} { catch {db close} | | | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 | # This procedure constructs a new database in test.db. It fills # this database with many small records (enough to force multiple # rebalance operations in the btree-layer and to require a large # page cache), verifies correct results, then returns. # proc build_test_db {testname pragmas} { catch {db close} forcedelete test.db test.db-journal sqlite3 db test.db db eval $pragmas db eval { CREATE TABLE t1(x, y); CREATE TABLE t2(a, b); CREATE INDEX i1 ON t1(x,y); INSERT INTO t1 VALUES(1, 100); |
︙ | ︙ |
Changes to test/misc1.test.
︙ | ︙ | |||
487 488 489 490 491 492 493 | } {0} do_test misc1-14.2b { execsql {UPDATE t1 SET a=a||'y' WHERE 1} file exists ../test.db-journal } {1} do_test misc1-14.3 { cd .. | | | 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 | } {0} do_test misc1-14.2b { execsql {UPDATE t1 SET a=a||'y' WHERE 1} file exists ../test.db-journal } {1} do_test misc1-14.3 { cd .. forcedelete tempdir execsql {COMMIT} file exists ./test.db-journal } {0} # A failed create table should not leave the table in the internal # data structures. Ticket #238. # |
︙ | ︙ |
Changes to test/misc2.test.
︙ | ︙ | |||
161 162 163 164 165 166 167 | # # 2006-08-16: This has changed. It is now permitted to update # the table being SELECTed from within the callback of the query. # ifcapable tclvar { do_test misc2-7.1 { db close | | | 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 | # # 2006-08-16: This has changed. It is now permitted to update # the table being SELECTed from within the callback of the query. # ifcapable tclvar { do_test misc2-7.1 { db close forcedelete test.db sqlite3 db test.db execsql { CREATE TABLE t1(x); INSERT INTO t1 VALUES(1); INSERT INTO t1 VALUES(2); INSERT INTO t1 VALUES(3); SELECT * FROM t1; |
︙ | ︙ | |||
261 262 263 264 265 266 267 | # Repeat the tests 7.1 through 7.8 above but this time do the SELECTs # in reverse order so that we exercise the sqlite3BtreePrev() routine # instead of sqlite3BtreeNext() # do_test misc2-7.11 { db close | | | 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 | # Repeat the tests 7.1 through 7.8 above but this time do the SELECTs # in reverse order so that we exercise the sqlite3BtreePrev() routine # instead of sqlite3BtreeNext() # do_test misc2-7.11 { db close forcedelete test.db sqlite3 db test.db execsql { CREATE TABLE t1(x); INSERT INTO t1 VALUES(1); INSERT INTO t1 VALUES(2); INSERT INTO t1 VALUES(3); SELECT * FROM t1; |
︙ | ︙ | |||
357 358 359 360 361 362 363 | } } execsql {SELECT * FROM t1} } {1 2 3 4 5 6 7 8 9 10} } db close | | | 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 | } } execsql {SELECT * FROM t1} } {1 2 3 4 5 6 7 8 9 10} } db close forcedelete test.db sqlite3 db test.db catchsql { pragma recursive_triggers = off } # Ticket #453. If the SQL ended with "-", the tokenizer was calling that # an incomplete token, which caused problem. The solution was to just call # it a minus sign. # |
︙ | ︙ |
Changes to test/misc5.test.
︙ | ︙ | |||
510 511 512 513 514 515 516 | # Ticket #1370. Do not overwrite small files (less than 1024 bytes) # when trying to open them as a database. # if {[permutation] == ""} { do_test misc5-4.1 { db close | | | 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 | # Ticket #1370. Do not overwrite small files (less than 1024 bytes) # when trying to open them as a database. # if {[permutation] == ""} { do_test misc5-4.1 { db close forcedelete test.db set fd [open test.db w] puts $fd "This is not really a database" close $fd sqlite3 db test.db catchsql { CREATE TABLE t1(a,b,c); } |
︙ | ︙ | |||
540 541 542 543 544 545 546 | execsql {SELECT .4e+1} } 4.0 # Ticket #1582. Ensure that an unknown table in a LIMIT clause applied to # a UNION ALL query causes an error, not a crash. # db close | | | 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 | execsql {SELECT .4e+1} } 4.0 # Ticket #1582. Ensure that an unknown table in a LIMIT clause applied to # a UNION ALL query causes an error, not a crash. # db close forcedelete test.db sqlite3 db test.db ifcapable subquery&&compound { do_test misc5-6.1 { catchsql { SELECT * FROM sqlite_master UNION ALL SELECT * FROM sqlite_master |
︙ | ︙ |
Changes to test/misc7.test.
︙ | ︙ | |||
26 27 28 29 30 31 32 | do_test misc7-3 { c_collation_test } {} # Try to open a directory: # do_test misc7-4 { | | | | 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 | do_test misc7-3 { c_collation_test } {} # Try to open a directory: # do_test misc7-4 { delete_file mydir file mkdir mydir set rc [catch { sqlite3 db2 ./mydir } msg] list $rc $msg } {1 {unable to open database file}} # Try to open a file with a directory where its journal file should be. # do_test misc7-5 { delete_file mydir file mkdir mydir-journal sqlite3 db2 ./mydir catchsql { CREATE TABLE abc(a, b, c); } db2 } {1 {unable to open database file}} db2 close |
︙ | ︙ | |||
148 149 150 151 152 153 154 | db2 close #-------------------------------------------------------------------- # Test that nothing goes horribly wrong when attaching a database # after the omit_readlock pragma has been exercised. # do_test misc7-7.1 { | | | | 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 | db2 close #-------------------------------------------------------------------- # Test that nothing goes horribly wrong when attaching a database # after the omit_readlock pragma has been exercised. # do_test misc7-7.1 { forcedelete test2.db forcedelete test2.db-journal execsql { PRAGMA omit_readlock = 1; ATTACH 'test2.db' AS aux; CREATE TABLE aux.hello(world); SELECT name FROM aux.sqlite_master; } } {hello} |
︙ | ︙ | |||
250 251 252 253 254 255 256 | set ::echo_module_cost 2.0e+99 execsql {SELECT * FROM t1 WHERE a = 1;} } {1 2 3} unset ::echo_module_cost } db close | | | | 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 | set ::echo_module_cost 2.0e+99 execsql {SELECT * FROM t1 WHERE a = 1;} } {1 2 3} unset ::echo_module_cost } db close forcedelete test.db forcedelete test.db-journal sqlite3 db test.db ifcapable explain { do_execsql_test misc7-14.1 { CREATE TABLE abc(a PRIMARY KEY, b, c); EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 WHERE rowid = 1; } { |
︙ | ︙ | |||
274 275 276 277 278 279 280 | EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 ORDER BY a; } {0 0 0 {SCAN TABLE abc AS t2 USING INDEX sqlite_autoindex_abc_1 (~1000000 rows)} } } db close | | | | 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 | EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 ORDER BY a; } {0 0 0 {SCAN TABLE abc AS t2 USING INDEX sqlite_autoindex_abc_1 (~1000000 rows)} } } db close forcedelete test.db forcedelete test.db-journal sqlite3 db test.db #-------------------------------------------------------------------- # This is all to force the pager_remove_from_stmt_list() function # (inside pager.c) to remove a pager from the middle of the # statement-list. # |
︙ | ︙ | |||
319 320 321 322 323 324 325 | DELETE FROM abc WHERE rowid > 12; INSERT INTO abc SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; } } {} db close | | | | 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 | DELETE FROM abc WHERE rowid > 12; INSERT INTO abc SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; } } {} db close forcedelete test.db forcedelete test.db-journal sqlite3 db test.db do_ioerr_test misc7-16 -sqlprep { PRAGMA cache_size = 10; PRAGMA default_cache_size = 10; CREATE TABLE t3(a, b, UNIQUE(a, b)); INSERT INTO t3 VALUES( randstr(100, 100), randstr(100, 100) ); |
︙ | ︙ | |||
381 382 383 384 385 386 387 | if {[file attributes test.db -permissions]==0644} { do_test misc7-17.1 { execsql { BEGIN; DELETE FROM t3 WHERE (oid%3)==0; } | | | | | | 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 | if {[file attributes test.db -permissions]==0644} { do_test misc7-17.1 { execsql { BEGIN; DELETE FROM t3 WHERE (oid%3)==0; } forcecopy test.db bak.db forcecopy test.db-journal bak.db-journal execsql { COMMIT; } db close forcecopy bak.db test.db forcecopy bak.db-journal test.db-journal sqlite3 db test.db catch {file attributes test.db-journal -permissions r--------} catch {file attributes test.db-journal -readonly 1} catchsql { SELECT count(*) FROM t3; } |
︙ | ︙ | |||
480 481 482 483 484 485 486 | set zFile [file join [pwd] "[string repeat abcde 104].db"] set rc [catch {sqlite3 db2 $zFile} msg] list $rc $msg } {1 {unable to open database file}} db close | | | 480 481 482 483 484 485 486 487 488 489 | set zFile [file join [pwd] "[string repeat abcde 104].db"] set rc [catch {sqlite3 db2 $zFile} msg] list $rc $msg } {1 {unable to open database file}} db close forcedelete test.db finish_test |
Changes to test/misuse.test.
︙ | ︙ | |||
35 36 37 38 39 40 41 | } # Make sure the test logic works # do_test misuse-1.1 { db close | | | | 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 | } # Make sure the test logic works # do_test misuse-1.1 { db close catch {forcedelete test2.db} catch {forcedelete test2.db-journal} sqlite3 db test2.db; set ::DB [sqlite3_connection_pointer db] execsql { CREATE TABLE t1(a,b); INSERT INTO t1 VALUES(1,2); } catchsql2 { SELECT * FROM t1 |
︙ | ︙ |
Changes to test/multiplex.test.
︙ | ︙ | |||
147 148 149 150 151 152 153 | # # multiplex-2.7.*: Disable/enable tests. # sqlite3_multiplex_initialize "" 1 multiplex_set db main 32768 16 | | | 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 | # # multiplex-2.7.*: Disable/enable tests. # sqlite3_multiplex_initialize "" 1 multiplex_set db main 32768 16 forcedelete test.x do_test multiplex-2.1.2 { sqlite3 db test.x execsql { PRAGMA page_size=1024; PRAGMA auto_vacuum=OFF; PRAGMA journal_mode=DELETE; } |
︙ | ︙ | |||
516 517 518 519 520 521 522 | do_test multiplex-5.4.1 { catch { db close } multiplex_delete test.db file mkdir test.db list [catch { sqlite3 db test.db } msg] $msg } {1 {unable to open database file}} | | | 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 | do_test multiplex-5.4.1 { catch { db close } multiplex_delete test.db file mkdir test.db list [catch { sqlite3 db test.db } msg] $msg } {1 {unable to open database file}} catch { delete_file test.db } do_faultsim_test multiplex-5.5 -prep { catch { sqlite3_multiplex_shutdown } } -body { sqlite3_multiplex_initialize "" 1 multiplex_set db main 32768 16 } |
︙ | ︙ |
Changes to test/notify1.test.
︙ | ︙ | |||
136 137 138 139 140 141 142 | # # Test for slightly more complex deadlock involving three database # connections: db, db2 and db3. # do_test notify1-2.3.1 { db close db2 close | | | 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 | # # Test for slightly more complex deadlock involving three database # connections: db, db2 and db3. # do_test notify1-2.3.1 { db close db2 close forcedelete test.db test2.db test3.db foreach con {db db2 db3} { sqlite3 $con test.db $con eval { ATTACH 'test2.db' AS aux2 } $con eval { ATTACH 'test3.db' AS aux3 } } execsql { CREATE TABLE main.t1(a, b); |
︙ | ︙ | |||
290 291 292 293 294 295 296 | # # notify1-6.4.*: Like 6.3.*, except that instead of the second blocker # committing its transaction, the first does. The # unlock-notify callback is therefore invoked. # db close do_test notify1-6.1.1 { | | | 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 | # # notify1-6.4.*: Like 6.3.*, except that instead of the second blocker # committing its transaction, the first does. The # unlock-notify callback is therefore invoked. # db close do_test notify1-6.1.1 { forcedelete test.db test2.db foreach conn {db db2 db3} { sqlite3 $conn test.db execsql { ATTACH 'test2.db' AS two } $conn } execsql { CREATE TABLE t1(a, b); CREATE TABLE two.t2(a, b); |
︙ | ︙ |
Changes to test/notify2.test.
︙ | ︙ | |||
163 164 165 166 167 168 169 | expr 0 } foreach {iTest xStep xPrepare} { 1 sqlite3_blocking_step sqlite3_blocking_prepare_v2 2 sqlite3_step sqlite3_nonblocking_prepare_v2 } { | | | 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 | expr 0 } foreach {iTest xStep xPrepare} { 1 sqlite3_blocking_step sqlite3_blocking_prepare_v2 2 sqlite3_step sqlite3_nonblocking_prepare_v2 } { forcedelete test.db test2.db test3.db set ThreadSetup "set xStep $xStep;set xPrepare $xPrepare;set nSecond $nSecond" # Set up the database schema used by this test. Each thread opens file # test.db as the main database, then attaches files test2.db and test3.db # as auxiliary databases. Each file contains a single table (t1, t2 and t3, in # files test.db, test2.db and test3.db, respectively).
︙ | ︙ |
Changes to test/notify3.test.
︙ | ︙ | |||
21 22 23 24 25 26 27 | finish_test return } set esc [sqlite3_enable_shared_cache 1] sqlite3 db test.db | | | 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 | finish_test return } set esc [sqlite3_enable_shared_cache 1] sqlite3 db test.db forcedelete test.db2 test.db2-journal test.db2-wal sqlite3 db2 test.db2 do_test notify3-1.1 { execsql { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES('t1 A', 't1 B'); } |
︙ | ︙ |
Changes to test/openv2.test.
︙ | ︙ | |||
13 14 15 16 17 18 19 | # # $Id: openv2.test,v 1.2 2009/06/11 17:32:45 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl db close | | | 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 | # # $Id: openv2.test,v 1.2 2009/06/11 17:32:45 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl db close forcedelete test.db test.db-journal do_test openv2-1.1 { set rc [catch {sqlite3 db test.db -create 0} msg] lappend rc $msg } {1 {unable to open database file}} do_test openv2-1.2 { info commands db } {} |
︙ | ︙ |
Changes to test/pager1.test.
︙ | ︙ | |||
448 449 450 451 452 453 454 | execsql { SELECT count(*) FROM t1; PRAGMA integrity_check; } } {4 ok} do_test pager1.4.2.3 { faultsim_restore_and_reopen | | | | 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 | execsql { SELECT count(*) FROM t1; PRAGMA integrity_check; } } {4 ok} do_test pager1.4.2.3 { faultsim_restore_and_reopen foreach f [glob test.db-mj*] { forcedelete $f } execsql { SELECT count(*) FROM t1; PRAGMA integrity_check; } } {64 ok} do_test pager1.4.2.4 { faultsim_restore_and_reopen hexio_write test.db-journal [expr [file size test.db-journal]-20] 123456 execsql { SELECT count(*) FROM t1; PRAGMA integrity_check; } } {4 ok} do_test pager1.4.2.5 { faultsim_restore_and_reopen hexio_write test.db-journal [expr [file size test.db-journal]-20] 123456 foreach f [glob test.db-mj*] { forcedelete $f } execsql { SELECT count(*) FROM t1; PRAGMA integrity_check; } } {4 ok} } |
︙ | ︙ | |||
647 648 649 650 651 652 653 | # Restore the file-system again. This time, before reopening the databases, # delete the master-journal file from the file-system. It now appears that # the transaction was committed (no master-journal file == no rollback). # do_test pager1-4.4.$tn.7 { faultsim_restore_and_reopen $prefix | | | | 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 | # Restore the file-system again. This time, before reopening the databases, # delete the master-journal file from the file-system. It now appears that # the transaction was committed (no master-journal file == no rollback). # do_test pager1-4.4.$tn.7 { faultsim_restore_and_reopen $prefix foreach f [glob ${prefix}-mj*] { forcedelete $f } execsql "ATTACH '${prefix}2' AS aux" } {} do_execsql_test pager1-4.4.$tn.8 { SELECT * FROM a } {double-you why zed won too free} do_execsql_test pager1-4.4.$tn.9 { SELECT * FROM b } {won too free double-you why zed} } cd $pwd } db close tv delete forcedelete $dirname } # Set up a VFS to make a copy of the file-system just before deleting a # journal file to commit a transaction. The transaction modifies exactly # two database pages (and page 1 - the change counter). # |
︙ | ︙ | |||
881 882 883 884 885 886 887 | catch {file attributes test.db-journal -readonly 1} catchsql { SELECT * FROM t1 } } {1 {unable to open database file}} do_test pager1.4.7.3 { db close catch {file attributes test.db-journal -permissions rw-rw-rw-} catch {file attributes test.db-journal -readonly 0} | | | 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 | catch {file attributes test.db-journal -readonly 1} catchsql { SELECT * FROM t1 } } {1 {unable to open database file}} do_test pager1.4.7.3 { db close catch {file attributes test.db-journal -permissions rw-rw-rw-} catch {file attributes test.db-journal -readonly 0} delete_file test.db-journal file exists test.db-journal } {0} #------------------------------------------------------------------------- # The following tests deal with multi-file commits. # # pager1-5.1.*: The case where a multi-file cannot be committed because |
︙ | ︙ | |||
2372 2373 2374 2375 2376 2377 2378 | # Test that if an empty database file (size 0 bytes) is opened in # exclusive-locking mode, any journal file is deleted from the file-system # without being rolled back. And that the RESERVED lock obtained while # doing this is not released. # do_test pager1-30.1 { db close | | | | 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 | # Test that if an empty database file (size 0 bytes) is opened in # exclusive-locking mode, any journal file is deleted from the file-system # without being rolled back. And that the RESERVED lock obtained while # doing this is not released. # do_test pager1-30.1 { db close delete_file test.db delete_file test.db-journal set fd [open test.db-journal w] seek $fd [expr 512+1032*2] puts -nonewline $fd x close $fd sqlite3 db test.db execsql { |
︙ | ︙ | |||
2413 2414 2415 2416 2417 2418 2419 | INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; BEGIN; UPDATE t1 SET y = randomblob(1499); } | | | | 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 | INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; INSERT INTO t1 SELECT randomblob(1500), randomblob(1500) FROM t1; BEGIN; UPDATE t1 SET y = randomblob(1499); } copy_file test.db test.db2 copy_file test.db-journal test.db2-journal hexio_write test.db2-journal 24 00000000 sqlite3 db2 test.db2 execsql { PRAGMA integrity_check } db2 } {ok} } finish_test |
Changes to test/pagerfault.test.
︙ | ︙ | |||
229 230 231 232 233 234 235 | } -test { faultsim_test_result {0 {}} faultsim_integrity_check } do_faultsim_test pagerfault-5.3 -faults oom-transient -prep { faultsim_restore_and_reopen db func a_string a_string | | | 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 | } -test { faultsim_test_result {0 {}} faultsim_integrity_check } do_faultsim_test pagerfault-5.3 -faults oom-transient -prep { faultsim_restore_and_reopen db func a_string a_string forcedelete test2.db test2.db-journal test2.db-wal execsql { PRAGMA journal_mode = PERSIST; ATTACH 'test2.db' AS aux; PRAGMA aux.journal_mode = PERSIST; PRAGMA aux.journal_size_limit = 0; } } -body { |
︙ | ︙ | |||
637 638 639 640 641 642 643 | PRAGMA journal_mode = PERSIST; BEGIN; CREATE TABLE t1(x, y UNIQUE); INSERT INTO t1 VALUES(a_string(333), a_string(444)); COMMIT; } db close | | | 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 | PRAGMA journal_mode = PERSIST; BEGIN; CREATE TABLE t1(x, y UNIQUE); INSERT INTO t1 VALUES(a_string(333), a_string(444)); COMMIT; } db close forcedelete test.db faultsim_save } {} do_faultsim_test pagerfault-13 -prep { faultsim_restore_and_reopen } -body { execsql { CREATE TABLE xx(a, b) } } -test { |
︙ | ︙ | |||
1135 1136 1137 1138 1139 1140 1141 | # PagerCommitPhaseOne(<in-memory-db>) -> SQLITE_OK # PagerCommitPhaseOne(<file-db>) -> SQLITE_IOERR # PagerRollback(<in-memory-db>) # PagerRollback(<file-db>) # do_faultsim_test pagerfault-23 -prep { sqlite3 db :memory: | | | 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 | # PagerCommitPhaseOne(<in-memory-db>) -> SQLITE_OK # PagerCommitPhaseOne(<file-db>) -> SQLITE_IOERR # PagerRollback(<in-memory-db>) # PagerRollback(<file-db>) # do_faultsim_test pagerfault-23 -prep { sqlite3 db :memory: foreach f [glob -nocomplain test.db*] { forcedelete $f } db eval { ATTACH 'test.db2' AS aux; CREATE TABLE t1(a, b); CREATE TABLE aux.t2(a, b); } } -body { execsql { |
︙ | ︙ |
Changes to test/pagesize.test.
︙ | ︙ | |||
38 39 40 41 42 43 44 | PRAGMA page_size=2048; PRAGMA page_size; } } 1024 do_test pagesize-1.4 { db close | | | 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 | PRAGMA page_size=2048; PRAGMA page_size; } } 1024 do_test pagesize-1.4 { db close forcedelete test.db sqlite3 db test.db execsql { PRAGMA page_size=511; PRAGMA page_size; } } 1024 do_test pagesize-1.5 { |
︙ | ︙ | |||
95 96 97 98 99 100 101 | INSERT INTO t1 VALUES(2,3,4); SELECT * FROM t1; } } {1 2 3 2 3 4} } do_test pagesize-2.$PGSZ.1 { db close | | | 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 | INSERT INTO t1 VALUES(2,3,4); SELECT * FROM t1; } } {1 2 3 2 3 4} } do_test pagesize-2.$PGSZ.1 { db close forcedelete test.db sqlite3 db test.db execsql "PRAGMA page_size=$PGSZ" execsql { CREATE TABLE t1(x); PRAGMA page_size; } } $PGSZ |
︙ | ︙ | |||
186 187 188 189 190 191 192 | do_test pagesize-2.$PGSZ.16 { execsql {DROP TABLE t1} ifcapable {vacuum} {execsql VACUUM} } {} integrity_check pagesize-2.$PGSZ.17 db close | | | | 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 | do_test pagesize-2.$PGSZ.16 { execsql {DROP TABLE t1} ifcapable {vacuum} {execsql VACUUM} } {} integrity_check pagesize-2.$PGSZ.17 db close forcedelete test.db sqlite3 db test.db do_test pagesize-2.$PGSZ.30 { execsql " CREATE TABLE t1(x); PRAGMA temp.page_size=$PGSZ; CREATE TEMP TABLE t2(y); PRAGMA main.page_size; PRAGMA temp.page_size; " } [list 1024 $PGSZ] db close forcedelete test.db sqlite3 db test.db do_test pagesize-2.$PGSZ.40 { execsql " PRAGMA page_size=$PGSZ; CREATE TABLE t1(x); CREATE TEMP TABLE t2(y); PRAGMA main.page_size; PRAGMA temp.page_size; " } [list $PGSZ $PGSZ] } finish_test |
Changes to test/pcache2.test.
︙ | ︙ | |||
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 | # This file is focused on testing the pcache module. # # $Id: pcache2.test,v 1.5 2009/07/18 14:36:24 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Set up a pcache memory pool so that we can easily track how many # pages are being used for cache. # do_test pcache2-1.1 { db close sqlite3_reset_auto_extension sqlite3_shutdown sqlite3_config_pagecache 6000 100 sqlite3_initialize autoinstall_test_functions sqlite3_status SQLITE_STATUS_PAGECACHE_USED 1 sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0 } {0 0 0} # Open up two database connections to separate files. # do_test pcache2-1.2 { | > > > > > > > | | | 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 | # This file is focused on testing the pcache module. # # $Id: pcache2.test,v 1.5 2009/07/18 14:36:24 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # If compiled with blockalloc, pagecache memory is not used. Which # causes these tests to fail. # ifcapable blockalloc { finish_test return } # Set up a pcache memory pool so that we can easily track how many # pages are being used for cache. # do_test pcache2-1.1 { db close sqlite3_reset_auto_extension sqlite3_shutdown sqlite3_config_pagecache 6000 100 sqlite3_initialize autoinstall_test_functions sqlite3_status SQLITE_STATUS_PAGECACHE_USED 1 sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0 } {0 0 0} # Open up two database connections to separate files. # do_test pcache2-1.2 { forcedelete test.db test.db-journal sqlite3 db test.db db eval {PRAGMA cache_size=10} lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 1 } {2} do_test pcache2-1.3 { forcedelete test2.db test2.db-journal sqlite3 db2 test2.db db2 eval {PRAGMA cache_size=50} lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 1 } {4} # Make lots of changes on the first connection. Verify that the |
︙ | ︙ |
Changes to test/permutations.test.
︙ | ︙ | |||
107 108 109 110 111 112 113 | misc7.test mutex2.test notify2.test onefile.test pagerfault2.test savepoint4.test savepoint6.test select9.test speed1.test speed1p.test speed2.test speed3.test speed4.test speed4p.test sqllimits1.test tkt2686.test thread001.test thread002.test thread003.test thread004.test thread005.test trans2.test vacuum3.test incrvacuum_ioerr.test autovacuum_crash.test btree8.test shared_err.test vtab_err.test walslow.test walcrash.test | | | 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 | misc7.test mutex2.test notify2.test onefile.test pagerfault2.test savepoint4.test savepoint6.test select9.test speed1.test speed1p.test speed2.test speed3.test speed4.test speed4p.test sqllimits1.test tkt2686.test thread001.test thread002.test thread003.test thread004.test thread005.test trans2.test vacuum3.test incrvacuum_ioerr.test autovacuum_crash.test btree8.test shared_err.test vtab_err.test walslow.test walcrash.test walthread.test rtree3.test indexfault.test }] if {[info exists ::env(QUICKTEST_INCLUDE)]} { set allquicktests [concat $allquicktests $::env(QUICKTEST_INCLUDE)] } ############################################################################# # Start of tests |
︙ | ︙ | |||
516 517 518 519 520 521 522 | e_fts3.test fts3cov.test fts3malloc.test fts3rnd.test fts3snippet.test # Exclude test scripts that use tcl IO to access journal files or count # the number of fsync() calls. pager.test exclusive.test jrnlmode.test sync.test misc1.test journal1.test conflict.test crash8.test tkt3457.test io.test | | | | 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 | e_fts3.test fts3cov.test fts3malloc.test fts3rnd.test fts3snippet.test # Exclude test scripts that use tcl IO to access journal files or count # the number of fsync() calls. pager.test exclusive.test jrnlmode.test sync.test misc1.test journal1.test conflict.test crash8.test tkt3457.test io.test journal3.test 8_3_names.test pager1.test async4.test corrupt.test filefmt.test pager2.test corrupt5.test corruptA.test pageropt.test # Exclude stmt.test, which expects sub-journals to use temporary files. stmt.test # WAL mode is different. wal* tkt-2d1a5c67d.test backcompat.test }] ifcapable mem3 { test_suite "memsys3" -description { Run tests using the allocator in mem3.c. } -files [test_set $::allquicktests -exclude { autovacuum.test delete3.test manydb.test |
︙ | ︙ | |||
722 723 724 725 726 727 728 | } -initialize { catch {db close} register_jt_vfs -default "" } -shutdown { unregister_jt_vfs } -files [test_set $::allquicktests -exclude { wal* incrvacuum.test ioerr.test corrupt4.test io.test crash8.test | | | 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 | } -initialize { catch {db close} register_jt_vfs -default "" } -shutdown { unregister_jt_vfs } -files [test_set $::allquicktests -exclude { wal* incrvacuum.test ioerr.test corrupt4.test io.test crash8.test async4.test bigfile.test backcompat.test }] if {[info commands register_demovfs] != ""} { test_suite "demovfs" -description { Check that the demovfs (code in test_demovfs.c) more or less works. } -initialize { register_demovfs |
︙ | ︙ | |||
861 862 863 864 865 866 867 | if {[llength $argv]>1} { set extra [list -files [lrange $argv 1 end]] } eval run_tests $suite $::testspec($suite) $extra } } main $argv finish_test } | < | 861 862 863 864 865 866 867 | if {[llength $argv]>1} { set extra [list -files [lrange $argv 1 end]] } eval run_tests $suite $::testspec($suite) $extra } } main $argv finish_test } |
Changes to test/pragma.test.
︙ | ︙ | |||
47 48 49 50 51 52 53 | return } # Delete the preexisting database to avoid the special setup # that the "all.test" script does. # db close | | | | 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 | return } # Delete the preexisting database to avoid the special setup # that the "all.test" script does. # db close delete_file test.db test.db-journal delete_file test3.db test3.db-journal sqlite3 db test.db; set DB [sqlite3_connection_pointer db] ifcapable pager_pragmas { set DFLT_CACHE_SZ [db one {PRAGMA default_cache_size}] set TEMP_CACHE_SZ [db one {PRAGMA temp.default_cache_size}] do_test pragma-1.1 { |
︙ | ︙ | |||
217 218 219 220 221 222 223 | PRAGMA bogus = -1234; -- Parsing of negative values } } {} # Test modifying the safety_level of an attached database. ifcapable pager_pragmas&&attach { do_test pragma-2.1 { | | | | 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 | PRAGMA bogus = -1234; -- Parsing of negative values } } {} # Test modifying the safety_level of an attached database. ifcapable pager_pragmas&&attach { do_test pragma-2.1 { forcedelete test2.db forcedelete test2.db-journal execsql { ATTACH 'test2.db' AS aux; } } {} do_test pragma-2.2 { execsql { pragma aux.synchronous; |
︙ | ︙ | |||
251 252 253 254 255 256 257 | # Construct a corrupted index and make sure the integrity_check # pragma finds it. # # These tests won't work if the database is encrypted # do_test pragma-3.1 { db close | | | 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 | # Construct a corrupted index and make sure the integrity_check # pragma finds it. # # These tests won't work if the database is encrypted # do_test pragma-3.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql { PRAGMA auto_vacuum=OFF; BEGIN; CREATE TABLE t2(a,b,c); CREATE INDEX i2 ON t2(a); INSERT INTO t2 VALUES(11,2,3); |
︙ | ︙ | |||
308 309 310 311 312 313 314 | } {{rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} # Add additional corruption by appending unused pages to the end of # the database file testerr.db # do_test pragma-3.8 { execsql {DETACH t2} | | | 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 | } {{rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} # Add additional corruption by appending unused pages to the end of # the database file testerr.db # do_test pragma-3.8 { execsql {DETACH t2} forcedelete testerr.db testerr.db-journal set out [open testerr.db w] fconfigure $out -translation binary set in [open test.db r] fconfigure $in -translation binary puts -nonewline $out [read $in] seek $in 0 puts -nonewline $out [read $in] |
︙ | ︙ | |||
415 416 417 418 419 420 421 | } {{*** in database t2 *** Page 4 is never used Page 5 is never used Page 6 is never used} {rowid 1 missing from index i2}} } do_test pragma-3.19 { catch {db close} | | | 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 | } {{*** in database t2 *** Page 4 is never used Page 5 is never used Page 6 is never used} {rowid 1 missing from index i2}} } do_test pragma-3.19 { catch {db close} forcedelete test.db test.db-journal sqlite3 db test.db db eval {PRAGMA integrity_check} } {ok} } #exit # Test modifying the cache_size of an attached database. |
︙ | ︙ | |||
737 738 739 740 741 742 743 | sqlite3_step $::STMT } SQLITE_ERROR do_test pragma-8.1.10 { sqlite3_finalize $::STMT } SQLITE_SCHEMA # Make sure the schema-version can be manipulated in an attached database. | | | | 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 | sqlite3_step $::STMT } SQLITE_ERROR do_test pragma-8.1.10 { sqlite3_finalize $::STMT } SQLITE_SCHEMA # Make sure the schema-version can be manipulated in an attached database. forcedelete test2.db forcedelete test2.db-journal ifcapable attach { do_test pragma-8.1.11 { execsql { ATTACH 'test2.db' AS aux; CREATE TABLE aux.t1(a, b, c); PRAGMA aux.schema_version = 205; } |
︙ | ︙ | |||
1201 1202 1203 1204 1205 1206 1207 | } } {} } ;# ifcapable bloblit ifcapable pager_pragmas { db close | | | 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 | } } {} } ;# ifcapable bloblit ifcapable pager_pragmas { db close forcedelete test.db sqlite3 db test.db do_test pragma-14.1 { execsql { pragma auto_vacuum = 0 } execsql { pragma page_count } } {0} |
︙ | ︙ | |||
1237 1238 1239 1240 1241 1242 1243 | execsql { ROLLBACK; PRAGMA page_count; } } {2} do_test pragma-14.6 { | | | 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 | execsql { ROLLBACK; PRAGMA page_count; } } {2} do_test pragma-14.6 { forcedelete test2.db sqlite3 db2 test2.db execsql { PRAGMA auto_vacuum = 0; CREATE TABLE t1(a, b, c); CREATE TABLE t2(a, b, c); CREATE TABLE t3(a, b, c); CREATE TABLE t4(a, b, c); |
︙ | ︙ |
Changes to test/pragma2.test.
︙ | ︙ | |||
29 30 31 32 33 34 35 | return } # Delete the preexisting database to avoid the special setup # that the "all.test" script does. # db close | | | | 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | return } # Delete the preexisting database to avoid the special setup # that the "all.test" script does. # db close delete_file test.db test.db-journal delete_file test3.db test3.db-journal sqlite3 db test.db; set DB [sqlite3_connection_pointer db] db eval {PRAGMA auto_vacuum=0} do_test pragma2-1.1 { execsql { PRAGMA freelist_count; } |
︙ | ︙ | |||
57 58 59 60 61 62 63 | } {1} do_test pragma2-1.4 { execsql { PRAGMA main.freelist_count; } } {1} | | | | 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 | } {1} do_test pragma2-1.4 { execsql { PRAGMA main.freelist_count; } } {1} forcedelete test2.db forcedelete test2.db-journal ifcapable attach { do_test pragma2-2.1 { execsql { ATTACH 'test2.db' AS aux; PRAGMA aux.auto_vacuum=OFF; PRAGMA aux.freelist_count; |
︙ | ︙ |
Changes to test/quota.test.
︙ | ︙ | |||
119 120 121 122 123 124 125 | proc quota_check {filename limitvar size} { upvar $limitvar limit lappend ::quota [set limit] $size if {[info exists ::quota_request_ok]} { set limit $size } } do_test quota-3.1.1 { | | | 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 | proc quota_check {filename limitvar size} { upvar $limitvar limit lappend ::quota [set limit] $size if {[info exists ::quota_request_ok]} { set limit $size } } do_test quota-3.1.1 { forcedelete test.db sqlite3_quota_initialize "" 1 sqlite3_quota_set *test.db 4096 quota_check } {SQLITE_OK} do_test quota-3.1.2 { sqlite3 db test.db execsql { PRAGMA page_size = 1024; |
︙ | ︙ | |||
154 155 156 157 158 159 160 | do_test quota-3.1.6 { db close db2 close sqlite3_quota_set *test.db 0 {} } {SQLITE_OK} do_test quota-3.2.1 { | | | 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 | do_test quota-3.1.6 { db close db2 close sqlite3_quota_set *test.db 0 {} } {SQLITE_OK} do_test quota-3.2.1 { delete_file force test.db test2.db sqlite3_quota_set * 4096 {} sqlite3 db1a test.db sqlite3 db2a test2.db foreach db {db1a db2a} { execsql { |
︙ | ︙ | |||
253 254 255 256 257 258 259 | quota_list } {*test.db *test2.db} do_test quota-4.1.5 { sqlite3_quota_set *test.db 0 {} quota_list } {*test2.db} do_test quota-4.1.6 { | | | 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 | quota_list } {*test.db *test2.db} do_test quota-4.1.5 { sqlite3_quota_set *test.db 0 {} quota_list } {*test2.db} do_test quota-4.1.6 { forcedelete test2.db test2.db-journal test2.db-wal sqlite3 db test2.db db eval {CREATE TABLE t2(x); INSERT INTO t2 VALUES('tab-t2');} quota_list } {*test2.db} do_test quota-4.1.7 { catchsql {INSERT INTO t2 VALUES(zeroblob(200000))} } {1 {database or disk is full}} |
︙ | ︙ | |||
353 354 355 356 357 358 359 | do_faultsim_test quota-5.2 -prep { catch {db close} } -body { sqlite3 db test.db } catch { db close } | | | | 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 | do_faultsim_test quota-5.2 -prep { catch {db close} } -body { sqlite3 db test.db } catch { db close } forcedelete test.db do_test quota-5.3.prep { sqlite3 db test.db execsql { PRAGMA auto_vacuum = 1; PRAGMA page_size = 1024; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(10, zeroblob(1200)); } faultsim_save_and_close } {} do_faultsim_test quota-5.3 -prep { faultsim_restore_and_reopen } -body { execsql { DELETE FROM t1 } } do_test quota-5.4.1 { catch { db close } forcedelete test.db file mkdir test.db list [catch { sqlite3 db test.db } msg] $msg } {1 {unable to open database file}} do_faultsim_test quota-5.5 -prep { catch { sqlite3_quota_shutdown } } -body { |
︙ | ︙ |
Changes to test/rollback.test.
︙ | ︙ | |||
84 85 86 87 88 89 90 | && [permutation] ne "inmemory_journal" } { do_test rollback-2.1 { execsql { BEGIN; INSERT INTO t3 VALUES('hello world'); } | | | | 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 | && [permutation] ne "inmemory_journal" } { do_test rollback-2.1 { execsql { BEGIN; INSERT INTO t3 VALUES('hello world'); } forcecopy test.db testA.db forcecopy test.db-journal testA.db-journal execsql { COMMIT; } } {} # At this point files testA.db and testA.db-journal are present in the # file system. This block adds a master-journal file pointer to the |
︙ | ︙ |
Changes to test/savepoint.test.
︙ | ︙ | |||
367 368 369 370 371 372 373 | #------------------------------------------------------------------------- # The following tests, savepoint-6.*, test an incr-vacuum inside of a # couple of nested savepoints. # ifcapable {autovacuum && pragma} { db close | | | 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 | #------------------------------------------------------------------------- # The following tests, savepoint-6.*, test an incr-vacuum inside of a # couple of nested savepoints. # ifcapable {autovacuum && pragma} { db close forcedelete test.db sqlite3 db test.db do_test savepoint-6.1 { execsql { PRAGMA auto_vacuum = incremental } wal_set_journal_mode execsql { CREATE TABLE t1(a, b, c); |
︙ | ︙ | |||
415 416 417 418 419 420 421 | } #------------------------------------------------------------------------- # The following tests, savepoint-7.*, attempt to break the logic # surrounding savepoints by growing and shrinking the database file. # db close | | | 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 | } #------------------------------------------------------------------------- # The following tests, savepoint-7.*, attempt to break the logic # surrounding savepoints by growing and shrinking the database file. # db close forcedelete test.db sqlite3 db test.db do_test savepoint-7.1 { execsql { PRAGMA auto_vacuum = incremental } wal_set_journal_mode execsql { PRAGMA cache_size = 10; |
︙ | ︙ | |||
486 487 488 489 490 491 492 | } execsql { PRAGMA integrity_check } } {ok} wal_check_journal_mode savepoint-7.3.3 do_test savepoint-7.4.1 { db close | | | 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 | } execsql { PRAGMA integrity_check } } {ok} wal_check_journal_mode savepoint-7.3.3 do_test savepoint-7.4.1 { db close forcedelete test.db sqlite3 db test.db execsql { PRAGMA auto_vacuum = incremental } wal_set_journal_mode execsql { CREATE TABLE t1(a, b, PRIMARY KEY(a, b)); INSERT INTO t1 VALUES(randstr(1000,1000), randstr(1000,1000)); BEGIN; |
︙ | ︙ | |||
636 637 638 639 640 641 642 | # of the aux1 and aux2 locks. So record the current lock status of # TEMP for use in the answers. set templockstate [lindex [db eval {PRAGMA lock_status}] 3] if {[wal_is_wal_mode]==0} { do_test savepoint-10.2.1 { | | | | 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 | # of the aux1 and aux2 locks. So record the current lock status of # TEMP for use in the answers. set templockstate [lindex [db eval {PRAGMA lock_status}] 3] if {[wal_is_wal_mode]==0} { do_test savepoint-10.2.1 { forcedelete test3.db forcedelete test2.db execsql { ATTACH 'test2.db' AS aux1; ATTACH 'test3.db' AS aux2; DROP TABLE t1; CREATE TABLE main.t1(x, y); CREATE TABLE aux1.t2(x, y); CREATE TABLE aux2.t3(x, y); |
︙ | ︙ | |||
758 759 760 761 762 763 764 | #------------------------------------------------------------------------- # The following tests - savepoint-11.* - test the interaction of # savepoints and creating or dropping tables and indexes in # auto-vacuum mode. # do_test savepoint-11.1 { db close | | | 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 | #------------------------------------------------------------------------- # The following tests - savepoint-11.* - test the interaction of # savepoints and creating or dropping tables and indexes in # auto-vacuum mode. # do_test savepoint-11.1 { db close forcedelete test.db sqlite3 db test.db execsql { PRAGMA auto_vacuum = full; } wal_set_journal_mode execsql { CREATE TABLE t1(a, b, UNIQUE(a, b)); INSERT INTO t1 VALUES(1, randstr(1000,1000)); INSERT INTO t1 VALUES(2, randstr(1000,1000)); |
︙ | ︙ | |||
864 865 866 867 868 869 870 | #------------------------------------------------------------------------- # The following tests - savepoint-13.* - test the interaction of # savepoints and "journal_mode = off". # if {[wal_is_wal_mode]==0} { do_test savepoint-13.1 { db close | | | 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 | #------------------------------------------------------------------------- # The following tests - savepoint-13.* - test the interaction of # savepoints and "journal_mode = off". # if {[wal_is_wal_mode]==0} { do_test savepoint-13.1 { db close catch {forcedelete test.db} sqlite3 db test.db execsql { BEGIN; CREATE TABLE t1(a PRIMARY KEY, b); INSERT INTO t1 VALUES(1, 2); COMMIT; PRAGMA journal_mode = off; |
︙ | ︙ | |||
905 906 907 908 909 910 911 | ROLLBACK; SELECT * FROM t1; } } {1 2 3 4 5 6 7 8 9 10 11 12} } db close | | | 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 | ROLLBACK; SELECT * FROM t1; } } {1 2 3 4 5 6 7 8 9 10 11 12} } db close delete_file test.db do_multiclient_test tn { do_test savepoint-14.$tn.1 { sql1 { CREATE TABLE foo(x); INSERT INTO foo VALUES(1); INSERT INTO foo VALUES(2); } |
︙ | ︙ |
Changes to test/savepoint6.test.
︙ | ︙ | |||
242 243 244 245 246 247 248 | sql { PRAGMA cache_size = 10 } }] { unset -nocomplain ::lSavepoint unset -nocomplain ::aEntry catch { db close } | | | 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 | sql { PRAGMA cache_size = 10 } }] { unset -nocomplain ::lSavepoint unset -nocomplain ::aEntry catch { db close } forcedelete test.db test.db-wal test.db-journal eval $zSetup sql $DATABASE_SCHEMA wal_set_journal_mode do_test savepoint6-$testname.setup { savepoint one |
︙ | ︙ |
Changes to test/securedel.test.
︙ | ︙ | |||
22 23 24 25 26 27 28 | } do_test securedel-1.0 { db eval {PRAGMA secure_delete;} } $DEFAULT_SECDEL | | | 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 | } do_test securedel-1.0 { db eval {PRAGMA secure_delete;} } $DEFAULT_SECDEL forcedelete test2.db test2.db-journal do_test securedel-1.1 { db eval { ATTACH 'test2.db' AS db2; PRAGMA main.secure_delete=ON; PRAGMA db2.secure_delete; } } [list 1 $DEFAULT_SECDEL] |
︙ | ︙ |
Changes to test/shared.test.
︙ | ︙ | |||
23 24 25 26 27 28 29 | } set ::enable_shared_cache [sqlite3_enable_shared_cache 1] foreach av [list 0 1] { # Open the database connection and execute the auto-vacuum pragma | | | 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 | } set ::enable_shared_cache [sqlite3_enable_shared_cache 1] foreach av [list 0 1] { # Open the database connection and execute the auto-vacuum pragma forcedelete test.db sqlite3 db test.db ifcapable autovacuum { do_test shared-[expr $av+1].1.0 { execsql "pragma auto_vacuum=$::av" execsql {pragma auto_vacuum} } "$av" |
︙ | ︙ | |||
285 286 287 288 289 290 291 | # (as well as main): # # db.main -> ./test.db # db.test2 -> ./test2.db # db2.main -> ./test2.db # db2.test -> ./test.db # | | | | | 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 | # (as well as main): # # db.main -> ./test.db # db.test2 -> ./test2.db # db2.main -> ./test2.db # db2.test -> ./test.db # forcedelete test.db forcedelete test2.db forcedelete test2.db-journal sqlite3 db test.db sqlite3 db2 test2.db do_test shared-$av.4.1.1 { set sqlite_open_file_count expr $sqlite_open_file_count-($extrafds_prelock*2) } {2} do_test shared-$av.4.1.2 { |
︙ | ︙ | |||
418 419 420 421 422 423 424 | catch {db2 close} catch {db close} #-------------------------------------------------------------------------- # Tests shared-5.* # foreach db [list test.db test1.db test2.db test3.db] { | | | 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 | catch {db2 close} catch {db close} #-------------------------------------------------------------------------- # Tests shared-5.* # foreach db [list test.db test1.db test2.db test3.db] { forcedelete $db ${db}-journal } do_test shared-$av.5.1.1 { sqlite3 db1 test.db sqlite3 db2 test.db execsql { ATTACH 'test1.db' AS test1; ATTACH 'test2.db' AS test2; |
︙ | ︙ | |||
549 550 551 552 553 554 555 | } set ret } {} catch {db1 close} catch {db2 close} foreach f [list test.db test2.db] { | | | 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 | } set ret } {} catch {db1 close} catch {db2 close} foreach f [list test.db test2.db] { forcedelete $f ${f}-journal } #-------------------------------------------------------------------------- # Tests shared-7.* test auto-vacuum does not invalidate cursors from # other shared-cache users when it reorganizes the database on # COMMIT. # |
︙ | ︙ | |||
635 636 637 638 639 640 641 | catch {db2 close} unset -nocomplain contents #-------------------------------------------------------------------------- # The following tests try to trick the shared-cache code into assuming # the wrong encoding for a database. # | | | 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 | catch {db2 close} unset -nocomplain contents #-------------------------------------------------------------------------- # The following tests try to trick the shared-cache code into assuming # the wrong encoding for a database. # forcedelete test.db test.db-journal ifcapable utf16 { do_test shared-$av.8.1.1 { sqlite3 db test.db execsql { PRAGMA encoding = 'UTF-16'; SELECT * FROM sqlite_master; } |
︙ | ︙ | |||
667 668 669 670 671 672 673 | do_test shared-$av.8.1.5 { db2 close execsql { PRAGMA encoding; } } {UTF-8} | | | | 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 | do_test shared-$av.8.1.5 { db2 close execsql { PRAGMA encoding; } } {UTF-8} forcedelete test2.db test2.db-journal do_test shared-$av.8.2.1 { execsql { ATTACH 'test2.db' AS aux; SELECT * FROM aux.sqlite_master; } } {} do_test shared-$av.8.2.2 { sqlite3 db2 test2.db execsql { PRAGMA encoding = 'UTF-16'; CREATE TABLE def(d, e, f); } db2 string range [execsql {PRAGMA encoding;} db2] 0 end-2 } {UTF-16} catch {db close} catch {db2 close} forcedelete test.db test2.db do_test shared-$av.8.3.2 { sqlite3 db test.db execsql { CREATE TABLE def(d, e, f) } execsql { PRAGMA encoding } } {UTF-8} do_test shared-$av.8.3.3 { |
︙ | ︙ | |||
724 725 726 727 728 729 730 | } } {1 {attached databases must use the same text encoding as main database}} } } catch {db close} catch {db2 close} | | | 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 | } } {1 {attached databases must use the same text encoding as main database}} } } catch {db close} catch {db2 close} forcedelete test.db test2.db #--------------------------------------------------------------------------- # The following tests - shared-9.* - test interactions between TEMP triggers # and shared-schemas. # ifcapable trigger&&tempdb { |
︙ | ︙ | |||
764 765 766 767 768 769 770 | #--------------------------------------------------------------------------- # The following tests - shared-10.* - test that the library behaves # correctly when a connection to a shared-cache is closed. # do_test shared-$av.10.1 { # Create a small sample database with two connections to it (db and db2). | | | 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 | #--------------------------------------------------------------------------- # The following tests - shared-10.* - test that the library behaves # correctly when a connection to a shared-cache is closed. # do_test shared-$av.10.1 { # Create a small sample database with two connections to it (db and db2). forcedelete test.db sqlite3 db test.db sqlite3 db2 test.db execsql { CREATE TABLE ab(a PRIMARY KEY, b); CREATE TABLE de(d PRIMARY KEY, e); INSERT INTO ab VALUES('Chiang Mai', 100000); INSERT INTO ab VALUES('Bangkok', 8000000); |
︙ | ︙ | |||
845 846 847 848 849 850 851 | integrity_check shared-$av.10.10 do_test shared-$av.10.11 { db close db3 close } {} do_test shared-$av.11.1 { | | | 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 | integrity_check shared-$av.10.10 do_test shared-$av.10.11 { db close db3 close } {} do_test shared-$av.11.1 { forcedelete test.db sqlite3 db test.db sqlite3 db2 test.db execsql { CREATE TABLE abc(a, b, c); CREATE TABLE abc2(a, b, c); BEGIN; INSERT INTO abc VALUES(1, 2, 3); |
︙ | ︙ | |||
908 909 910 911 912 913 914 | do_test shared-$av.11.11 { db close db2 close } {} # This tests that if it is impossible to free any pages, SQLite will # exceed the limit set by PRAGMA cache_size. | | | 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 | do_test shared-$av.11.11 { db close db2 close } {} # This tests that if it is impossible to free any pages, SQLite will # exceed the limit set by PRAGMA cache_size. forcedelete test.db test.db-journal sqlite3 db test.db ifcapable pager_pragmas { do_test shared-$av.12.1 { execsql { PRAGMA cache_size = 10; PRAGMA cache_size; } |
︙ | ︙ | |||
955 956 957 958 959 960 961 | # Internally, locks are acquired on shared B-Tree structures in the order # that the structures appear in the virtual memory address space. This # test case attempts to cause the order of the structures in memory # to be different from the order in which they are attached to a given # database handle. This covers an extra line or two. # do_test shared-$av.13.1 { | | | 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 | # Internally, locks are acquired on shared B-Tree structures in the order # that the structures appear in the virtual memory address space. This # test case attempts to cause the order of the structures in memory # to be different from the order in which they are attached to a given # database handle. This covers an extra line or two. # do_test shared-$av.13.1 { forcedelete test2.db test3.db test4.db test5.db sqlite3 db :memory: execsql { ATTACH 'test2.db' AS aux2; ATTACH 'test3.db' AS aux3; ATTACH 'test4.db' AS aux4; ATTACH 'test5.db' AS aux5; DETACH aux2; |
︙ | ︙ | |||
1018 1019 1020 1021 1022 1023 1024 | # [db2]. This is to try to find any points where shared-schema elements # are allocated using the lookaside buffer of [db]. # # Mutexes are enabled for this test as that activates a couple of useful # assert() statements in the C code. # do_test shared-$av-15.1 { | | | 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 | # [db2]. This is to try to find any points where shared-schema elements # are allocated using the lookaside buffer of [db]. # # Mutexes are enabled for this test as that activates a couple of useful # assert() statements in the C code. # do_test shared-$av-15.1 { forcedelete test.db sqlite3 db test.db -fullmutex 1 sqlite3 db2 test.db -fullmutex 1 execsql { CREATE TABLE t1(a, b, c); CREATE INDEX i1 ON t1(a, b); CREATE VIEW v1 AS SELECT * FROM t1; CREATE VIEW v2 AS SELECT * FROM t1, v1 |
︙ | ︙ |
Changes to test/shared2.test.
︙ | ︙ | |||
125 126 127 128 129 130 131 | db1 close db2 close do_test shared2-3.2 { sqlite3_enable_shared_cache 1 } {1} | | | 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 | db1 close db2 close do_test shared2-3.2 { sqlite3_enable_shared_cache 1 } {1} forcedelete test.db sqlite3 db test.db do_test shared2-4.1 { execsql { CREATE TABLE t0(a, b); CREATE TABLE t1(a, b DEFAULT 'hello world'); } |
︙ | ︙ |
Changes to test/shared3.test.
︙ | ︙ | |||
20 21 22 23 24 25 26 | return } set ::enable_shared_cache [sqlite3_enable_shared_cache 1] # Ticket #1824 # do_test shared3-1.1 { | | | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 | return } set ::enable_shared_cache [sqlite3_enable_shared_cache 1] # Ticket #1824 # do_test shared3-1.1 { forcedelete test.db test.db-journal sqlite3 db1 test.db db1 eval { PRAGMA encoding=UTF16; CREATE TABLE t1(x,y); INSERT INTO t1 VALUES('abc','This is a test string'); } db1 close |
︙ | ︙ |
Changes to test/shared4.test.
︙ | ︙ | |||
26 27 28 29 30 31 32 | return } set ::enable_shared_cache [sqlite3_enable_shared_cache 1] # Prepare multiple databases in shared cache mode. # do_test shared4-1.1 { | | | | | | 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 | return } set ::enable_shared_cache [sqlite3_enable_shared_cache 1] # Prepare multiple databases in shared cache mode. # do_test shared4-1.1 { forcedelete test1.db test1.db-journal forcedelete test2.db test2.db-journal forcedelete test3.db test3.db-journal forcedelete test4.db test4.db-journal sqlite3 db1 test1.db sqlite3 db2 test2.db sqlite3 db3 test3.db sqlite3 db4 test4.db db1 eval { CREATE TABLE t1(a); INSERT INTO t1 VALUES(111); |
︙ | ︙ |
Changes to test/shared6.test.
︙ | ︙ | |||
231 232 233 234 235 236 237 | do_test shared6-3.X { db1 close db2 close db3 close } {} do_test shared6-4.1 { | | | 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 | do_test shared6-3.X { db1 close db2 close db3 close } {} do_test shared6-4.1 { #forcedelete test.db test.db-journal sqlite3 db1 test.db sqlite3 db2 test.db set ::STMT [sqlite3_prepare_v2 db1 "SELECT * FROM t1" -1 DUMMY] execsql { CREATE TABLE t5(a, b) } db2 } {} do_test shared6-4.2 { |
︙ | ︙ |
Changes to test/shared7.test.
︙ | ︙ | |||
31 32 33 34 35 36 37 | } catchsql { ATTACH 'test.db' AS err1; } } {1 {database is already attached}} do_test shared7-1.3 { | | | 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 | } catchsql { ATTACH 'test.db' AS err1; } } {1 {database is already attached}} do_test shared7-1.3 { forcedelete test2.db test2.db-journal db eval { ATTACH 'test2.db' AS test2; CREATE TABLE test2.t2(y); } catchsql { ATTACH 'test2.db' AS err2; } |
︙ | ︙ |
Changes to test/speed3.test.
︙ | ︙ | |||
112 113 114 115 116 117 118 | PRAGMA main.cache_size = 200000; PRAGMA main.auto_vacuum = 'incremental'; ATTACH 'test2.db' AS 'aux'; PRAGMA aux.auto_vacuum = 'none'; } } | | | 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 | PRAGMA main.cache_size = 200000; PRAGMA main.auto_vacuum = 'incremental'; ATTACH 'test2.db' AS 'aux'; PRAGMA aux.auto_vacuum = 'none'; } } forcedelete test2.db test2.db-journal reset_db # Set up a database in auto-vacuum mode and create a database schema. # do_test speed3-0.1 { execsql { CREATE TABLE main.t1(a INTEGER, b TEXT, c INTEGER); |
︙ | ︙ |
Changes to test/sqllimits1.test.
︙ | ︙ | |||
755 756 757 758 759 760 761 | #-------------------------------------------------------------------- # Test cases sqllimits1-12.*: Test the SQLITE_MAX_ATTACHED limit. # ifcapable attach { do_test sqllimits1-12.1 { set max $::SQLITE_MAX_ATTACHED for {set i 0} {$i < ($max)} {incr i} { | | | 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 | #-------------------------------------------------------------------- # Test cases sqllimits1-12.*: Test the SQLITE_MAX_ATTACHED limit. # ifcapable attach { do_test sqllimits1-12.1 { set max $::SQLITE_MAX_ATTACHED for {set i 0} {$i < ($max)} {incr i} { forcedelete test${i}.db test${i}.db-journal } for {set i 0} {$i < ($max)} {incr i} { execsql "ATTACH 'test${i}.db' AS aux${i}" } catchsql "ATTACH 'test${i}.db' AS aux${i}" } "1 {too many attached databases - max $::SQLITE_MAX_ATTACHED}" do_test sqllimits1-12.2 { |
︙ | ︙ |
Changes to test/stat.test.
︙ | ︙ | |||
135 136 137 138 139 140 141 | SELECT * FROM stat WHERE name = 't5' OR name = 'i5'; } [list \ i5 / 5 leaf 0 0 1016 0 \ t5 / 4 leaf 0 0 1016 0 \ ] db close | | | 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 | SELECT * FROM stat WHERE name = 't5' OR name = 'i5'; } [list \ i5 / 5 leaf 0 0 1016 0 \ t5 / 4 leaf 0 0 1016 0 \ ] db close forcedelete test.db sqlite3 db test.db register_dbstat_vtab db breakpoint do_execsql_test stat-5.1 { PRAGMA auto_vacuum = OFF; CREATE VIRTUAL TABLE temp.stat USING dbstat; CREATE TABLE t1(x); |
︙ | ︙ |
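The stat.test hunk above queries the dbstat virtual table immediately after re-creating a fresh database. For readers skimming the diff, here is a minimal usage sketch in the same style as the test scripts; it is not part of the check-in, and it assumes the test-harness helper register_dbstat_vtab is available to register the module:

    # Illustrative only: each row of "stat" describes one page used by a table or index.
    sqlite3 db test.db
    register_dbstat_vtab db            ;# harness helper (assumed available)
    db eval {
      CREATE VIRTUAL TABLE temp.stat USING dbstat;
      CREATE TABLE t1(x);
      INSERT INTO t1 VALUES(zeroblob(1500));
    }
    db eval { SELECT name, pageno, pagetype FROM stat ORDER BY name, path } row {
      puts "$row(name): page $row(pageno) is a $row(pagetype) page"
    }
    db close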
Changes to test/sync.test.
︙ | ︙ | |||
38 39 40 41 42 43 44 | incr sqlite_sync_count $adj } } } do_test sync-1.1 { set sqlite_sync_count 0 | | | | 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 | incr sqlite_sync_count $adj } } } do_test sync-1.1 { set sqlite_sync_count 0 forcedelete test2.db forcedelete test2.db-journal execsql { PRAGMA fullfsync=OFF; CREATE TABLE t1(a,b); ATTACH DATABASE 'test2.db' AS db2; CREATE TABLE db2.t2(x,y); } cond_incr_sync_count 2 |
︙ | ︙ |
Changes to test/syscall.test.
︙ | ︙ | |||
55 56 57 58 59 60 61 | #------------------------------------------------------------------------- # Tests for the xNextSystemCall method. # foreach s { open close access getcwd stat fstat ftruncate fcntl read pread write pwrite fchmod fallocate | | | 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 | #------------------------------------------------------------------------- # Tests for the xNextSystemCall method. # foreach s { open close access getcwd stat fstat ftruncate fcntl read pread write pwrite fchmod fallocate pread64 pwrite64 unlink openDirectory } { if {[test_syscall exists $s]} {lappend syscall_list $s} } do_test 3.1 { lsort [test_syscall list] } [lsort $syscall_list] #------------------------------------------------------------------------- # This test verifies that if a call to open() fails and errno is set to |
︙ | ︙ |
Changes to test/table.test.
︙ | ︙ | |||
663 664 665 666 667 668 669 | set result [list $rc $msg] } {1 {database table is locked}} ifcapable attach { # Now attach a database and ensure that a table can be created in the # attached database whilst in a callback from a query on the main database. do_test table-14.3 { | | | | 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 | set result [list $rc $msg] } {1 {database table is locked}} ifcapable attach { # Now attach a database and ensure that a table can be created in the # attached database whilst in a callback from a query on the main database. do_test table-14.3 { forcedelete test2.db forcedelete test2.db-journal execsql { ATTACH 'test2.db' as aux; } db eval {SELECT * FROM tablet8 LIMIT 1} {} { db eval {CREATE TABLE aux.t1(a, b, c)} } } {} |
︙ | ︙ |
Changes to test/tclsqlite.test.
︙ | ︙ | |||
612 613 614 615 616 617 618 619 620 621 | db eval { DELETE FROM t5; INSERT INTO t5 VALUES(@y); SELECT hex(x), typeof(x) FROM t5 } } {31323334 blob} } finish_test | > > > > > > > > > > > | 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 | db eval { DELETE FROM t5; INSERT INTO t5 VALUES(@y); SELECT hex(x), typeof(x) FROM t5 } } {31323334 blob} } db func xCall xCall proc xCall {} { return "value" } do_execsql_test tcl-14.1 { CREATE TABLE t6(x); INSERT INTO t6 VALUES(1); } do_test tcl-14.2 { db one {SELECT x FROM t6 WHERE xCall()!='value'} } {} finish_test |
Changes to test/temptable.test.
︙ | ︙ | |||
400 401 402 403 404 405 406 | db close sqlite3 db test.db catchsql { SELECT * FROM t8,t9; } } {1 {no such table: t9}} | | | | | 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 | db close sqlite3 db test.db catchsql { SELECT * FROM t8,t9; } } {1 {no such table: t9}} forcedelete test2.db test2.db-journal ifcapable attach { do_test temptable-7.1 { catchsql { ATTACH 'test2.db' AS two; CREATE TEMP TABLE two.abc(x,y); } } {1 {temporary table name must be unqualified}} } # Need to do the following for tcl 8.5 on mac. On that configuration, the # -readonly flag is taken so seriously that a subsequent [forcedelete] # (required before the next test file can be executed) will fail. # catch {file attributes test.db -readonly 0} do_test temptable-8.0 { db close catch {forcedelete test.db} sqlite3 db test.db } {} do_test temptable-8.1 { execsql { CREATE TEMP TABLE tbl2(a, b); } execsql { CREATE TABLE tbl(a, b); INSERT INTO tbl VALUES(1, 2); |
︙ | ︙ |
Changes to test/temptrigger.test.
︙ | ︙ | |||
153 154 155 156 157 158 159 | # connection. This forces [db] to reload the 'test2.db' # schema. Check that the temp trigger is still fired # correctly. # # temptrigger-3.4: Check that the temp trigger can be dropped without error. # do_test temptrigger-3.1 { | | | | 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 | # connection. This forces [db] to reload the 'test2.db' # schema. Check that the temp trigger is still fired # correctly. # # temptrigger-3.4: Check that the temp trigger can be dropped without error. # do_test temptrigger-3.1 { catch { forcedelete test2.db test2.db-journal } catch { forcedelete test.db test.db-journal } sqlite3 db test.db sqlite3 db2 test2.db execsql { CREATE TABLE t2(a, b) } db2 execsql { ATTACH 'test2.db' AS aux; CREATE TEMP TABLE tt2(a, b); CREATE TEMP TRIGGER tr2 AFTER INSERT ON aux.t2 BEGIN |
︙ | ︙ |
Changes to test/tester.tcl.
︙ | ︙ | |||
16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | #------------------------------------------------------------------------- # The commands provided by the code in this file to help with creating # test cases are as follows: # # Commands to manipulate the db and the file-system at a high level: # # copy_file FROM TO # drop_all_tables ?DB? # forcedelete FILENAME # # Test the capability of the SQLite version built into the interpreter to # determine if a specific test can be run: # # ifcapable EXPR # | > > | 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 | #------------------------------------------------------------------------- # The commands provided by the code in this file to help with creating # test cases are as follows: # # Commands to manipulate the db and the file-system at a high level: # # copy_file FROM TO # delete_file FILENAME # drop_all_tables ?DB? # forcecopy FROM TO # forcedelete FILENAME # # Test the capability of the SQLite version built into the interpreter to # determine if a specific test can be run: # # ifcapable EXPR # |
︙ | ︙ | |||
118 119 120 121 122 123 124 125 126 127 128 129 130 131 | # This command is not opening a new database connection. Pass the # arguments through to the C implemenation as the are. # uplevel 1 sqlite_orig $args } } } proc execpresql {handle args} { trace remove execution $handle enter [list execpresql $handle] if {[info exists ::G(perm:presql)]} { $handle eval $::G(perm:presql) } } | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 | # This command is not opening a new database connection. Pass the # arguments through to the C implemenation as the are. # uplevel 1 sqlite_orig $args } } } proc getFileRetries {} { if {![info exists ::G(file-retries)]} { # # NOTE: Return the default number of retries for [file] operations. A # value of zero or less here means "disabled". # return [expr {$::tcl_platform(platform) eq "windows" ? 10 : 0}] } return $::G(file-retries) } proc getFileRetryDelay {} { if {![info exists ::G(file-retry-delay)]} { # # NOTE: Return the default number of milliseconds to wait when retrying # failed [file] operations. A value of zero or less means "do not # wait". # return 100; # TODO: Good default? } return $::G(file-retry-delay) } # Copy file $from into $to. This is used because some versions of # TCL for windows (notably the 8.4.1 binary package shipped with the # current mingw release) have a broken "file copy" command. # proc copy_file {from to} { do_copy_file false $from $to } proc forcecopy {from to} { do_copy_file true $from $to } proc do_copy_file {force from to} { set nRetry [getFileRetries] ;# Maximum number of retries. set nDelay [getFileRetryDelay] ;# Delay in ms before retrying. # On windows, sometimes even a [file copy -force] can fail. The cause is # usually "tag-alongs" - programs like anti-virus software, automatic backup # tools and various explorer extensions that keep a file open a little longer # than we expect, causing the delete to fail. # # The solution is to wait a short amount of time before retrying the copy. # if {$nRetry > 0} { for {set i 0} {$i<$nRetry} {incr i} { set rc [catch { if {$force} { file copy -force $from $to } else { file copy $from $to } } msg] if {$rc==0} break if {$nDelay > 0} { after $nDelay } } if {$rc} { error $msg } } else { if {$force} { file copy -force $from $to } else { file copy $from $to } } } # Delete a file or directory # proc delete_file {args} { do_delete_file false {*}$args } proc forcedelete {args} { do_delete_file true {*}$args } proc do_delete_file {force args} { set nRetry [getFileRetries] ;# Maximum number of retries. set nDelay [getFileRetryDelay] ;# Delay in ms before retrying. foreach filename $args { # On windows, sometimes even a [file delete -force] can fail just after # a file is closed. 
The cause is usually "tag-alongs" - programs like # anti-virus software, automatic backup tools and various explorer # extensions that keep a file open a little longer than we expect, causing # the delete to fail. # # The solution is to wait a short amount of time before retrying the # delete. # if {$nRetry > 0} { for {set i 0} {$i<$nRetry} {incr i} { set rc [catch { if {$force} { file delete -force $filename } else { file delete $filename } } msg] if {$rc==0} break if {$nDelay > 0} { after $nDelay } } if {$rc} { error $msg } } else { if {$force} { file delete -force $filename } else { file delete $filename } } } } proc execpresql {handle args} { trace remove execution $handle enter [list execpresql $handle] if {[info exists ::G(perm:presql)]} { $handle eval $::G(perm:presql) } } |
︙ | ︙ | |||
150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 | # --pause # --soft-heap-limit=NN # --maxerror=NN # --malloctrace=N # --backtrace=N # --binarylog=N # --soak=N # --start=[$permutation:]$testfile # set cmdlinearg(soft-heap-limit) 0 set cmdlinearg(maxerror) 1000 set cmdlinearg(malloctrace) 0 set cmdlinearg(backtrace) 10 set cmdlinearg(binarylog) 0 set cmdlinearg(soak) 0 set cmdlinearg(start) "" set leftover [list] foreach a $argv { switch -regexp -- $a { {^-+pause$} { # Wait for user input before continuing. This is to give the user an | > > > > | 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 | # --pause # --soft-heap-limit=NN # --maxerror=NN # --malloctrace=N # --backtrace=N # --binarylog=N # --soak=N # --file-retries=N # --file-retry-delay=N # --start=[$permutation:]$testfile # set cmdlinearg(soft-heap-limit) 0 set cmdlinearg(maxerror) 1000 set cmdlinearg(malloctrace) 0 set cmdlinearg(backtrace) 10 set cmdlinearg(binarylog) 0 set cmdlinearg(soak) 0 set cmdlinearg(file-retries) 0 set cmdlinearg(file-retry-delay) 0 set cmdlinearg(start) "" set leftover [list] foreach a $argv { switch -regexp -- $a { {^-+pause$} { # Wait for user input before continuing. This is to give the user an |
︙ | ︙ | |||
193 194 195 196 197 198 199 200 201 202 203 204 205 206 | {^-+binarylog=.+$} { foreach {dummy cmdlinearg(binarylog)} [split $a =] break } {^-+soak=.+$} { foreach {dummy cmdlinearg(soak)} [split $a =] break set ::G(issoak) $cmdlinearg(soak) } {^-+start=.+$} { foreach {dummy cmdlinearg(start)} [split $a =] break set ::G(start:file) $cmdlinearg(start) if {[regexp {(.*):(.*)} $cmdlinearg(start) -> s.perm s.file]} { set ::G(start:permutation) ${s.perm} set ::G(start:file) ${s.file} | > > > > > > > > | 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 | {^-+binarylog=.+$} { foreach {dummy cmdlinearg(binarylog)} [split $a =] break } {^-+soak=.+$} { foreach {dummy cmdlinearg(soak)} [split $a =] break set ::G(issoak) $cmdlinearg(soak) } {^-+file-retries=.+$} { foreach {dummy cmdlinearg(file-retries)} [split $a =] break set ::G(file-retries) $cmdlinearg(file-retries) } {^-+file-retry-delay=.+$} { foreach {dummy cmdlinearg(file-retry-delay)} [split $a =] break set ::G(file-retry-delay) $cmdlinearg(file-retry-delay) } {^-+start=.+$} { foreach {dummy cmdlinearg(start)} [split $a =] break set ::G(start:file) $cmdlinearg(start) if {[regexp {(.*):(.*)} $cmdlinearg(start) -> s.perm s.file]} { set ::G(start:permutation) ${s.perm} set ::G(start:file) ${s.file} |
︙ | ︙ | |||
259 260 261 262 263 264 265 | } # Create a test database # proc reset_db {} { catch {db close} | | | | | 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 | } # Create a test database # proc reset_db {} { catch {db close} forcedelete test.db forcedelete test.db-journal forcedelete test.db-wal if {[forced_proxy_locking]} { sqlite3 db ./test.db set lock_proxy_path [db eval "PRAGMA lock_proxy_file;"] catch {db close} # puts "deleting $lock_proxy_path" file delete -force $lock_proxy_path file delete -force test.db |
︙ | ︙ | |||
644 645 646 647 648 649 650 | puts "Writing leaks.sql..." sqlite3_memdebug_log sync memdebug_log_sql leaks.sql } } catch { foreach f [glob -nocomplain test.db-*-journal] { | | | | 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 | puts "Writing leaks.sql..." sqlite3_memdebug_log sync memdebug_log_sql leaks.sql } } catch { foreach f [glob -nocomplain test.db-*-journal] { forcedelete $f } } catch { foreach f [glob -nocomplain test.db-mj*] { forcedelete $f } } exit [expr {$nErr>0}] } # Display memory statistics for analysis and debugging purposes. # |
︙ | ︙ | |||
765 766 767 768 769 770 771 | if {[catch {sqlite3_finalize $vm} errmsg]} { return [list 1 $errmsg] } } return $r } | < < < < < < < < < < < < < < < < < < < < < < < < | 894 895 896 897 898 899 900 901 902 903 904 905 906 907 | if {[catch {sqlite3_finalize $vm} errmsg]} { return [list 1 $errmsg] } } return $r } # Do an integrity check of the entire database # proc integrity_check {name {db db}} { ifcapable integrityck { do_test $name [list execsql {PRAGMA integrity_check} $db] {ok} } } |
︙ | ︙ | |||
989 990 991 992 993 994 995 | # Delete the files test.db and test2.db, then execute the TCL and # SQL (in that order) to prepare for the test case. do_test $testname.$n.1 { set ::sqlite_io_error_pending 0 catch {db close} catch {db2 close} | | | | | | 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 | # Delete the files test.db and test2.db, then execute the TCL and # SQL (in that order) to prepare for the test case. do_test $testname.$n.1 { set ::sqlite_io_error_pending 0 catch {db close} catch {db2 close} catch {forcedelete test.db} catch {forcedelete test.db-journal} catch {forcedelete test2.db} catch {forcedelete test2.db-journal} set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db] sqlite3_extended_result_codes $::DB $::ioerropts(-erc) if {[info exists ::ioerropts(-tclprep)]} { eval $::ioerropts(-tclprep) } if {[info exists ::ioerropts(-sqlprep)]} { execsql $::ioerropts(-sqlprep) |
︙ | ︙ | |||
1266 1267 1268 1269 1270 1271 1272 | } set fd [open $filename w] puts $fd "BEGIN; ${tbl}${tbl2}${tbl3}${sql} ; COMMIT;" close $fd } | < < < < < < < < < < < < < < < < < < | 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 | } set fd [open $filename w] puts $fd "BEGIN; ${tbl}${tbl2}${tbl3}${sql} ; COMMIT;" close $fd } # Drop all tables in database [db] proc drop_all_tables {{db db}} { ifcapable trigger&&foreignkey { set pk [$db one "PRAGMA foreign_keys"] $db eval "PRAGMA foreign_keys = OFF" } foreach {idx name file} [db eval {PRAGMA database_list}] { |
︙ | ︙ | |||
1478 1479 1480 1481 1482 1483 1484 | return "" } proc db_save {} { foreach f [glob -nocomplain sv_test.db*] { forcedelete $f } foreach f [glob -nocomplain test.db*] { set f2 "sv_$f" | | | | | 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 | return "" } proc db_save {} { foreach f [glob -nocomplain sv_test.db*] { forcedelete $f } foreach f [glob -nocomplain test.db*] { set f2 "sv_$f" forcecopy $f $f2 } } proc db_save_and_close {} { db_save catch { db close } return "" } proc db_restore {} { foreach f [glob -nocomplain test.db*] { forcedelete $f } foreach f2 [glob -nocomplain sv_test.db*] { set f [string range $f2 3 end] forcecopy $f2 $f } } proc db_restore_and_reopen {{dbfile test.db}} { catch { db close } db_restore sqlite3 db $dbfile } proc db_delete_and_reopen {{file test.db}} { catch { db close } foreach f [glob -nocomplain test.db*] { forcedelete $f } sqlite3 db $file } # If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set # to non-zero, then set the global variable $AUTOVACUUM to 1. set AUTOVACUUM $sqlite_options(default_autovacuum) source $testdir/thread_common.tcl source $testdir/malloc_common.tcl |
Changes to test/thread002.test.
︙ | ︙ | |||
23 24 25 26 27 28 29 | set ::enable_shared_cache [sqlite3_enable_shared_cache 1] set ::NTHREAD 10 do_test thread002.1 { # Create 3 databases with identical schemas: for {set ii 0} {$ii < 3} {incr ii} { | | | 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 | set ::enable_shared_cache [sqlite3_enable_shared_cache 1] set ::NTHREAD 10 do_test thread002.1 { # Create 3 databases with identical schemas: for {set ii 0} {$ii < 3} {incr ii} { forcedelete test${ii}.db sqlite3 db test${ii}.db execsql { CREATE TABLE t1(k, v); CREATE INDEX t1_i ON t1(v); INSERT INTO t1(v) VALUES(1.0); } db close |
︙ | ︙ |
Changes to test/thread003.test.
︙ | ︙ | |||
36 37 38 39 40 41 42 | } } {} do_test thread003.1.2 { expr {([file size test.db] / 1024) > 2000} } {1} do_test thread003.1.3 { db close | | | 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 | } } {} do_test thread003.1.2 { expr {([file size test.db] / 1024) > 2000} } {1} do_test thread003.1.3 { db close forcedelete test2.db sqlite3 db test2.db } {} do_test thread003.1.4 { execsql { BEGIN; CREATE TABLE t1(a, b, c); } |
︙ | ︙ |
Changes to test/thread005.test.
︙ | ︙ | |||
117 118 119 120 121 122 123 | # when using an ATTACHed database. There doesn't seem to be any reason # for this, other than that operating on an ATTACHed database means there # are a few more mutex grabs and releases during the window of time open # for the race-condition. Maybe this encourages the scheduler to context # switch or something... # | | | 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 | # when using an ATTACHed database. There doesn't seem to be any reason # for this, other than that operating on an ATTACHed database means there # are a few more mutex grabs and releases during the window of time open # for the race-condition. Maybe this encourages the scheduler to context # switch or something... # forcedelete test.db test2.db unset -nocomplain finished do_test thread005-2.1 { sqlite3 db test.db execsql { ATTACH 'test2.db' AS aux } execsql { CREATE TABLE aux.t1(a INTEGER PRIMARY KEY, b UNIQUE); |
︙ | ︙ |
Changes to test/tkt-2d1a5c67d.test.
︙ | ︙ | |||
114 115 116 117 118 119 120 | execsql { SELECT * FROM t4 WHERE a = 'xyz' } } {xyz} # Check that recovery works on the WAL file. # forcedelete test.db2-wal test.db2 do_test 3.6 { | | | | 114 115 116 117 118 119 120 121 122 123 124 125 126 127 | execsql { SELECT * FROM t4 WHERE a = 'xyz' } } {xyz} # Check that recovery works on the WAL file. # forcedelete test.db2-wal test.db2 do_test 3.6 { copy_file test.db-wal test.db2-wal copy_file test.db test.db2 sqlite3 db2 test.db2 execsql { SELECT * FROM t4 WHERE a = 'xyz' } db2 } {xyz} finish_test |
Changes to test/tkt-5ee23731f.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 | # set testdir [file dirname $argv0] source $testdir/tester.tcl do_test tkt-5ee237-1.1 { db close | | | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 | # set testdir [file dirname $argv0] source $testdir/tester.tcl do_test tkt-5ee237-1.1 { db close forcedelete test.db sqlite3 db test.db db eval { CREATE TABLE t1(x UNIQUE); INSERT INTO t1 VALUES(1); INSERT INTO t1 VALUES(2); INSERT INTO t1 SELECT x+2 FROM t1; INSERT INTO t1 SELECT x+4 FROM t1; |
︙ | ︙ |
Added test/tkt-b1d3a2e531.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 | # 2011 August 22 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # This file implements regression tests for SQLite library. # # This file implements tests for foreign keys. Specifically, it tests # that ticket b1d3a2e531 has been fixed. # set testdir [file dirname $argv0] source $testdir/tester.tcl ifcapable {!foreignkey||!trigger} { finish_test return } set testprefix tkt-b1d3a2e531 do_execsql_test 1.0 { PRAGMA foreign_keys = ON } do_execsql_test 1.1 { CREATE TABLE pp(x PRIMARY KEY); CREATE TABLE cc(y REFERENCES pp DEFERRABLE INITIALLY DEFERRED); INSERT INTO pp VALUES('abc'); INSERT INTO cc VALUES('abc'); } do_execsql_test 1.2 { BEGIN; DROP TABLE pp; DROP TABLE cc; COMMIT; } do_execsql_test 1.3 { CREATE TABLE pp(x PRIMARY KEY); CREATE TABLE cc(y REFERENCES pp DEFERRABLE INITIALLY DEFERRED); INSERT INTO pp VALUES('abc'); INSERT INTO cc VALUES('abc'); } do_execsql_test 1.4 { BEGIN; DROP TABLE cc; DROP TABLE pp; COMMIT; } do_execsql_test 2.1 { CREATE TABLE pp(x PRIMARY KEY); CREATE TABLE cc( y INTEGER PRIMARY KEY REFERENCES pp DEFERRABLE INITIALLY DEFERRED ); INSERT INTO pp VALUES(5); INSERT INTO cc VALUES(5); } do_execsql_test 2.2 { BEGIN; DROP TABLE pp; DROP TABLE cc; COMMIT; } do_execsql_test 2.3 { CREATE TABLE pp(x PRIMARY KEY); CREATE TABLE cc( y INTEGER PRIMARY KEY REFERENCES pp DEFERRABLE INITIALLY DEFERRED ); INSERT INTO pp VALUES(5); INSERT INTO cc VALUES(5); } do_execsql_test 2.4 { BEGIN; DROP TABLE cc; DROP TABLE pp; COMMIT; } do_execsql_test 3.1 { CREATE TABLE pp1(x PRIMARY KEY); CREATE TABLE cc1(y REFERENCES pp1 DEFERRABLE INITIALLY DEFERRED); CREATE TABLE pp2(x PRIMARY KEY); CREATE TABLE cc2(y REFERENCES pp1 DEFERRABLE INITIALLY DEFERRED); INSERT INTO pp1 VALUES(2200); INSERT INTO cc1 VALUES(NULL); INSERT INTO pp2 VALUES(2200); INSERT INTO cc2 VALUES(2200); } do_catchsql_test 3.2 { BEGIN; DELETE FROM pp2; DROP TABLE pp1; DROP TABLE cc1; COMMIT; } {1 {foreign key constraint failed}} do_catchsql_test 3.3 { DROP TABLE cc2; COMMIT; } {0 {}} finish_test |
Added test/tkt-d635236375.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 | # 2011 August 3 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing that bug [d63523637517386191d634e] # has been fixed. # set testdir [file dirname $argv0] source $testdir/tester.tcl set ::testprefix tkt-d635236375 do_test 1.0 { execsql { CREATE TABLE t1(id1 INTEGER PRIMARY KEY); INSERT INTO t1 VALUES(9999); CREATE TABLE t2(id2 INTEGER PRIMARY KEY); INSERT INTO t2 VALUES(12345); INSERT INTO t2 VALUES(54321); SELECT DISTINCT id1 AS x, id1 AS y FROM t1, t2; } } {9999 9999} do_test 1.1 { execsql { SELECT count(*) FROM t1, t2 GROUP BY id1, id1; } } {2} finish_test |
Changes to test/tkt-f3e5abed55.test.
︙ | ︙ | |||
10 11 12 13 14 15 16 | #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/malloc_common.tcl | | | | 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 | #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/malloc_common.tcl foreach f [glob -nocomplain test.db*mj*] { forcedelete $f } forcedelete test.db2 do_test tkt-f3e5abed55-1.1 { execsql { ATTACH 'test.db2' AS aux; CREATE TABLE main.t1(a, b); CREATE TABLE aux.t2(c, d); } |
︙ | ︙ | |||
47 48 49 50 51 52 53 | execsql COMMIT db2 execsql COMMIT } {} do_test tkt-f3e5abed55-1.6 { glob -nocomplain test.db*mj* } {} | | | 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 | execsql COMMIT db2 execsql COMMIT } {} do_test tkt-f3e5abed55-1.6 { glob -nocomplain test.db*mj* } {} foreach f [glob -nocomplain test.db*mj*] { forcedelete $f } db close db2 close # Set up a testvfs so that the next time SQLite tries to delete the # file "test.db-journal", a snapshot of the current file-system contents |
︙ | ︙ |
Changes to test/tkt1667.test.
︙ | ︙ | |||
21 22 23 24 25 26 27 | ifcapable !autovacuum||!tclvar { finish_test return } db close | | | 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 | ifcapable !autovacuum||!tclvar { finish_test return } db close forcedelete test.db test.db-journal # Set the pending byte offset such that the page it is on is # the first autovacuum pointer map page in the file (assume a page # size of 1024). set first_ptrmap_page [expr 1024/5 + 3] sqlite3_test_control_pending_byte [expr 1024 * ($first_ptrmap_page-1)] |
︙ | ︙ |
Changes to test/tkt1873.test.
︙ | ︙ | |||
20 21 22 23 24 25 26 | source $testdir/tester.tcl ifcapable !attach { finish_test return } | | | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 | source $testdir/tester.tcl ifcapable !attach { finish_test return } forcedelete test2.db test2.db-journal do_test tkt1873-1.1 { execsql { CREATE TABLE t1(x, y); ATTACH 'test2.db' AS aux; CREATE TABLE aux.t2(x, y); INSERT INTO t1 VALUES(1, 2); |
︙ | ︙ |
Changes to test/tkt2686.test.
︙ | ︙ | |||
45 46 47 48 49 50 51 | } } {} integrity_check tkt2686-$i.3 catch {db eval COMMIT} } db close | | | 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 | } } {} integrity_check tkt2686-$i.3 catch {db eval COMMIT} } db close forcedelete test.db test.db-journal sqlite3 db test.db db eval { PRAGMA page_size=1024; PRAGMA max_page_count=50; PRAGMA auto_vacuum=1; CREATE TABLE filler (fill); |
︙ | ︙ |
Changes to test/tkt2817.test.
︙ | ︙ | |||
44 45 46 47 48 49 50 | # These tests - tkt2817-2.* - are the same as the previous block, except # for the fact that the temp-table and the main table do not share the # same name. #2817 did not cause a problem with these tests. # db close | | | 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 | # These tests - tkt2817-2.* - are the same as the previous block, except # for the fact that the temp-table and the main table do not share the # same name. #2817 did not cause a problem with these tests. # db close forcedelete test.db sqlite3 db test.db do_test tkt2817-2.0 { execsql { CREATE TEMP TABLE tmp(a, b, c); INSERT INTO tmp VALUES(1, 'abc', 'def'); INSERT INTO tmp VALUES(2, 'ghi', 'jkl'); } |
︙ | ︙ |
Changes to test/tkt2820.test.
︙ | ︙ | |||
21 22 23 24 25 26 27 | # set testdir [file dirname $argv0] source $testdir/tester.tcl proc test_schema_change {testid init ddl res} { db close | | | 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 | # set testdir [file dirname $argv0] source $testdir/tester.tcl proc test_schema_change {testid init ddl res} { db close forcedelete test.db test.db-journal sqlite3 db test.db execsql $init do_test tkt2820-$testid.1 { set STMT [sqlite3_prepare db {SELECT * FROM sqlite_master} -1 DUMMY] sqlite3_step $STMT } {SQLITE_ROW} #if {$testid==3} {execsql {PRAGMA vdbe_trace=ON}} |
︙ | ︙ | |||
66 67 68 69 70 71 72 | # We further observe that prior to the fix associated with ticket #2820, # no statement journal would be created on an SQL statement that was run # while a second statement was active, as long as we are in autocommit # mode. This is incorrect. # do_test tkt2820-4.1 { db close | | | 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 | # We further observe that prior to the fix associated with ticket #2820, # no statement journal would be created on an SQL statement that was run # while a second statement was active, as long as we are in autocommit # mode. This is incorrect. # do_test tkt2820-4.1 { db close forcedelete test.db test.db-journal sqlite3 db test.db db eval { CREATE TABLE t1(a INTEGER PRIMARY KEY); INSERT INTO t1 VALUES(1); INSERT INTO t1 VALUES(2); } |
︙ | ︙ |
Changes to test/tkt2854.test.
︙ | ︙ | |||
124 125 126 127 128 129 130 | execsql { SELECT * FROM abc } db2 } {} # Check that if an attempt to obtain an exclusive lock fails because an # attached db cannot be locked, the internal exclusive flag used by # shared-cache users is correctly cleared. do_test tkt2854-1.19 { | | | 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 | execsql { SELECT * FROM abc } db2 } {} # Check that if an attempt to obtain an exclusive lock fails because an # attached db cannot be locked, the internal exclusive flag used by # shared-cache users is correctly cleared. do_test tkt2854-1.19 { forcedelete test2.db test2.db-journal sqlite3 db4 test2.db execsql { CREATE TABLE def(d, e, f) } db4 execsql { ATTACH 'test2.db' AS aux } db } {} do_test tkt2854-1.20 { execsql {BEGIN IMMEDIATE} db4 catchsql {BEGIN EXCLUSIVE} db |
︙ | ︙ |
Changes to test/tkt3457.test.
︙ | ︙ | |||
42 43 44 45 46 47 48 | execsql { CREATE TABLE t1(a, b, c); INSERT INTO t1 VALUES(1, 2, 3); BEGIN; INSERT INTO t1 VALUES(4, 5, 6); } | | | | | | < | | 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 | execsql { CREATE TABLE t1(a, b, c); INSERT INTO t1 VALUES(1, 2, 3); BEGIN; INSERT INTO t1 VALUES(4, 5, 6); } forcecopy test.db bak.db forcecopy test.db-journal bak.db-journal # Fix the first journal-header in the journal-file. Because the # journal file has not yet been synced, the 8-byte magic string at the # start of the first journal-header has not been written by SQLite. # So write it now. set fd [open bak.db-journal a+] fconfigure $fd -encoding binary -translation binary seek $fd 0 puts -nonewline $fd "\xd9\xd5\x05\xf9\x20\xa1\x63\xd7" close $fd execsql COMMIT } {} if { ![path_is_dos "."] } { do_test tkt3457-1.2 { forcecopy bak.db-journal test.db-journal file attributes test.db-journal -permissions --------- catchsql { SELECT * FROM t1 } } {1 {unable to open database file}} do_test tkt3457-1.3 { forcecopy bak.db-journal test.db-journal file attributes test.db-journal -permissions -w--w--w- catchsql { SELECT * FROM t1 } } {1 {unable to open database file}} do_test tkt3457-1.4 { forcecopy bak.db-journal test.db-journal file attributes test.db-journal -permissions r--r--r-- catchsql { SELECT * FROM t1 } } {1 {unable to open database file}} do_test tkt3457-1.5 { forcecopy bak.db-journal test.db-journal file attributes test.db-journal -permissions rw-rw-rw- catchsql { SELECT * FROM t1 } } {0 {1 2 3 4 5 6}} } finish_test |
Changes to test/tkt35xx.test.
︙ | ︙ | |||
46 47 48 49 50 51 52 | INSERT INTO t1 VALUES(1, 1, zeroblob(676)); } } {} # Trigger the problem using statement rollback. # db close | | | 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 | INSERT INTO t1 VALUES(1, 1, zeroblob(676)); } } {} # Trigger the problem using statement rollback. # db close delete_file test.db sqlite3 db test.db set big [string repeat abcdefghij 22] ;# 220 byte string do_test tkt35xx-1.2.1 { execsql { PRAGMA auto_vacuum = 0; PRAGMA page_size = 1024; CREATE TABLE t3(a INTEGER PRIMARY KEY, b); |
︙ | ︙ |
Changes to test/trigger1.test.
︙ | ︙ | |||
530 531 532 533 534 535 536 | # correctly re-installed. # # Also verify that references within trigger programs are resolved at # statement compile time, not trigger installation time. This means, for # example, that you can drop and re-create tables referenced by triggers. ifcapable tempdb&&attach { do_test trigger1-10.0 { | | | | 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 | # correctly re-installed. # # Also verify that references within trigger programs are resolved at # statement compile time, not trigger installation time. This means, for # example, that you can drop and re-create tables referenced by triggers. ifcapable tempdb&&attach { do_test trigger1-10.0 { forcedelete test2.db forcedelete test2.db-journal execsql { ATTACH 'test2.db' AS aux; } } {} do_test trigger1-10.1 { execsql { CREATE TABLE main.t4(a, b, c); |
︙ | ︙ |
Changes to test/trigger4.test.
︙ | ︙ | |||
126 127 128 129 130 131 132 | update test set b=99 where id=7; select * from test2; } } {7 99} do_test trigger4-4.1 { db close | | | | 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 | update test set b=99 where id=7; select * from test2; } } {7 99} do_test trigger4-4.1 { db close forcedelete trigtest.db forcedelete trigtest.db-journal sqlite3 db trigtest.db catchsql {drop table tbl; drop view vw} execsql { create table tbl(a integer primary key, b integer); create view vw as select * from tbl; create trigger t_del_tbl instead of delete on vw for each row begin delete from tbl where a = old.a; |
︙ | ︙ | |||
191 192 193 194 195 196 197 | } {0 {}} do_test trigger4-7.2 { execsql {select a, b from vw where a<=102 or a>=227 order by a} } {101 1001 102 2002 227 2127 228 2128} integrity_check trigger4-99.9 db close | | | 191 192 193 194 195 196 197 198 199 200 | } {0 {}} do_test trigger4-7.2 { execsql {select a, b from vw where a<=102 or a>=227 order by a} } {101 1001 102 2002 227 2127 228 2128} integrity_check trigger4-99.9 db close forcedelete trigtest.db trigtest.db-journal finish_test |
Changes to test/triggerA.test.
︙ | ︙ | |||
201 202 203 204 205 206 207 | return } source $testdir/malloc_common.tcl # Save a copy of the current database configuration. # db close | | | | | | | 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 | return } source $testdir/malloc_common.tcl # Save a copy of the current database configuration. # db close forcedelete test.db-triggerA copy_file test.db test.db-triggerA sqlite3 db test.db # Run malloc tests on the INSTEAD OF trigger firing. # do_malloc_test triggerA-3 -tclprep { db close forcedelete test.db test.db-journal forcecopy test.db-triggerA test.db sqlite3 db test.db sqlite3_extended_result_codes db 1 db eval {SELECT * FROM v5; -- warm up the cache} } -sqlbody { DELETE FROM v5 WHERE x=5; UPDATE v5 SET b=b+9900000 WHERE x BETWEEN 3 AND 5; } # Clean up the saved database copy. # forcedelete test.db-triggerA finish_test |
Changes to test/triggerC.test.
︙ | ︙ | |||
914 915 916 917 918 919 920 | INSERT INTO v2 DEFAULT VALUES; SELECT a, b, a IS NULL, b IS NULL FROM log; } } {{} {} 1 1} do_test triggerC-12.1 { db close | | | 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 | INSERT INTO v2 DEFAULT VALUES; SELECT a, b, a IS NULL, b IS NULL FROM log; } } {{} {} 1 1} do_test triggerC-12.1 { db close forcedelete test.db sqlite3 db test.db execsql { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(3, 4); INSERT INTO t1 VALUES(5, 6); |
︙ | ︙ |
Changes to test/triggerD.test.
︙ | ︙ | |||
189 190 191 192 193 194 195 | # the sqlite_master table. We cannot fix the bug simply by disallowing # "xyz.tab" since that could break legacy applications. We have to # fix the system so that the "xyz." on "xyz.tab" is ignored. # Verify that this is the case. # do_test triggerD-4.1 { db close | | | 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 | # the sqlite_master table. We cannot fix the bug simply by disallowing # "xyz.tab" since that could break legacy applications. We have to # fix the system so that the "xyz." on "xyz.tab" is ignored. # Verify that this is the case. # do_test triggerD-4.1 { db close forcedelete test.db test2.db sqlite3 db test.db db eval { CREATE TABLE t1(x); ATTACH 'test2.db' AS db2; CREATE TABLE db2.t2(y); CREATE TABLE db2.log(z); CREATE TRIGGER db2.trig AFTER INSERT ON db2.t2 BEGIN |
︙ | ︙ |
Changes to test/vacuum.test.
︙ | ︙ | |||
204 205 206 207 208 209 210 | # Ticket #427. Make sure VACUUM works when the EMPTY_RESULT_CALLBACKS # pragma is turned on. # do_test vacuum-3.1 { db close db2 close | | | 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 | # Ticket #427. Make sure VACUUM works when the EMPTY_RESULT_CALLBACKS # pragma is turned on. # do_test vacuum-3.1 { db close db2 close delete_file test.db sqlite3 db test.db execsql { PRAGMA empty_result_callbacks=on; VACUUM; } } {} |
︙ | ︙ | |||
230 231 232 233 234 235 236 | # Ticket #515. VACUUM after deleting and recreating the table that # a view refers to. Omit this test if the library is not view-enabled. # ifcapable view { do_test vacuum-5.1 { db close | | | 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 | # Ticket #515. VACUUM after deleting and recreating the table that # a view refers to. Omit this test if the library is not view-enabled. # ifcapable view { do_test vacuum-5.1 { db close forcedelete test.db sqlite3 db test.db catchsql { CREATE TABLE Test (TestID int primary key); INSERT INTO Test VALUES (NULL); CREATE VIEW viewTest AS SELECT * FROM Test; BEGIN; |
︙ | ︙ | |||
286 287 288 289 290 291 292 | execsql { select count(*) from "abc abc" WHERE a = X'00112233'; } } {1} } # Check what happens when an in-memory database is vacuumed. The | | | | 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 | execsql { select count(*) from "abc abc" WHERE a = X'00112233'; } } {1} } # Check what happens when an in-memory database is vacuumed. The # [delete_file] command covers us in case the library was compiled # without in-memory database support. # forcedelete :memory: do_test vacuum-7.0 { sqlite3 db2 :memory: execsql { CREATE TABLE t1(t); VACUUM; } db2 } {} |
︙ | ︙ | |||
333 334 335 336 337 338 339 | } {1} } db2 close # Ticket #873. VACUUM a database that has ' in its name. # do_test vacuum-8.1 { | | | | 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 | } {1} } db2 close # Ticket #873. VACUUM a database that has ' in its name. # do_test vacuum-8.1 { forcedelete a'z.db forcedelete a'z.db-journal sqlite3 db2 a'z.db execsql { CREATE TABLE t1(t); VACUUM; } db2 } {} db2 close |
︙ | ︙ | |||
379 380 381 382 383 384 385 | execsql { VACUUM; } cksum } $::cksum } | | | 379 380 381 382 383 384 385 386 387 388 | execsql { VACUUM; } cksum } $::cksum } forcedelete {a'z.db} finish_test |
Changes to test/vacuum2.test.
︙ | ︙ | |||
131 132 133 134 135 136 137 | } {ok} db2 close ifcapable autovacuum { do_test vacuum2-4.1 { db close | | | 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 | } {ok} db2 close ifcapable autovacuum { do_test vacuum2-4.1 { db close forcedelete test.db sqlite3 db test.db execsql { pragma auto_vacuum=1; create table t(a, b); insert into t values(1, 2); insert into t values(1, 2); pragma auto_vacuum=0; |
︙ | ︙ |
Changes to test/vacuum3.test.
︙ | ︙ | |||
192 193 194 195 196 197 198 | integrity_check vacuum3-3.$I.3 incr I } do_test vacuum3-4.1 { db close | | | 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 | integrity_check vacuum3-3.$I.3 incr I } do_test vacuum3-4.1 { db close delete_file test.db sqlite3 db test.db execsql { PRAGMA page_size=1024; CREATE TABLE abc(a, b, c); INSERT INTO abc VALUES(1, 2, 3); INSERT INTO abc VALUES(4, 5, 6); } |
︙ | ︙ |
Changes to test/view.test.
︙ | ︙ | |||
454 455 456 457 458 459 460 | catchsql { CREATE VIEW v12 AS SELECT a FROM t1 WHERE b=? } } {1 {parameters are not allowed in views}} ifcapable attach { do_test view-13.1 { | | | 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 | catchsql { CREATE VIEW v12 AS SELECT a FROM t1 WHERE b=? } } {1 {parameters are not allowed in views}} ifcapable attach { do_test view-13.1 { forcedelete test2.db catchsql { ATTACH 'test2.db' AS two; CREATE TABLE two.t2(x,y); CREATE VIEW v13 AS SELECT y FROM two.t2; } } {1 {view v13 cannot reference objects in database two}} } |
︙ | ︙ |
Changes to test/vtab1.test.
︙ | ︙ | |||
720 721 722 723 724 725 726 | do_test vtab1-6-8.4 { execsql { SELECT * FROM techo ORDER BY a; } } {} execsql {PRAGMA count_changes=OFF} | | | | 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 | do_test vtab1-6-8.4 { execsql { SELECT * FROM techo ORDER BY a; } } {} execsql {PRAGMA count_changes=OFF} forcedelete test2.db forcedelete test2.db-journal sqlite3 db2 test2.db execsql { CREATE TABLE techo(a PRIMARY KEY, b, c); } db2 proc check_echo_table {tn} { set ::data1 [execsql {SELECT rowid, * FROM techo}] set ::data2 [execsql {SELECT rowid, * FROM techo} db2] |
︙ | ︙ |
Changes to test/vtab7.test.
︙ | ︙ | |||
130 131 132 133 134 135 136 | set ::rc } {1 {database table is locked}} execsql {DROP TABLE newtab} # Write to an attached database from xSync(). ifcapable attach { do_test vtab7-3.1 { | | | | 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 | set ::rc } {1 {database table is locked}} execsql {DROP TABLE newtab} # Write to an attached database from xSync(). ifcapable attach { do_test vtab7-3.1 { forcedelete test2.db forcedelete test2.db-journal execsql { ATTACH 'test2.db' AS db2; CREATE TABLE db2.stuff(description, shape, color); } set ::callbacks(xSync,abc) { execsql { INSERT INTO db2.stuff VALUES('abc', 'square', 'green'); } } |
︙ | ︙ |
Changes to test/vtabC.test.
︙ | ︙ | |||
27 28 29 30 31 32 33 | # N will be the number of virtual tables we have defined. # unset -nocomplain N for {set N 1} {$N<=20} {incr N} { db close | | | 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 | # N will be the number of virtual tables we have defined. # unset -nocomplain N for {set N 1} {$N<=20} {incr N} { db close forcedelete test.db test.db-journal sqlite3 db test.db register_echo_module [sqlite3_connection_pointer db] # Create $N tables and $N virtual tables to echo them. # unset -nocomplain tablist set tablist {} |
︙ | ︙ |
Changes to test/wal.test.
︙ | ︙ | |||
25 26 27 28 29 30 31 | if { ![wal_is_ok] } { finish_test return } proc reopen_db {} { catch { db close } | | | 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 | if { ![wal_is_ok] } { finish_test return } proc reopen_db {} { catch { db close } forcedelete test.db test.db-wal test.db-wal-summary sqlite3_wal db test.db } set ::blobcnt 0 proc blob {nByte} { incr ::blobcnt return [string range [string repeat "${::blobcnt}x" $nByte] 1 $nByte] |
︙ | ︙ | |||
211 212 213 214 215 216 217 | } expr { $logsize == [file size test.db-wal] } } {1} do_test wal-4.4.5 { execsql { SELECT count(*) FROM t2 } } {1} do_test wal-4.4.6 { | | | | 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 | } expr { $logsize == [file size test.db-wal] } } {1} do_test wal-4.4.5 { execsql { SELECT count(*) FROM t2 } } {1} do_test wal-4.4.6 { forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal sqlite3 db2 test2.db execsql { SELECT count(*) FROM t2 ; SELECT count(*) FROM t1 } db2 } {1 2} do_test wal-4.4.7 { execsql { PRAGMA integrity_check } db2 } {ok} db2 close |
︙ | ︙ | |||
269 270 271 272 273 274 275 | } expr { $logsize == [file size test.db-wal] } } {1} do_test wal-4.5.5 { execsql { SELECT count(*) FROM t2 ; SELECT count(*) FROM t1 } } {1 2} do_test wal-4.5.6 { | | | | 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 | } expr { $logsize == [file size test.db-wal] } } {1} do_test wal-4.5.5 { execsql { SELECT count(*) FROM t2 ; SELECT count(*) FROM t1 } } {1 2} do_test wal-4.5.6 { forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal sqlite3 db2 test2.db execsql { SELECT count(*) FROM t2 ; SELECT count(*) FROM t1 } db2 } {1 2} do_test wal-4.5.7 { execsql { PRAGMA integrity_check } db2 } {ok} db2 close |
︙ | ︙ | |||
334 335 336 337 338 339 340 | } } {1 2 3 4} db close foreach sector {512 4096} { sqlite3_simulate_device -sectorsize $sector foreach pgsz {512 1024 2048 4096} { | | | 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 | } } {1 2 3 4} db close foreach sector {512 4096} { sqlite3_simulate_device -sectorsize $sector foreach pgsz {512 1024 2048 4096} { forcedelete test.db test.db-wal do_test wal-6.$sector.$pgsz.1 { sqlite3 db test.db -vfs devsym execsql " PRAGMA page_size = $pgsz; PRAGMA auto_vacuum = 0; PRAGMA journal_mode = wal; " |
︙ | ︙ | |||
357 358 359 360 361 362 363 | do_test wal-6.$sector.$pgsz.2 { log_deleted test.db-wal } {1} } } do_test wal-7.1 { | | | | 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 | do_test wal-6.$sector.$pgsz.2 { log_deleted test.db-wal } {1} } } do_test wal-7.1 { forcedelete test.db test.db-wal sqlite3_wal db test.db execsql { PRAGMA page_size = 1024; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); } list [file size test.db] [file size test.db-wal] } [list 1024 [wal_file_size 3 1024]] do_test wal-7.2 { execsql { PRAGMA wal_checkpoint } list [file size test.db] [file size test.db-wal] } [list 2048 [wal_file_size 3 1024]] # Execute some transactions in auto-vacuum mode to test database file # truncation. # do_test wal-8.1 { reopen_db catch { db close } forcedelete test.db test.db-wal sqlite3 db test.db db function blob blob execsql { PRAGMA auto_vacuum = 1; PRAGMA journal_mode = wal; PRAGMA auto_vacuum; |
︙ | ︙ | |||
435 436 437 438 439 440 441 | } 1024 do_test wal-9.2 { sqlite3_wal db2 test.db execsql {PRAGMA integrity_check } db2 } {ok} do_test wal-9.3 { | | | | | 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 | } 1024 do_test wal-9.2 { sqlite3_wal db2 test.db execsql {PRAGMA integrity_check } db2 } {ok} do_test wal-9.3 { forcedelete test2.db test2.db-wal copy_file test.db test2.db copy_file test.db-wal test2.db-wal sqlite3_wal db3 test2.db execsql {PRAGMA integrity_check } db3 } {ok} db3 close do_test wal-9.4 { execsql { PRAGMA wal_checkpoint } |
︙ | ︙ | |||
787 788 789 790 791 792 793 | list [expr [file size test.db]/1024] [expr [file size test.db-wal]/1044] } {3 1} do_test wal-12.3 { execsql { INSERT INTO t2 VALUES('B', 1) } list [expr [file size test.db]/1024] [expr [file size test.db-wal]/1044] } {3 2} do_test wal-12.4 { | | | | | | 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 | list [expr [file size test.db]/1024] [expr [file size test.db-wal]/1044] } {3 1} do_test wal-12.3 { execsql { INSERT INTO t2 VALUES('B', 1) } list [expr [file size test.db]/1024] [expr [file size test.db-wal]/1044] } {3 2} do_test wal-12.4 { forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal sqlite3_wal db2 test2.db execsql { SELECT * FROM t2 } db2 } {B 1} db2 close do_test wal-12.5 { execsql { PRAGMA wal_checkpoint; UPDATE t2 SET y = 2 WHERE x = 'B'; PRAGMA wal_checkpoint; UPDATE t1 SET y = 1 WHERE x = 'A'; PRAGMA wal_checkpoint; UPDATE t1 SET y = 0 WHERE x = 'A'; } execsql { SELECT * FROM t2 } } {B 2} do_test wal-12.6 { forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal sqlite3_wal db2 test2.db execsql { SELECT * FROM t2 } db2 } {B 2} db2 close db close #------------------------------------------------------------------------- |
︙ | ︙ | |||
910 911 912 913 914 915 916 | # that had an out-of-date pager-cache, the next time the connection was # used it did not realize the cache was out-of-date and proceeded to # operate with an inconsistent cache. Leading to corruption. # catch { db close } catch { db2 close } catch { db3 close } | | | 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 | # that had an out-of-date pager-cache, the next time the connection was # used it did not realize the cache was out-of-date and proceeded to # operate with an inconsistent cache. Leading to corruption. # catch { db close } catch { db2 close } catch { db3 close } forcedelete test.db test.db-wal sqlite3 db test.db sqlite3 db2 test.db do_test wal-14 { execsql { PRAGMA journal_mode = WAL; CREATE TABLE t1(a PRIMARY KEY, b); INSERT INTO t1 VALUES(randomblob(10), randomblob(100)); |
︙ | ︙ | |||
948 949 950 951 952 953 954 | catch { db close } catch { db2 close } #------------------------------------------------------------------------- # The following block of tests - wal-15.* - focus on testing the # implementation of the sqlite3_wal_checkpoint() interface. # | | | 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 | catch { db close } catch { db2 close } #------------------------------------------------------------------------- # The following block of tests - wal-15.* - focus on testing the # implementation of the sqlite3_wal_checkpoint() interface. # forcedelete test.db test.db-wal sqlite3 db test.db do_test wal-15.1 { execsql { PRAGMA auto_vacuum = 0; PRAGMA page_size = 1024; PRAGMA journal_mode = WAL; } |
︙ | ︙ | |||
1044 1045 1046 1047 1048 1049 1050 | 5 {sqlite3_wal_checkpoint db aux} SQLITE_OK 0 1 6 {sqlite3_wal_checkpoint db temp} SQLITE_OK 0 0 7 {db eval "PRAGMA main.wal_checkpoint"} {0 10 10} 1 0 8 {db eval "PRAGMA aux.wal_checkpoint"} {0 16 16} 0 1 9 {db eval "PRAGMA temp.wal_checkpoint"} {0 -1 -1} 0 0 } { do_test wal-16.$tn.1 { | | | | 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 | 5 {sqlite3_wal_checkpoint db aux} SQLITE_OK 0 1 6 {sqlite3_wal_checkpoint db temp} SQLITE_OK 0 0 7 {db eval "PRAGMA main.wal_checkpoint"} {0 10 10} 1 0 8 {db eval "PRAGMA aux.wal_checkpoint"} {0 16 16} 0 1 9 {db eval "PRAGMA temp.wal_checkpoint"} {0 -1 -1} 0 0 } { do_test wal-16.$tn.1 { forcedelete test2.db test2.db-wal test2.db-journal forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { ATTACH 'test2.db' AS aux; PRAGMA main.auto_vacuum = 0; PRAGMA aux.auto_vacuum = 0; PRAGMA main.journal_mode = WAL; |
︙ | ︙ | |||
1119 1120 1121 1122 1123 1124 1125 | 2 256 [wal_file_size 172 512] 3 512 [wal_file_size 172 512] 4 1024 [wal_file_size 172 512] 5 2048 [wal_file_size 172 512] 6 4096 [wal_file_size 176 512] 7 8192 [wal_file_size 184 512] " { | | | 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 | 2 256 [wal_file_size 172 512] 3 512 [wal_file_size 172 512] 4 1024 [wal_file_size 172 512] 5 2048 [wal_file_size 172 512] 6 4096 [wal_file_size 176 512] 7 8192 [wal_file_size 184 512] " { forcedelete test.db test.db-wal test.db-journal sqlite3_simulate_device -sectorsize $sectorsize sqlite3 db test.db -vfs devsym do_test wal-17.$tn.1 { execsql { PRAGMA auto_vacuum = 0; PRAGMA page_size = 512; |
︙ | ︙ | |||
1164 1165 1166 1167 1168 1169 1170 | # # wal-18.1.* When the first 32-bits of a frame checksum is correct but # the second 32-bits are false, and # # wal-18.2.* When the page-size field that occurs at the start of a log # file is a power of 2 greater than 16384 or smaller than 512. # | | | | | | | 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 | # # wal-18.1.* When the first 32-bits of a frame checksum is correct but # the second 32-bits are false, and # # wal-18.2.* When the page-size field that occurs at the start of a log # file is a power of 2 greater than 16384 or smaller than 512. # forcedelete test.db test.db-wal test.db-journal do_test wal-18.0 { sqlite3 db test.db execsql { PRAGMA page_size = 1024; PRAGMA auto_vacuum = 0; PRAGMA journal_mode = WAL; PRAGMA synchronous = OFF; CREATE TABLE t1(a, b, UNIQUE(a, b)); INSERT INTO t1 VALUES(0, 0); PRAGMA wal_checkpoint; INSERT INTO t1 VALUES(1, 2); -- frames 1 and 2 INSERT INTO t1 VALUES(3, 4); -- frames 3 and 4 INSERT INTO t1 VALUES(5, 6); -- frames 5 and 6 } forcecopy test.db testX.db forcecopy test.db-wal testX.db-wal db close list [file size testX.db] [file size testX.db-wal] } [list [expr 3*1024] [wal_file_size 6 1024]] unset -nocomplain nFrame result foreach {nFrame result} { 0 {0 0} 1 {0 0} 2 {0 0 1 2} 3 {0 0 1 2} 4 {0 0 1 2 3 4} 5 {0 0 1 2 3 4} 6 {0 0 1 2 3 4 5 6} } { do_test wal-18.1.$nFrame { forcecopy testX.db test.db forcecopy testX.db-wal test.db-wal hexio_write test.db-wal [expr 24 + $nFrame*(24+1024) + 20] 00000000 sqlite3 db test.db execsql { SELECT * FROM t1; PRAGMA integrity_check; |
︙ | ︙ | |||
1236 1237 1238 1239 1240 1241 1242 | binary scan $blob $scanpattern values foreach {v1 v2} $values { set c1 [expr {($c1 + $v1 + $c2)&0xFFFFFFFF}] set c2 [expr {($c2 + $v2 + $c1)&0xFFFFFFFF}] } } | | | | | 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 | binary scan $blob $scanpattern values foreach {v1 v2} $values { set c1 [expr {($c1 + $v1 + $c2)&0xFFFFFFFF}] set c2 [expr {($c2 + $v2 + $c1)&0xFFFFFFFF}] } } forcecopy test.db testX.db foreach {tn pgsz works} { 1 128 0 2 256 0 3 512 1 4 1024 1 5 2048 1 6 4096 1 7 8192 1 8 16384 1 9 32768 1 10 65536 1 11 131072 0 11 1016 0 } { if {$::SQLITE_MAX_PAGE_SIZE < $pgsz} { set works 0 } for {set pg 1} {$pg <= 3} {incr pg} { forcecopy testX.db test.db forcedelete test.db-wal # Check that the database now exists and consists of three pages. And # that there is no associated wal file. # do_test wal-18.2.$tn.$pg.1 { file exists test.db-wal } 0 do_test wal-18.2.$tn.$pg.2 { file exists test.db } 1 do_test wal-18.2.$tn.$pg.3 { file size test.db } [expr 1024*3] |
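The Tcl fragment above accumulates the WAL frame checksum two 32-bit words at a time (c1 += v1 + c2, then c2 += v2 + c1), which is the value the wal-18.* cases later corrupt one byte of. A C sketch of just that arithmetic, for reference only (this mirrors the test helper, not the wal.c implementation):

/* Illustration only: the running checksum used by the Tcl helper above.
** Data is folded in as pairs of 32-bit words, mirroring
**   set c1 [expr {($c1 + $v1 + $c2)&0xFFFFFFFF}]
**   set c2 [expr {($c2 + $v2 + $c1)&0xFFFFFFFF}]
*/
#include <stdint.h>

static void cksum_step(uint32_t *pC1, uint32_t *pC2,
                       const uint32_t *aWord, int nWord){
  int i;
  for(i=0; i+1<nWord; i+=2){
    *pC1 += aWord[i]   + *pC2;   /* c1 = c1 + v1 + c2 */
    *pC2 += aWord[i+1] + *pC1;   /* c2 = c2 + v2 + c1 */
  }
}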
︙ | ︙ | |||
1325 1326 1327 1328 1329 1330 1331 | # connection knows that it is the last connection to disconnect from # the database, so it runs a checkpoint operation. The bug was that # the connection was not updating its private copy of the wal-index # header before doing so, meaning that it could checkpoint an old # snapshot. # do_test wal-19.1 { | | | 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 | # connection knows that it is the last connection to disconnect from # the database, so it runs a checkpoint operation. The bug was that # the connection was not updating its private copy of the wal-index # header before doing so, meaning that it could checkpoint an old # snapshot. # do_test wal-19.1 { forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db sqlite3 db2 test.db execsql { PRAGMA journal_mode = WAL; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(3, 4); |
︙ | ︙ | |||
1374 1375 1376 1377 1378 1379 1380 | # the data is present and the database is not corrupt. # # At one point, SQLite was failing to grow the mapping of the wal-index # file in step 3 and the checkpoint was corrupting the database file. # do_test wal-20.1 { catch {db close} | | | 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 | # the data is present and the database is not corrupt. # # At one point, SQLite was failing to grow the mapping of the wal-index # file in step 3 and the checkpoint was corrupting the database file. # do_test wal-20.1 { catch {db close} forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { PRAGMA journal_mode = WAL; CREATE TABLE t1(x); INSERT INTO t1 VALUES(randomblob(900)); SELECT count(*) FROM t1; } |
︙ | ︙ | |||
1480 1481 1482 1483 1484 1485 1486 | #------------------------------------------------------------------------- # Test that when 1 or more pages are recovered from a WAL file, # sqlite3_log() is invoked to report this to the user. # set walfile [file nativename [file join [pwd] test.db-wal]] catch {db close} | | | 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 | #------------------------------------------------------------------------- # Test that when 1 or more pages are recovered from a WAL file, # sqlite3_log() is invoked to report this to the user. # set walfile [file nativename [file join [pwd] test.db-wal]] catch {db close} forcedelete test.db do_test wal-23.1 { faultsim_delete_and_reopen execsql { CREATE TABLE t1(a, b); PRAGMA journal_mode = WAL; INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(3, 4); |
︙ | ︙ |
Changes to test/wal2.test.
︙ | ︙ | |||
164 165 166 167 168 169 170 | do_test wal2-1.$tn.2 { set ::locks } $wal_locks } db close db2 close tvfs delete | | | 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 | do_test wal2-1.$tn.2 { set ::locks } $wal_locks } db close db2 close tvfs delete forcedelete test.db test.db-wal test.db-journal #------------------------------------------------------------------------- # This test case is very similar to the previous one, except, after # the reader reads the corrupt wal-index header, but before it has # a chance to re-read it under the cover of the RECOVER lock, the # wal-index header is replaced with a valid, but out-of-date, header. # |
︙ | ︙ | |||
271 272 273 274 275 276 277 | } execsql { SELECT count(a), sum(a) FROM t1 } db2 } $res1 } db close db2 close tvfs delete | | | 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 | } execsql { SELECT count(a), sum(a) FROM t1 } db2 } $res1 } db close db2 close tvfs delete forcedelete test.db test.db-wal test.db-journal if 0 { #------------------------------------------------------------------------- # This test case - wal2-3.* - tests the response of the library to an # SQLITE_BUSY when attempting to obtain a READ or RECOVER lock. # |
︙ | ︙ | |||
344 345 346 347 348 349 350 | execsql { SELECT count(a), sum(a) FROM t1 } } {4 10} do_test wal2-3.5 { list [info exists ::sabotage] [info exists ::locked] } {0 0} db close tvfs delete | | | 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 | execsql { SELECT count(a), sum(a) FROM t1 } } {4 10} do_test wal2-3.5 { list [info exists ::sabotage] [info exists ::locked] } {0 0} db close tvfs delete forcedelete test.db test.db-wal test.db-journal } #------------------------------------------------------------------------- # Test that a database connection using a VFS that does not support the # xShmXXX interfaces cannot open a WAL database. # |
︙ | ︙ | |||
445 446 447 448 449 450 451 | # wal2-6.5.*: # # wal2-6.6.*: Check that if the xShmLock() to reaquire a WAL read-lock when # exiting exclusive mode fails (i.e. SQLITE_IOERR), then the # connection silently remains in exclusive mode. # do_test wal2-6.1.1 { | | | 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 | # wal2-6.5.*: # # wal2-6.6.*: Check that if the xShmLock() to reaquire a WAL read-lock when # exiting exclusive mode fails (i.e. SQLITE_IOERR), then the # connection silently remains in exclusive mode. # do_test wal2-6.1.1 { forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { Pragma Journal_Mode = Wal; } } {wal} do_test wal2-6.1.2 { execsql { PRAGMA lock_status } |
︙ | ︙ | |||
488 489 490 491 492 493 494 | INSERT INTO t1 VALUES(3, 4); PRAGMA lock_status; } } {main shared temp closed} db close do_test wal2-6.2.1 { | | | 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 | INSERT INTO t1 VALUES(3, 4); PRAGMA lock_status; } } {main shared temp closed} db close do_test wal2-6.2.1 { forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { Pragma Locking_Mode = Exclusive; Pragma Journal_Mode = Wal; Pragma Lock_Status; } } {exclusive wal main exclusive temp closed} |
︙ | ︙ | |||
556 557 558 559 560 561 562 | SELECT * FROM t1; pragma lock_status; } } {1 2 3 4 5 6 main shared temp closed} db close do_test wal2-6.3.1 { | | | 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 | SELECT * FROM t1; pragma lock_status; } } {1 2 3 4 5 6 main shared temp closed} db close do_test wal2-6.3.1 { forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { PRAGMA journal_mode = WAL; PRAGMA locking_mode = exclusive; BEGIN; CREATE TABLE t1(x); INSERT INTO t1 VALUES('Chico'); |
︙ | ︙ | |||
601 602 603 604 605 606 607 | # This test - wal2-6.4.* - uses a single database connection and the # [testvfs] instrumentation to test that xShmLock() is being called # as expected when a WAL database is used with locking_mode=exclusive. # do_test wal2-6.4.1 { | | > | 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 | # This test - wal2-6.4.* - uses a single database connection and the # [testvfs] instrumentation to test that xShmLock() is being called # as expected when a WAL database is used with locking_mode=exclusive. # do_test wal2-6.4.1 { forcedelete test.db test.db-wal test.db-journal proc tvfs_cb {method args} { set ::shm_file [lindex $args 0] if {$method == "xShmLock"} { lappend ::locks [lindex $args 2] } return "SQLITE_OK" } testvfs tvfs tvfs script tvfs_cb sqlite3 db test.db -vfs tvfs set {} {} } {} set RECOVERY { {0 1 lock exclusive} {1 7 lock exclusive} {1 7 unlock exclusive} {0 1 unlock exclusive} } set READMARK0_READ { |
︙ | ︙ | |||
800 801 802 803 804 805 806 | db2 close T delete #------------------------------------------------------------------------- # Test a theory about the checksum algorithm. Theory was false and this # test did not provoke a bug. # | | | | | | 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 | db2 close T delete #------------------------------------------------------------------------- # Test a theory about the checksum algorithm. Theory was false and this # test did not provoke a bug. # forcedelete test.db test.db-wal test.db-journal do_test wal2-7.1.1 { sqlite3 db test.db execsql { PRAGMA page_size = 4096; PRAGMA journal_mode = WAL; CREATE TABLE t1(a, b); } file size test.db } {4096} do_test wal2-7.1.2 { forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal hexio_write test2.db-wal 48 FF } {1} do_test wal2-7.1.3 { sqlite3 db2 test2.db execsql { PRAGMA wal_checkpoint } db2 execsql { SELECT * FROM sqlite_master } db2 } {} db close db2 close forcedelete test.db test.db-wal test.db-journal do_test wal2-8.1.2 { sqlite3 db test.db execsql { PRAGMA auto_vacuum=OFF; PRAGMA page_size = 1024; PRAGMA journal_mode = WAL; CREATE TABLE t1(x); |
︙ | ︙ | |||
878 879 880 881 882 883 884 | # # proc get_name {method args} { set ::filename [lindex $args 0] ; tvfs filter {} } testvfs tvfs tvfs script get_name tvfs filter xShmOpen | | | 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 | # # proc get_name {method args} { set ::filename [lindex $args 0] ; tvfs filter {} } testvfs tvfs tvfs script get_name tvfs filter xShmOpen forcedelete test.db test.db-wal test.db-journal do_test wal2-9.1 { sqlite3 db test.db -vfs tvfs execsql { PRAGMA journal_mode = WAL; CREATE TABLE x(y); INSERT INTO x VALUES('Barton'); INSERT INTO x VALUES('Deakin'); |
︙ | ︙ |
Changes to test/wal3.test.
︙ | ︙ | |||
94 95 96 97 98 99 100 | do_test wal3-1.$i.4 { execsql { PRAGMA integrity_check } db2 } {ok} db2 close # Check that the file-system in its current state can be recovered. # | | | | | 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 | do_test wal3-1.$i.4 { execsql { PRAGMA integrity_check } db2 } {ok} db2 close # Check that the file-system in its current state can be recovered. # forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal forcedelete test2.db-journal sqlite3 db2 test2.db do_test wal3-1.$i.5 { execsql { SELECT count(*) FROM t1 } db2 } 4018 do_test wal3-1.$i.6 { execsql { SELECT x FROM t1 WHERE rowid = $i } } $str |
︙ | ︙ | |||
218 219 220 221 222 223 224 | } proc sync_counter {args} { foreach {method filename id flags} $args break lappend ::syncs [file tail $filename] $flags } do_test wal3-3.$tn { | | | 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 | } proc sync_counter {args} { foreach {method filename id flags} $args break lappend ::syncs [file tail $filename] $flags } do_test wal3-3.$tn { forcedelete test.db test.db-wal test.db-journal testvfs T T filter {} T script sync_counter sqlite3 db test.db -vfs T execsql { PRAGMA journal_mode = WAL } |
︙ | ︙ | |||
422 423 424 425 426 427 428 | # + The attempt to obtain the lock on aReadMark[0] fails with SQLITE_BUSY. # This can happen if a checkpoint is ongoing. In this case also simply # obtain a different read-lock. # catch {db close} testvfs T -default 1 do_test wal3-6.1.1 { | | | 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 | # + The attempt to obtain the lock on aReadMark[0] fails with SQLITE_BUSY. # This can happen if a checkpoint is ongoing. In this case also simply # obtain a different read-lock. # catch {db close} testvfs T -default 1 do_test wal3-6.1.1 { forcedelete test.db test.db-journal test.db wal sqlite3 db test.db execsql { PRAGMA auto_vacuum = off } execsql { PRAGMA journal_mode = WAL } execsql { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES('o', 't'); INSERT INTO t1 VALUES('t', 'f'); |
︙ | ︙ | |||
503 504 505 506 507 508 509 | } {1} db3 close db2 close db close do_test wal3-6.2.1 { | | | 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 | } {1} db3 close db2 close db close do_test wal3-6.2.1 { forcedelete test.db test.db-journal test.db wal sqlite3 db test.db sqlite3 db2 test.db execsql { PRAGMA auto_vacuum = off } execsql { PRAGMA journal_mode = WAL } execsql { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES('h', 'h'); |
︙ | ︙ | |||
569 570 571 572 573 574 575 | # # + The value in the aReadMark[x] slot has been modified since it was # read. # catch {db close} testvfs T -default 1 do_test wal3-7.1.1 { | | | 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 | # # + The value in the aReadMark[x] slot has been modified since it was # read. # catch {db close} testvfs T -default 1 do_test wal3-7.1.1 { forcedelete test.db test.db-journal test.db wal sqlite3 db test.db execsql { PRAGMA journal_mode = WAL; CREATE TABLE blue(red PRIMARY KEY, green); } } {wal} |
︙ | ︙ | |||
626 627 628 629 630 631 632 | db close db2 close T delete #------------------------------------------------------------------------- # do_test wal3-8.1.1 { | | | 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 | db close db2 close T delete #------------------------------------------------------------------------- # do_test wal3-8.1.1 { forcedelete test.db test.db-journal test.db wal .test.db-conch sqlite3 db test.db sqlite3 db2 test.db execsql { PRAGMA auto_vacuum = off; PRAGMA journal_mode = WAL; CREATE TABLE b(c); INSERT INTO b VALUES('Tehran'); |
︙ | ︙ | |||
723 724 725 726 727 728 729 | # on any aReadMark[] slot (because there are already several readers), # the client takes a shared-lock on a slot without modifying the value # and continues. # set nConn 50 if { [string match *BSD $tcl_platform(os)] } { set nConn 25 } do_test wal3-9.0 { | | | 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 | # on any aReadMark[] slot (because there are already several readers), # the client takes a shared-lock on a slot without modifying the value # and continues. # set nConn 50 if { [string match *BSD $tcl_platform(os)] } { set nConn 25 } do_test wal3-9.0 { forcedelete test.db test.db-journal test.db wal sqlite3 db test.db execsql { PRAGMA page_size = 1024; PRAGMA journal_mode = WAL; CREATE TABLE whoami(x); INSERT INTO whoami VALUES('nobody'); } |
︙ | ︙ |
Changes to test/wal4.test.
︙ | ︙ | |||
31 32 33 34 35 36 37 | } } {wal 1 2} do_test wal4-1.2 { # Save a copy of the file-system containing the wal and wal-index files # only (no database file). faultsim_save_and_close | | | 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 | } } {wal 1 2} do_test wal4-1.2 { # Save a copy of the file-system containing the wal and wal-index files # only (no database file). faultsim_save_and_close forcedelete sv_test.db } {} do_test wal4-1.3 { faultsim_restore_and_reopen catchsql { SELECT * FROM t1 } } {1 {no such table: t1}} |
︙ | ︙ |
Changes to test/walbak.test.
︙ | ︙ | |||
48 49 50 51 52 53 54 | BEGIN; CREATE TABLE t1(a PRIMARY KEY, b); INSERT INTO t1 VALUES('I', 'one'); COMMIT; } } {wal} do_test walbak-1.1 { | | | 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 | BEGIN; CREATE TABLE t1(a PRIMARY KEY, b); INSERT INTO t1 VALUES('I', 'one'); COMMIT; } } {wal} do_test walbak-1.1 { forcedelete bak.db bak.db-journal bak.db-wal db backup bak.db file size bak.db } [expr 3*1024] do_test walbak-1.2 { sqlite3 db2 bak.db execsql { SELECT * FROM t1; |
︙ | ︙ | |||
79 80 81 82 83 84 85 86 87 88 89 90 91 92 | do_test walbak-1.5 { list [file size test.db] [file size test.db-wal] } [list 1024 [wal_file_size 6 1024]] do_test walbak-1.6 { execsql { PRAGMA wal_checkpoint } list [file size test.db] [file size test.db-wal] } [list [expr 3*1024] [wal_file_size 6 1024]] do_test walbak-1.7 { execsql { CREATE TABLE t2(a, b); INSERT INTO t2 SELECT * FROM t1; DROP TABLE t1; } list [file size test.db] [file size test.db-wal] | > > > | 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 | do_test walbak-1.5 { list [file size test.db] [file size test.db-wal] } [list 1024 [wal_file_size 6 1024]] do_test walbak-1.6 { execsql { PRAGMA wal_checkpoint } list [file size test.db] [file size test.db-wal] } [list [expr 3*1024] [wal_file_size 6 1024]] do_test walbak-1.6.1 { hexio_read test.db 18 2 } {0202} do_test walbak-1.7 { execsql { CREATE TABLE t2(a, b); INSERT INTO t2 SELECT * FROM t1; DROP TABLE t1; } list [file size test.db] [file size test.db-wal] |
︙ | ︙ | |||
106 107 108 109 110 111 112 | proc sig {{db db}} { $db eval { PRAGMA integrity_check; SELECT md5sum(a, b) FROM t1; } } db close | | | 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 | proc sig {{db db}} { $db eval { PRAGMA integrity_check; SELECT md5sum(a, b) FROM t1; } } db close delete_file test.db sqlite3 db test.db do_test walbak-2.1 { execsql { PRAGMA journal_mode = WAL } execsql { CREATE TABLE t1(a PRIMARY KEY, b); BEGIN; INSERT INTO t1 VALUES(randomblob(500), randomblob(500)); |
︙ | ︙ | |||
236 237 238 239 240 241 242 | PRAGMA page_size = 2048; PRAGMA journal_mode = PERSIST; CREATE TABLE xx(x); } } } { | | | 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 | PRAGMA page_size = 2048; PRAGMA journal_mode = PERSIST; CREATE TABLE xx(x); } } } { foreach f [glob -nocomplain test.db*] { forcedelete $f } eval $setup do_test walbak-3.$tn.1 { execsql { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); |
︙ | ︙ | |||
270 271 272 273 274 275 276 277 278 279 280 281 | do_test walbak-3.$tn.4 { sqlite3_backup B db main db2 main B step 10000 B finish execsql { SELECT * FROM t1 } } {1 2 3 4 5 6 7 8} db close db2 close } | > > > > > > > > > > > > > > > > > > > > > > > | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 | do_test walbak-3.$tn.4 { sqlite3_backup B db main db2 main B step 10000 B finish execsql { SELECT * FROM t1 } } {1 2 3 4 5 6 7 8} # Check that [db] is still in WAL mode. do_test walbak-3.$tn.5 { execsql { PRAGMA journal_mode } } {wal} do_test walbak-3.$tn.6 { execsql { PRAGMA wal_checkpoint } hexio_read test.db 18 2 } {0202} # If it was not an in-memory database, check that [db2] is still in # rollback mode. if {[file exists test.db2]} { do_test walbak-3.$tn.7 { execsql { PRAGMA journal_mode } db2 } {wal} do_test walbak-3.$tn.8 { execsql { PRAGMA wal_checkpoint } hexio_read test.db 18 2 } {0202} } db close db2 close } #------------------------------------------------------------------------- # Test that the following holds when a backup operation is run: # # Source | Destination inital | Destination final # --------------------------------------------------- # Rollback Rollback Rollback # Rollback WAL WAL # WAL Rollback WAL # WAL WAL WAL # foreach {tn src dest dest_final} { 1 delete delete delete 2 delete wal wal 3 wal delete wal 4 wal wal wal } { catch { db close } catch { db2 close } forcedelete test.db test.db2 do_test walbak-4.$tn.1 { sqlite3 db test.db db eval "PRAGMA journal_mode = $src" db eval { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES('I', 'II'); INSERT INTO t1 VALUES('III', 'IV'); } sqlite3 db2 test.db2 db2 eval "PRAGMA journal_mode = $dest" db2 eval { CREATE TABLE t2(x, y); INSERT INTO t2 VALUES('1', '2'); INSERT INTO t2 VALUES('3', '4'); } } {} do_test walbak-4.$tn.2 { execsql { PRAGMA journal_mode } db } $src do_test walbak-4.$tn.3 { execsql { PRAGMA journal_mode } db2 } $dest do_test walbak-4.$tn.4 { db backup test.db2 } {} do_test walbak-4.$tn.5 { execsql { SELECT * FROM t1 } db2 } {I II III IV} do_test walbak-4.$tn.5 { execsql { PRAGMA journal_mode } db2 } $dest_final db2 close do_test walbak-4.$tn.6 { file exists test.db2-wal } 0 sqlite3 db2 test.db2 do_test walbak-4.$tn.7 { execsql { PRAGMA journal_mode } db2 } $dest_final } finish_test |
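The new walbak-4.* matrix drives "db backup test.db2" from Tcl and checks which journal mode the destination file ends up in. For reference, the equivalent C-level operation uses the online backup API roughly as sketched below; backup_whole_db is a made-up name and error handling is trimmed.

/* Simplified sketch: copy an open source database into a destination
** file with the online backup API, the same operation the walbak-4.*
** tests invoke through the Tcl "backup" method. */
#include "sqlite3.h"

static int backup_whole_db(sqlite3 *pSrc, const char *zDestFile){
  sqlite3 *pDest = 0;
  int rc = sqlite3_open(zDestFile, &pDest);
  if( rc==SQLITE_OK ){
    sqlite3_backup *pBak = sqlite3_backup_init(pDest, "main", pSrc, "main");
    if( pBak ){
      sqlite3_backup_step(pBak, -1);   /* copy every page in one pass */
      sqlite3_backup_finish(pBak);
    }
    rc = sqlite3_errcode(pDest);
  }
  sqlite3_close(pDest);
  return rc;
}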
Changes to test/walcksum.test.
︙ | ︙ | |||
153 154 155 156 157 158 159 | if {$::tcl_platform(byteOrder) == "littleEndian"} { set native "little" } foreach endian {big little} { # Create a database. Leave some data in the log file. # do_test walcksum-1.$endian.1 { catch { db close } | | | | | 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 | if {$::tcl_platform(byteOrder) == "littleEndian"} { set native "little" } foreach endian {big little} { # Create a database. Leave some data in the log file. # do_test walcksum-1.$endian.1 { catch { db close } forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { PRAGMA page_size = 1024; PRAGMA auto_vacuum = 0; PRAGMA synchronous = NORMAL; CREATE TABLE t1(a PRIMARY KEY, b); INSERT INTO t1 VALUES(1, 'one'); INSERT INTO t1 VALUES(2, 'two'); INSERT INTO t1 VALUES(3, 'three'); INSERT INTO t1 VALUES(5, 'five'); PRAGMA journal_mode = WAL; INSERT INTO t1 VALUES(8, 'eight'); INSERT INTO t1 VALUES(13, 'thirteen'); INSERT INTO t1 VALUES(21, 'twentyone'); } forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal db close list [file size test2.db] [file size test2.db-wal] } [list [expr 1024*3] [wal_file_size 6 1024]] # Verify that the checksums are valid for all frames and that they # are calculated by interpreting data in native byte-order. |
︙ | ︙ | |||
199 200 201 202 203 204 205 | for {set f 1} {$f <= 6} {incr f} { do_test walcksum-1.$endian.3.$f { log_checksum_write test2.db-wal $f $endian log_checksum_verify test2.db-wal $f $endian } {1} } do_test walcksum-1.$endian.4.1 { | | | | 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 | for {set f 1} {$f <= 6} {incr f} { do_test walcksum-1.$endian.3.$f { log_checksum_write test2.db-wal $f $endian log_checksum_verify test2.db-wal $f $endian } {1} } do_test walcksum-1.$endian.4.1 { forcecopy test2.db test.db forcecopy test2.db-wal test.db-wal sqlite3 db test.db execsql { SELECT a FROM t1 } } {1 2 3 5 8 13 21} # Following recovery, any frames written to the log should use the same # endianness as the existing frames. Check that this is the case. # |
︙ | ︙ | |||
248 249 250 251 252 253 254 | log_checksum_verify test.db-wal $f $endian } {1} } # Now that both the recoverer and non-recoverer have added frames to the # log file, check that it can still be recovered. # | | | | 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 | log_checksum_verify test.db-wal $f $endian } {1} } # Now that both the recoverer and non-recoverer have added frames to the # log file, check that it can still be recovered. # forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal do_test walcksum-1.$endian.7.11 { sqlite3 db3 test2.db execsql { PRAGMA integrity_check; SELECT a FROM t1; } db3 } {ok 1 2 3 5 8 13 21 34 55} |
︙ | ︙ | |||
294 295 296 297 298 299 300 | #------------------------------------------------------------------------- # Test case walcksum-2.* tests that if a statement transaction is rolled # back after frames are written to the WAL, and then (after writing some # more) the outer transaction is committed, the WAL file is still correctly # formatted (and can be recovered by a second process if required). # do_test walcksum-2.1 { | | | 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 | #------------------------------------------------------------------------- # Test case walcksum-2.* tests that if a statement transaction is rolled # back after frames are written to the WAL, and then (after writing some # more) the outer transaction is committed, the WAL file is still correctly # formatted (and can be recovered by a second process if required). # do_test walcksum-2.1 { forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { PRAGMA synchronous = NORMAL; PRAGMA page_size = 1024; PRAGMA journal_mode = WAL; PRAGMA cache_size = 10; CREATE TABLE t1(x PRIMARY KEY); |
︙ | ︙ | |||
322 323 324 325 326 327 328 | INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 32 */ INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 64 */ INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 128 */ INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 256 */ COMMIT; } | | | | | | | | | 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 | INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 32 */ INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 64 */ INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 128 */ INSERT INTO t1 SELECT randomblob(800) FROM t1; /* 256 */ COMMIT; } forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal sqlite3 db2 test2.db execsql { PRAGMA integrity_check; SELECT count(*) FROM t1; } db2 } {ok 256} catch { db close } catch { db2 close } #------------------------------------------------------------------------- # Test case walcksum-3.* tests that the checksum calculation detects single # byte changes to frame or frame-header data and considers the frame # invalid as a result. # do_test walcksum-3.1 { forcedelete test.db test.db-wal test.db-journal sqlite3 db test.db execsql { PRAGMA synchronous = NORMAL; PRAGMA page_size = 1024; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, randomblob(300)); INSERT INTO t1 VALUES(2, randomblob(300)); PRAGMA journal_mode = WAL; INSERT INTO t1 VALUES(3, randomblob(300)); } file size test.db-wal } [wal_file_size 1 1024] do_test walcksum-3.2 { forcecopy test.db-wal test2.db-wal forcecopy test.db test2.db sqlite3 db2 test2.db execsql { SELECT a FROM t1 } db2 } {1 2 3} db2 close forcecopy test.db test2.db foreach incr {1 2 3 20 40 60 80 100 120 140 160 180 200 220 240 253 254 255} { do_test walcksum-3.3.$incr { set FAIL 0 for {set iOff 0} {$iOff < [wal_file_size 1 1024]} {incr iOff} { forcecopy test.db-wal test2.db-wal set fd [open test2.db-wal r+] fconfigure $fd -encoding binary fconfigure $fd -translation binary seek $fd $iOff binary scan [read $fd 1] c x seek $fd $iOff |
︙ | ︙ |
Changes to test/walcrash.test.
︙ | ︙ | |||
37 38 39 40 41 42 43 | set seed 0 set REPEATS 100 # walcrash-1.* # for {set i 1} {$i < $REPEATS} {incr i} { | | | 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 | set seed 0 set REPEATS 100 # walcrash-1.* # for {set i 1} {$i < $REPEATS} {incr i} { forcedelete test.db test.db-wal do_test walcrash-1.$i.1 { crashsql -delay 4 -file test.db-wal -seed [incr seed] { PRAGMA journal_mode = WAL; CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 1); INSERT INTO t1 VALUES(2, 3); INSERT INTO t1 VALUES(3, 6); |
︙ | ︙ | |||
74 75 76 77 78 79 80 | } {wal} db close } # walcrash-2.* # for {set i 1} {$i < $REPEATS} {incr i} { | | | 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 | } {wal} db close } # walcrash-2.* # for {set i 1} {$i < $REPEATS} {incr i} { forcedelete test.db test.db-wal do_test walcrash-2.$i.1 { crashsql -delay 4 -file test.db-wal -seed [incr seed] { PRAGMA journal_mode = WAL; CREATE TABLE t1(a PRIMARY KEY, b); INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(3, 4); INSERT INTO t1 VALUES(5, 9); |
︙ | ︙ | |||
111 112 113 114 115 116 117 | } {wal} db close } # walcrash-3.* # # for {set i 1} {$i < $REPEATS} {incr i} { | | | | 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 | } {wal} db close } # walcrash-3.* # # for {set i 1} {$i < $REPEATS} {incr i} { # forcedelete test.db test.db-wal # forcedelete test2.db test2.db-wal # # do_test walcrash-3.$i.1 { # crashsql -delay 2 -file test2.db-wal -seed [incr seed] { # PRAGMA journal_mode = WAL; # ATTACH 'test2.db' AS aux; # CREATE TABLE t1(a PRIMARY KEY, b); # CREATE TABLE aux.t2(a PRIMARY KEY, b); |
︙ | ︙ | |||
143 144 145 146 147 148 149 | # # db close # } # walcrash-4.* # for {set i 1} {$i < $REPEATS} {incr i} { | | | | 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 | # # db close # } # walcrash-4.* # for {set i 1} {$i < $REPEATS} {incr i} { forcedelete test.db test.db-wal forcedelete test2.db test2.db-wal do_test walcrash-4.$i.1 { crashsql -delay 3 -file test.db-wal -seed [incr seed] -blocksize 4096 { PRAGMA journal_mode = WAL; PRAGMA page_size = 1024; CREATE TABLE t1(a PRIMARY KEY, b); INSERT INTO t1 VALUES(1, 2); |
︙ | ︙ | |||
171 172 173 174 175 176 177 | db close } # walcrash-5.* # for {set i 1} {$i < $REPEATS} {incr i} { | | | | 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 | db close } # walcrash-5.* # for {set i 1} {$i < $REPEATS} {incr i} { forcedelete test.db test.db-wal forcedelete test2.db test2.db-wal do_test walcrash-5.$i.1 { crashsql -delay 11 -file test.db-wal -seed [incr seed] -blocksize 4096 { PRAGMA journal_mode = WAL; PRAGMA page_size = 1024; BEGIN; CREATE TABLE t1(x PRIMARY KEY); |
︙ | ︙ | |||
212 213 214 215 216 217 218 | db close } # walcrash-6.* # for {set i 1} {$i < $REPEATS} {incr i} { | | | | 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 | db close } # walcrash-6.* # for {set i 1} {$i < $REPEATS} {incr i} { forcedelete test.db test.db-wal forcedelete test2.db test2.db-wal do_test walcrash-6.$i.1 { crashsql -delay 12 -file test.db-wal -seed [incr seed] -blocksize 512 { PRAGMA journal_mode = WAL; PRAGMA page_size = 1024; BEGIN; CREATE TABLE t1(x PRIMARY KEY); |
︙ | ︙ | |||
262 263 264 265 266 267 268 | # # (a) that the database is a WAL database, and # (b) the database page-size # # based on the log file. # for {set i 1} {$i < $REPEATS} {incr i} { | | | 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 | # # (a) that the database is a WAL database, and # (b) the database page-size # # based on the log file. # for {set i 1} {$i < $REPEATS} {incr i} { forcedelete test.db test.db-wal # Select a page-size for this test. # set pgsz [lindex {512 1024 2048 4096 8192 16384} [expr $i%6]] do_test walcrash-7.$i.1 { crashsql -delay 3 -file test.db -seed [incr seed] -blocksize 512 " |
︙ | ︙ |
Changes to test/walfault.test.
︙ | ︙ | |||
465 466 467 468 469 470 471 | BEGIN; CREATE TABLE abc(a PRIMARY KEY); INSERT INTO abc VALUES(randomblob(1500)); INSERT INTO abc VALUES(randomblob(1500)); COMMIT; } faultsim_save_and_close | | | 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 | BEGIN; CREATE TABLE abc(a PRIMARY KEY); INSERT INTO abc VALUES(randomblob(1500)); INSERT INTO abc VALUES(randomblob(1500)); COMMIT; } faultsim_save_and_close delete_file sv_test.db-shm } {} do_faultsim_test walfault-13.1 -prep { faultsim_restore_and_reopen } -body { db eval { PRAGMA locking_mode = exclusive } db eval { SELECT count(*) FROM abc } |
︙ | ︙ |
Changes to test/walmode.test.
︙ | ︙ | |||
313 314 315 316 317 318 319 | #------------------------------------------------------------------------- # Test the effect of a "PRAGMA journal_mode" command being the first # thing executed by a new connection. This means that the schema is not # loaded when sqlite3_prepare_v2() is called to compile the statement. # do_test walmode-7.0 { | | | 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 | #------------------------------------------------------------------------- # Test the effect of a "PRAGMA journal_mode" command being the first # thing executed by a new connection. This means that the schema is not # loaded when sqlite3_prepare_v2() is called to compile the statement. # do_test walmode-7.0 { forcedelete test.db sqlite3 db test.db execsql { PRAGMA journal_mode = WAL; CREATE TABLE t1(a, b); } } {wal} foreach {tn sql result} { |
︙ | ︙ |
Changes to test/walnoshm.test.
︙ | ︙ | |||
83 84 85 86 87 88 89 | PRAGMA locking_mode = exclusive; PRAGMA journal_mode = WAL; INSERT INTO t2 VALUES('e', 'f'); INSERT INTO t2 VALUES('g', 'h'); } {exclusive wal} do_test 2.1.3 { | | | | | | 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 | PRAGMA locking_mode = exclusive; PRAGMA journal_mode = WAL; INSERT INTO t2 VALUES('e', 'f'); INSERT INTO t2 VALUES('g', 'h'); } {exclusive wal} do_test 2.1.3 { forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal sqlite3 db2 test2.db catchsql { SELECT * FROM t2 } db2 } {1 {unable to open database file}} do_test 2.1.4 { catchsql { PRAGMA journal_mode = delete } db2 } {1 {unable to open database file}} do_test 2.1.5 { execsql { PRAGMA locking_mode = exclusive; PRAGMA journal_mode = delete; SELECT * FROM t2; } db2 } {exclusive delete a b c d e f g h} do_test 2.2.1 { forcecopy test.db test2.db forcecopy test.db-wal test2.db-wal sqlite3 db3 test2.db -vfs tvfsshm sqlite3 db2 test2.db execsql { SELECT * FROM t2 } db3 } {a b c d e f g h} do_test 2.2.2 { execsql { PRAGMA locking_mode = exclusive } db2 |
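These walnoshm cases show that a connection whose VFS lacks the xShm methods cannot open a WAL database directly ("unable to open database file"), but can once it takes exclusive locking mode first. A minimal sketch of that pattern, assuming only the documented pragmas (the helper name is made up):

/* Minimal sketch: read a WAL database without shared memory by switching
** to exclusive locking mode before the first access, as the walnoshm
** tests do.  Error handling is abbreviated. */
#include "sqlite3.h"

static int open_wal_without_shm(const char *zFile, sqlite3 **ppDb){
  int rc = sqlite3_open(zFile, ppDb);
  if( rc==SQLITE_OK ){
    /* Must happen before the first read of the WAL database. */
    rc = sqlite3_exec(*ppDb, "PRAGMA locking_mode=EXCLUSIVE", 0, 0, 0);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(*ppDb, "SELECT count(*) FROM sqlite_master", 0, 0, 0);
  }
  return rc;
}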
︙ | ︙ |
Changes to test/walslow.test.
︙ | ︙ | |||
21 22 23 24 25 26 27 | if { ![wal_is_ok] } { finish_test return } proc reopen_db {} { catch { db close } | | | 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 | if { ![wal_is_ok] } { finish_test return } proc reopen_db {} { catch { db close } forcedelete test.db test.db-wal sqlite3 db test.db execsql { PRAGMA journal_mode = wal } } db close save_prng_state for {set seed 1} {$seed<10} {incr seed} { |
︙ | ︙ | |||
53 54 55 56 57 58 59 | do_test walslow-1.seed=$seed.$iTest.2 { execsql "PRAGMA wal_checkpoint;" execsql { PRAGMA integrity_check } } {ok} do_test walslow-1.seed=$seed.$iTest.3 { | | | | | 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 | do_test walslow-1.seed=$seed.$iTest.2 { execsql "PRAGMA wal_checkpoint;" execsql { PRAGMA integrity_check } } {ok} do_test walslow-1.seed=$seed.$iTest.3 { forcedelete testX.db testX.db-wal copy_file test.db testX.db copy_file test.db-wal testX.db-wal sqlite3 db2 testX.db execsql { PRAGMA journal_mode = WAL } db2 execsql { PRAGMA integrity_check } db2 } {ok} do_test walslow-1.seed=$seed.$iTest.4 { |
︙ | ︙ |
Changes to test/walthread.test.
︙ | ︙ | |||
129 130 131 132 133 134 135 | puts "Skipping $P(testname)" return } puts "Running $P(testname) for $P(seconds) seconds..." catch { db close } | | | 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 | puts "Skipping $P(testname)" return } puts "Running $P(testname) for $P(seconds) seconds..." catch { db close } forcedelete test.db test.db-journal test.db-wal sqlite3 db test.db eval $P(init) catch { db close } foreach T $P(threads) { set name [lindex $T 0] |
︙ | ︙ | |||
508 509 510 511 512 513 514 | INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 8192 */ INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 16384 */ INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 32768 */ INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 65536 */ COMMIT; } | | | | | | 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 | INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 8192 */ INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 16384 */ INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 32768 */ INSERT INTO t1 SELECT randomblob(900) FROM t1; /* 65536 */ COMMIT; } forcecopy test.db-wal bak.db-wal forcecopy test.db bak.db db close forcecopy bak.db-wal test.db-wal forcecopy bak.db test.db if {[file size test.db-wal] < [log_file_size [expr 64*1024] 1024]} { error "Somehow failed to create a large log file" } puts "Database with large log file recovered. Now running clients..." } -thread T 5 { db eval { SELECT count(*) FROM t1 } } unset -nocomplain seconds finish_test |
Changes to test/win32lock.test.
︙ | ︙ | |||
37 38 39 40 41 42 43 | INSERT INTO t1 VALUES(3,randomblob(25000)); INSERT INTO t1 VALUES(4,randomblob(12500)); SELECT x, length(y) FROM t1 ORDER BY rowid; } } {1 100000 2 50000 3 25000 4 12500} unset -nocomplain delay1 rc msg | < < > > > > < > > | < > | < > | > > > > > > > > > > > | < > > | < > | | < > | > | 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 | INSERT INTO t1 VALUES(3,randomblob(25000)); INSERT INTO t1 VALUES(4,randomblob(12500)); SELECT x, length(y) FROM t1 ORDER BY rowid; } } {1 100000 2 50000 3 25000 4 12500} unset -nocomplain delay1 rc msg set old_pending_byte [sqlite3_test_control_pending_byte 0x40000000] set win32_lock_ok [list] set win32_lock_error [list] set delay1 25 while {1} { lock_win32_file test.db 0 $::delay1 set ::log {} set rc [catch {db eval {SELECT x, length(y) FROM t1 ORDER BY rowid}} msg] if {$rc} { lappend win32_lock_error $::delay1 do_test win32lock-1.2-$delay1-error { set ::msg } {disk I/O error} } else { lappend win32_lock_ok $::delay1 do_test win32lock-1.2-$delay1-ok { set ::msg } {1 100000 2 50000 3 25000 4 12500} if {[info exists ::log] && $::log!=""} { do_test win32lock-1.2-$delay1-log1 { regsub {\d+} $::log # x set x } {{delayed #ms for lock/sharing conflict}} } } if {[llength $win32_lock_ok] && [llength $win32_lock_error]} break incr delay1 25 sqlite3_sleep 10 } do_test win32lock-2.0 { file_control_win32_av_retry db -1 -1 } {0 10 25} do_test win32lock-2.1 { file_control_win32_av_retry db 1 1 } {0 1 1} # # NOTE: It is known that the win32lock-2.2-* tests may fail if the system is # experiencing heavy load (i.e. they are very timing sensitive). This is # primarily due to the AV retry delay being set to 1 millisecond in the # win32lock-2.1 test (above). While it is important to test this corner # case for the AV retry logic, a failure of this test should probably not # be interpreted as a bug in SQLite or these test cases. # set win32_lock_ok [list] set win32_lock_error [list] set delay1 1 while {1} { lock_win32_file test.db 0 $::delay1 set ::log {} set rc [catch {db eval {SELECT x, length(y) FROM t1 ORDER BY rowid}} msg] if {$rc} { lappend win32_lock_error $::delay1 do_test win32lock-2.2-$delay1-error { set ::msg } {disk I/O error} } else { lappend win32_lock_ok $::delay1 do_test win32lock-2.2-$delay1-ok { set ::msg } {1 100000 2 50000 3 25000 4 12500} if {[info exists ::log] && $::log!=""} { do_test win32lock-2.2-$delay1-log1 { regsub {\d+} $::log # x set x } {{delayed #ms for lock/sharing conflict}} } } if {[llength $win32_lock_ok] && [llength $win32_lock_error]} break incr delay1 1 sqlite3_sleep 10 } file_control_win32_av_retry db 10 25 sqlite3_test_control_pending_byte $old_pending_byte sqlite3_shutdown test_sqlite3_log sqlite3_initialize |
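The win32lock-2.* tests adjust the anti-virus retry behaviour through the Tcl wrapper [file_control_win32_av_retry]. At the C level this corresponds to the SQLITE_FCNTL_WIN32_AV_RETRY file control; the sketch below assumes the two-int in/out array convention (pass -1 to query a value instead of setting it) and is Win32-specific, so check os_win.c for the authoritative semantics.

/* Assumed usage of the SQLITE_FCNTL_WIN32_AV_RETRY file control:
** a[0] is the retry count and a[1] the delay in milliseconds; -1 leaves
** a value unchanged, and the current settings are written back into
** the array. */
#include "sqlite3.h"

static int set_av_retry(sqlite3 *db, int nRetry, int nDelayMs){
  int a[2];
  a[0] = nRetry;
  a[1] = nDelayMs;
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_WIN32_AV_RETRY, a);
}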
︙ | ︙ |
Added tool/extract.c.
/*
** Extract a range of bytes from a file.
**
** Usage:
**
**    extract FILENAME OFFSET AMOUNT
**
** The bytes are written to standard output.
*/
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv){
  FILE *f;
  char *zBuf;
  int ofst;
  int n;
  size_t got;

  if( argc!=4 ){
    fprintf(stderr, "Usage: %s FILENAME OFFSET AMOUNT\n", *argv);
    return 1;
  }
  f = fopen(argv[1], "rb");
  if( f==0 ){
    fprintf(stderr, "cannot open \"%s\"\n", argv[1]);
    return 1;
  }
  ofst = atoi(argv[2]);
  n = atoi(argv[3]);
  zBuf = malloc( n );
  if( zBuf==0 ){
    fprintf(stderr, "out of memory\n");
    return 1;
  }
  fseek(f, ofst, SEEK_SET);
  got = fread(zBuf, 1, n, f);
  fclose(f);
  if( got<n ){
    fprintf(stderr, "got only %d of %d bytes\n", got, n);
    return 1;
  }else{
    fwrite(zBuf, 1, n, stdout);
  }
  return 0;
}
Changes to tool/mksqlite3c.tcl.
︙ | ︙ | |||
26 27 28 29 30 31 32 33 34 35 36 37 38 39 | # from in this file. The versioon number is needed to generate the header # comment of the amalgamation. # if {[lsearch $argv --nostatic]>=0} { set addstatic 0 } else { set addstatic 1 } set in [open tsrc/sqlite3.h] set cnt 0 set VERSION ????? while {![eof $in]} { set line [gets $in] if {$line=="" && [eof $in]} break | > > > > > | 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | # from in this file. The versioon number is needed to generate the header # comment of the amalgamation. # if {[lsearch $argv --nostatic]>=0} { set addstatic 0 } else { set addstatic 1 } if {[lsearch $argv --linemacros]>=0} { set linemacros 1 } else { set linemacros 0 } set in [open tsrc/sqlite3.h] set cnt 0 set VERSION ????? while {![eof $in]} { set line [gets $in] if {$line=="" && [eof $in]} break |
︙ | ︙ | |||
133 134 135 136 137 138 139 | } # Read the source file named $filename and write it into the # sqlite3.c output file. If any #include statements are seen, # process them approprately. # proc copy_file {filename} { | | > > > > > > | | 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 | } # Read the source file named $filename and write it into the # sqlite3.c output file. If any #include statements are seen, # process them approprately. # proc copy_file {filename} { global seen_hdr available_hdr out addstatic linemacros set ln 0 set tail [file tail $filename] section_comment "Begin file $tail" if {$linemacros} {puts $out "#line 1 \"$filename\""} set in [open $filename r] set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)} set declpattern {[a-zA-Z][a-zA-Z_0-9 ]+ \**(sqlite3[_a-zA-Z0-9]+)\(} if {[file extension $filename]==".h"} { set declpattern " *$declpattern" } set declpattern ^$declpattern while {![eof $in]} { set line [gets $in] incr ln if {[regexp {^\s*#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { if {[info exists available_hdr($hdr)]} { if {$available_hdr($hdr)} { if {$hdr!="os_common.h" && $hdr!="hwtime.h"} { set available_hdr($hdr) 0 } section_comment "Include $hdr in the middle of $tail" copy_file tsrc/$hdr section_comment "Continuing where we left off in $tail" if {$linemacros} {puts $out "#line [expr {$ln+1}] \"$filename\""} } } elseif {![info exists seen_hdr($hdr)]} { set seen_hdr($hdr) 1 puts $out $line } else { puts $out "/* $line */" } } elseif {[regexp {^#ifdef __cplusplus} $line]} { puts $out "#if 0" } elseif {!$linemacros && [regexp {^#line} $line]} { # Skip #line directives. } elseif {$addstatic && ![regexp {^(static|typedef)} $line]} { regsub {^SQLITE_API } $line {} line if {[regexp $declpattern $line all funcname]} { # Add the SQLITE_PRIVATE or SQLITE_API keyword before functions. # so that linkage can be modified at compile-time. if {[regexp {^sqlite3_} $funcname]} { |
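With the new --linemacros option, copy_file prefixes each copied source file (and each return from a nested include) with a #line directive. The fragment below is only an illustration of what the generated amalgamation would then contain, not actual output from this script:

/* Illustrative output only: what sqlite3.c looks like near a file
** boundary when mksqlite3c.tcl is run with --linemacros.  Compilers and
** debuggers then report positions in the original source file rather
** than in the amalgamation. */
#line 1 "tsrc/btree.c"
/* ... contents of btree.c follow ... */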
︙ | ︙ | |||
256 257 258 259 260 261 262 263 264 265 266 267 268 269 | vdbemem.c vdbeaux.c vdbeapi.c vdbetrace.c vdbe.c vdbeblob.c journal.c memjournal.c walker.c resolve.c expr.c alter.c | > | 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 | vdbemem.c vdbeaux.c vdbeapi.c vdbetrace.c vdbe.c vdbeblob.c vdbesort.c journal.c memjournal.c walker.c resolve.c expr.c alter.c |
︙ | ︙ |
Added tool/offsets.c.
/*
** This program searches an SQLite database file for the lengths and
** offsets for all TEXT or BLOB entries for a particular column of a
** particular table.  The rowid, size and offset for the column are
** written to standard output.  There are three arguments, which are the
** name of the database file, the table, and the column.
*/
#include "sqlite3.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>

typedef unsigned char u8;
typedef struct GState GState;

#define ArraySize(X)  (sizeof(X)/sizeof(X[0]))

/*
** Global state information for this program.
*/
struct GState {
  char *zErr;        /* Error message text */
  FILE *f;           /* Open database file */
  int szPg;          /* Page size for the database file */
  int iRoot;         /* Root page of the table */
  int iCol;          /* Column number for the column */
  int pgno;          /* Current page number */
  u8 *aPage;         /* Current page content */
  u8 *aStack[20];    /* Page stack */
  int aPgno[20];     /* Page number stack */
  int nStack;        /* Depth of stack */
  int bTrace;        /* True for tracing output */
};

/*
** Write an error.
*/
static void ofstError(GState *p, const char *zFormat, ...){
  va_list ap;
  sqlite3_free(p->zErr);
  va_start(ap, zFormat);
  p->zErr = sqlite3_vmprintf(zFormat, ap);
  va_end(ap);
}

/*
** Write a trace message
*/
static void ofstTrace(GState *p, const char *zFormat, ...){
  va_list ap;
  if( p->bTrace ){
    va_start(ap, zFormat);
    vprintf(zFormat, ap);
    va_end(ap);
  }
}

/*
** Find the root page of the table and the column number of the column.
*/
static void ofstRootAndColumn(
  GState *p,            /* Global state */
  const char *zFile,    /* Name of the database file */
  const char *zTable,   /* Name of the table */
  const char *zColumn   /* Name of the column */
){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;
  char *zSql = 0;
  int rc;
  if( p->zErr ) return;
  rc = sqlite3_open(zFile, &db);
  if( rc ){
    ofstError(p, "cannot open database file \"%s\"", zFile);
    goto rootAndColumn_exit;
  }
  zSql = sqlite3_mprintf("SELECT rootpage FROM sqlite_master WHERE name=%Q",
                         zTable);
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
  sqlite3_free(zSql);
  if( p->zErr ) goto rootAndColumn_exit;
  if( sqlite3_step(pStmt)!=SQLITE_ROW ){
    ofstError(p, "cannot find table [%s]\n", zTable);
    sqlite3_finalize(pStmt);
    goto rootAndColumn_exit;
  }
  p->iRoot = sqlite3_column_int(pStmt , 0);
  sqlite3_finalize(pStmt);

  p->iCol = -1;
  zSql = sqlite3_mprintf("PRAGMA table_info(%Q)", zTable);
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc ) ofstError(p, "%s: [%s}", sqlite3_errmsg(db), zSql);
  sqlite3_free(zSql);
  if( p->zErr ) goto rootAndColumn_exit;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    const char *zCol = sqlite3_column_text(pStmt, 1);
    if( strlen(zCol)==strlen(zColumn)
     && sqlite3_strnicmp(zCol, zColumn, strlen(zCol))==0
    ){
      p->iCol = sqlite3_column_int(pStmt, 0);
      break;
    }
  }
  sqlite3_finalize(pStmt);
  if( p->iCol<0 ){
    ofstError(p, "no such column: %s.%s", zTable, zColumn);
    goto rootAndColumn_exit;
  }

  zSql = sqlite3_mprintf("PRAGMA page_size");
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
  sqlite3_free(zSql);
  if( p->zErr ) goto rootAndColumn_exit;
  if( sqlite3_step(pStmt)!=SQLITE_ROW ){
    ofstError(p, "cannot find page size");
  }else{
    p->szPg = sqlite3_column_int(pStmt, 0);
  }
  sqlite3_finalize(pStmt);

rootAndColumn_exit:
  sqlite3_close(db);
  return;
}

/*
** Pop a page from the stack
*/
static void ofstPopPage(GState *p){
  if( p->nStack<=0 ) return;
  p->nStack--;
  sqlite3_free(p->aStack[p->nStack]);
  p->pgno = p->aPgno[p->nStack-1];
  p->aPage = p->aStack[p->nStack-1];
}

/*
** Push a new page onto the stack.
*/
static void ofstPushPage(GState *p, int pgno){
  u8 *pPage;
  size_t got;
  if( p->zErr ) return;
  if( p->nStack >= ArraySize(p->aStack) ){
    ofstError(p, "page stack overflow");
    return;
  }
  p->aPgno[p->nStack] = pgno;
  p->aStack[p->nStack] = pPage = sqlite3_malloc( p->szPg );
  if( pPage==0 ){
    fprintf(stderr, "out of memory\n");
    exit(1);
  }
  p->nStack++;
  p->aPage = pPage;
  p->pgno = pgno;
  fseek(p->f, (pgno-1)*p->szPg, SEEK_SET);
  got = fread(pPage, 1, p->szPg, p->f);
  if( got!=p->szPg ){
    ofstError(p, "unable to read page %d", pgno);
    ofstPopPage(p);
  }
}

/* Read a two-byte integer at the given offset into the current page */
static int ofst2byte(GState *p, int ofst){
  int x = p->aPage[ofst];
  return (x<<8) + p->aPage[ofst+1];
}

/* Read a four-byte integer at the given offset into the current page */
static int ofst4byte(GState *p, int ofst){
  int x = p->aPage[ofst];
  x = (x<<8) + p->aPage[ofst+1];
  x = (x<<8) + p->aPage[ofst+2];
  x = (x<<8) + p->aPage[ofst+3];
  return x;
}

/* Read a variable-length integer.  Update the offset */
static sqlite3_int64 ofstVarint(GState *p, int *pOfst){
  sqlite3_int64 x = 0;
  u8 *a = &p->aPage[*pOfst];
  int n = 0;
  while( n<8 && (a[0] & 0x80)!=0 ){
    x = (x<<7) + (a[0] & 0x7f);
    n++;
    a++;
  }
  if( n==8 ){
    x = (x<<8) + a[0];
  }else{
    x = (x<<7) + a[0];
  }
  *pOfst += (n+1);
  return x;
}

/* Return the absolute offset into a file for the given offset
** into the current page */
static int ofstInFile(GState *p, int ofst){
  return p->szPg*(p->pgno-1) + ofst;
}

/* Return the size (in bytes) of the data corresponding to the
** given serial code */
static int ofstSerialSize(int scode){
  if( scode<5 ) return scode;
  if( scode==5 ) return 6;
  if( scode<8 ) return 8;
  if( scode<12 ) return 0;
  return (scode-12)/2;
}

/* Forward reference */
static void ofstWalkPage(GState*, int);

/* Walk an interior btree page */
static void ofstWalkInteriorPage(GState *p){
  int nCell;
  int i;
  int ofst;
  int iChild;
  nCell = ofst2byte(p, 3);
  for(i=0; i<nCell; i++){
    ofst = ofst2byte(p, 12+i*2);
    iChild = ofst4byte(p, ofst);
    ofstWalkPage(p, iChild);
    if( p->zErr ) return;
  }
  ofstWalkPage(p, ofst4byte(p, 8));
}

/* Walk a leaf btree page */
static void ofstWalkLeafPage(GState *p){
  int nCell;
  int i;
  int ofst;
  int nPayload;
  sqlite3_int64 rowid;
  int nHdr;
  int j;
  int scode;
  int sz;
  int dataOfst;
  char zMsg[200];
  nCell = ofst2byte(p, 3);
  for(i=0; i<nCell; i++){
    ofst = ofst2byte(p, 8+i*2);
    nPayload = ofstVarint(p, &ofst);
    rowid = ofstVarint(p, &ofst);
    if( nPayload > p->szPg-35 ){
      sqlite3_snprintf(sizeof(zMsg), zMsg,
         "# overflow rowid %lld", rowid);
      printf("%s\n", zMsg);
      continue;
    }
    dataOfst = ofst;
    nHdr = ofstVarint(p, &ofst);
    dataOfst += nHdr;
    for(j=0; j<p->iCol; j++){
      scode = ofstVarint(p, &ofst);
      dataOfst += ofstSerialSize(scode);
    }
    scode = ofstVarint(p, &ofst);
    sz = ofstSerialSize(scode);
    sqlite3_snprintf(sizeof(zMsg), zMsg,
       "rowid %12lld size %5d offset %8d",
       rowid, sz, ofstInFile(p, dataOfst));
    printf("%s\n", zMsg);
  }
}

/*
** Output results from a single page.
*/
static void ofstWalkPage(GState *p, int pgno){
  if( p->zErr ) return;
  ofstPushPage(p, pgno);
  if( p->zErr ) return;
  if( p->aPage[0]==5 ){
    ofstWalkInteriorPage(p);
  }else if( p->aPage[0]==13 ){
    ofstWalkLeafPage(p);
  }else{
    ofstError(p, "page %d has a faulty type byte: %d", pgno, p->aPage[0]);
  }
  ofstPopPage(p);
}

int main(int argc, char **argv){
  GState g;
  memset(&g, 0, sizeof(g));
  if( argc>2 && strcmp(argv[1],"--trace")==0 ){
    g.bTrace = 1;
    argc--;
    argv++;
  }
  if( argc!=4 ){
    fprintf(stderr, "Usage: %s DATABASE TABLE COLUMN\n", *argv);
    exit(1);
  }
  ofstRootAndColumn(&g, argv[1], argv[2], argv[3]);
  if( g.zErr ){
    fprintf(stderr, "%s\n", g.zErr);
    exit(1);
  }
  ofstTrace(&g, "# szPg = %d\n", g.szPg);
  ofstTrace(&g, "# iRoot = %d\n", g.iRoot);
  ofstTrace(&g, "# iCol = %d\n", g.iCol);
  g.f = fopen(argv[1], "rb");
  if( g.f==0 ){
    fprintf(stderr, "cannot open \"%s\"\n", argv[1]);
    exit(1);
  }
  ofstWalkPage(&g, g.iRoot);
  if( g.zErr ){
    fprintf(stderr, "%s\n", g.zErr);
    exit(1);
  }
  return 0;
}
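offsets.c walks the b-tree pages directly so that the reported offsets can be fed to tool/extract.c. For comparison, reading the same column content through the supported incremental-blob API (not part of this checkin) looks roughly like the sketch below; print_blob_size is a made-up name and error handling is minimal.

/* Sketch for comparison with the raw-file walk above: open one row's
** BLOB/TEXT column through the incremental-blob interface and report
** its size. */
#include <stdio.h>
#include "sqlite3.h"

static int print_blob_size(sqlite3 *db, const char *zTab,
                           const char *zCol, sqlite3_int64 iRow){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", zTab, zCol, iRow, 0, &pBlob);
  if( rc==SQLITE_OK ){
    printf("rowid %lld size %d\n", iRow, sqlite3_blob_bytes(pBlob));
    rc = sqlite3_blob_close(pBlob);
  }
  return rc;
}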