#!/usr/bin/tclsh # # This script processes raw documentation source text into its final form # for display. The processing actions are described below. # # Invoke this command as follows: # # tclsh wrap.tcl $(DOC) $(SRC) $(DEST) source1.in source2.in ... # # The $(DOC) and $(SRC) values are the names of directories containing # the documentation source and program source. $(DEST) is the name of # of the directory where generated HTML is written. sourceN.in is the # input file to be processed. The output is sourceN.html in the # local directory. # # Changes made to the source files: # # * An appropriate header is prepended to the file. # # * Any ... in the input is moved into the prepended # header. # # * An appropriate footer is appended. # # * Scripts within ... are evaluated. Output that # is emitted from these scripts by "hd_puts" or "hd_resolve" # procedures appears in place of the original script. # # * Hyperlinks within [...] are resolved. # # A two-pass algorithm is used. The first pass collects the names of # hyperlink targets, requirements text, and other global information. # The second pass uses the data gathered on the first pass to generate # the final output. # set DOC [lindex $argv 0] set SRC [lindex $argv 1] set DEST [lindex $argv 2] set HOMEDIR [pwd] ;# Also remember our home directory. # This is the first-pass implementation of procedure that renders # hyperlinks. Do not even bother trying to do anything during the # first pass. We have to collect keyword information before the # hyperlinks are meaningful. # proc hd_resolve {text} { hd_puts $text } # This is the second-pass implementation of the procedure that # renders hyperlinks. Convert all hyperlinks in $text into # appropriate markup. # # Links to keywords within the same main file are resolved using # $::llink() if possible. All other links and links that could # not be resolved using $::llink() are resolved using $::glink(). # proc hd_resolve_2ndpass {text} { regsub -all {\[(.*?)\]} $text \ "\175; hd_resolve_one \173\\1\175; hd_puts \173" text eval "hd_puts \173$text\175" } proc hd_resolve_one {x} { if {[string is integer $x]} { hd_puts \[$x\] return } set x2 [split $x |] set kw [string trim [lindex $x2 0]] if {[llength $x2]==1} { set content $kw regsub {\([^)]*\)} $content {} kw regsub {=.*} $kw {} kw regsub -all {[^a-zA-Z0-9_.# -]} $kw {} kw } else { regsub -all {[^a-zA-Z0-9_.# -]} $kw {} kw set content [string trim [lindex $x2 1]] } global hd llink glink if {$hd(enable-main)} { set fn $hd(fn-main) if {[regexp {^[Tt]icket #(\d+)$} $kw all tktid]} { set url http://www.sqlite.org/cvstrac/tktview?tn=$tktid puts -nonewline $hd(main) \ "$content" } elseif {[info exists llink($fn:$kw)]} { puts -nonewline $hd(main) \ "$content" } elseif {[info exists glink($kw)]} { puts -nonewline $hd(main) \ "$content" } else { puts stderr "ERROR: unknown hyperlink target: $kw" puts -nonewline $hd(main) "$content" } } if {$hd(enable-aux)} { if {[regexp {^[Tt]icket #(\d+)$} $kw all tktid]} { set url http://www.sqlite.org/cvstrac/tktview?tn=$tktid puts -nonewline $hd(aux) \ "$content" } elseif {[info exists glink($kw)]} { puts -nonewline $hd(aux) \ "$content" } else { puts stderr "ERROR: unknown hyperlink target: $kw" puts -nonewline $hd(aux) "$content" } } } # Record the fact that all keywords given in the argument list should # cause a jump to the current location in the current file. # # If only the main output file is open, then all references to the # keyword jump to the main output file. If both main and aux are # open then references in the main file jump to the main file and all # other references jump to the auxiliary file. # # This procedure is only active during the first pass when we are # collecting hyperlink information. This procedure is redefined to # be a no-op before the start of the second pass. # proc hd_keywords {args} { global glink llink hd if {$hd(fragment)==""} { set lurl $hd(fn-main) } else { set lurl "#$hd(fragment)" } set fn $hd(fn-main) if {[info exists hd(aux)]} { set gurl $hd(fn-aux) } else { set gurl {} if {$hd(fragment)!=""} { set lurl $hd(fn-main)#$hd(fragment) } } foreach a $args { if {[info exists glink($a)]} { puts stderr "WARNING: duplicate keyword \"$a\"" } if {$gurl==""} { set glink($a) $lurl } else { set glink($a) $gurl set llink($fn:$a) $lurl } } } # Start a new fragment in the main file. Give the new fragment the # indicated name. Any keywords defined after this point will refer # to the fragment, not to the beginning of the file. # # Only the main file may have fragments. Auxiliary files are assumed # to be small enough that fragments are not helpful. # proc hd_fragment {name args} { global hd set hd(fragment) $name puts $hd(main) "" eval hd_keywords $args } # Write raw output to both the main file and the auxiliary. Only write # to files that are enabled. # proc hd_puts {text} { global hd if {$hd(enable-main)} { puts $hd(main) $text } if {$hd(enable-aux)} { puts $hd(aux) $text } } # Enable or disable the main output file. # proc hd_enable_main {boolean} { global hd set hd(enable-main) $boolean } # Enable or disable the auxiliary output file. # proc hd_enable_aux {boolean} { global hd set hd(enable-aux) $boolean } set hd(enable-aux) 0 # Open the main output file. $filename is relative to $::DEST. # proc hd_open_main {filename} { global hd DEST hd_close_main set hd(fn-main) $filename set hd(rootpath-main) [hd_rootpath $filename] set hd(main) [open $DEST/$filename w] set hd(enable-main) 1 set hd(fragment) {} } # If $filename is a path from $::DEST to a file, return a path # from the directory containing $filename back to the directory $::DEST. # proc hd_rootpath {filename} { set up {} set n [llength [split $filename /]] if {$n<=1} { return {} } else { return [string repeat ../ [expr {$n-1}]] } } # Close the main output file. # proc hd_close_main {} { global hd hd_close_aux if {[info exists hd(main)]} { puts $hd(main) $hd(footer) close $hd(main) unset hd(main) } } # Open the auxiliary output file. # # Most documents have only a main file and no auxiliary. However, some # large documents are broken up into smaller pieces where each smaller piece # is an auxiliary file. There will typically be either many auxiliary files # or no auxiliary files associated with each main file. # proc hd_open_aux {filename} { global hd DEST hd_close_aux set hd(fn-aux) $filename set hd(rootpath-aux) [hd_rootpath $filename] set hd(aux) [open $DEST/$filename w] set hd(enable-aux) 1 } # Close the auxiliary output file # proc hd_close_aux {} { global hd if {[info exists hd(aux)]} { puts $hd(aux) $hd(footer) close $hd(aux) unset hd(aux) set hd(enable-aux) 0 set hd(enable-main) 1 } } # hd_putsin4 is like puts except that it removes the first 4 indentation # characters from each line. It also does variable substitution in # the namespace of its calling procedure. # proc putsin4 {fd text} { regsub -all "\n " $text \n text puts $fd [uplevel 1 [list subst -noback -nocom $text]] } # A procedure to write the common header found on every HTML file on # the SQLite website. # proc hd_header {title {srcfile {}}} { global hd set saved_enable $hd(enable-main) if {$srcfile==""} { set fd $hd(aux) set path $hd(rootpath-aux) } else { set fd $hd(main) set path $hd(rootpath-main) } puts $fd {} puts $fd {} puts $fd "$title" putsin4 $fd { } puts $fd {} putsin4 $fd {
Small. Fast. Reliable.
Choose any three.
} if {[file exists DRAFT]} { putsin4 $fd {

*** DRAFT ***

} } if {$srcfile!=""} { if {[file exists DRAFT]} { set hd(footer) {

*** DRAFT ***

} } else { set hd(footer) {} } append hd(footer) "
\n" set mtime [file mtime $srcfile] set date [clock format $mtime -format {%Y/%m/%d %H:%M:%S UTC} -gmt 1] append hd(footer) "This page last modified $date\n" append hd(footer) "
" } else { set hd(enable-main) $saved_enable } } # The following proc is used to ensure consistent formatting in the # HTML generated by lang.tcl and pragma.tcl. # proc Syntax {args} { hd_puts {} foreach {rule body} $args { hd_puts "" regsub -all < $body {%LT} body regsub -all > $body {%GT} body regsub -all %LT $body {} body regsub -all %GT $body {} body regsub -all {[]|[*?]} $body {&} body regsub -all "\n" [string trim $body] "
\n" body regsub -all "\n *" $body "\n\\ \\ \\ \\ " body regsub -all {[|,.*()]} $body {&} body regsub -all { = } $body { = } body regsub -all {STAR} $body {*} body ## These metacharacters must be handled to undo being ## treated as SQL punctuation characters above. regsub -all {RPPLUS} $body {
)+} body regsub -all {LP} $body {(} body regsub -all {RP} $body {)} body ## Place the left-hand side of the rule in the 2nd table column. hd_puts "" } hd_puts {
" hd_puts "$rule ::=$body
} } # Record a requirement. This procedure is active only for the first # pass. This procedure becomes a no-op for the second pass. During # the second pass, requirements listing report generators can use the # data accumulated during the first pass to construct their reports. # # If the "verbatim" argument is true, then the requirement text is # rendered as is. In other words, the requirement text is assumed to # be valid HTML with all hyperlinks already resolved. If the "verbatim" # argument is false (the default) then the requirement text is rendered # using hd_render which will find an expand hyperlinks within the text. # proc hd_requirement {id text {verbatim 0}} { global ALLREQ ALLREQ_VERBATIM ALLREQ_DERIVEDFROM if {[info exists ALLREQ($id)]} { puts stderr "duplicate requirement label: $id" } set ALLREQ_DERIVEDFROM($id) {} while {[regexp {<([AHLS]\d\d\d\d\d)>} $text all df]} { regsub {<[AHLS]\d\d\d\d\d> *} $text {} text lappend ALLREQ_DERIVEDFROM($id) $df } set ALLREQ($id) $text set ALLREQ_VERBATIM($id) $verbatim } # First pass. Process all files. But do not render hyperlinks. # Merely collect keyword information so that hyperlinks can be # correctly rendered on the second pass. # foreach infile [lrange $argv 3 end] { cd $HOMEDIR puts "Processing $infile" set fd [open $infile r] set in [read $fd] close $fd set title {No Title} regexp {([^\n]*)} $in all title regsub {[^\n]*} $in {} in set outfile [file root [file tail $infile]].html hd_open_main $outfile hd_header $title $infile regsub -all {} $in "\175; eval \173" in regsub -all {} $in "\175; hd_puts \173" in eval "hd_puts \173$in\175" cd $::HOMEDIR hd_close_main } # Second pass. Process all files again. This time render hyperlinks # according to the keyword information collected on the first pass. # proc hd_keywords {args} {} rename hd_resolve {} rename hd_resolve_2ndpass hd_resolve proc hd_requirement {args} {} foreach infile [lrange $argv 3 end] { cd $HOMEDIR puts "Processing $infile" set fd [open $infile r] set in [read $fd] close $fd set title {No Title} regexp {([^\n]*)} $in all title regsub {[^\n]*} $in {} in set outfile [file root [file tail $infile]].html hd_open_main $outfile hd_header $title $infile regsub -all {} $in "\175; eval \173" in regsub -all {} $in "\175; hd_resolve \173" in eval "hd_resolve \173$in\175" cd $::HOMEDIR hd_close_main } # Generate a document showing the hyperlink keywords and their # targets. # hd_open_main doc_keyword_crossref.html hd_header {Hyperlink Crossreference} $DOC/wrap.tcl hd_puts "
" hd_close_main