Documentation Source Text

Check-in [273baf47db]

Overview
Comment: Updates to support the requirements derivation matrix.
SHA1: 273baf47dbfc0a861407dea71af0bd601df4417b
User & Date: drh 2008-07-22 18:46:20.000
Context
2008-07-22
18:46  Added the requirements derivation matrix report. (check-in: fd2d3b3be4 user: drh tags: trunk)
18:46  Updates to support the requirements derivation matrix. (check-in: 273baf47db user: drh tags: trunk)
17:40  Change the requirement numbers used in fileformat.in and fileio.in to start with "H2". (check-in: c1bf78dd4f user: dan tags: trunk)
Changes
Changes to pages/capi3ref.in.
@@ -17,19 +17,20 @@
 set rowbody {}     ;# Content of a row
 set rowtag {}      ;#
 unset -nocomplain keyword
 
 # End a table row or the complete table.
 #
 proc endrow {} {
-  global inrow body rowbody rowtag
+  global inrow body rowbody rowtag keyword
   if {$inrow} {
     set rowbody [string trim $rowbody]
     append body $rowbody</td></tr>\n
     hd_requirement $rowtag $rowbody
+    set keyword($rowtag) 1
     set inrow 0
     set rowbody {}
     set rowtag {}
   }
 }
 proc endtab {} {
   global intab body
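
The effect of the hunk above: endrow now records the requirement tag of every
table row in the keyword array, so row-level requirements are folded into the
keyword list of the enclosing interface. A minimal sketch of the resulting data
flow, using only names that appear in the diff (the tag value is invented for
illustration):

    set rowtag H30100                        ;# hypothetical tag parsed from the current row
    hd_requirement $rowtag $rowbody          ;# record the requirement text
    set keyword($rowtag) 1                   ;# new: remember the tag as a keyword
    # Later, when the interface definition ends (see the second hunk, below):
    set kwlist [lsort [array names keyword]] ;# H30100 now appears in kwlist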
@@ -130,31 +131,38 @@
     }
   } elseif {$phase==3} {
     # Reading in an interface definition.  Stop reading at the first blank
     # line.
     #
     regsub {^SQLITE_API } $line {} line
     if {$line==""} {
+      set reqtag {}
+      set reqdf {}
+      if {[regexp {\{([AHLS]\d\d\d\d\d)\}} $title all reqtag]} {
+        regsub { *\{[AHLS]\d\d\d\d\d\}} $title {} title
+        while {[regexp {<([AHLS]\d\d\d\d\d)>} $title all df]} {
+          append reqdf <$df>
+          regsub { *<[AHLS]\d\d\d\d\d>} $title {} title
+        }
+        set keyword($reqtag) 1
+      }
       set kwlist [lsort [array names keyword]]
       unset -nocomplain keyword
       set key $type:$kwlist
-      set reqtag {}
-      regexp {\{(.*)\}} $title all reqtag
-      regsub { *\{[\w.]+\}} $title {} title
       regsub -all { *\{[\w.]+\}} $body {} body
       set body [string map \
           {<todo> {<font color="red">(TODO: } </todo> )</font>} $body]
       set code [string map {& &amp; < &lt; > &gt;} $code]
       lappend content [list $key $title $type $kwlist $body $code]
       if {$reqtag!=""} {
         set reqbody "The sqlite3.h header file shall define the\n"
         append reqbody "the following interfaces:\n<blockquote><pre>\n"
         append reqbody $code
         append reqbody "\n</pre></blockquote>"
-        hd_requirement $reqtag $reqbody 1
+        hd_requirement $reqtag $reqbody$reqdf 1
       }
       set title {}
       set keywords {}
       set type {}
       set body {}
       set code {}
       set phase 0
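
The inserted block teaches the parser the requirement-tag syntax used by the
derivation matrix: a {Xnnnnn} tag names the requirement defined by an
interface, and any number of <Xnnnnn> markers name the requirements it derives
from, where X is one of A, H, L, or S. A hedged illustration of how the new
regexps act on a title line (the tag values are invented):

    set title {Test Interface {H10010} <S60100>}
    regexp {\{([AHLS]\d\d\d\d\d)\}} $title all reqtag   ;# reqtag = H10010
    regsub { *\{[AHLS]\d\d\d\d\d\}} $title {} title
    regexp {<([AHLS]\d\d\d\d\d)>} $title all df         ;# df = S60100
    regsub { *<[AHLS]\d\d\d\d\d>} $title {} title
    # title is now "Test Interface"; reqdf accumulates "<S60100>" and is
    # appended to the requirement body passed to hd_requirement.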
Changes to pages/sysreq.in.
@@ -1,14 +1,19 @@
 <title>SQLite System Requirements</title>
 <tcl>
 unset -nocomplain sysreq_list
 proc sysreq {id derivedfrom explaination text} {
   global sysreq_list
   lappend sysreq_list $id $derivedfrom $text
   hd_fragment $id $id
+  set dlist {}
+  foreach d $derivedfrom {
+    append dlist <$d>
+  }
+  hd_requirement $id $text$dlist
   if {[string length $explaination]} {
     hd_resolve "<p>$explaination</p>"
   }
   hd_puts "<blockquote><b>$id:</b>"
   hd_resolve $text
   hd_puts {</b></blockquote}
 }
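
With this change, sysreq registers each system requirement with hd_requirement
as well, encoding its derived-from list as <Xnnnnn> markers appended to the
requirement text. A sketch of a call as the new code would process it (the
requirement IDs and text are invented for illustration):

    sysreq S10100 {H10010 H10020} {} {
      SQLite shall respond gracefully to out-of-memory conditions.
    }
    # The foreach loop builds dlist = "<H10010><H10020>", so hd_requirement
    # receives the requirement text with the derived-from markers appended;
    # hd_requirement in wrap.tcl then strips them back out (last hunk below).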
Changes to wrap.tcl.
@@ -1,32 +1,41 @@
 #!/usr/bin/tclsh
 #
-# This script processes raw page text into its final form for display.
+# This script processes raw documentation source text into its final form
+# for display.  The processing actions are described below.
+#
 # Invoke this command as follows:
 #
 #       tclsh wrap.tcl $(DOC) $(SRC) $(DEST) source1.in source2.in ...
 #
 # The $(DOC) and $(SRC) values are the names of directories containing
 # the documentation source and program source.  $(DEST) is the name of
 # of the directory where generated HTML is written.  sourceN.in is the
 # input file to be processed.  The output is sourceN.html in the
 # local directory.
 #
 # Changes made to the source files:
 #
 #     *  An appropriate header is prepended to the file.
+#
 #     *  Any <title>...</title> in the input is moved into the prepended
 #        header.
+#
 #     *  An appropriate footer is appended.
+#
 #     *  Scripts within <tcl>...</tcl> are evaluated.  Output that
-#        is emitted from these scripts by "puts" appears in place of
-#        the original script.
+#        is emitted from these scripts by "hd_puts" or "hd_resolve"
+#        procedures appears in place of the original script.
+#
 #     *  Hyperlinks within [...] are resolved.
 #
-#
+# A two-pass algorithm is used.  The first pass collects the names of
+# hyperlink targets, requirements text, and other global information.
+# The second pass uses the data gathered on the first pass to generate
+# the final output.
 #
 set DOC [lindex $argv 0]
 set SRC [lindex $argv 1]
 set DEST [lindex $argv 2]
 set HOMEDIR [pwd]            ;# Also remember our home directory.
 
 # This is the first-pass implementation of procedure that renders
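
The rewritten header comment spells out the two-pass design. For reference, an
invocation in the documented form (directory and file names are illustrative
only):

    tclsh wrap.tcl ./doc ./sqlite ./doc/html capi3ref.in sysreq.in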
@@ -98,15 +107,15 @@
       puts -nonewline $hd(aux) "<font color=\"red\">$content</font>"
     }
   }
 }
 
 
 
-# Record the fact that the keywords given in the argument list should
+# Record the fact that all keywords given in the argument list should
 # cause a jump to the current location in the current file.
 #
 # If only the main output file is open, then all references to the
 # keyword jump to the main output file.  If both main and aux are
 # open then references in the main file jump to the main file and all
 # other references jump to the auxiliary file.
 #
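
This comment documents the keyword-to-jump-target mechanism; the procedure it
introduces sits below the hunk and is not shown here. Elsewhere in this
check-in, sysreq.in calls hd_fragment $id $id to make a requirement ID a jump
target at the current output location, so a hedged illustration of the
main/aux behavior described above might look like:

    hd_open_aux part2.html
    hd_fragment S10100 S10100  ;# hypothetical: references to S10100 from other
                               ;# files now jump to part2.html, while references
                               ;# within the main file jump to the main file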
@@ -221,15 +230,15 @@
     unset hd(main)
   }
 }
 
 # Open the auxiliary output file.
 #
 # Most documents have only a main file and no auxiliary.  However, some
-# large documents are broken up into smaller pieces were each smaller piece
+# large documents are broken up into smaller pieces where each smaller piece
 # is an auxiliary file.  There will typically be either many auxiliary files
 # or no auxiliary files associated with each main file.
 #
 proc hd_open_aux {filename} {
   global hd DEST
   hd_close_aux
   set hd(fn-aux) $filename
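
As the corrected comment notes, a large document is emitted as many auxiliary
pieces. A minimal sketch of the pattern using the procedures named in this
hunk (the file name and content are invented):

    hd_open_aux fileformat_part1.html   ;# output now goes to the auxiliary file
    hd_puts "<h2>Part 1</h2>"           ;# hypothetical auxiliary content
    hd_close_aux                        ;# close part1; note hd_open_aux itself
                                        ;# calls hd_close_aux, so sequential
                                        ;# opens close the previous piece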
@@ -399,19 +408,30 @@
 }
 
 # Record a requirement.  This procedure is active only for the first
 # pass.  This procedure becomes a no-op for the second pass.  During
 # the second pass, requirements listing report generators can use the
 # data accumulated during the first pass to construct their reports.
 #
+# If the "verbatim" argument is true, then the requirement text is
+# rendered as is.  In other words, the requirement text is assumed to
+# be valid HTML with all hyperlinks already resolved.  If the "verbatim"
+# argument is false (the default) then the requirement text is rendered
+# using hd_render which will find an expand hyperlinks within the text.
+#
 proc hd_requirement {id text {verbatim 0}} {
-  global ALLREQ ALLREQ_VERBATIM
+  global ALLREQ ALLREQ_VERBATIM ALLREQ_DERIVEDFROM
   if {[info exists ALLREQ($id)]} {
     puts stderr "duplicate requirement label: $id"
   }
+  set ALLREQ_DERIVEDFROM($id) {}
+  while {[regexp {<([AHLS]\d\d\d\d\d)>} $text all df]} {
+    regsub {<[AHLS]\d\d\d\d\d> *} $text {} text
+    lappend ALLREQ_DERIVEDFROM($id) $df
+  }
   set ALLREQ($id) $text
   set ALLREQ_VERBATIM($id) $verbatim
 }
 
 
 # First pass.  Process all files.  But do not render hyperlinks.
 # Merely collect keyword information so that hyperlinks can be
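
The new loop strips trailing <Xnnnnn> markers off the requirement text and
accumulates them in ALLREQ_DERIVEDFROM, which the derivation-matrix report
generator can consult during the second pass. A sketch of the effect (the IDs
and text are invented for illustration):

    hd_requirement H30100 {The interface shall do X. <S40100> <S40200>}
    # After the while loop:
    #   ALLREQ(H30100)             = "The interface shall do X. "
    #   ALLREQ_DERIVEDFROM(H30100) = {S40100 S40200}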