diff options
author | Chris Luke <chrisy@flirble.org> | 2016-07-25 16:38:11 -0400 |
---|---|---|
committer | Dave Barach <openvpp@barachs.net> | 2016-08-31 12:56:56 +0000 |
commit | 54ccf2261cb1f4afd966b7b1e92689183cb17836 (patch) | |
tree | 3f1aee6c70c4c5983828d1f53a2c46dd9bb28f8c /doxygen | |
parent | d85590a00421a73f019a91c6c3cdd05b6b73f414 (diff) |
VPP-221 CLI auto-documentation infrastructure
As a step before Doxygen, extract CLI-related struct initializers
from the code and parse that into a summary of the CLI commands
available with the provided help text, such as it is. At the moment
this only renders this into an indexed Markdown file that Doxygen
then picks up but later we can use this information to enrich the
existing VLIB_CLI_COMMAND macro documentor as well as provide
runtime documentation to VPP that is stored on disk outside the
binary image.
Additionally support a comment block immediately prior to
VLIB_CLI_COMMAND CLI command definitions in the form /*? ... ?*/
that can be used to include long-form documentation without having
it compiled into VPP.
Examples of documenting CLI commands can be found in
vlib/vlib/unix/cli.c which, whilst not perfect, should provide a
starting point. Screen captures of sample output can be seen at
https://chrisy.flirble.org/vpp/doxy-cli-example.png and
https://chrisy.flirble.org/vpp/doxy-cli-index.png .
Next, shift the Doxygen root makefile targets to their own Makefile.
The primary reason for this is that the siphon targets do dependency
tracking which means it needs to generate those dependencies whenever
make is run; that is pointless if we're not going to generate any
documentation. This includes the package dependencies since they
sometimes unnecessarily interfere with the code build in some cases
at the moment; later we will look to building a Python venv to host the
Python modules we use.
One final remark: In future we may consider deprecating .long_help
in the VLIB_CLI_COMMAND structure entirely, perhaps adding .usage_help instead.
.short_help would be reserved for a summary of the command function
and .usage_help would provide the syntax of that command. These changes would
provide great semantic value to the automatically generated CLI
documentation. I could also see having .long_help replaced by a
mechanism that reads it from disk at runtime with a rudimentary
Markdown/Doxygen filter so that we can use the same text that is used in
the published documentation.
Change-Id: I80d6fe349b47dce649fa77d21ffec0ddb45c7bbf
Signed-off-by: Chris Luke <chrisy@flirble.org>
Diffstat (limited to 'doxygen')
-rw-r--r-- | doxygen/Makefile | 122 | ||||
-rw-r--r-- | doxygen/dir.dox.sample | 3 | ||||
-rw-r--r-- | doxygen/doxygen.cfg | 13 | ||||
-rwxr-xr-x | doxygen/filter_c.py | 24 | ||||
-rwxr-xr-x | doxygen/siphon_generate.py | 313 | ||||
-rwxr-xr-x | doxygen/siphon_process.py | 323 |
6 files changed, 793 insertions, 5 deletions
diff --git a/doxygen/Makefile b/doxygen/Makefile new file mode 100644 index 00000000000..7bdc8ee9736 --- /dev/null +++ b/doxygen/Makefile @@ -0,0 +1,122 @@ +# Copyright (c) 2016 Comcast Cable Communications Management, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Build the documentation +# + +# These should be passed in by the root Makefile +WS_ROOT ?= $(CURDIR)/.. +BR ?= $(WS_ROOT)/build-root + +# Package dependencies +DOC_DEB_DEPENDS = doxygen graphviz python-pyparsing +DOC_RPM_DEPENDS = doxygen graphviz pyparsing + +# Doxygen configuration and our utility scripts +DOXY_DIR ?= $(WS_ROOT)/doxygen + +# Input directories and files +DOXY_INPUT ?= \ + README.md \ + vppinfra \ + svm \ + vlib \ + vlib-api \ + vnet \ + vpp \ + vpp-api \ + plugins + +# Target directory for doxygen output +DOXY_OUTPUT ?= $(BR)/docs + +# Siphoned fragments end up in here +SIPHON_INPUT ?= $(DOXY_OUTPUT)/siphon_fragments + +# Siphoned fragements are processed into here +SIPHON_OUTPUT ?= $(DOXY_OUTPUT)/siphon_docs + +# Extra document inputs that are processed in addition to DOXY_INPUT +EXTRA_DOXY_INPUT += $(SIPHON_OUTPUT) + +# All the siphon types we know about +SIPHONS ?= clicmd + +SIPHON_FILES = $(addprefix $(SIPHON_INPUT)/,$(addsuffix .siphon,$(SIPHONS))) +SIPHON_DOCS = $(addprefix $(SIPHON_OUTPUT)/,$(addsuffix .md,$(SIPHONS))) + +$(BR)/.doxygen-bootstrap.ok: +ifeq ($(OS_ID),ubuntu) + @sudo -E apt-get $(CONFIRM) $(FORCE) install $(DOC_DEB_DEPENDS) +else ifneq ("$(wildcard 
/etc/redhat-release)","") + @sudo yum install $(CONFIRM) $(DOC_RPM_DEPENDS) +else + $(error "This option currently works only on Ubuntu or Centos systems") +endif + @touch $@ + +.PHONY: bootstrap-doxygen +bootstrap-doxygen: $(BR)/.doxygen-bootstrap.ok + +.DELETE_ON_ERROR: $(BR)/.doxygen-siphon.dep +$(BR)/.doxygen-siphon.dep: Makefile + set -e; rm -f "$@"; for input in $(DOXY_INPUT); do \ + find "$(WS_ROOT)/$$input" -type f \ + \( -name '*.[ch]' -or -name '*.dox' \) \ + -print | sed -e "s/^/\$$(SIPHON_FILES): /" >> $@; \ + done + +# Include the source -> siphon dependencies +-include $(BR)/.doxygen-siphon.dep + +.NOTPARALLEL: $(SIPHON_FILES) +$(SIPHON_FILES): $(DOXY_DIR)/siphon_generate.py $(BR)/.doxygen-bootstrap.ok + @rm -rf "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)" + @mkdir -p "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)" + set -e; for input in $(DOXY_INPUT); do \ + cd "$(WS_ROOT)"; \ + find "$$input" -type f \ + \( -name '*.[ch]' -or -name '*.dox' \) \ + -print >> $(SIPHON_INPUT)/files; \ + done + set -e; cd "$(WS_ROOT)"; $(DOXY_DIR)/siphon_generate.py \ + --output="$(SIPHON_INPUT)" \ + "@$(SIPHON_INPUT)/files" + + +.DELETE_ON_ERROR: $(SIPHON_DOCS) +$(SIPHON_OUTPUT)/%.md: $(SIPHON_INPUT)/%.siphon $(DOXY_DIR)/siphon_process.py + set -e; cd "$(WS_ROOT)"; \ + $(DOXY_DIR)/siphon_process.py --type=$(basename $(notdir $<)) \ + --output="$(SIPHON_OUTPUT)" $< > $@ + +# This target can be used just to generate the siphoned docs +.PHONY: doxygen-siphon +doxygen-siphon: $(SIPHON_DOCS) + +# Generate the doxygen docs +doxygen: $(SIPHON_DOCS) + @mkdir -p "$(DOXY_OUTPUT)" + set -e; cd "$(WS_ROOT)"; \ + ROOT="$(WS_ROOT)" \ + BUILD_ROOT="$(BR)" \ + INPUT="$(addprefix $(WS_ROOT)/,$(DOXY_INPUT)) $(EXTRA_DOXY_INPUT)" \ + HTML=YES \ + VERSION="`git describe --tags --dirty`" \ + doxygen $(DOXY_DIR)/doxygen.cfg + +wipe-doxygen: + rm -rf "$(BR)/docs" "$(BR)/.doxygen-siphon.d" + diff --git a/doxygen/dir.dox.sample b/doxygen/dir.dox.sample index 41e84d7bee4..500fe595ae5 100644 --- 
a/doxygen/dir.dox.sample +++ b/doxygen/dir.dox.sample @@ -18,7 +18,7 @@ /** @dir -@brief Someone please fix this description +@brief Someone please fix this description. @todo This directory needs a description. This is where you would document the contents of a directory. @@ -26,3 +26,4 @@ This is where you would document the contents of a directory. This looks like a C file but it is not part of the build; it is purely for documentation. */ +/*? %%clicmd:group_label CLI section description%% ?*/ diff --git a/doxygen/doxygen.cfg b/doxygen/doxygen.cfg index 0eadbd7397c..82687cac42c 100644 --- a/doxygen/doxygen.cfg +++ b/doxygen/doxygen.cfg @@ -229,8 +229,21 @@ TAB_SIZE = 8 # newlines. ALIASES = + +## Indexes VPP graph nodes ALIASES += "node=@xrefitem nodes \"Node Identifier\" \"Node Identifiers\" @c " +## Formatting for CLI commands and output +ALIASES += "clistart=<code><pre>" +ALIASES += "cliend=</pre></code>" + +## Formatting for CLI example paragraphs +ALIASES += "cliexpar=@par Example usage" +ALIASES += "cliexcmd{1}=@clistart<b>vpp# <em>\1</em></b>@cliend" +ALIASES += "cliexstart{1}=@cliexcmd{\1}@clistart" +ALIASES += "cliexend=@cliend" + + # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. 
diff --git a/doxygen/filter_c.py b/doxygen/filter_c.py index db1681c9450..733fdefb3d4 100755 --- a/doxygen/filter_c.py +++ b/doxygen/filter_c.py @@ -15,13 +15,13 @@ # Filter for .c files to make various preprocessor tricks Doxygenish -import sys, re +import os, sys, re if len(sys.argv) < 2: sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0])) sys.exit(1) -patterns = [ +replace_patterns = [ # Search for VLIB_CLI_COMMAND, extract its parameter and add a docblock for it ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>"), @@ -44,9 +44,25 @@ patterns = [ ( re.compile("(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))") , r"\g<pre>\g<name>()\g<post>" ), ] -with open(sys.argv[1]) as fd: + +filename = sys.argv[1] +cwd = os.getcwd() +if filename[0:len(cwd)] == cwd: + filename = filename[len(cwd):] + if filename[0] == "/": + filename = filename[1:] + +with open(filename) as fd: + line_num = 0 + for line in fd: + line_num += 1 str = line[:-1] # filter \n - for p in patterns: + + # Look for search/replace patterns + for p in replace_patterns: str = p[0].sub(p[1], str) + sys.stdout.write(str+"\n") + +# All done diff --git a/doxygen/siphon_generate.py b/doxygen/siphon_generate.py new file mode 100755 index 00000000000..457757b510c --- /dev/null +++ b/doxygen/siphon_generate.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python +# Copyright (c) 2016 Comcast Cable Communications Management, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Looks for preprocessor macros with struct initializers and siphons them +# off into another file for later parsing; ostensibly to generate +# documentation from struct initializer data. + +import os, sys, re, argparse, json + +DEFAULT_OUTPUT = "build-root/docs/siphons" +DEFAULT_PREFIX = os.getcwd() + +ap = argparse.ArgumentParser() +ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT, + help="Output directory for .siphon files [%s]" % DEFAULT_OUTPUT) +ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX, + help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX) +ap.add_argument("input", nargs='+', metavar="input_file", + help="Input C source files") +args = ap.parse_args() + +"""Patterns that match the start of code blocks we want to siphon""" +siphon_patterns = [ + ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), "clicmd" ), +] + +"""Matches a siphon comment block start""" +siphon_block_start = re.compile("^\s*/\*\?\s*(.*)$") + +"""Matches a siphon comment block stop""" +siphon_block_stop = re.compile("^(.*)\s*\?\*/\s*$") + +"""Siphon block directive delimiter""" +siphon_block_delimiter = "%%" + +"""Matches a siphon block directive such as '%clicmd:group_label Debug CLI%'""" +siphon_block_directive = re.compile("(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)" % \ + (siphon_block_delimiter, siphon_block_delimiter)) + +"""Matches the start of an initializer block""" +siphon_initializer = re.compile("\s*=") + +""" +count open and close braces in str +return (0, index) when braces were found and count becomes 0. +index indicates the position at which the last closing brace was +found. +return (-1, -1) if a closing brace is found before any opening one. 
+return (count, -1) if not all opening braces are closed, count is the +current depth +""" +def count_braces(str, count=0, found=False): + for index in range(0, len(str)): + if str[index] == '{': + count += 1; + found = True + elif str[index] == '}': + if count == 0: + # means we never found an open brace + return (-1, -1) + count -= 1; + + if count == 0 and found: + return (count, index) + + return (count, -1) + +# Collated output for each siphon +output = {} + +# Pre-process file names in case they indicate a file with +# a list of files +files = [] +for filename in args.input: + if filename.startswith('@'): + with open(filename[1:], 'r') as fp: + lines = fp.readlines() + for line in lines: + files.append(line.strip()) + lines = None + else: + files.append(filename) + +# Iterate all the input files we've been given +for filename in files: + # Strip the current directory off the start of the + # filename for brevity + if filename[0:len(args.input_prefix)] == args.input_prefix: + filename = filename[len(args.input_prefix):] + if filename[0] == "/": + filename = filename[1:] + + # Work out the abbreviated directory name + directory = os.path.dirname(filename) + if directory[0:2] == "./": + directory = directory[2:] + elif directory[0:len(args.input_prefix)] == args.input_prefix: + directory = directory[len(args.input_prefix):] + if directory[0] == "/": + directory = directory[1:] + + # Open the file and explore its contents... 
+ sys.stderr.write("Siphoning from %s...\n" % filename) + directives = {} + with open(filename) as fd: + siphon = None + close_siphon = None + siphon_block = "" + in_block = False + line_num = 0 + siphon_line = 0 + + for line in fd: + line_num += 1 + str = line[:-1] # filter \n + + """See if there is a block directive and if so extract it""" + def process_block_directive(str, directives): + m = siphon_block_directive.search(str) + if m is not None: + k = m.group(2) + v = m.group(3).strip() + directives[k] = v + # Return only the parts we did not match + return str[0:m.start(1)] + str[m.end(4):] + + return str + + def process_block_prefix(str): + if str.startswith(" * "): + str = str[3:] + elif str == " *": + str = "" + return str + + if not in_block: + # See if the line contains the start of a siphon doc block + m = siphon_block_start.search(str) + if m is not None: + in_block = True + t = m.group(1) + + # Now check if the block closes on the same line + m = siphon_block_stop.search(t) + if m is not None: + t = m.group(1) + in_block = False + + # Check for directives + t = process_block_directive(t, directives) + + # Filter for normal comment prefixes + t = process_block_prefix(t) + + # Add what is left + siphon_block += t + + # Skip to next line + continue + + else: + # Check to see if we have an end block marker + m = siphon_block_stop.search(str) + if m is not None: + in_block = False + t = m.group(1) + else: + t = str + + # Check for directives + t = process_block_directive(t, directives) + + # Filter for normal comment prefixes + t = process_block_prefix(t) + + # Add what is left + siphon_block += t + "\n" + + # Skip to next line + continue + + + if siphon is None: + # Look for blocks we need to siphon + for p in siphon_patterns: + if p[0].match(str): + siphon = [ p[1], str + "\n", 0 ] + siphon_line = line_num + + # see if we have an initializer + m = siphon_initializer.search(str) + if m is not None: + # count the braces on this line + (count, index) = 
count_braces(str[m.start():]) + siphon[2] = count + # TODO - it's possible we have the initializer all on the first line + # we should check for it, but also account for the possibility that + # the open brace is on the next line + #if count == 0: + # # braces balanced + # close_siphon = siphon + # siphon = None + else: + # no initializer: close the siphon right now + close_siphon = siphon + siphon = None + else: + # See if we should end the siphon here - do we have balanced + # braces? + (count, index) = count_braces(str, count=siphon[2], found=True) + if count == 0: + # braces balanced - add the substring and close the siphon + siphon[1] += str[:index+1] + ";\n" + close_siphon = siphon + siphon = None + else: + # add the whole string, move on + siphon[2] = count + siphon[1] += str + "\n" + + if close_siphon is not None: + # Write the siphoned contents to the right place + siphon_name = close_siphon[0] + if siphon_name not in output: + output[siphon_name] = { + "global": {}, + "items": [], + "file": "%s/%s.siphon" % (args.output, close_siphon[0]) + } + + # Copy directives for the file + details = {} + for key in directives: + if ":" in key: + (sn, label) = key.split(":") + if sn == siphon_name: + details[label] = directives[key] + else: + details[key] = directives[key] + + # Copy details for this block + details['file'] = filename + details['line_start'] = siphon_line + details['line_end'] = line_num + details['siphon_block'] = siphon_block.strip() + + # Some defaults + if "group" not in details: + if "group_label" in details: + # use the filename since group labels are mostly of file scope + details['group'] = details['file'] + else: + details['group'] = directory + + if "group_label" not in details: + details['group_label'] = details['group'] + + details["block"] = close_siphon[1] + + # Store the item + output[siphon_name]['items'].append(details) + + # All done + close_siphon = None + siphon_block = "" + + # Update globals + for key in directives.keys(): + if 
':' not in key: + continue + + if filename.endswith("/dir.dox"): + # very special! use the parent directory name + l = directory + else: + l = filename + + (sn, label) = key.split(":") + + if sn not in output: + output[sn] = {} + if 'global' not in output[sn]: + output[sn]['global'] = {} + if l not in output[sn]['global']: + output[sn]['global'][l] = {} + if 'file' not in output[sn]: + output[sn]['file'] = "%s/%s.siphon" % (args.output, sn) + if 'items' not in output[sn]: + output[sn]['items'] = [] + + output[sn]['global'][l][label] = directives[key] + + +# Write out the data +for siphon in output.keys(): + sys.stderr.write("Saving siphon %s...\n" % siphon) + s = output[siphon] + with open(s['file'], "a") as fp: + json.dump(s, fp, separators=(',', ': '), indent=4, sort_keys=True) + +# All done diff --git a/doxygen/siphon_process.py b/doxygen/siphon_process.py new file mode 100755 index 00000000000..80add4b9a44 --- /dev/null +++ b/doxygen/siphon_process.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python +# Copyright (c) 2016 Comcast Cable Communications Management, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Filter for .siphon files that are generated by other filters. +# The idea is to siphon off certain initializers so that we can better +# auto-document the contents of that initializer. 
+ +import os, sys, re, argparse, cgi, json +import pyparsing as pp + +import pprint + +DEFAULT_SIPHON ="clicmd" +DEFAULT_OUTPUT = None +DEFAULT_PREFIX = os.getcwd() + +siphon_map = { + 'clicmd': "VLIB_CLI_COMMAND", +} + +ap = argparse.ArgumentParser() +ap.add_argument("--type", '-t', metavar="siphon_type", default=DEFAULT_SIPHON, + choices=siphon_map.keys(), + help="Siphon type to process [%s]" % DEFAULT_SIPHON) +ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT, + help="Output directory for .md files [%s]" % DEFAULT_OUTPUT) +ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX, + help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX) +ap.add_argument("input", nargs='+', metavar="input_file", + help="Input .siphon files") +args = ap.parse_args() + +if args.output is None: + sys.stderr.write("Error: Siphon processor requires --output to be set.") + sys.exit(1) + + +def clicmd_index_sort(cfg, group, dec): + if group in dec and 'group_label' in dec[group]: + return dec[group]['group_label'] + return group + +def clicmd_index_header(cfg): + s = "# CLI command index\n" + s += "\n[TOC]\n" + return s + +def clicmd_index_section(cfg, group, md): + return "\n@subpage %s\n\n" % md + +def clicmd_index_entry(cfg, meta, item): + v = item["value"] + return "* [%s](@ref %s)\n" % (v["path"], meta["label"]) + +def clicmd_sort(cfg, meta, item): + return item['value']['path'] + +def clicmd_header(cfg, group, md, dec): + if group in dec and 'group_label' in dec[group]: + label = dec[group]['group_label'] + else: + label = group + return "\n@page %s %s\n" % (md, label) + +def clicmd_format(cfg, meta, item): + v = item["value"] + s = "\n@section %s %s\n" % (meta['label'], v['path']) + + # The text from '.short_help = '. + # Later we should split this into short_help and usage_help + # since the latter is how it is primarily used but the former + # is also needed. 
+ if "short_help" in v: + tmp = v["short_help"].strip() + + # Bit hacky. Add a trailing period if it doesn't have one. + if tmp[-1] != ".": + tmp += "." + + s += "### Summary/usage\n %s\n\n" % tmp + + # This is seldom used and will likely be deprecated + if "long_help" in v: + tmp = v["long_help"] + + s += "### Long help\n %s\n\n" % tmp + + # Extracted from the code in /*? ... ?*/ blocks + if "siphon_block" in item["meta"]: + sb = item["meta"]["siphon_block"] + + if sb != "": + # hack. still needed? + sb = sb.replace("\n", "\\n") + try: + sb = json.loads('"'+sb+'"') + s += "### Description\n%s\n\n" % sb + except: + pass + + # Gives some developer-useful linking + if "item" in meta or "function" in v: + s += "### Declaration and implementation\n\n" + + if "item" in meta: + s += "Declaration: @ref %s (%s:%d)\n\n" % \ + (meta['item'], meta["file"], int(item["meta"]["line_start"])) + + if "function" in v: + s += "Implementation: @ref %s.\n\n" % v["function"] + + return s + + +siphons = { + "VLIB_CLI_COMMAND": { + "index_sort_key": clicmd_index_sort, + "index_header": clicmd_index_header, + "index_section": clicmd_index_section, + "index_entry": clicmd_index_entry, + 'sort_key': clicmd_sort, + "header": clicmd_header, + "format": clicmd_format, + } +} + + +# PyParsing definition for our struct initializers which look like this: +# VLIB_CLI_COMMAND (show_sr_tunnel_command, static) = { +# .path = "show sr tunnel", +# .short_help = "show sr tunnel [name <sr-tunnel-name>]", +# .function = show_sr_tunnel_fn, +#}; +def getMacroInitializerBNF(): + cs = pp.Forward() + ident = pp.Word(pp.alphas + "_", pp.alphas + pp.nums + "_") + intNum = pp.Word(pp.nums) + hexNum = pp.Literal("0x") + pp.Word(pp.hexnums) + octalNum = pp.Literal("0") + pp.Word("01234567") + integer = (hexNum | octalNum | intNum) + \ + pp.Optional(pp.Literal("ULL") | pp.Literal("LL") | pp.Literal("L")) + floatNum = pp.Regex(r'\d+(\.\d*)?([eE]\d+)?') + pp.Optional(pp.Literal("f")) + char = pp.Literal("'") + 
pp.Word(pp.printables, exact=1) + pp.Literal("'") + arrayIndex = integer | ident + + lbracket = pp.Literal("(").suppress() + rbracket = pp.Literal(")").suppress() + lbrace = pp.Literal("{").suppress() + rbrace = pp.Literal("}").suppress() + comma = pp.Literal(",").suppress() + equals = pp.Literal("=").suppress() + dot = pp.Literal(".").suppress() + semicolon = pp.Literal(";").suppress() + + # initializer := { [member = ] (variable | expression | { initializer } ) } + typeName = ident + varName = ident + + typeSpec = pp.Optional("unsigned") + \ + pp.oneOf("int long short float double char u8 i8 void") + \ + pp.Optional(pp.Word("*"), default="") + typeCast = pp.Combine( "(" + ( typeSpec | typeName ) + ")" ).suppress() + + string = pp.Combine(pp.OneOrMore(pp.QuotedString(quoteChar='"', + escChar='\\', multiline=True)), adjacent=False) + literal = pp.Optional(typeCast) + (integer | floatNum | char | string) + var = pp.Combine(pp.Optional(typeCast) + varName + pp.Optional("[" + arrayIndex + "]")) + + expr = (literal | var) # TODO + + + member = pp.Combine(dot + varName + pp.Optional("[" + arrayIndex + "]")) + value = (expr | cs) + + entry = pp.Group(pp.Optional(member + equals, default="") + value) + entries = (pp.ZeroOrMore(entry + comma) + entry + pp.Optional(comma)) | \ + (pp.ZeroOrMore(entry + comma)) + + cs << (lbrace + entries + rbrace) + + macroName = ident + params = pp.Group(pp.ZeroOrMore(expr + comma) + expr) + macroParams = lbracket + params + rbracket + + mi = macroName + pp.Optional(macroParams) + equals + pp.Group(cs) + semicolon + mi.ignore(pp.cppStyleComment) + return mi + + +mi = getMacroInitializerBNF() + +# Parse the input file into a more usable dictionary structure +cmds = {} +line_num = 0 +line_start = 0 +for filename in args.input: + sys.stderr.write("Parsing items in file \"%s\"...\n" % filename) + data = None + with open(filename, "r") as fd: + data = json.load(fd) + + cmds['_global'] = data['global'] + + # iterate the items loaded and regroup 
it + for item in data["items"]: + try: + o = mi.parseString(item['block']).asList() + except: + sys.stderr.write("Exception parsing item: %s\n%s\n" \ + % (json.dumps(item, separators=(',', ': '), indent=4), + item['block'])) + raise + + group = item['group'] + file = item['file'] + macro = o[0] + param = o[1][0] + + if group not in cmds: + cmds[group] = {} + + if file not in cmds[group]: + cmds[group][file] = {} + + if macro not in cmds[group][file]: + cmds[group][file][macro] = {} + + c = { + 'params': o[2], + 'meta': {}, + 'value': {}, + } + + for key in item: + if key == 'block': + continue + c['meta'][key] = item[key] + + for i in c['params']: + c['value'][i[0]] = cgi.escape(i[1]) + + cmds[group][file][macro][param] = c + + +# Write the header for this siphon type +cfg = siphons[siphon_map[args.type]] +sys.stdout.write(cfg["index_header"](cfg)) +contents = "" + +def group_sort_key(item): + if "index_sort_key" in cfg: + return cfg["index_sort_key"](cfg, item, cmds['_global']) + return item + +# Iterate the dictionary and process it +for group in sorted(cmds.keys(), key=group_sort_key): + if group.startswith('_'): + continue + + sys.stderr.write("Processing items in group \"%s\"...\n" % group) + + cfg = siphons[siphon_map[args.type]] + md = group.replace("/", "_").replace(".", "_") + sys.stdout.write(cfg["index_section"](cfg, group, md)) + + if "header" in cfg: + dec = cmds['_global'] + contents += cfg["header"](cfg, group, md, dec) + + for file in sorted(cmds[group].keys()): + if group.startswith('_'): + continue + + sys.stderr.write("- Processing items in file \"%s\"...\n" % file) + + for macro in sorted(cmds[group][file].keys()): + if macro != siphon_map[args.type]: + continue + sys.stderr.write("-- Processing items in macro \"%s\"...\n" % macro) + cfg = siphons[macro] + + meta = { + "group": group, + "file": file, + "macro": macro, + "md": md, + } + + def item_sort_key(item): + if "sort_key" in cfg: + return cfg["sort_key"](cfg, meta, 
cmds[group][file][macro][item]) + return item + + for param in sorted(cmds[group][file][macro].keys(), key=item_sort_key): + sys.stderr.write("--- Processing item \"%s\"...\n" % param) + + meta["item"] = param + + # mangle "md" and the item to make a reference label + meta["label"] = "%s___%s" % (meta["md"], param) + + if "index_entry" in cfg: + s = cfg["index_entry"](cfg, meta, cmds[group][file][macro][param]) + sys.stdout.write(s) + + if "format" in cfg: + contents += cfg["format"](cfg, meta, cmds[group][file][macro][param]) + +sys.stdout.write(contents) + +# All done |