aboutsummaryrefslogtreecommitdiffstats
path: root/doxygen
diff options
context:
space:
mode:
authorChris Luke <chrisy@flirble.org>2016-07-25 16:38:11 -0400
committerChris Luke <chrisy@flirble.org>2016-09-07 15:16:36 -0400
commit78e0e3af7072cd8977c73c41863776be10d54155 (patch)
tree57e161337a563b237bfaf97ba8e01d6cc232e616 /doxygen
parentc1ac47b61ca6f788bb8cc08128c79edc743dcb99 (diff)
VPP-221 CLI auto-documentation infrastructure
[Note: This is an amalgamation of several patches cherry-picked from master all related to introducing auto-CLI documentation. See commits d4024f58 ee4743ad 16bcf7d8 e0d802bc 54ccf226] As a step before Doxygen, extract CLI-related struct initializers from the code and parse that into a summary of the CLI commands available with the provided help text, such as it is. At the moment this only renders this into an indexed Markdown file that Doxygen then picks up but later we can use this information to enrich the existing VLIB_CLI_COMMAND macro documentor as well as provide runtime documentation to VPP that is stored on disk outside the binary image. Additionally support a comment block immediately prior to VLIB_CLI_COMMAND CLI command definitions in the form /*? ... ?*/ that can be used to include long-form documentation without having it compiled into VPP. Examples of documenting CLI commands can be found in vlib/vlib/unix/cli.c which, whilst not perfect, should provide a starting point. Screen captures of sample output can be seen at https://chrisy.flirble.org/vpp/doxy-cli-example.png and https://chrisy.flirble.org/vpp/doxy-cli-index.png . Next, shift the Doxygen root makefile targets to their own Makefile. The primary reason for this is that the siphon targets do dependency tracking which means it needs to generate those dependencies whenever make is run; that is pointless if we're not going to generate any documentation. This includes the package dependencies since they sometimes unnecessarily interfere with the code build in some cases at the moment; later we will look to building a Python venv to host the Python modules we use. One final remark: In future we may consider deprecating .long_help in the VLIB_CLI_COMMAND structure entirely but add perhaps .usage_help. .short_help would be reserved for a summary of the command function and .usage_help provide the syntax of that command. 
These changes would provide great semantic value to the automatically generated CLI documentation. I could also see having .long_help replaced by a mechanism that reads it from disk at runtime with a rudimentary Markdown/Doxygen filter so that we can use the same text that is used in the published documentation. Change-Id: I8afccfd7fe2096411d8064ac954ca5a2cbca9ae7 Signed-off-by: Chris Luke <chrisy@flirble.org>
Diffstat (limited to 'doxygen')
-rw-r--r--doxygen/Makefile154
-rw-r--r--doxygen/dir.dox.sample3
-rw-r--r--doxygen/doxygen.cfg22
-rwxr-xr-xdoxygen/filter_c.py69
-rwxr-xr-xdoxygen/filter_h.py53
-rwxr-xr-xdoxygen/siphon_generate.py322
-rwxr-xr-xdoxygen/siphon_process.py323
7 files changed, 925 insertions, 21 deletions
diff --git a/doxygen/Makefile b/doxygen/Makefile
new file mode 100644
index 00000000000..471b6fd7f15
--- /dev/null
+++ b/doxygen/Makefile
@@ -0,0 +1,154 @@
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Build the documentation
+#
+
+# These should be passed in by the root Makefile
+WS_ROOT ?= $(CURDIR)/..
+BR ?= $(WS_ROOT)/build-root
+OS_ID ?= $(shell grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')
+
+# Package dependencies
+DOC_DEB_DEPENDS = doxygen graphviz python-pyparsing
+DOC_RPM_DEPENDS = doxygen graphviz pyparsing
+
+# Doxygen configuration and our utility scripts
+DOXY_DIR ?= $(WS_ROOT)/doxygen
+
+# Input directories and files
+DOXY_INPUT ?= \
+ README.md \
+ vppinfra \
+ svm \
+ vlib \
+ vlib-api \
+ vnet \
+ vpp \
+ vpp-api \
+ plugins
+
+# Files to exclude, from pre-Doxygen steps, eg because they're
+# selectively compiled.
+# Examples would be to exclude non-DPDK related sources when
+# there's a DPDK equivalent that conflicts.
+# This is specifically for the pre-Doxygen steps; Doxygen uses
+# @cond for this instead.
+DOXY_PRE_EXCLUDE ?= \
+ vlib/vlib/buffer.c
+
+# Generate a regexp for filenames to exclude
+DOXY_PRE_EXCLUDE_REGEXP = ($(subst .,\.,$(shell echo '$(strip $(DOXY_PRE_EXCLUDE))' | sed -e 's/ /|/g')))
+
+# Discover all the directories we might, possibly, maybe, have include files in
+DOXY_INCLUDE_PATH = $(shell set -e; cd $(WS_ROOT); for item in $(DOXY_INPUT); do find $$item -type d; done)
+
+# Discover if we have CPP available
+CPP ?= $(shell which cpp)
+ifneq ($(strip $(CPP)),)
+# Add whatever directories CPP normally includes
+DOXY_INCLUDE_PATH += $(shell set -e; $(CPP) -v </dev/null 2>&1 | grep -A 1000 '\#include' | awk '/^ /{print $$1}')
+endif
+
+# Target directory for doxygen output
+DOXY_OUTPUT ?= $(BR)/docs
+
+# Siphoned fragments end up in here
+SIPHON_INPUT ?= $(DOXY_OUTPUT)/siphon_fragments
+
+# Siphoned fragments are processed into here
+SIPHON_OUTPUT ?= $(DOXY_OUTPUT)/siphon_docs
+
+# Extra document inputs that are processed in addition to DOXY_INPUT
+EXTRA_DOXY_INPUT += $(SIPHON_OUTPUT)
+
+# All the siphon types we know about
+SIPHONS ?= clicmd
+
+SIPHON_FILES = $(addprefix $(SIPHON_INPUT)/,$(addsuffix .siphon,$(SIPHONS)))
+SIPHON_DOCS = $(addprefix $(SIPHON_OUTPUT)/,$(addsuffix .md,$(SIPHONS)))
+
+$(BR)/.doxygen-bootstrap.ok:
+ifeq ($(OS_ID),ubuntu)
+ @set -e; inst=; \
+ for i in $(DOC_DEB_DEPENDS); do \
+ dpkg-query --show $$i >/dev/null 2>&1 || inst="$$inst $$i"; \
+ done; \
+ if [ "$$inst" ]; then sudo apt-get $(CONFIRM) $(FORCE) install $$inst; fi
+else ifneq ("$(wildcard /etc/redhat-release)","")
+ @sudo yum install $(CONFIRM) $(DOC_RPM_DEPENDS)
+else
+ $(error "This option currently works only on Ubuntu or Centos systems")
+endif
+ @touch $@
+
+.PHONY: bootstrap-doxygen
+bootstrap-doxygen: $(BR)/.doxygen-bootstrap.ok
+
+.DELETE_ON_ERROR: $(BR)/.doxygen-siphon.dep
+$(BR)/.doxygen-siphon.dep: Makefile
+ set -e; rm -f "$@"; for input in $(DOXY_INPUT); do \
+ find "$(WS_ROOT)/$$input" -type f \
+ \( -name '*.[ch]' -or -name '*.dox' \) -print \
+ | grep -v -E '^$(WS_ROOT)/$(DOXY_PRE_EXCLUDE_REGEXP)$$' \
+ | sed -e "s/^/\$$(SIPHON_FILES): /" \
+ >> $@; \
+ done
+
+# Include the source -> siphon dependencies
+-include $(BR)/.doxygen-siphon.dep
+
+.NOTPARALLEL: $(SIPHON_FILES)
+$(SIPHON_FILES): $(DOXY_DIR)/siphon_generate.py $(BR)/.doxygen-bootstrap.ok
+ @rm -rf "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)"
+ @mkdir -p "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)"
+ @touch $(SIPHON_INPUT)/files
+ for input in $(DOXY_INPUT); do \
+ cd "$(WS_ROOT)"; \
+ find "$$input" -type f \
+ \( -name '*.[ch]' -or -name '*.dox' \) -print \
+ | grep -v -E '^$(DOXY_PRE_EXCLUDE_REGEXP)$$' \
+ >> $(SIPHON_INPUT)/files; \
+ done
+ set -e; cd "$(WS_ROOT)"; $(DOXY_DIR)/siphon_generate.py \
+ --output="$(SIPHON_INPUT)" \
+ "@$(SIPHON_INPUT)/files"
+
+
+.DELETE_ON_ERROR: $(SIPHON_DOCS)
+$(SIPHON_OUTPUT)/%.md: $(SIPHON_INPUT)/%.siphon $(DOXY_DIR)/siphon_process.py
+ set -e; cd "$(WS_ROOT)"; \
+ $(DOXY_DIR)/siphon_process.py --type=$(basename $(notdir $<)) \
+ --output="$(SIPHON_OUTPUT)" $< > $@
+
+# This target can be used just to generate the siphoned docs
+.PHONY: doxygen-siphon
+doxygen-siphon: $(SIPHON_DOCS)
+
+# Generate the doxygen docs
+doxygen: $(SIPHON_DOCS)
+ @mkdir -p "$(DOXY_OUTPUT)"
+ set -e; cd "$(WS_ROOT)"; \
+ ROOT="$(WS_ROOT)" \
+ BUILD_ROOT="$(BR)" \
+ INPUT="$(addprefix $(WS_ROOT)/,$(DOXY_INPUT)) $(EXTRA_DOXY_INPUT)" \
+ INCLUDE_PATH="$(DOXY_INCLUDE_PATH)" \
+ HTML=YES \
+ VERSION="`git describe --tags --dirty`" \
+ doxygen $(DOXY_DIR)/doxygen.cfg
+
+wipe-doxygen:
+ rm -rf "$(BR)/docs" "$(BR)/.doxygen-siphon.dep"
+
diff --git a/doxygen/dir.dox.sample b/doxygen/dir.dox.sample
index 41e84d7bee4..500fe595ae5 100644
--- a/doxygen/dir.dox.sample
+++ b/doxygen/dir.dox.sample
@@ -18,7 +18,7 @@
/**
@dir
-@brief Someone please fix this description
+@brief Someone please fix this description.
@todo This directory needs a description.
This is where you would document the contents of a directory.
@@ -26,3 +26,4 @@ This is where you would document the contents of a directory.
This looks like a C file but it is not part of the build; it is purely
for documentation.
*/
+/*? %%clicmd:group_label CLI section description%% ?*/
diff --git a/doxygen/doxygen.cfg b/doxygen/doxygen.cfg
index 0eadbd7397c..971a159502c 100644
--- a/doxygen/doxygen.cfg
+++ b/doxygen/doxygen.cfg
@@ -229,8 +229,22 @@ TAB_SIZE = 8
# newlines.
ALIASES =
+
+## Indexes VPP graph nodes
ALIASES += "node=@xrefitem nodes \"Node Identifier\" \"Node Identifiers\" @c "
+## Formatting for CLI commands and output
+ALIASES += "cli{1}=<code><pre>\1</code></pre>"
+ALIASES += "clistart=<code><pre>"
+ALIASES += "cliend=</pre></code>"
+
+## Formatting for CLI example paragraphs
+ALIASES += "cliexpar=@par Example usage"
+ALIASES += "cliexcmd{1}=@clistart<b>vpp# <em>\1</em></b>@cliend"
+ALIASES += "cliexstart{1}=@cliexcmd{\1}@clistart"
+ALIASES += "cliexend=@cliend"
+
+
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding "class=itcl::class"
# will allow you to use the command class in the itcl::class meaning.
@@ -630,7 +644,7 @@ GENERATE_DEPRECATEDLIST= YES
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
-ENABLED_SECTIONS =
+ENABLED_SECTIONS = DPDK
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
@@ -901,6 +915,7 @@ INPUT_FILTER =
FILTER_PATTERNS = \
*.c=$(ROOT)/doxygen/filter_c.py \
+ *.h=$(ROOT)/doxygen/filter_h.py \
*.api=$(ROOT)/doxygen/filter_api.py
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
@@ -2022,7 +2037,7 @@ SEARCH_INCLUDES = YES
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
-INCLUDE_PATH = $(INPUT)
+INCLUDE_PATH = $(INCLUDE_PATH)
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
@@ -2046,7 +2061,8 @@ PREDEFINED = \
__ORDER_LITTLE_ENDIAN__=1234 \
__BYTE_ORDER__=1234 \
__FLOAT_WORD_ORDER__=1234 \
- DPDK=1
+ DPDK=1 \
+ always_inline:="static inline"
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
diff --git a/doxygen/filter_c.py b/doxygen/filter_c.py
index db1681c9450..30b933ba79e 100755
--- a/doxygen/filter_c.py
+++ b/doxygen/filter_c.py
@@ -15,38 +15,73 @@
# Filter for .c files to make various preprocessor tricks Doxygenish
-import sys, re
+import os, sys, re
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0]))
sys.exit(1)
-patterns = [
- # Search for VLIB_CLI_COMMAND, extract its parameter and add a docblock for it
- ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>"),
+replace_patterns = [
+ # Search for VLIB_CLI_COMMAND, extract its parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>"),
+ ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_cli_command_t \g<name>"),
- # Search for VLIB_REGISTER_NODE, extract its parameter and add a docblock for it
- ( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_node_registration_t \g<name>"),
+ # Search for VLIB_REGISTER_NODE, extract its parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_node_registration_t \g<name>"),
+ ( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_node_registration_t \g<name>"),
# Search for VLIB_INIT_FUNCTION, extract its parameter and add a docblock for it
- ( re.compile("(?P<m>VLIB_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
- ( re.compile("(?P<m>VLIB_DECLARE_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
+ ( re.compile("(?P<m>VLIB_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
+ ( re.compile("(?P<m>VLIB_DECLARE_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
- # Search for VLIB_LOOP_ENTER_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it
- ( re.compile("(?P<m>VLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_enter_\g<name>"),
- ( re.compile("(?P<m>VLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_exit_\g<name>"),
+ # Search for VLIB_LOOP_ENTER_FUNCTION, extract the parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_enter_\g<name>"),
+ ( re.compile("(?P<m>VLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_exit_\g<name>"),
- # Search for VLIB_CONFIG_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it
- ( re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
- ( re.compile("(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
+ # Search for VLIB_CONFIG_FUNCTION, extract the parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
+ ( re.compile("(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
# Search for "format_thing" and "unformat_thing" when used as a function pointer and add parens
- ( re.compile("(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))") , r"\g<pre>\g<name>()\g<post>" ),
+ ( re.compile("(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))"),
+ r"\g<pre>\g<name>()\g<post>" ),
+
+ # Search for CLIB_PAD_FROM_TO(...); and replace with padding
+ # #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
+ ( re.compile("(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]"),
+ r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]" ),
+
]
-with open(sys.argv[1]) as fd:
+
+filename = sys.argv[1]
+cwd = os.getcwd()
+if filename[0:len(cwd)] == cwd:
+ filename = filename[len(cwd):]
+ if filename[0] == "/":
+ filename = filename[1:]
+
+with open(filename) as fd:
+ line_num = 0
+
for line in fd:
+ line_num += 1
str = line[:-1] # filter \n
- for p in patterns:
+
+ # Look for search/replace patterns
+ for p in replace_patterns:
str = p[0].sub(p[1], str)
+
sys.stdout.write(str+"\n")
+
+# All done
diff --git a/doxygen/filter_h.py b/doxygen/filter_h.py
new file mode 100755
index 00000000000..967388d5743
--- /dev/null
+++ b/doxygen/filter_h.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter for .h files to make various preprocessor tricks Doxygenish
+
+import os, sys, re
+
+if len(sys.argv) < 2:
+ sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0]))
+ sys.exit(1)
+
+replace_patterns = [
+ # Search for CLIB_PAD_FROM_TO(...); and replace with padding
+ # #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
+ ( re.compile("(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]"),
+ r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]" ),
+
+]
+
+
+filename = sys.argv[1]
+cwd = os.getcwd()
+if filename[0:len(cwd)] == cwd:
+ filename = filename[len(cwd):]
+ if filename[0] == "/":
+ filename = filename[1:]
+
+with open(filename) as fd:
+ line_num = 0
+
+ for line in fd:
+ line_num += 1
+ str = line[:-1] # filter \n
+
+ # Look for search/replace patterns
+ for p in replace_patterns:
+ str = p[0].sub(p[1], str)
+
+ sys.stdout.write(str+"\n")
+
+# All done
diff --git a/doxygen/siphon_generate.py b/doxygen/siphon_generate.py
new file mode 100755
index 00000000000..8b999114e52
--- /dev/null
+++ b/doxygen/siphon_generate.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Looks for preprocessor macros with struct initializers and siphons them
+# off into another file for later parsing; ostensibly to generate
+# documentation from struct initializer data.
+
+import os, sys, re, argparse, json
+
+DEFAULT_OUTPUT = "build-root/docs/siphons"
+DEFAULT_PREFIX = os.getcwd()
+
+ap = argparse.ArgumentParser()
+ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT,
+ help="Output directory for .siphon files [%s]" % DEFAULT_OUTPUT)
+ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX,
+ help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX)
+ap.add_argument("input", nargs='+', metavar="input_file",
+ help="Input C source files")
+args = ap.parse_args()
+
+"""Patterns that match the start of code blocks we want to siphon"""
+siphon_patterns = [
+ ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), "clicmd" ),
+]
+
+"""Matches a siphon comment block start"""
+siphon_block_start = re.compile("^\s*/\*\?\s*(.*)$")
+
+"""Matches a siphon comment block stop"""
+siphon_block_stop = re.compile("^(.*)\s*\?\*/\s*$")
+
+"""Siphon block directive delimiter"""
+siphon_block_delimiter = "%%"
+
+"""Matches a siphon block directive such as '%clicmd:group_label Debug CLI%'"""
+siphon_block_directive = re.compile("(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)" % \
+ (siphon_block_delimiter, siphon_block_delimiter))
+
+"""Matches the start of an initializer block"""
+siphon_initializer = re.compile("\s*=")
+
+"""
+count open and close braces in str
+return (0, index) when braces were found and count becomes 0.
+index indicates the position at which the last closing brace was
+found.
+return (-1, -1) if a closing brace is found before any opening one.
+return (count, -1) if not all opening braces are closed, count is the
+current depth
+"""
+def count_braces(str, count=0, found=False):
+ for index in range(0, len(str)):
+ if str[index] == '{':
+ count += 1;
+ found = True
+ elif str[index] == '}':
+ if count == 0:
+ # means we never found an open brace
+ return (-1, -1)
+ count -= 1;
+
+ if count == 0 and found:
+ return (count, index)
+
+ return (count, -1)
+
+# Collated output for each siphon
+output = {}
+
+# Build a list of known siphons
+known_siphons = []
+for item in siphon_patterns:
+ siphon = item[1]
+ if siphon not in known_siphons:
+ known_siphons.append(siphon)
+
+# Setup information for siphons we know about
+for siphon in known_siphons:
+ output[siphon] = {
+ "file": "%s/%s.siphon" % (args.output, siphon),
+ "global": {},
+ "items": [],
+ }
+
+# Pre-process file names in case they indicate a file with
+# a list of files
+files = []
+for filename in args.input:
+ if filename.startswith('@'):
+ with open(filename[1:], 'r') as fp:
+ lines = fp.readlines()
+ for line in lines:
+ files.append(line.strip())
+ lines = None
+ else:
+ files.append(filename)
+
+# Iterate all the input files we've been given
+for filename in files:
+ # Strip the current directory off the start of the
+ # filename for brevity
+ if filename[0:len(args.input_prefix)] == args.input_prefix:
+ filename = filename[len(args.input_prefix):]
+ if filename[0] == "/":
+ filename = filename[1:]
+
+ # Work out the abbreviated directory name
+ directory = os.path.dirname(filename)
+ if directory[0:2] == "./":
+ directory = directory[2:]
+ elif directory[0:len(args.input_prefix)] == args.input_prefix:
+ directory = directory[len(args.input_prefix):]
+ if directory[0] == "/":
+ directory = directory[1:]
+
+ # Open the file and explore its contents...
+ sys.stderr.write("Siphoning from %s...\n" % filename)
+ directives = {}
+ with open(filename) as fd:
+ siphon = None
+ close_siphon = None
+ siphon_block = ""
+ in_block = False
+ line_num = 0
+ siphon_line = 0
+
+ for line in fd:
+ line_num += 1
+ str = line[:-1] # filter \n
+
+ """See if there is a block directive and if so extract it"""
+ def process_block_directive(str, directives):
+ m = siphon_block_directive.search(str)
+ if m is not None:
+ k = m.group(2)
+ v = m.group(3).strip()
+ directives[k] = v
+ # Return only the parts we did not match
+ return str[0:m.start(1)] + str[m.end(4):]
+
+ return str
+
+ def process_block_prefix(str):
+ if str.startswith(" * "):
+ str = str[3:]
+ elif str == " *":
+ str = ""
+ return str
+
+ if not in_block:
+ # See if the line contains the start of a siphon doc block
+ m = siphon_block_start.search(str)
+ if m is not None:
+ in_block = True
+ t = m.group(1)
+
+ # Now check if the block closes on the same line
+ m = siphon_block_stop.search(t)
+ if m is not None:
+ t = m.group(1)
+ in_block = False
+
+ # Check for directives
+ t = process_block_directive(t, directives)
+
+ # Filter for normal comment prefixes
+ t = process_block_prefix(t)
+
+ # Add what is left
+ siphon_block += t
+
+ # Skip to next line
+ continue
+
+ else:
+ # Check to see if we have an end block marker
+ m = siphon_block_stop.search(str)
+ if m is not None:
+ in_block = False
+ t = m.group(1)
+ else:
+ t = str
+
+ # Check for directives
+ t = process_block_directive(t, directives)
+
+ # Filter for normal comment prefixes
+ t = process_block_prefix(t)
+
+ # Add what is left
+ siphon_block += t + "\n"
+
+ # Skip to next line
+ continue
+
+
+ if siphon is None:
+ # Look for blocks we need to siphon
+ for p in siphon_patterns:
+ if p[0].match(str):
+ siphon = [ p[1], str + "\n", 0 ]
+ siphon_line = line_num
+
+ # see if we have an initializer
+ m = siphon_initializer.search(str)
+ if m is not None:
+ # count the braces on this line
+ (count, index) = count_braces(str[m.start():])
+ siphon[2] = count
+ # TODO - it's possible we have the initializer all on the first line
+ # we should check for it, but also account for the possibility that
+ # the open brace is on the next line
+ #if count == 0:
+ # # braces balanced
+ # close_siphon = siphon
+ # siphon = None
+ else:
+ # no initializer: close the siphon right now
+ close_siphon = siphon
+ siphon = None
+ else:
+ # See if we should end the siphon here - do we have balanced
+ # braces?
+ (count, index) = count_braces(str, count=siphon[2], found=True)
+ if count == 0:
+ # braces balanced - add the substring and close the siphon
+ siphon[1] += str[:index+1] + ";\n"
+ close_siphon = siphon
+ siphon = None
+ else:
+ # add the whole string, move on
+ siphon[2] = count
+ siphon[1] += str + "\n"
+
+ if close_siphon is not None:
+ # Write the siphoned contents to the right place
+ siphon_name = close_siphon[0]
+
+ # Copy directives for the file
+ details = {}
+ for key in directives:
+ if ":" in key:
+ (sn, label) = key.split(":")
+ if sn == siphon_name:
+ details[label] = directives[key]
+ else:
+ details[key] = directives[key]
+
+ # Copy details for this block
+ details['file'] = filename
+ details['line_start'] = siphon_line
+ details['line_end'] = line_num
+ details['siphon_block'] = siphon_block.strip()
+
+ # Some defaults
+ if "group" not in details:
+ if "group_label" in details:
+ # use the filename since group labels are mostly of file scope
+ details['group'] = details['file']
+ else:
+ details['group'] = directory
+
+ if "group_label" not in details:
+ details['group_label'] = details['group']
+
+ details["block"] = close_siphon[1]
+
+ # Store the item
+ output[siphon_name]['items'].append(details)
+
+ # All done
+ close_siphon = None
+ siphon_block = ""
+
+ # Update globals
+ for key in directives.keys():
+ if ':' not in key:
+ continue
+
+ if filename.endswith("/dir.dox"):
+ # very special! use the parent directory name
+ l = directory
+ else:
+ l = filename
+
+ (sn, label) = key.split(":")
+
+ if sn not in output:
+ output[sn] = {}
+ if 'global' not in output[sn]:
+ output[sn]['global'] = {}
+ if l not in output[sn]['global']:
+ output[sn]['global'][l] = {}
+ if 'file' not in output[sn]:
+ output[sn]['file'] = "%s/%s.siphon" % (args.output, sn)
+ if 'items' not in output[sn]:
+ output[sn]['items'] = []
+
+ output[sn]['global'][l][label] = directives[key]
+
+
+# Write out the data
+for siphon in output.keys():
+ sys.stderr.write("Saving siphon %s...\n" % siphon)
+ s = output[siphon]
+ with open(s['file'], "a") as fp:
+ json.dump(s, fp, separators=(',', ': '), indent=4, sort_keys=True)
+
+# All done
diff --git a/doxygen/siphon_process.py b/doxygen/siphon_process.py
new file mode 100755
index 00000000000..82a166d31b5
--- /dev/null
+++ b/doxygen/siphon_process.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter for .siphon files that are generated by other filters.
+# The idea is to siphon off certain initializers so that we can better
+# auto-document the contents of that initializer.
+
+import os, sys, re, argparse, cgi, json
+import pyparsing as pp
+
+import pprint
+
+# Defaults for the command-line options below
+DEFAULT_SIPHON ="clicmd"
+DEFAULT_OUTPUT = None
+DEFAULT_PREFIX = os.getcwd()
+
+# Maps each siphon type name (the --type choices) to the C macro whose
+# captured initializers it documents.
+siphon_map = {
+    'clicmd': "VLIB_CLI_COMMAND",
+}
+
+# Command-line interface
+ap = argparse.ArgumentParser()
+ap.add_argument("--type", '-t', metavar="siphon_type", default=DEFAULT_SIPHON,
+    choices=siphon_map.keys(),
+    help="Siphon type to process [%s]" % DEFAULT_SIPHON)
+ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT,
+    help="Output directory for .md files [%s]" % DEFAULT_OUTPUT)
+ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX,
+    help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX)
+ap.add_argument("input", nargs='+', metavar="input_file",
+    help="Input .siphon files")
+args = ap.parse_args()
+
+# --output has no usable default, so treat it as effectively required.
+# NOTE(review): the error message lacks a trailing newline — confirm intended.
+if args.output is None:
+    sys.stderr.write("Error: Siphon processor requires --output to be set.")
+    sys.exit(1)
+
+
+def clicmd_index_sort(cfg, group, dec):
+    """Index sort key for a group: prefer the siphoned 'group_label'
+    global for that group, falling back to the raw group name."""
+    if group in dec and 'group_label' in dec[group]:
+        return dec[group]['group_label']
+    return group
+
+def clicmd_index_header(cfg):
+    """Return the Markdown header for the CLI index page; [TOC] asks
+    Doxygen to render a table of contents."""
+    s = "# CLI command index\n"
+    s += "\n[TOC]\n"
+    return s
+
+def clicmd_index_section(cfg, group, md):
+    # One index entry per group, linking to that group's @page (named 'md').
+    return "\n@subpage %s\n\n" % md
+
+def clicmd_index_entry(cfg, meta, item):
+    # Bullet-list line linking the CLI command path to its @section label.
+    v = item["value"]
+    return "* [%s](@ref %s)\n" % (v["path"], meta["label"])
+
+def clicmd_sort(cfg, meta, item):
+    # Items within a group sort by the CLI command path string.
+    return item['value']['path']
+
+def clicmd_header(cfg, group, md, dec):
+    """Return the @page header for a group, using the siphoned
+    'group_label' global when one was provided."""
+    if group in dec and 'group_label' in dec[group]:
+        label = dec[group]['group_label']
+    else:
+        label = group
+    return "\n@page %s %s\n" % (md, label)
+
+def clicmd_format(cfg, meta, item):
+    """Render one VLIB_CLI_COMMAND item as a Doxygen/Markdown @section:
+    summary/usage, optional long help, the /*? ... ?*/ description and
+    declaration/implementation cross-references."""
+    v = item["value"]
+    s = "\n@section %s %s\n" % (meta['label'], v['path'])
+
+    # The text from '.short_help = '.
+    # Later we should split this into short_help and usage_help
+    # since the latter is how it is primarily used but the former
+    # is also needed.
+    if "short_help" in v:
+        tmp = v["short_help"].strip()
+
+        # Bit hacky. Add a trailing period if it doesn't have one.
+        # NOTE(review): tmp[-1] raises IndexError when short_help strips to
+        # an empty string — confirm empty .short_help cannot occur upstream.
+        if tmp[-1] != ".":
+            tmp += "."
+
+        s += "### Summary/usage\n %s\n\n" % tmp
+
+    # This is seldom used and will likely be deprecated
+    if "long_help" in v:
+        tmp = v["long_help"]
+
+        s += "### Long help\n %s\n\n" % tmp
+
+    # Extracted from the code in /*? ... ?*/ blocks
+    if "siphon_block" in item["meta"]:
+        sb = item["meta"]["siphon_block"]
+
+        if sb != "":
+            # hack. still needed?
+            # Escape raw newlines, then round-trip through the JSON string
+            # parser to decode any embedded escape sequences.
+            sb = sb.replace("\n", "\\n")
+            try:
+                sb = json.loads('"'+sb+'"')
+                s += "### Description\n%s\n\n" % sb
+            except:
+                # NOTE(review): bare except silently drops the description
+                # on any failure (and would swallow KeyboardInterrupt too);
+                # consider catching ValueError only.
+                pass
+
+    # Gives some developer-useful linking
+    if "item" in meta or "function" in v:
+        s += "### Declaration and implementation\n\n"
+
+        if "item" in meta:
+            s += "Declaration: @ref %s (%s:%d)\n\n" % \
+                (meta['item'], meta["file"], int(item["meta"]["line_start"]))
+
+        if "function" in v:
+            s += "Implementation: @ref %s.\n\n" % v["function"]
+
+    return s
+
+
+# Dispatch table: maps each siphoned macro name to the callbacks used to
+# render its index (stdout) and its per-item documentation (buffered).
+siphons = {
+    "VLIB_CLI_COMMAND": {
+        "index_sort_key": clicmd_index_sort,
+        "index_header": clicmd_index_header,
+        "index_section": clicmd_index_section,
+        "index_entry": clicmd_index_entry,
+        'sort_key': clicmd_sort,
+        "header": clicmd_header,
+        "format": clicmd_format,
+    }
+}
+
+
+# PyParsing definition for our struct initializers which look like this:
+# VLIB_CLI_COMMAND (show_sr_tunnel_command, static) = {
+#    .path = "show sr tunnel",
+#    .short_help = "show sr tunnel [name <sr-tunnel-name>]",
+#    .function = show_sr_tunnel_fn,
+# };
+def getMacroInitializerBNF():
+    """Build the pyparsing grammar for MACRO(args) = { .member = value, ... };
+    initializers and return the top-level parser (C/C++ comments ignored)."""
+    cs = pp.Forward()
+
+    # Basic tokens: identifiers and C numeric/character literals
+    ident = pp.Word(pp.alphas + "_", pp.alphas + pp.nums + "_")
+    intNum = pp.Word(pp.nums)
+    hexNum = pp.Literal("0x") + pp.Word(pp.hexnums)
+    octalNum = pp.Literal("0") + pp.Word("01234567")
+    # hexNum is tried before octalNum so "0x..." is not misparsed as octal
+    integer = (hexNum | octalNum | intNum) + \
+        pp.Optional(pp.Literal("ULL") | pp.Literal("LL") | pp.Literal("L"))
+    floatNum = pp.Regex(r'\d+(\.\d*)?([eE]\d+)?') + pp.Optional(pp.Literal("f"))
+    char = pp.Literal("'") + pp.Word(pp.printables, exact=1) + pp.Literal("'")
+    arrayIndex = integer | ident
+
+    # Punctuation, suppressed from the parse results
+    lbracket = pp.Literal("(").suppress()
+    rbracket = pp.Literal(")").suppress()
+    lbrace = pp.Literal("{").suppress()
+    rbrace = pp.Literal("}").suppress()
+    comma = pp.Literal(",").suppress()
+    equals = pp.Literal("=").suppress()
+    dot = pp.Literal(".").suppress()
+    semicolon = pp.Literal(";").suppress()
+
+    # initializer := { [member = ] (variable | expression | { initializer } ) }
+    typeName = ident
+    varName = ident
+
+    # Type specifications exist only so casts can be recognized and dropped
+    typeSpec = pp.Optional("unsigned") + \
+        pp.oneOf("int long short float double char u8 i8 void") + \
+        pp.Optional(pp.Word("*"), default="")
+    typeCast = pp.Combine( "(" + ( typeSpec | typeName ) + ")" ).suppress()
+
+    # Adjacent C string literals concatenate, mirroring C semantics
+    string = pp.Combine(pp.OneOrMore(pp.QuotedString(quoteChar='"',
+        escChar='\\', multiline=True)), adjacent=False)
+    literal = pp.Optional(typeCast) + (integer | floatNum | char | string)
+    var = pp.Combine(pp.Optional(typeCast) + varName + pp.Optional("[" + arrayIndex + "]"))
+
+    expr = (literal | var) # TODO
+
+
+    # '.member' designators, optionally with an array subscript
+    member = pp.Combine(dot + varName + pp.Optional("[" + arrayIndex + "]"), adjacent=False)
+    value = (expr | cs)
+
+    # One '.member = value' entry; the designator is optional
+    entry = pp.Group(pp.Optional(member + equals, default="") + value)
+    entries = (pp.ZeroOrMore(entry + comma) + entry + pp.Optional(comma)) | \
+        (pp.ZeroOrMore(entry + comma))
+
+    # Close the recursion: a braced, comma-separated initializer list
+    cs << (lbrace + entries + rbrace)
+
+    macroName = ident
+    params = pp.Group(pp.ZeroOrMore(expr + comma) + expr)
+    macroParams = lbracket + params + rbracket
+
+    # MACRO(params) = { ... };
+    mi = macroName + pp.Optional(macroParams) + equals + pp.Group(cs) + semicolon
+    mi.ignore(pp.cppStyleComment)
+    return mi
+
+
+mi = getMacroInitializerBNF()
+
+# Parse the input file into a more usable dictionary structure
+cmds = {}
+# NOTE(review): line_num/line_start are initialized here but not referenced
+# in the processing below — possibly leftovers; confirm before removing.
+line_num = 0
+line_start = 0
+for filename in args.input:
+    sys.stderr.write("Parsing items in file \"%s\"...\n" % filename)
+    data = None
+    with open(filename, "r") as fd:
+        data = json.load(fd)
+
+    # NOTE(review): '_global' is overwritten for each input file, so only
+    # the last file's globals survive — presumably every .siphon file
+    # carries equivalent global directives; verify against the generator.
+    cmds['_global'] = data['global']
+
+    # iterate the items loaded and regroup it
+    for item in data["items"]:
+        try:
+            # Parse the captured C initializer with the pyparsing grammar;
+            # result list is [macro_name, [macro_params], [entries]].
+            o = mi.parseString(item['block']).asList()
+        except:
+            # Report which item failed, then re-raise the original error.
+            sys.stderr.write("Exception parsing item: %s\n%s\n" \
+                % (json.dumps(item, separators=(',', ': '), indent=4),
+                    item['block']))
+            raise
+
+        # Keys used to index this command in the nested structure
+        group = item['group']
+        file = item['file']
+        macro = o[0]
+        param = o[1][0]
+
+        # Build the group -> file -> macro hierarchy on demand
+        if group not in cmds:
+            cmds[group] = {}
+
+        if file not in cmds[group]:
+            cmds[group][file] = {}
+
+        if macro not in cmds[group][file]:
+            cmds[group][file][macro] = {}
+
+        c = {
+            'params': o[2],
+            'meta': {},
+            'value': {},
+        }
+
+        # Everything except the raw block text becomes item metadata
+        for key in item:
+            if key == 'block':
+                continue
+            c['meta'][key] = item[key]
+
+        # Map each '.member = value' pair, HTML-escaping the value.
+        # NOTE(review): cgi.escape is deprecated (removed in Python 3.8);
+        # html.escape is the modern equivalent.
+        for i in c['params']:
+            c['value'][i[0]] = cgi.escape(i[1])
+
+        cmds[group][file][macro][param] = c
+
+
+# Write the header for this siphon type
+cfg = siphons[siphon_map[args.type]]
+sys.stdout.write(cfg["index_header"](cfg))
+# Per-item sections are buffered here and emitted after the whole index
+contents = ""
+
+def group_sort_key(item):
+    # Sort groups via the siphon's index_sort_key callback when provided;
+    # otherwise fall back to the raw group name.
+    if "index_sort_key" in cfg:
+        return cfg["index_sort_key"](cfg, item, cmds['_global'])
+    return item
+
+# Iterate the dictionary and process it
+for group in sorted(cmds.keys(), key=group_sort_key):
+    # Keys starting with '_' (e.g. '_global') are bookkeeping, not groups
+    if group.startswith('_'):
+        continue
+
+    sys.stderr.write("Processing items in group \"%s\"...\n" % group)
+
+    cfg = siphons[siphon_map[args.type]]
+    # Mangle the group name into a Doxygen-safe page identifier
+    md = group.replace("/", "_").replace(".", "_")
+    sys.stdout.write(cfg["index_section"](cfg, group, md))
+
+    if "header" in cfg:
+        dec = cmds['_global']
+        contents += cfg["header"](cfg, group, md, dec)
+
+    for file in sorted(cmds[group].keys()):
+        # NOTE(review): this re-tests 'group', already filtered above, so it
+        # can never trigger here — likely intended file.startswith('_');
+        # confirm against the generator's key naming.
+        if group.startswith('_'):
+            continue
+
+        sys.stderr.write("- Processing items in file \"%s\"...\n" % file)
+
+        for macro in sorted(cmds[group][file].keys()):
+            # Only render the macro type selected by --type
+            if macro != siphon_map[args.type]:
+                continue
+            sys.stderr.write("-- Processing items in macro \"%s\"...\n" % macro)
+            cfg = siphons[macro]
+
+            meta = {
+                "group": group,
+                "file": file,
+                "macro": macro,
+                "md": md,
+            }
+
+            def item_sort_key(item):
+                # Sort items with the siphon's sort_key callback (CLI path)
+                if "sort_key" in cfg:
+                    return cfg["sort_key"](cfg, meta, cmds[group][file][macro][item])
+                return item
+
+            for param in sorted(cmds[group][file][macro].keys(), key=item_sort_key):
+                sys.stderr.write("--- Processing item \"%s\"...\n" % param)
+
+                meta["item"] = param
+
+                # mangle "md" and the item to make a reference label
+                meta["label"] = "%s___%s" % (meta["md"], param)
+
+                # Index lines go straight to stdout; full sections are
+                # accumulated in 'contents' and written after the index.
+                if "index_entry" in cfg:
+                    s = cfg["index_entry"](cfg, meta, cmds[group][file][macro][param])
+                    sys.stdout.write(s)
+
+                if "format" in cfg:
+                    contents += cfg["format"](cfg, meta, cmds[group][file][macro][param])
+
+sys.stdout.write(contents)
+
+# All done