From b585097048884e063ac25aecc26a6802ee3faa4d Mon Sep 17 00:00:00 2001
From: Chris Luke
Date: Tue, 3 May 2016 16:34:59 -0400
Subject: VPP-57 Add Doxygen to VPP

- Configures Doxygen.
- Adds a source filter to do magic on our use of the preprocessor to do
  constructor stuff, to make Doxygen grok it better.
- Adds a convenience helper to the root Makefile.
- Adds a README.md to the root directory (which Doxygen uses as its
  "mainpage").
- Adds several other documentation files.
- Currently using SVG for call graphs, though this may have a load-time
  performance impact in browsers.

Change-Id: I25fc6fb5bf634319dcb36a7f0e32031921c125ac
Signed-off-by: Chris Luke
---
 doxygen/filter_c.py | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100755 doxygen/filter_c.py

(limited to 'doxygen/filter_c.py')

diff --git a/doxygen/filter_c.py b/doxygen/filter_c.py
new file mode 100755
index 00000000..5a812a55
--- /dev/null
+++ b/doxygen/filter_c.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+import sys, re
+
+if len(sys.argv) < 2:
+    sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0]))
+    sys.exit(1)
+
+patterns = [
+    # Search for VLIB_CLI_COMMAND, extract its parameter and add a docblock for it
+    ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>"),
+
+    # Search for VLIB_REGISTER_NODE, extract its parameter and add a docblock for it
+    ( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_node_registration_t \g<name>"),
+
+    # Search for VLIB_INIT_FUNCTION, extract its parameter and add a docblock for it
+    ( re.compile("(?P<m>VLIB_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
+    ( re.compile("(?P<m>VLIB_DECLARE_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
+
+    # Search for VLIB_LOOP_ENTER_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it
+    ( re.compile("(?P<m>VLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_enter_\g<name>"),
+    ( re.compile("(?P<m>VLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_exit_\g<name>"),
+
+    # Search for VLIB_CONFIG_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it
+    ( re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<str>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<str>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
+    ( re.compile("(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<str>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<str>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
+
+    # Search for "format_thing" and "unformat_thing" when used as a function pointer and add parens
+    ( re.compile("(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))") , r"\g<pre>\g<name>()\g<post>" ),
+]
+
+with open(sys.argv[1]) as fd:
+    for line in fd:
+        str = line
+        for p in patterns:
+            str = p[0].sub(p[1], str)
+        sys.stdout.write(str)
-- 
cgit 1.2.3-korg
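
For illustration, given the VLIB_CLI_COMMAND pattern above, a source line such
as (the command name is hypothetical)

    VLIB_CLI_COMMAND (show_example_command, static) = {

is emitted by the filter as

    /** @brief (@em constructor) VLIB_CLI_COMMAND (show_example_command) */ vlib_cli_command_t show_example_command = {

which is plain C that Doxygen can parse and attach the docblock to.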


From 75a37b372efc2b4a324aa38b53487b8168358a63 Mon Sep 17 00:00:00 2001
From: Chris Luke 
Date: Sat, 14 May 2016 12:17:12 -0400
Subject: VPP-62 Add a doxy filter to enable vpe.api doc

This makes Doxygen think the API definitions are structs which is close
enough to be able to document the API methods.
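
For illustration (the message name is hypothetical), a vpe.api block such as

    define example_message {
        u32 client_index;
        u32 context;
    };

is presented to Doxygen by the filter as

    typedef struct vl_api_example_message_t {
        u32 client_index;
        u32 context;
    };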

It also has logic to create an indexed API page, but that's disabled for
now because it would duplicate the "brief" text in the struct doc.

Fixes a minor line numbering issue in filter_c.py.

Change-Id: If380160b73e7c10d999b35a76f55d0e27cbc91cc
Signed-off-by: Chris Luke 
---
 doxygen/doxygen.cfg   |  8 +++++---
 doxygen/filter_api.py | 45 +++++++++++++++++++++++++++++++++++++++++++++
 doxygen/filter_c.py   | 19 +++++++++++++++++--
 3 files changed, 67 insertions(+), 5 deletions(-)
 create mode 100755 doxygen/filter_api.py

(limited to 'doxygen/filter_c.py')

diff --git a/doxygen/doxygen.cfg b/doxygen/doxygen.cfg
index 6d6bb6cc..ec4312c9 100644
--- a/doxygen/doxygen.cfg
+++ b/doxygen/doxygen.cfg
@@ -281,7 +281,7 @@ OPTIMIZE_OUTPUT_VHDL   = NO
 # Note that for custom extensions you also need to set FILE_PATTERNS otherwise
 # the files are not read by doxygen.
 
-EXTENSION_MAPPING      = def=C
+EXTENSION_MAPPING      = def=C api=C
 
 # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
 # according to the Markdown format, which allows for more readable
@@ -796,7 +796,7 @@ INPUT_ENCODING         = UTF-8
 # *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f, *.for, *.tcl,
 # *.vhd, *.vhdl, *.ucf, *.qsf, *.as and *.js.
 
-FILE_PATTERNS          = *.md *.c *.h *.def *.inc *.S *.dox
+FILE_PATTERNS          = *.md *.c *.h *.def *.inc *.S *.dox *.api
 
 # The RECURSIVE tag can be used to specify whether or not subdirectories should
 # be searched for input files as well.
@@ -898,7 +898,9 @@ INPUT_FILTER           =
 # need to set EXTENSION_MAPPING for the extension otherwise the files are not
 # properly processed by doxygen.
 
-FILTER_PATTERNS        = *.c=$(ROOT)/doxygen/filter_c.py
+FILTER_PATTERNS        = \
+	*.c=$(ROOT)/doxygen/filter_c.py \
+	*.api=$(ROOT)/doxygen/filter_api.py
 
 # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
 # INPUT_FILTER) will also be used to filter the input files that are used for
diff --git a/doxygen/filter_api.py b/doxygen/filter_api.py
new file mode 100755
index 00000000..3e2aaaec
--- /dev/null
+++ b/doxygen/filter_api.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter for vpe.api to make it Doxygenish.
+
+import sys, re
+
+if len(sys.argv) < 2:
+    sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0]))
+    sys.exit(1)
+
+patterns = [
+    # Search for "define" blocks and treat them as structs
+    ( re.compile(r"^.*(manual_.[^\s]+\s+)?define\s+(?P[^\s]+)"), r"typedef struct vl_api_\g_t"),
+
+    # For every "brief" statement at the start of a comment block, add an
+    # xref with whatever is on the same line. This gives us an index page
+    # with all the API methods in one place.
+    # XXX Commented out for now; works but duplicates the brief text in the
+    # struct documentation
+    #( re.compile(r"/\*\*\s*(?P<b>[\\@]brief)\s+(?P<c>.+)(\*/)$"), r'/** @xrefitem api "" "VPP API" \g<c> \g<b> \g<c>'),  # capture inline comment close
+    #( re.compile(r"/\*\*\s*(?P<b>[\\@]brief)\s+(?P<c>.+)$"), r'/** @xrefitem api "" "VPP API" \g<c> \g<b> \g<c>'),
+
+    # Since structs don't have params, replace @param with @tparam
+    ( re.compile("[\\@]param\\b"), "@tparam"),
+]
+
+with open(sys.argv[1]) as fd:
+    for line in fd:
+        str = line[:-1] # strip \n
+        for p in patterns:
+            str = p[0].sub(p[1], str)
+        sys.stdout.write(str+"\n")
diff --git a/doxygen/filter_c.py b/doxygen/filter_c.py
index 5a812a55..db1681c9 100755
--- a/doxygen/filter_c.py
+++ b/doxygen/filter_c.py
@@ -1,4 +1,19 @@
 #!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter for .c files to make various preprocessor tricks Doxygenish
 
 import sys, re
 
@@ -31,7 +46,7 @@ patterns = [
 
 with open(sys.argv[1]) as fd:
     for line in fd:
-        str = line
+        str = line[:-1] # filter \n
         for p in patterns:
             str = p[0].sub(p[1], str)
-        sys.stdout.write(str)
+        sys.stdout.write(str+"\n")
-- 
cgit 1.2.3-korg


From 54ccf2261cb1f4afd966b7b1e92689183cb17836 Mon Sep 17 00:00:00 2001
From: Chris Luke 
Date: Mon, 25 Jul 2016 16:38:11 -0400
Subject: VPP-221 CLI auto-documentation infrastructure

As a step before Doxygen, extract CLI-related struct initializers
from the code and parse them into a summary of the available CLI
commands with their provided help text, such as it is. At the moment
this is only rendered into an indexed Markdown file that Doxygen
then picks up, but later we can use this information to enrich the
existing VLIB_CLI_COMMAND macro documenter as well as to provide
runtime documentation to VPP that is stored on disk outside the
binary image.

Additionally, support a comment block immediately prior to
VLIB_CLI_COMMAND definitions, in the form /*? ... ?*/, that can be
used to include long-form documentation without having it compiled
into VPP.
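
For instance, a hypothetical command (all names here are illustrative)
documented this way looks like:

    /*?
     * Long-form documentation for the command goes here; it is siphoned
     * into the generated documentation but never compiled into VPP.
    ?*/
    VLIB_CLI_COMMAND (show_example_command, static) = {
      .path = "show example",
      .short_help = "show example [verbose]",
      .function = show_example_fn,
    };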

Examples of documenting CLI commands can be found in
vlib/vlib/unix/cli.c which, whilst not perfect, should provide a
starting point. Screen captures of sample output can be seen at
https://chrisy.flirble.org/vpp/doxy-cli-example.png and
https://chrisy.flirble.org/vpp/doxy-cli-index.png .

Next, shift the Doxygen root makefile targets to their own Makefile.
The primary reason for this is that the siphon targets do dependency
tracking, which means they need to generate those dependencies whenever
make is run; that is pointless if we're not going to generate any
documentation. This includes the package dependencies, since they
sometimes unnecessarily interfere with the code build at the moment;
later we will look at building a Python venv to host the Python
modules we use.
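
With this split, the intended workflow from the top of the tree is simply:

    make bootstrap-doxygen   # one-off: install doxygen, graphviz, pyparsing
    make doxygen             # siphon the sources and generate the docs
    make wipe-doxygen        # remove all generated documentation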

One final remark: In future we may consider deprecating .long_help
in the VLIB_CLI_COMMAND structure entirely and perhaps adding
.usage_help instead. .short_help would be reserved for a summary of
the command function and .usage_help would provide the syntax of that
command. These changes would add great semantic value to the
automatically generated CLI documentation. I could also see having
.long_help replaced by a mechanism that reads it from disk at runtime,
with a rudimentary Markdown/Doxygen filter so that we can use the same
text that is used in the published documentation.

Change-Id: I80d6fe349b47dce649fa77d21ffec0ddb45c7bbf
Signed-off-by: Chris Luke 
---
 .gitignore                 |   3 +
 Makefile                   |  38 +++---
 doxygen/Makefile           | 122 +++++++++++++++++
 doxygen/dir.dox.sample     |   3 +-
 doxygen/doxygen.cfg        |  13 ++
 doxygen/filter_c.py        |  24 +++-
 doxygen/siphon_generate.py | 313 +++++++++++++++++++++++++++++++++++++++++++
 doxygen/siphon_process.py  | 323 +++++++++++++++++++++++++++++++++++++++++++++
 vlib/vlib/dir.dox          |   2 +
 vlib/vlib/unix/cli.c       | 160 ++++++++++++++++------
 vlib/vlib/unix/dir.dox     |  27 ++++
 11 files changed, 963 insertions(+), 65 deletions(-)
 create mode 100644 doxygen/Makefile
 create mode 100755 doxygen/siphon_generate.py
 create mode 100755 doxygen/siphon_process.py
 create mode 100644 vlib/vlib/unix/dir.dox

(limited to 'doxygen/filter_c.py')

diff --git a/.gitignore b/.gitignore
index 0a009d1d..a86a50c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 *~
+*.sw[op]
 
 /build-root/.ccache
 /build-root/build-*/
@@ -66,6 +67,8 @@ TAGS
 
 # Generated documentation
 /build-root/docs
+/build-root/.doxygen-bootstrap.ok
+/build-root/.doxygen-siphon.dep
 
 # indent backup files
 *.BAK
diff --git a/Makefile b/Makefile
index f12a0da8..5808a333 100644
--- a/Makefile
+++ b/Makefile
@@ -30,7 +30,7 @@ OS_VERSION_ID= $(shell grep '^VERSION_ID=' /etc/os-release | cut -f2- -d= | sed
 DEB_DEPENDS  = curl build-essential autoconf automake bison libssl-dev ccache
 DEB_DEPENDS += debhelper dkms git libtool libganglia1-dev libapr1-dev dh-systemd
 DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope
-DEB_DEPENDS += doxygen graphviz python-dev
+DEB_DEPENDS += python-dev
 ifeq ($(OS_VERSION_ID),14.04)
 	DEB_DEPENDS += openjdk-8-jdk-headless
 else
@@ -40,7 +40,7 @@ endif
 RPM_DEPENDS_GROUPS = 'Development Tools'
 RPM_DEPENDS  = redhat-lsb glibc-static java-1.8.0-openjdk-devel yum-utils
 RPM_DEPENDS += openssl-devel https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm apr-devel
-RPM_DEPENDS += doxygen graphviz python-devel
+RPM_DEPENDS += python-devel
 EPEL_DEPENDS = libconfuse-devel ganglia-devel
 
 ifneq ($(wildcard $(STARTUP_DIR)/startup.conf),)
@@ -54,7 +54,7 @@ endif
 
 .PHONY: help bootstrap wipe wipe-release build build-release rebuild rebuild-release
 .PHONY: run run-release debug debug-release build-vat run-vat pkg-deb pkg-rpm
-.PHONY: ctags cscope doxygen wipe-doxygen plugins plugins-release build-vpp-api
+.PHONY: ctags cscope plugins plugins-release build-vpp-api
 
 help:
 	@echo "Make Targets:"
@@ -81,6 +81,7 @@ help:
 	@echo " gtags               - (re)generate gtags database"
 	@echo " cscope              - (re)generate cscope database"
 	@echo " doxygen             - (re)generate documentation"
+	@echo " bootstrap-doxygen   - setup Doxygen dependencies"
 	@echo " wipe-doxygen        - wipe all generated documentation"
 	@echo ""
 	@echo "Make Arguments:"
@@ -237,24 +238,21 @@ cscope: cscope.files
 # Build the documentation
 #
 
-DOXY_INPUT ?= \
-	README.md \
-	vppinfra \
-	svm \
-	vlib \
-	vlib-api \
-	vnet \
-	vpp \
-	vpp-api
+# Doxygen configuration and our utility scripts
+export DOXY_DIR ?= $(WS_ROOT)/doxygen
+
+define make-doxy
+	@WS_ROOT="$(WS_ROOT)" BR="$(BR)" make -C $(DOXY_DIR) $@
+endef
+
+.PHONY: bootstrap-doxygen doxygen wipe-doxygen
+
+bootstrap-doxygen:
+	$(call make-doxy)
 
 doxygen:
-	@mkdir -p "$(BR)/docs"
-	ROOT="$(WS_ROOT)" \
-	     BUILD_ROOT="$(BR)" \
-	     INPUT="$(addprefix $(WS_ROOT)/,$(DOXY_INPUT))" \
-	     HTML=YES \
-	     VERSION="`git describe --tags --dirty`" \
-	     doxygen doxygen/doxygen.cfg
+	$(call make-doxy)
 
 wipe-doxygen:
-	rm -rf "$(BR)/docs"
+	$(call make-doxy)
+
diff --git a/doxygen/Makefile b/doxygen/Makefile
new file mode 100644
index 00000000..7bdc8ee9
--- /dev/null
+++ b/doxygen/Makefile
@@ -0,0 +1,122 @@
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Build the documentation
+#
+
+# These should be passed in by the root Makefile
+WS_ROOT ?= $(CURDIR)/..
+BR ?= $(WS_ROOT)/build-root
+
+# Package dependencies
+DOC_DEB_DEPENDS = doxygen graphviz python-pyparsing
+DOC_RPM_DEPENDS = doxygen graphviz pyparsing
+
+# Doxygen configuration and our utility scripts
+DOXY_DIR ?= $(WS_ROOT)/doxygen
+
+# Input directories and files
+DOXY_INPUT ?= \
+	README.md \
+	vppinfra \
+	svm \
+	vlib \
+	vlib-api \
+	vnet \
+	vpp \
+	vpp-api \
+	plugins
+
+# Target directory for doxygen output
+DOXY_OUTPUT ?= $(BR)/docs
+
+# Siphoned fragments end up in here
+SIPHON_INPUT ?= $(DOXY_OUTPUT)/siphon_fragments
+
+# Siphoned fragments are processed into here
+SIPHON_OUTPUT ?= $(DOXY_OUTPUT)/siphon_docs
+
+# Extra document inputs that are processed in addition to DOXY_INPUT
+EXTRA_DOXY_INPUT += $(SIPHON_OUTPUT)
+
+# All the siphon types we know about
+SIPHONS ?= clicmd
+
+SIPHON_FILES = $(addprefix $(SIPHON_INPUT)/,$(addsuffix .siphon,$(SIPHONS)))
+SIPHON_DOCS = $(addprefix $(SIPHON_OUTPUT)/,$(addsuffix .md,$(SIPHONS)))
+
+$(BR)/.doxygen-bootstrap.ok:
+ifeq ($(OS_ID),ubuntu)
+	@sudo -E apt-get $(CONFIRM) $(FORCE) install $(DOC_DEB_DEPENDS)
+else ifneq ("$(wildcard /etc/redhat-release)","")
+	@sudo yum install $(CONFIRM) $(DOC_RPM_DEPENDS)
+else
+	$(error "This option currently works only on Ubuntu or Centos systems")
+endif
+	@touch $@
+
+.PHONY: bootstrap-doxygen
+bootstrap-doxygen: $(BR)/.doxygen-bootstrap.ok
+
+.DELETE_ON_ERROR: $(BR)/.doxygen-siphon.dep
+$(BR)/.doxygen-siphon.dep: Makefile
+	set -e; rm -f "$@"; for input in $(DOXY_INPUT); do \
+		find "$(WS_ROOT)/$$input" -type f \
+			\( -name '*.[ch]' -or -name '*.dox' \) \
+			-print | sed -e "s/^/\$$(SIPHON_FILES): /" >> $@; \
+	done
+
+# Include the source -> siphon dependencies
+-include $(BR)/.doxygen-siphon.dep
+
+.NOTPARALLEL: $(SIPHON_FILES)
+$(SIPHON_FILES): $(DOXY_DIR)/siphon_generate.py $(BR)/.doxygen-bootstrap.ok
+	@rm -rf "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)"
+	@mkdir -p "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)"
+	set -e; for input in $(DOXY_INPUT); do \
+		cd "$(WS_ROOT)"; \
+		find "$$input" -type f \
+			\( -name '*.[ch]' -or -name '*.dox' \) \
+			-print >> $(SIPHON_INPUT)/files; \
+	done
+	set -e; cd "$(WS_ROOT)"; $(DOXY_DIR)/siphon_generate.py \
+		--output="$(SIPHON_INPUT)" \
+		"@$(SIPHON_INPUT)/files"
+
+
+.DELETE_ON_ERROR: $(SIPHON_DOCS)
+$(SIPHON_OUTPUT)/%.md: $(SIPHON_INPUT)/%.siphon $(DOXY_DIR)/siphon_process.py
+	set -e; cd "$(WS_ROOT)"; \
+		$(DOXY_DIR)/siphon_process.py --type=$(basename $(notdir $<)) \
+			--output="$(SIPHON_OUTPUT)" $< > $@
+
+# This target can be used just to generate the siphoned docs
+.PHONY: doxygen-siphon
+doxygen-siphon: $(SIPHON_DOCS)
+
+# Generate the doxygen docs
+doxygen: $(SIPHON_DOCS)
+	@mkdir -p "$(DOXY_OUTPUT)"
+	set -e; cd "$(WS_ROOT)"; \
+	    ROOT="$(WS_ROOT)" \
+	    BUILD_ROOT="$(BR)" \
+	    INPUT="$(addprefix $(WS_ROOT)/,$(DOXY_INPUT)) $(EXTRA_DOXY_INPUT)" \
+	    HTML=YES \
+	    VERSION="`git describe --tags --dirty`" \
+	    doxygen $(DOXY_DIR)/doxygen.cfg
+
+wipe-doxygen:
+	rm -rf "$(BR)/docs" "$(BR)/.doxygen-siphon.d"
+
diff --git a/doxygen/dir.dox.sample b/doxygen/dir.dox.sample
index 41e84d7b..500fe595 100644
--- a/doxygen/dir.dox.sample
+++ b/doxygen/dir.dox.sample
@@ -18,7 +18,7 @@
 
 /**
 @dir
-@brief Someone please fix this description
+@brief Someone please fix this description.
 @todo This directory needs a description.
 
 This is where you would document the contents of a directory.
@@ -26,3 +26,4 @@ This is where you would document the contents of a directory.
 This looks like a C file but it is not part of the build; it is purely
 for documentation.
 */
+/*? %%clicmd:group_label CLI section description%% ?*/
diff --git a/doxygen/doxygen.cfg b/doxygen/doxygen.cfg
index 0eadbd73..82687cac 100644
--- a/doxygen/doxygen.cfg
+++ b/doxygen/doxygen.cfg
@@ -229,8 +229,21 @@ TAB_SIZE               = 8
 # newlines.
 
 ALIASES                =
+
+## Indexes VPP graph nodes
 ALIASES += "node=@xrefitem nodes \"Node Identifier\" \"Node Identifiers\" @c "
 
+## Formatting for CLI commands and output
+ALIASES += "clistart=
"
+ALIASES += "cliend=
" + +## Formatting for CLI example paragraphs +ALIASES += "cliexpar=@par Example usage" +ALIASES += "cliexcmd{1}=@clistartvpp# \1@cliend" +ALIASES += "cliexstart{1}=@cliexcmd{\1}@clistart" +ALIASES += "cliexend=@cliend" + + # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. diff --git a/doxygen/filter_c.py b/doxygen/filter_c.py index db1681c9..733fdefb 100755 --- a/doxygen/filter_c.py +++ b/doxygen/filter_c.py @@ -15,13 +15,13 @@ # Filter for .c files to make various preprocessor tricks Doxygenish -import sys, re +import os, sys, re if len(sys.argv) < 2: sys.stderr.write("Usage: %s \n" % (sys.argv[0])) sys.exit(1) -patterns = [ +replace_patterns = [ # Search for VLIB_CLI_COMMAND, extract its parameter and add a docblock for it ( re.compile("(?PVLIB_CLI_COMMAND)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g (\g) */ vlib_cli_command_t \g"), @@ -44,9 +44,25 @@ patterns = [ ( re.compile("(?P
(^|,)\s*)(?P(un)?format_[a-zA-Z0-9_]+)(?P\s*(,|$))") , r"\g
\g()\g" ),
 ]
 
-with open(sys.argv[1]) as fd:
+
+filename = sys.argv[1]
+cwd = os.getcwd()
+if filename[0:len(cwd)] == cwd:
+    filename = filename[len(cwd):]
+    if filename[0] == "/":
+        filename = filename[1:]
+
+with open(filename) as fd:
+    line_num = 0
+
     for line in fd:
+        line_num += 1
         str = line[:-1] # filter \n
-        for p in patterns:
+
+        # Look for search/replace patterns
+        for p in replace_patterns:
             str = p[0].sub(p[1], str)
+
         sys.stdout.write(str+"\n")
+
+# All done
diff --git a/doxygen/siphon_generate.py b/doxygen/siphon_generate.py
new file mode 100755
index 00000000..457757b5
--- /dev/null
+++ b/doxygen/siphon_generate.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Looks for preprocessor macros with struct initializers and siphons them
+# off into another file for later parsing; ostensibly to generate
+# documentation from struct initializer data.
+
+import os, sys, re, argparse, json
+
+DEFAULT_OUTPUT = "build-root/docs/siphons"
+DEFAULT_PREFIX = os.getcwd()
+
+ap = argparse.ArgumentParser()
+ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT,
+        help="Output directory for .siphon files [%s]" % DEFAULT_OUTPUT)
+ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX,
+        help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX)
+ap.add_argument("input", nargs='+', metavar="input_file",
+        help="Input C source files")
+args = ap.parse_args()
+
+"""Patterns that match the start of code blocks we want to siphon"""
+siphon_patterns = [
+    ( re.compile("(?PVLIB_CLI_COMMAND)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), "clicmd" ),
+]
+
+"""Matches a siphon comment block start"""
+siphon_block_start = re.compile("^\s*/\*\?\s*(.*)$")
+
+"""Matches a siphon comment block stop"""
+siphon_block_stop = re.compile("^(.*)\s*\?\*/\s*$")
+
+"""Siphon block directive delimiter"""
+siphon_block_delimiter = "%%"
+
+"""Matches a siphon block directive such as '%clicmd:group_label Debug CLI%'"""
+siphon_block_directive = re.compile("(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)" % \
+        (siphon_block_delimiter, siphon_block_delimiter))
+
+"""Matches the start of an initializer block"""
+siphon_initializer = re.compile("\s*=")
+
+"""
+count open and close braces in str
+return (0, index) when braces were found and count becomes 0.
+index indicates the position at which the last closing brace was
+found.
+return (-1, -1) if a closing brace is found before any opening one.
+return (count, -1) if not all opening braces are closed, count is the
+current depth
+"""
+def count_braces(str, count=0, found=False):
+    for index in range(0, len(str)):
+        if str[index] == '{':
+            count += 1;
+            found = True
+        elif str[index] == '}':
+            if count == 0:
+                # means we never found an open brace
+                return (-1, -1)
+            count -= 1;
+
+        if count == 0 and found:
+            return (count, index)
+
+    return (count, -1)
+
+# Collated output for each siphon
+output = {}
+
+# Pre-process file names in case they indicate a file with
+# a list of files
+files = []
+for filename in args.input:
+    if filename.startswith('@'):
+        with open(filename[1:], 'r') as fp:
+            lines = fp.readlines()
+            for line in lines:
+                files.append(line.strip())
+            lines = None
+    else:
+        files.append(filename)
+
+# Iterate all the input files we've been given
+for filename in files:
+    # Strip the current directory off the start of the
+    # filename for brevity
+    if filename[0:len(args.input_prefix)] == args.input_prefix:
+        filename = filename[len(args.input_prefix):]
+        if filename[0] == "/":
+            filename = filename[1:]
+
+    # Work out the abbreviated directory name
+    directory = os.path.dirname(filename)
+    if directory[0:2] == "./":
+        directory = directory[2:]
+    elif directory[0:len(args.input_prefix)] == args.input_prefix:
+        directory = directory[len(args.input_prefix):]
+    if directory[0] == "/":
+    	directory = directory[1:]
+
+    # Open the file and explore its contents...
+    sys.stderr.write("Siphoning from %s...\n" % filename)
+    directives = {}
+    with open(filename) as fd:
+        siphon = None
+        close_siphon = None
+        siphon_block = ""
+        in_block = False
+        line_num = 0
+        siphon_line = 0
+
+        for line in fd:
+            line_num += 1
+            str = line[:-1] # filter \n
+
+            """See if there is a block directive and if so extract it"""
+            def process_block_directive(str, directives):
+                m = siphon_block_directive.search(str)
+                if m is not None:
+                    k = m.group(2)
+                    v = m.group(3).strip()
+                    directives[k] = v
+                    # Return only the parts we did not match
+                    return str[0:m.start(1)] + str[m.end(4):]
+
+                return str
+
+            def process_block_prefix(str):
+                if str.startswith(" * "):
+                    str = str[3:]
+                elif str == " *":
+                    str = ""
+                return str
+                
+            if not in_block:
+                # See if the line contains the start of a siphon doc block
+                m = siphon_block_start.search(str)
+                if m is not None:
+                    in_block = True
+                    t = m.group(1)
+
+                    # Now check if the block closes on the same line
+                    m = siphon_block_stop.search(t)
+                    if m is not None:
+                        t = m.group(1)
+                        in_block = False
+
+                    # Check for directives
+                    t = process_block_directive(t, directives)
+
+                    # Filter for normal comment prefixes
+                    t = process_block_prefix(t)
+
+                    # Add what is left
+                    siphon_block += t
+
+                    # Skip to next line
+                    continue
+
+            else:
+                # Check to see if we have an end block marker
+                m = siphon_block_stop.search(str)
+                if m is not None:
+                    in_block = False
+                    t = m.group(1)
+                else:
+                    t = str
+
+                # Check for directives
+                t = process_block_directive(t, directives)
+
+                # Filter for normal comment prefixes
+                t = process_block_prefix(t)
+
+                # Add what is left
+                siphon_block += t + "\n"
+
+                # Skip to next line
+                continue
+
+
+            if siphon is None:
+                # Look for blocks we need to siphon
+                for p in siphon_patterns:
+                    if p[0].match(str):
+                        siphon = [ p[1], str + "\n", 0 ]
+                        siphon_line = line_num
+
+                        # see if we have an initializer
+                        m = siphon_initializer.search(str)
+                        if m is not None:
+                            # count the braces on this line
+                            (count, index) = count_braces(str[m.start():])
+                            siphon[2] = count
+                            # TODO - it's possible we have the initializer all on the first line
+                            # we should check for it, but also account for the possibility that
+                            # the open brace is on the next line
+                            #if count == 0:
+                            #    # braces balanced
+                            #    close_siphon = siphon
+                            #    siphon = None
+                        else:
+                            # no initializer: close the siphon right now
+                            close_siphon = siphon
+                            siphon = None
+            else:
+                # See if we should end the siphon here - do we have balanced
+                # braces?
+                (count, index) = count_braces(str, count=siphon[2], found=True)
+                if count == 0:
+                    # braces balanced - add the substring and close the siphon
+                    siphon[1] += str[:index+1] + ";\n"
+                    close_siphon = siphon
+                    siphon = None
+                else:
+                    # add the whole string, move on
+                    siphon[2] = count
+                    siphon[1] += str + "\n"
+
+            if close_siphon is not None:
+                # Write the siphoned contents to the right place
+                siphon_name = close_siphon[0]
+                if siphon_name not in output:
+                    output[siphon_name] = {
+                        "global": {},
+                        "items": [],
+                        "file": "%s/%s.siphon" % (args.output, close_siphon[0])
+                    }
+
+                # Copy directives for the file
+                details = {}
+                for key in directives:
+                    if ":" in key:
+                        (sn, label) = key.split(":")
+                        if sn == siphon_name:
+                            details[label] = directives[key]
+                    else:
+                        details[key] = directives[key]
+
+                # Copy details for this block
+                details['file'] = filename
+                details['line_start'] = siphon_line
+                details['line_end'] = line_num
+                details['siphon_block'] = siphon_block.strip()
+
+                # Some defaults
+                if "group" not in details:
+                    if "group_label" in details:
+                        # use the filename since group labels are mostly of file scope
+                        details['group'] = details['file']
+                    else:
+			details['group'] = directory
+
+                if "group_label" not in details:
+                    details['group_label'] = details['group']
+
+                details["block"] = close_siphon[1]
+
+                # Store the item
+                output[siphon_name]['items'].append(details)
+
+                # All done
+                close_siphon = None
+                siphon_block = ""
+
+        # Update globals
+        for key in directives.keys():
+            if ':' not in key:
+                continue
+
+            if filename.endswith("/dir.dox"):
+                # very special! use the parent directory name
+                l = directory
+            else:
+                l = filename
+
+            (sn, label) = key.split(":")
+
+            if sn not in output:
+                output[sn] = {}
+            if 'global' not in output[sn]:
+                output[sn]['global'] = {}
+            if l not in output[sn]['global']:
+                output[sn]['global'][l] = {}
+            if 'file' not in output[sn]:
+                output[sn]['file'] = "%s/%s.siphon" % (args.output, sn)
+            if 'items' not in output[sn]:
+                output[sn]['items'] = []
+
+            output[sn]['global'][l][label] = directives[key]
+
+
+# Write out the data
+for siphon in output.keys():
+    sys.stderr.write("Saving siphon %s...\n" % siphon)
+    s = output[siphon]
+    with open(s['file'], "a") as fp:
+        json.dump(s, fp, separators=(',', ': '), indent=4, sort_keys=True)
+
+# All done
diff --git a/doxygen/siphon_process.py b/doxygen/siphon_process.py
new file mode 100755
index 00000000..80add4b9
--- /dev/null
+++ b/doxygen/siphon_process.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter for .siphon files that are generated by other filters.
+# The idea is to siphon off certain initializers so that we can better
+# auto-document the contents of that initializer.
+
+import os, sys, re, argparse, cgi, json
+import pyparsing as pp
+
+import pprint
+
+DEFAULT_SIPHON ="clicmd"
+DEFAULT_OUTPUT = None
+DEFAULT_PREFIX = os.getcwd()
+
+siphon_map = {
+    'clicmd': "VLIB_CLI_COMMAND",
+}
+
+ap = argparse.ArgumentParser()
+ap.add_argument("--type", '-t', metavar="siphon_type", default=DEFAULT_SIPHON,
+        choices=siphon_map.keys(),
+        help="Siphon type to process [%s]" % DEFAULT_SIPHON)
+ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT,
+        help="Output directory for .md files [%s]" % DEFAULT_OUTPUT)
+ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX,
+        help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX)
+ap.add_argument("input", nargs='+', metavar="input_file",
+        help="Input .siphon files")
+args = ap.parse_args()
+
+if args.output is None:
+    sys.stderr.write("Error: Siphon processor requires --output to be set.")
+    sys.exit(1)
+
+
+def clicmd_index_sort(cfg, group, dec):
+    if group in dec and 'group_label' in dec[group]:
+        return dec[group]['group_label']
+    return group
+
+def clicmd_index_header(cfg):
+    s = "# CLI command index\n"
+    s += "\n[TOC]\n"
+    return s
+
+def clicmd_index_section(cfg, group, md):
+    return "\n@subpage %s\n\n" % md
+
+def clicmd_index_entry(cfg, meta, item):
+    v = item["value"]
+    return "* [%s](@ref %s)\n" % (v["path"], meta["label"])
+
+def clicmd_sort(cfg, meta, item):
+    return item['value']['path']
+
+def clicmd_header(cfg, group, md, dec):
+    if group in dec and 'group_label' in dec[group]:
+        label = dec[group]['group_label']
+    else:
+        label = group
+    return "\n@page %s %s\n" % (md, label)
+
+def clicmd_format(cfg, meta, item):
+    v = item["value"]
+    s = "\n@section %s %s\n" % (meta['label'], v['path'])
+
+    # The text from '.short_help = '.
+    # Later we should split this into short_help and usage_help
+    # since the latter is how it is primarily used but the former
+    # is also needed.
+    if "short_help" in v:
+        tmp = v["short_help"].strip()
+
+        # Bit hacky. Add a trailing period if it doesn't have one.
+        if tmp[-1] != ".":
+            tmp += "."
+
+        s += "### Summary/usage\n    %s\n\n" % tmp
+
+    # This is seldom used and will likely be deprecated
+    if "long_help" in v:
+        tmp = v["long_help"]
+
+        s += "### Long help\n    %s\n\n" % tmp
+
+    # Extracted from the code in /*? ... ?*/ blocks
+    if "siphon_block" in item["meta"]:
+        sb = item["meta"]["siphon_block"]
+
+        if sb != "":
+            # hack. still needed?
+            sb = sb.replace("\n", "\\n")
+            try:
+                sb = json.loads('"'+sb+'"')
+                s += "### Description\n%s\n\n" % sb
+            except:
+                pass
+
+    # Gives some developer-useful linking
+    if "item" in meta or "function" in v:
+        s += "### Declaration and implementation\n\n"
+
+        if "item" in meta:
+            s += "Declaration: @ref %s (%s:%d)\n\n" % \
+                (meta['item'], meta["file"], int(item["meta"]["line_start"]))
+
+        if "function" in v:
+            s += "Implementation: @ref %s.\n\n" % v["function"]
+
+    return s
+
+
+siphons = {
+    "VLIB_CLI_COMMAND": {
+        "index_sort_key": clicmd_index_sort,
+        "index_header": clicmd_index_header,
+        "index_section": clicmd_index_section,
+        "index_entry": clicmd_index_entry,
+        'sort_key': clicmd_sort,
+        "header": clicmd_header,
+        "format": clicmd_format,
+    }
+}
+
+
+# PyParsing definition for our struct initializers which look like this:
+# VLIB_CLI_COMMAND (show_sr_tunnel_command, static) = {
+#    .path = "show sr tunnel",
+#    .short_help = "show sr tunnel [name ]",
+#    .function = show_sr_tunnel_fn,
+#};
+def getMacroInitializerBNF():
+    cs = pp.Forward()
+    ident = pp.Word(pp.alphas + "_", pp.alphas + pp.nums + "_")
+    intNum = pp.Word(pp.nums)
+    hexNum = pp.Literal("0x") + pp.Word(pp.hexnums)
+    octalNum = pp.Literal("0") + pp.Word("01234567")
+    integer = (hexNum | octalNum | intNum) + \
+        pp.Optional(pp.Literal("ULL") | pp.Literal("LL") | pp.Literal("L"))
+    floatNum = pp.Regex(r'\d+(\.\d*)?([eE]\d+)?') + pp.Optional(pp.Literal("f"))
+    char = pp.Literal("'") + pp.Word(pp.printables, exact=1) + pp.Literal("'")
+    arrayIndex = integer | ident
+
+    lbracket = pp.Literal("(").suppress()
+    rbracket = pp.Literal(")").suppress()
+    lbrace = pp.Literal("{").suppress()
+    rbrace = pp.Literal("}").suppress()
+    comma = pp.Literal(",").suppress()
+    equals = pp.Literal("=").suppress()
+    dot = pp.Literal(".").suppress()
+    semicolon = pp.Literal(";").suppress()
+
+    # initializer := { [member = ] (variable | expression | { initializer } ) }
+    typeName = ident
+    varName = ident
+
+    typeSpec = pp.Optional("unsigned") + \
+               pp.oneOf("int long short float double char u8 i8 void") + \
+               pp.Optional(pp.Word("*"), default="")
+    typeCast = pp.Combine( "(" + ( typeSpec | typeName ) + ")" ).suppress()
+
+    string = pp.Combine(pp.OneOrMore(pp.QuotedString(quoteChar='"',
+        escChar='\\', multiline=True)), adjacent=False)
+    literal = pp.Optional(typeCast) + (integer | floatNum | char | string)
+    var = pp.Combine(pp.Optional(typeCast) + varName + pp.Optional("[" + arrayIndex + "]"))
+
+    expr = (literal | var) # TODO
+
+
+    member = pp.Combine(dot + varName + pp.Optional("[" + arrayIndex + "]"))
+    value = (expr | cs)
+
+    entry = pp.Group(pp.Optional(member + equals, default="") + value)
+    entries = (pp.ZeroOrMore(entry + comma) + entry + pp.Optional(comma)) | \
+              (pp.ZeroOrMore(entry + comma))
+
+    cs << (lbrace + entries + rbrace)
+
+    macroName = ident
+    params = pp.Group(pp.ZeroOrMore(expr + comma) + expr)
+    macroParams = lbracket + params + rbracket
+
+    mi = macroName + pp.Optional(macroParams) + equals + pp.Group(cs) + semicolon
+    mi.ignore(pp.cppStyleComment)
+    return mi
+
+
+mi = getMacroInitializerBNF()
+
+# Parse the input file into a more usable dictionary structure
+cmds = {}
+line_num = 0
+line_start = 0
+for filename in args.input:
+    sys.stderr.write("Parsing items in file \"%s\"...\n" % filename)
+    data = None
+    with open(filename, "r") as fd:
+        data = json.load(fd)
+
+    cmds['_global'] = data['global']
+
+    # iterate the items loaded and regroup it
+    for item in data["items"]:
+        try:
+            o = mi.parseString(item['block']).asList()
+        except:
+            sys.stderr.write("Exception parsing item: %s\n%s\n" \
+                    % (json.dumps(item, separators=(',', ': '), indent=4),
+                        item['block']))
+            raise
+
+        group = item['group']
+        file = item['file']
+        macro = o[0]
+        param = o[1][0]
+
+        if group not in cmds:
+            cmds[group] = {}
+
+        if file not in cmds[group]:
+            cmds[group][file] = {}
+
+        if macro not in cmds[group][file]:
+            cmds[group][file][macro] = {}
+
+        c = {
+            'params': o[2],
+            'meta': {},
+            'value': {},
+        }
+
+        for key in item:
+            if key == 'block':
+                continue
+            c['meta'][key] = item[key]
+
+        for i in c['params']:
+            c['value'][i[0]] = cgi.escape(i[1])
+
+        cmds[group][file][macro][param] = c
+
+
+# Write the header for this siphon type
+cfg = siphons[siphon_map[args.type]]
+sys.stdout.write(cfg["index_header"](cfg))
+contents = ""
+
+def group_sort_key(item):
+    if "index_sort_key" in cfg:
+        return cfg["index_sort_key"](cfg, item, cmds['_global'])
+    return item
+
+# Iterate the dictionary and process it
+for group in sorted(cmds.keys(), key=group_sort_key):
+    if group.startswith('_'):
+        continue
+
+    sys.stderr.write("Processing items in group \"%s\"...\n" % group)
+
+    cfg = siphons[siphon_map[args.type]]
+    md = group.replace("/", "_").replace(".", "_")
+    sys.stdout.write(cfg["index_section"](cfg, group, md))
+
+    if "header" in cfg:
+        dec = cmds['_global']
+        contents += cfg["header"](cfg, group, md, dec)
+
+    for file in sorted(cmds[group].keys()):
+        if group.startswith('_'):
+            continue
+
+        sys.stderr.write("- Processing items in file \"%s\"...\n" % file)
+
+        for macro in sorted(cmds[group][file].keys()):
+            if macro != siphon_map[args.type]:
+                continue
+            sys.stderr.write("-- Processing items in macro \"%s\"...\n" % macro)
+            cfg = siphons[macro]
+
+            meta = {
+                "group": group,
+                "file": file,
+                "macro": macro,
+                "md": md,
+            }
+
+            def item_sort_key(item):
+                if "sort_key" in cfg:
+                    return cfg["sort_key"](cfg, meta, cmds[group][file][macro][item])
+                return item
+
+            for param in sorted(cmds[group][file][macro].keys(), key=item_sort_key):
+                sys.stderr.write("--- Processing item \"%s\"...\n" % param)
+
+                meta["item"] = param
+
+                # mangle "md" and the item to make a reference label
+                meta["label"] = "%s___%s" % (meta["md"], param)
+
+                if "index_entry" in cfg:
+                    s = cfg["index_entry"](cfg, meta, cmds[group][file][macro][param])
+                    sys.stdout.write(s)
+
+                if "format" in cfg:
+                    contents += cfg["format"](cfg, meta, cmds[group][file][macro][param])
+
+sys.stdout.write(contents)
+
+# All done
diff --git a/vlib/vlib/dir.dox b/vlib/vlib/dir.dox
index 8ca47cd7..4806e7a9 100644
--- a/vlib/vlib/dir.dox
+++ b/vlib/vlib/dir.dox
@@ -19,3 +19,5 @@
 @dir
 @brief VLIB application library source.
 */
+/*? %%clicmd:group_label VLIB application library%% ?*/
+
diff --git a/vlib/vlib/unix/cli.c b/vlib/vlib/unix/cli.c
index 92bb8bc3..bf09ee0c 100644
--- a/vlib/vlib/unix/cli.c
+++ b/vlib/vlib/unix/cli.c
@@ -42,6 +42,7 @@
  * Provides a command line interface so humans can interact with VPP.
  * This is predominantly a debugging and testing mechanism.
  */
+/*? %%clicmd:group_label Debug CLI %% ?*/
 
 #include <vlib/vlib.h>
 #include <vlib/unix/unix.h>
@@ -146,9 +147,13 @@ typedef struct
      CLI process. */
   u8 *input_vector;
 
+  /** This session has command history. */
   u8 has_history;
+  /** Array of vectors of commands in the history. */
   u8 **command_history;
+  /** The command currently pointed at by the history cursor. */
   u8 *current_command;
+  /** How far from the end of the history array the user has browsed. */
   i32 excursion;
 
   /** Maximum number of history entries this session will store. */
@@ -157,7 +162,12 @@ typedef struct
   /** Current command line counter */
   u32 command_number;
 
+  /** The string being searched for in the history. */
   u8 *search_key;
+  /** If non-zero then the CLI is searching in the history array.
+   * - @c -1 means search backwards.
+   * - @c 1 means search forwards.
+   */
   int search_mode;
 
   /** Position of the insert cursor on the current input line */
@@ -232,41 +242,41 @@ unix_cli_file_free (unix_cli_file_t * f)
 /** CLI actions */
 typedef enum
 {
-  UNIX_CLI_PARSE_ACTION_NOACTION = 0,  /**< No action */
-  UNIX_CLI_PARSE_ACTION_CRLF,	       /**< Carriage return, newline or enter */
-  UNIX_CLI_PARSE_ACTION_TAB,	       /**< Tab key */
-  UNIX_CLI_PARSE_ACTION_ERASE,	       /**< Erase cursor left */
-  UNIX_CLI_PARSE_ACTION_ERASERIGHT,    /**< Erase cursor right */
-  UNIX_CLI_PARSE_ACTION_UP,	       /**< Up arrow */
-  UNIX_CLI_PARSE_ACTION_DOWN,	       /**< Down arrow */
-  UNIX_CLI_PARSE_ACTION_LEFT,
-  UNIX_CLI_PARSE_ACTION_RIGHT,
-  UNIX_CLI_PARSE_ACTION_HOME,
-  UNIX_CLI_PARSE_ACTION_END,
-  UNIX_CLI_PARSE_ACTION_WORDLEFT,
-  UNIX_CLI_PARSE_ACTION_WORDRIGHT,
-  UNIX_CLI_PARSE_ACTION_ERASELINELEFT,
-  UNIX_CLI_PARSE_ACTION_ERASELINERIGHT,
-  UNIX_CLI_PARSE_ACTION_CLEAR,
-  UNIX_CLI_PARSE_ACTION_REVSEARCH,
-  UNIX_CLI_PARSE_ACTION_FWDSEARCH,
-  UNIX_CLI_PARSE_ACTION_YANK,
-  UNIX_CLI_PARSE_ACTION_TELNETIAC,
-
-  UNIX_CLI_PARSE_ACTION_PAGER_CRLF,
-  UNIX_CLI_PARSE_ACTION_PAGER_QUIT,
-  UNIX_CLI_PARSE_ACTION_PAGER_NEXT,
-  UNIX_CLI_PARSE_ACTION_PAGER_DN,
-  UNIX_CLI_PARSE_ACTION_PAGER_UP,
-  UNIX_CLI_PARSE_ACTION_PAGER_TOP,
-  UNIX_CLI_PARSE_ACTION_PAGER_BOTTOM,
-  UNIX_CLI_PARSE_ACTION_PAGER_PGDN,
-  UNIX_CLI_PARSE_ACTION_PAGER_PGUP,
-  UNIX_CLI_PARSE_ACTION_PAGER_REDRAW,
-  UNIX_CLI_PARSE_ACTION_PAGER_SEARCH,
-
-  UNIX_CLI_PARSE_ACTION_PARTIALMATCH,
-  UNIX_CLI_PARSE_ACTION_NOMATCH
+  UNIX_CLI_PARSE_ACTION_NOACTION = 0,	/**< No action */
+  UNIX_CLI_PARSE_ACTION_CRLF,		/**< Carriage return, newline or enter */
+  UNIX_CLI_PARSE_ACTION_TAB,		/**< Tab key */
+  UNIX_CLI_PARSE_ACTION_ERASE,		/**< Erase cursor left */
+  UNIX_CLI_PARSE_ACTION_ERASERIGHT,	/**< Erase cursor right */
+  UNIX_CLI_PARSE_ACTION_UP,		/**< Up arrow */
+  UNIX_CLI_PARSE_ACTION_DOWN,		/**< Down arrow */
+  UNIX_CLI_PARSE_ACTION_LEFT,		/**< Left arrow */
+  UNIX_CLI_PARSE_ACTION_RIGHT,		/**< Right arrow */
+  UNIX_CLI_PARSE_ACTION_HOME,		/**< Home key (jump to start of line) */
+  UNIX_CLI_PARSE_ACTION_END,		/**< End key (jump to end of line) */
+  UNIX_CLI_PARSE_ACTION_WORDLEFT,	/**< Jump cursor to start of left word */
+  UNIX_CLI_PARSE_ACTION_WORDRIGHT,	/**< Jump cursor to start of right word */
+  UNIX_CLI_PARSE_ACTION_ERASELINELEFT,	/**< Erase line to left of cursor */
+  UNIX_CLI_PARSE_ACTION_ERASELINERIGHT,	/**< Erase line to right & including cursor */
+  UNIX_CLI_PARSE_ACTION_CLEAR,		/**< Clear the terminal */
+  UNIX_CLI_PARSE_ACTION_REVSEARCH,	/**< Search backwards in command history */
+  UNIX_CLI_PARSE_ACTION_FWDSEARCH,	/**< Search forwards in command history */
+  UNIX_CLI_PARSE_ACTION_YANK,		/**< Undo last erase action */
+  UNIX_CLI_PARSE_ACTION_TELNETIAC,	/**< Telnet control code */
+
+  UNIX_CLI_PARSE_ACTION_PAGER_CRLF,	/**< Enter pressed (CR, CRLF, LF, etc) */
+  UNIX_CLI_PARSE_ACTION_PAGER_QUIT,	/**< Exit the pager session */
+  UNIX_CLI_PARSE_ACTION_PAGER_NEXT,	/**< Scroll to next page */
+  UNIX_CLI_PARSE_ACTION_PAGER_DN,	/**< Scroll to next line */
+  UNIX_CLI_PARSE_ACTION_PAGER_UP,	/**< Scroll to previous line */
+  UNIX_CLI_PARSE_ACTION_PAGER_TOP,	/**< Scroll to first line */
+  UNIX_CLI_PARSE_ACTION_PAGER_BOTTOM,	/**< Scroll to last line */
+  UNIX_CLI_PARSE_ACTION_PAGER_PGDN,	/**< Scroll to next page */
+  UNIX_CLI_PARSE_ACTION_PAGER_PGUP,	/**< Scroll to previous page */
+  UNIX_CLI_PARSE_ACTION_PAGER_REDRAW,	/**< Clear and redraw the page on the terminal */
+  UNIX_CLI_PARSE_ACTION_PAGER_SEARCH,	/**< Search the pager buffer */
+
+  UNIX_CLI_PARSE_ACTION_PARTIALMATCH,	/**< Action parser found a partial match */
+  UNIX_CLI_PARSE_ACTION_NOMATCH		/**< Action parser did not find any match */
 } unix_cli_parse_action_t;
 
 /** @brief Mapping of input buffer strings to action values.
@@ -485,6 +495,9 @@ unix_cli_match_action (unix_cli_parse_actions_t * a,
 }
 
 
+/** Add bytes to the output vector and then flag the I/O system that bytes
+ * are available to be sent.
+ */
 static void
 unix_cli_add_pending_output (unix_file_t * uf,
 			     unix_cli_file_t * cf,
@@ -502,6 +515,9 @@ unix_cli_add_pending_output (unix_file_t * uf,
     }
 }
 
+/** Delete all bytes from the output vector and flag the I/O system
+ * that no more bytes are available to be sent.
+ */
 static void
 unix_cli_del_pending_output (unix_file_t * uf,
 			     unix_cli_file_t * cf, uword n_bytes)
@@ -983,13 +999,13 @@ unix_vlib_cli_output (uword cli_file_index, u8 * buffer, uword buffer_bytes)
 
 /** Identify whether a terminal type is ANSI capable.
  *
- * Compares the string given in @term with a list of terminal types known
+ * Compares the string given in @c term with a list of terminal types known
  * to support ANSI escape sequences.
  *
  * This list contains, for example, @c xterm, @c screen and @c ansi.
  *
  * @param term A string with a terminal type in it.
- * @param len The length of the string in @term.
+ * @param len The length of the string in @c term.
  *
  * @return @c 1 if the terminal type is recognized as supporting ANSI
  *         terminal sequences; @c 0 otherwise.
@@ -2059,6 +2075,10 @@ done:
     goto more;
 }
 
+/** Destroy a CLI session.
+ * @note If we destroy the @c stdin session this additionally signals
+ *       the shutdown of VPP.
+ */
 static void
 unix_cli_kill (unix_cli_main_t * cm, uword cli_file_index)
 {
@@ -2088,6 +2108,7 @@ unix_cli_kill (unix_cli_main_t * cm, uword cli_file_index)
   pool_put (cm->cli_file_pool, cf);
 }
 
+/** Handle system events. */
 static uword
 unix_cli_process (vlib_main_t * vm,
 		  vlib_node_runtime_t * rt, vlib_frame_t * f)
@@ -2130,6 +2151,8 @@ done:
   return 0;
 }
 
+/** Called when a CLI session file descriptor can be written to without
+ * blocking. */
 static clib_error_t *
 unix_cli_write_ready (unix_file_t * uf)
 {
@@ -2152,6 +2175,7 @@ unix_cli_write_ready (unix_file_t * uf)
   return /* no error */ 0;
 }
 
+/** Called when a CLI session file descriptor has data to be read. */
 static clib_error_t *
 unix_cli_read_ready (unix_file_t * uf)
 {
@@ -2482,8 +2506,8 @@ unix_cli_config (vlib_main_t * vm, unformat_input_t * input)
 
 VLIB_CONFIG_FUNCTION (unix_cli_config, "unix-cli");
 
-/** Called when VPP is shutting down, this resets the system
- * terminal state, if previously saved.
+/** Called when VPP is shutting down, this restores the system
+ * terminal state if previously saved.
  */
 static clib_error_t *
 unix_cli_exit (vlib_main_t * vm)
@@ -2500,7 +2524,7 @@ unix_cli_exit (vlib_main_t * vm)
 VLIB_MAIN_LOOP_EXIT_FUNCTION (unix_cli_exit);
 
 /** Set the CLI prompt.
- * @param The C string to set the prompt to.
+ * @param prompt The C string to set the prompt to.
  * @note This setting is global; it impacts all current
  *       and future CLI sessions.
  */
@@ -2531,6 +2555,12 @@ unix_cli_quit (vlib_main_t * vm,
   return 0;
 }
 
+/*?
+ * Terminates the current CLI session.
+ *
+ * If VPP is running in @em interactive mode and this is the console session
+ * (that is, the session on @c stdin) then this will also terminate VPP.
+?*/
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (unix_cli_quit_command, static) = {
   .path = "quit",
@@ -2597,6 +2627,13 @@ done:
   return error;
 }
 
+/*?
+ * Executes a sequence of CLI commands which are read from a file.
+ *
+ * If a command is unrecognised or otherwise invalid then the usual CLI
+ * feedback will be generated, however execution of subsequent commands
+ * from the file will continue.
+?*/
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (cli_exec, static) = {
   .path = "exec",
@@ -2706,6 +2743,9 @@ unix_cli_show_history (vlib_main_t * vm,
   return 0;
 }
 
+/*?
+ * Displays the command history for the current session, if any.
+?*/
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (cli_unix_cli_show_history, static) = {
   .path = "history",
@@ -2755,6 +2795,24 @@ unix_cli_show_terminal (vlib_main_t * vm,
   return 0;
 }
 
+/*?
+ * Displays various information about the state of the current terminal
+ * session.
+ *
+ * @cliexpar
+ * @cliexstart{show terminal}
+ * Terminal name:   unix-cli-stdin
+ * Terminal mode:   char-by-char
+ * Terminal width:  123
+ * Terminal height: 48
+ * ANSI capable:    yes
+ * History enabled: yes
+ * History limit:   50
+ * Pager enabled:   yes
+ * Pager limit:     100000
+ * CRLF mode:       LF
+ * @cliexend
+?*/
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (cli_unix_cli_show_terminal, static) = {
   .path = "show terminal",
@@ -2799,6 +2857,13 @@ unix_cli_set_terminal_pager (vlib_main_t * vm,
   return 0;
 }
 
+/*?
+ * Enables or disables the terminal pager for this session. Generally
+ * this defaults to enabled.
+ *
+ * Additionally allows the pager buffer size to be set; though note that
+ * this value is set globally and not per session.
+?*/
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (cli_unix_cli_set_terminal_pager, static) = {
   .path = "set terminal pager",
@@ -2850,6 +2915,13 @@ unix_cli_set_terminal_history (vlib_main_t * vm,
   return 0;
 }
 
+/*?
+ * Enables or disables the command history function of the current
+ * terminal. Generally this defaults to enabled.
+ *
+ * This command also allows the maximum size of the history buffer for
+ * this session to be altered.
+?*/
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (cli_unix_cli_set_terminal_history, static) = {
   .path = "set terminal history",
@@ -2880,6 +2952,14 @@ unix_cli_set_terminal_ansi (vlib_main_t * vm,
   return 0;
 }
 
+/*?
+ * Enables or disables the use of ANSI control sequences by this terminal.
+ * The default will vary based on terminal detection at the start of the
+ * session.
+ *
+ * ANSI control sequences are used in a small number of places to provide,
+ * for example, color text output and to control the cursor in the pager.
+?*/
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (cli_unix_cli_set_terminal_ansi, static) = {
   .path = "set terminal ansi",
diff --git a/vlib/vlib/unix/dir.dox b/vlib/vlib/unix/dir.dox
new file mode 100644
index 00000000..cdded0f1
--- /dev/null
+++ b/vlib/vlib/unix/dir.dox
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir
+@brief VLIB Unix interface
+
+VLIB application library Unix interface layer.
+
+*/
+/*? %%clicmd:group_label VLIB Unix stuff%% ?*/
+
-- 
cgit 1.2.3-korg
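
For reference, each .siphon file written by siphon_generate.py above is JSON of
roughly this shape (paths, line numbers and the elided fields are illustrative):

    {
        "file": "build-root/docs/siphon_fragments/clicmd.siphon",
        "global": {
            "vlib/vlib/unix/cli.c": { "group_label": "Debug CLI" }
        },
        "items": [
            {
                "block": "VLIB_CLI_COMMAND (unix_cli_quit_command, static) = { ... };\n",
                "file": "vlib/vlib/unix/cli.c",
                "group": "vlib/vlib/unix/cli.c",
                "group_label": "Debug CLI",
                "line_end": 2563,
                "line_start": 2558,
                "siphon_block": "Terminates the current CLI session. ..."
            }
        ]
    }

siphon_process.py then parses the captured initializers with pyparsing and
renders them into the indexed Markdown that Doxygen picks up.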


From 16bcf7d8dcd411e6a6b8d217cce5e450f7357bb3 Mon Sep 17 00:00:00 2001
From: Chris Luke 
Date: Thu, 1 Sep 2016 14:31:46 -0400
Subject: VPP-346 A swathe of doc fixes

Fixes various Doxygen warnings and other structural defects.

Note: This does not attempt to improve the content of the
documentation; only to improve its syntax and structure and, in some
cases, its consistency.
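
For example, a hypothetical CLI command definition such as

    VLIB_CLI_COMMAND (show_example_command, static) = {

was previously handed to Doxygen with the "static" qualifier dropped; with the
split filter patterns below it becomes

    /** @brief (@em constructor) VLIB_CLI_COMMAND (show_example_command) */ static vlib_cli_command_t show_example_command = {

so storage qualifiers are preserved in the declaration Doxygen sees.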

Change-Id: Ib1915f33edbdbc4558c85565de80dce323193906
Signed-off-by: Chris Luke 
---
 doxygen/Makefile                    |  4 +++
 doxygen/doxygen.cfg                 |  4 ++-
 doxygen/filter_c.py                 | 49 +++++++++++++++++++++++-----------
 doxygen/filter_h.py                 | 53 +++++++++++++++++++++++++++++++++++++
 vnet/vnet/interface_cli.c           |  9 ++++++-
 vnet/vnet/ip/lookup.h               | 12 ++++++---
 vnet/vnet/ipsec-gre/error.def       |  2 +-
 vnet/vnet/ipsec-gre/interface.c     |  2 +-
 vnet/vnet/ipsec-gre/ipsec_gre.c     |  2 +-
 vnet/vnet/ipsec-gre/ipsec_gre.h     |  2 +-
 vnet/vnet/ipsec-gre/node.c          |  6 ++---
 vnet/vnet/l2/l2_bd.c                | 26 ++++++++++--------
 vnet/vnet/l2/l2_efp_filter.c        |  6 ++---
 vnet/vnet/l2/l2_fib.c               | 10 +++----
 vnet/vnet/l2/l2_flood.c             |  4 +--
 vnet/vnet/l2/l2_fwd.c               |  6 ++---
 vnet/vnet/l2/l2_input.c             | 19 ++++++-------
 vnet/vnet/l2/l2_input_classify.c    | 31 +++++++++++-----------
 vnet/vnet/l2/l2_learn.c             |  9 ++++---
 vnet/vnet/l2/l2_output.c            |  9 ++++---
 vnet/vnet/l2/l2_output_acl.c        |  4 +--
 vnet/vnet/l2/l2_output_classify.c   | 26 +++++++++---------
 vnet/vnet/l2/l2_vtr.c               |  6 ++---
 vnet/vnet/l2/l2_xcrw.c              |  3 ++-
 vnet/vnet/unix/tuntap.c             |  8 +++---
 vppinfra/vppinfra/bihash_template.c |  4 +--
 vppinfra/vppinfra/bihash_template.h |  4 +--
 27 files changed, 208 insertions(+), 112 deletions(-)
 create mode 100755 doxygen/filter_h.py

(limited to 'doxygen/filter_c.py')

diff --git a/doxygen/Makefile b/doxygen/Makefile
index 8e916526..df7d07d7 100644
--- a/doxygen/Makefile
+++ b/doxygen/Makefile
@@ -40,6 +40,9 @@ DOXY_INPUT ?= \
 	vpp-api \
 	plugins
 
+DOXY_INCLUDE_PATH = $(shell set -e; cd $(WS_ROOT); for item in $(DOXY_INPUT); do find $$item -type d; done)
+DOXY_INCLUDE_PATH += $(shell set -e; cpp -v </dev/null 2>&1 | grep -A 1000 '\#include' | awk '/^ /{print $$1}')
+
 # Target directory for doxygen output
 DOXY_OUTPUT ?= $(BR)/docs
 
@@ -114,6 +117,7 @@ doxygen: $(SIPHON_DOCS)
 	    ROOT="$(WS_ROOT)" \
 	    BUILD_ROOT="$(BR)" \
 	    INPUT="$(addprefix $(WS_ROOT)/,$(DOXY_INPUT)) $(EXTRA_DOXY_INPUT)" \
+	    INCLUDE_PATH="$(DOXY_INCLUDE_PATH)" \
 	    HTML=YES \
 	    VERSION="`git describe --tags --dirty`" \
 	    doxygen $(DOXY_DIR)/doxygen.cfg
diff --git a/doxygen/doxygen.cfg b/doxygen/doxygen.cfg
index 82687cac..4eb0f373 100644
--- a/doxygen/doxygen.cfg
+++ b/doxygen/doxygen.cfg
@@ -234,6 +234,7 @@ ALIASES                =
 ALIASES += "node=@xrefitem nodes \"Node Identifier\" \"Node Identifiers\" @c "
 
 ## Formatting for CLI commands and output
+ALIASES += "cli{1}=
\1
" ALIASES += "clistart=
"
 ALIASES += "cliend=
" @@ -914,6 +915,7 @@ INPUT_FILTER = FILTER_PATTERNS = \ *.c=$(ROOT)/doxygen/filter_c.py \ + *.h=$(ROOT)/doxygen/filter_h.py \ *.api=$(ROOT)/doxygen/filter_api.py # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using @@ -2035,7 +2037,7 @@ SEARCH_INCLUDES = YES # preprocessor. # This tag requires that the tag SEARCH_INCLUDES is set to YES. -INCLUDE_PATH = $(INPUT) +INCLUDE_PATH = $(INCLUDE_PATH) # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the diff --git a/doxygen/filter_c.py b/doxygen/filter_c.py index 733fdefb..30b933ba 100755 --- a/doxygen/filter_c.py +++ b/doxygen/filter_c.py @@ -22,26 +22,45 @@ if len(sys.argv) < 2: sys.exit(1) replace_patterns = [ - # Search for VLIB_CLI_COMMAND, extract its parameter and add a docblock for it - ( re.compile("(?PVLIB_CLI_COMMAND)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g (\g) */ vlib_cli_command_t \g"), - - # Search for VLIB_REGISTER_NODE, extract its parameter and add a docblock for it - ( re.compile("(?PVLIB_REGISTER_NODE)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g (\g) */ vlib_node_registration_t \g"), + # Search for VLIB_CLI_COMMAND, extract its parameters and add a docblock for it + ( re.compile("(?PVLIB_CLI_COMMAND)\s*[(](?P[a-zA-Z0-9_]+)[)]"), + r"/** @brief (@em constructor) \g (\g) */ vlib_cli_command_t \g"), + ( re.compile("(?PVLIB_CLI_COMMAND)\s*[(](?P[a-zA-Z0-9_]+),\s*(?P[^)]*)[)]"), + r"/** @brief (@em constructor) \g (\g) */ \g vlib_cli_command_t \g"), + + # Search for VLIB_REGISTER_NODE, extract its parameters and add a docblock for it + ( re.compile("(?PVLIB_REGISTER_NODE)\s*[(](?P[a-zA-Z0-9_]+)[)]"), + r"/** @brief (@em constructor) \g (\g) */ vlib_node_registration_t \g"), + ( re.compile("(?PVLIB_REGISTER_NODE)\s*[(](?P[a-zA-Z0-9_]+),\s*(?P[^)]*)[)]"), + r"/** @brief (@em constructor) \g (\g) */ \g vlib_node_registration_t \g"), # Search for VLIB_INIT_FUNCTION, extract its parameter and add a docblock for it - ( re.compile("(?PVLIB_INIT_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g (@ref \g) */ vlib_init_function_t * _vlib_init_function_\g"), - ( re.compile("(?PVLIB_DECLARE_INIT_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g (@ref \g) */ vlib_init_function_t * _vlib_init_function_\g"), + ( re.compile("(?PVLIB_INIT_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)[)]"), + r"/** @brief (@em constructor) \g (@ref \g) */ vlib_init_function_t * _vlib_init_function_\g"), + ( re.compile("(?PVLIB_DECLARE_INIT_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)[)]"), + r"/** @brief (@em constructor) \g (@ref \g) */ vlib_init_function_t * _vlib_init_function_\g"), + + # Search for VLIB_LOOP_ENTER_FUNCTION, extract the parameters and add a docblock for it + ( re.compile("(?PVLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), + r"/** @brief (@em constructor) \g (@ref \g) */ _vlib_main_loop_enter_\g"), + ( re.compile("(?PVLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), + r"/** @brief (@em constructor) \g (@ref \g) */ _vlib_main_loop_exit_\g"), + + # Search for VLIB_CONFIG_FUNCTION, extract the parameters and add a docblock for it + ( re.compile("(?PVLIB_CONFIG_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+),\s*(?P\"[^\"]+\")(,[^)]*)?[)]"), + r"/** @brief (@em constructor) \g (\g, \g) */ vlib_config_function_runtime_t _vlib_config_function_\g"), + ( 
re.compile("(?PVLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+),\s*(?P\"[^\"]+\")(,[^)]*)?[)]"), + r"/** @brief (@em constructor) \g (\g, \g) */ vlib_config_function_runtime_t _vlib_config_function_\g"), - # Search for VLIB_LOOP_ENTER_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it - ( re.compile("(?PVLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g (@ref \g) */ _vlib_main_loop_enter_\g"), - ( re.compile("(?PVLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g (@ref \g) */ _vlib_main_loop_exit_\g"), + # Search for "format_thing" and "unformat_thing" when used as a function pointer and add parens + ( re.compile("(?P
(^|,)\s*)(?P(un)?format_[a-zA-Z0-9_]+)(?P\s*(,|$))"),
+        r"\g
\g()\g" ),
 
-    # Search for VLIB_CONFIG_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it
-    ( re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<label>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<label>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
-    ( re.compile("(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<label>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<label>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
+    # Search for CLIB_PAD_FROM_TO(...); and replace with padding
+    # #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
+    ( re.compile("(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]"),
+        r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]" ),
 
-    # Search for "format_thing" and "unformat_thing" when used as a function pointer and add parens
-    ( re.compile("(?P
(^|,)\s*)(?P(un)?format_[a-zA-Z0-9_]+)(?P\s*(,|$))") , r"\g
\g()\g" ),
 ]
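
[An illustrative aside, not part of the patch: the effect of the split
VLIB_CLI_COMMAND rules above can be sanity-checked with a few lines of
standalone Python. The named groups m, name and qual are assumptions
reconstructed from the replacement strings, and the sample command name is
made up.]

import re

# Assumed reconstruction of the two-argument VLIB_CLI_COMMAND rule; group
# names (m, name, qual) are illustrative, not taken verbatim from the patch.
pattern = re.compile(
    r"(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]")
replacement = (r"/** @brief (@em constructor) \g<m> (\g<name>) */"
               r" \g<qual> vlib_cli_command_t \g<name>")

line = "VLIB_CLI_COMMAND (show_widgets_command, static) = {"
print(pattern.sub(replacement, line))
# Prints (one line, wrapped here):
#   /** @brief (@em constructor) VLIB_CLI_COMMAND (show_widgets_command) */
#   static vlib_cli_command_t show_widgets_command = {
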
 
 
diff --git a/doxygen/filter_h.py b/doxygen/filter_h.py
new file mode 100755
index 00000000..967388d5
--- /dev/null
+++ b/doxygen/filter_h.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter for .h files to make various preprocessor tricks Doxygenish
+
+import os, sys, re
+
+if len(sys.argv) < 2:
+    sys.stderr.write("Usage: %s \n" % (sys.argv[0]))
+    sys.exit(1)
+
+replace_patterns = [
+    # Search for CLIB_PAD_FROM_TO(...); and replace with padding
+    # #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
+    ( re.compile("(?PCLIB_PAD_FROM_TO)\s*[(](?P[^,]+),\s*(?P[^)]+)[)]"),
+        r"/** Padding. */ u8 pad_\g[(\g) - (\g)]" ),
+
+]
+
+
+filename = sys.argv[1]
+cwd = os.getcwd()
+if filename[0:len(cwd)] == cwd:
+    filename = filename[len(cwd):]
+    if filename[0] == "/":
+        filename = filename[1:]
+
+with open(filename) as fd:
+    line_num = 0
+
+    for line in fd:
+        line_num += 1
+        str = line[:-1] # filter \n
+
+        # Look for search/replace patterns
+        for p in replace_patterns:
+            str = p[0].sub(p[1], str)
+
+        sys.stdout.write(str+"\n")
+
+# All done
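
[An illustrative aside, not part of the patch: Doxygen invokes this script as
"filter_h.py <path-to-header>" and reads the rewritten source from stdout, per
the FILTER_PATTERNS entry added to doxygen.cfg above. A minimal sketch of the
CLIB_PAD_FROM_TO rewrite follows; the group names m, from and to are assumed,
and the sample offsets are made up.]

import re

# Assumed reconstruction of the CLIB_PAD_FROM_TO rule; group names are
# illustrative.
pad_rule = re.compile(
    r"(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]")
pad_repl = r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]"

sample = "  CLIB_PAD_FROM_TO (0x34, 0x40);"
print(pad_rule.sub(pad_repl, sample))
# Prints:   /** Padding. */ u8 pad_0x34[(0x40) - (0x34)];
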
diff --git a/vnet/vnet/interface_cli.c b/vnet/vnet/interface_cli.c
index 7b9f5458..654edcaa 100644
--- a/vnet/vnet/interface_cli.c
+++ b/vnet/vnet/interface_cli.c
@@ -37,6 +37,11 @@
  *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+/**
+ * @file
+ * Interface CLI.
+ */
+
 #include 
 #include 
 #include 
@@ -432,7 +437,9 @@ VLIB_CLI_COMMAND (clear_interface_counters_command, static) = {
 };
 /* *INDENT-ON* */
 
-/** \detail
+/**
+ * Parse subinterface names.
+ *
  * The following subinterface syntax is supported. The first two are for
 * backwards compatibility:
  *
diff --git a/vnet/vnet/ip/lookup.h b/vnet/vnet/ip/lookup.h
index fcd080a4..dcc9d25f 100644
--- a/vnet/vnet/ip/lookup.h
+++ b/vnet/vnet/ip/lookup.h
@@ -37,11 +37,15 @@
  *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/** @file Definitions for all things IP (v4|v6) unicast and multicast lookup related.
-    - Adjacency definitions and registration
-    - callbacks on route add
-    - callbacks on interface address change
+/**
+ * @file
+ * Definitions for all things IP (v4|v6) unicast and multicast lookup related.
+ *
+ * - Adjacency definitions and registration.
+ * - Callbacks on route add.
+ * - Callbacks on interface address change.
  */
+
 #ifndef included_ip_lookup_h
 #define included_ip_lookup_h
 
diff --git a/vnet/vnet/ipsec-gre/error.def b/vnet/vnet/ipsec-gre/error.def
index 0d7b4686..d84e8ed1 100644
--- a/vnet/vnet/ipsec-gre/error.def
+++ b/vnet/vnet/ipsec-gre/error.def
@@ -13,7 +13,7 @@
  * limitations under the License.
  */
 /**
- * @file error.def
+ * @file
  * @brief L2-GRE over IPSec errors.
  */
 
diff --git a/vnet/vnet/ipsec-gre/interface.c b/vnet/vnet/ipsec-gre/interface.c
index 3e5d3954..dbf9df56 100644
--- a/vnet/vnet/ipsec-gre/interface.c
+++ b/vnet/vnet/ipsec-gre/interface.c
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 /**
- * @file interface.c
+ * @file
  * @brief L2-GRE over IPSec tunnel interface.
  *
  * Creates ipsec-gre tunnel interface.
diff --git a/vnet/vnet/ipsec-gre/ipsec_gre.c b/vnet/vnet/ipsec-gre/ipsec_gre.c
index 24ec6f4e..3d1b54fc 100644
--- a/vnet/vnet/ipsec-gre/ipsec_gre.c
+++ b/vnet/vnet/ipsec-gre/ipsec_gre.c
@@ -13,7 +13,7 @@
  * limitations under the License.
  */
 /**
- * @file ipsec_gre.c
+ * @file
  * @brief L2-GRE over IPSec packet processing.
  *
 * Add GRE header to the packet and send it to the esp-encrypt node.
diff --git a/vnet/vnet/ipsec-gre/ipsec_gre.h b/vnet/vnet/ipsec-gre/ipsec_gre.h
index 2b66c6a6..a2ca64b6 100644
--- a/vnet/vnet/ipsec-gre/ipsec_gre.h
+++ b/vnet/vnet/ipsec-gre/ipsec_gre.h
@@ -13,7 +13,7 @@
  * limitations under the License.
  */
 /**
- * @file ipsec_gre.h
+ * @file
  * @brief L2-GRE over IPSec packet processing.
 */
 
diff --git a/vnet/vnet/ipsec-gre/node.c b/vnet/vnet/ipsec-gre/node.c
index 13102552..5a176088 100644
--- a/vnet/vnet/ipsec-gre/node.c
+++ b/vnet/vnet/ipsec-gre/node.c
@@ -13,10 +13,10 @@
  * limitations under the License.
  */
 /**
- * @file node.c
+ * @file
  * @brief L2-GRE over IPSec packet processing.
  *
- * Removes GRE header from the packet and send it to the l2-input node.
+ * Removes GRE header from the packet and sends it to the l2-input node.
 */
 
 #include 
@@ -68,7 +68,7 @@ u8 * format_ipsec_gre_rx_trace (u8 * s, va_list * args)
  *
  * @par Graph mechanics: buffer metadata, next index usage
  *
- * Uses:
+ * Uses:
  * - ip->src_address and ip->dst_address
  *     - Match tunnel by source and destination addresses in GRE IP header.
  *
diff --git a/vnet/vnet/l2/l2_bd.c b/vnet/vnet/l2/l2_bd.c
index 490a08f2..ddbe934a 100644
--- a/vnet/vnet/l2/l2_bd.c
+++ b/vnet/vnet/l2/l2_bd.c
@@ -35,7 +35,7 @@
 bd_main_t bd_main;
 
 /**
-  Init bridge domain if not done already
+  Init bridge domain if not done already.
   For feature bitmap, set all bits except ARP termination
 */
 void
@@ -175,7 +175,7 @@ VLIB_INIT_FUNCTION (l2bd_init);
 
 
 /**
-    Set the learn/forward/flood flags for the bridge domain
+    Set the learn/forward/flood flags for the bridge domain.
     Return 0 if ok, non-zero for an error.
 */
 u32
@@ -224,7 +224,7 @@ bd_set_flags (vlib_main_t * vm, u32 bd_index, u32 flags, u32 enable)
 }
 
 /**
-   set bridge-domain learn enable/disable
+   Set bridge-domain learn enable/disable.
    The CLI format is:
    set bridge-domain learn <bd_id> [disable]
 */
@@ -279,7 +279,7 @@ VLIB_CLI_COMMAND (bd_learn_cli, static) = {
 /* *INDENT-ON* */
 
 /**
-    set bridge-domain forward enable/disable
+    Set bridge-domain forward enable/disable.
     The CLI format is:
     set bridge-domain forward <bd_id> [disable]
 */
@@ -333,7 +333,7 @@ VLIB_CLI_COMMAND (bd_fwd_cli, static) = {
 /* *INDENT-ON* */
 
 /**
-    set bridge-domain flood enable/disable
+    Set bridge-domain flood enable/disable.
     The CLI format is:
     set bridge-domain flood <bd_id> [disable]
 */
@@ -388,7 +388,7 @@ VLIB_CLI_COMMAND (bd_flood_cli, static) = {
 /* *INDENT-ON* */
 
 /**
-    set bridge-domain unkown-unicast flood enable/disable
+    Set bridge-domain unknown-unicast flood enable/disable.
     The CLI format is:
     set bridge-domain uu-flood <bd_id> [disable]
 */
@@ -443,7 +443,7 @@ VLIB_CLI_COMMAND (bd_uu_flood_cli, static) = {
 /* *INDENT-ON* */
 
 /**
-    set bridge-domain arp term enable/disable
+    Set bridge-domain arp term enable/disable.
     The CLI format is:
     set bridge-domain arp term <bd_id> [disable]
 */
@@ -496,11 +496,15 @@ VLIB_CLI_COMMAND (bd_arp_term_cli, static) = {
 
 
 /**
+ * Add/delete IP address to MAC address mapping.
+ *
  * The clib hash implementation stores uword entries in the hash table.
  * The hash table mac_by_ip4 is keyed via IP4 address and store the
  * 6-byte MAC address directly in the hash table entry uword.
- * This only works for 64-bit processor with 8-byte uword; which means
- * this code *WILL NOT WORK* for a 32-bit prcessor with 4-byte uword.
+ *
+ * @warning This only works for a 64-bit processor with an 8-byte uword;
+ * which means this code *WILL NOT WORK* for a 32-bit processor with
+ * a 4-byte uword.
  */
 u32
 bd_add_del_ip_mac (u32 bd_index,
@@ -573,7 +577,7 @@ bd_add_del_ip_mac (u32 bd_index,
 }
 
 /**
-    set bridge-domain arp entry add/delete
+    Set bridge-domain arp entry add/delete.
     The CLI format is:
     set bridge-domain arp entry <bd-id> <ip-addr> <mac-addr> [del]
 */
@@ -689,7 +693,7 @@ format_vtr (u8 * s, va_list * args)
 }
 
 /**
-   show bridge-domain state
+   Show bridge-domain state.
    The CLI format is:
    show bridge-domain [<bd_index>]
 */
diff --git a/vnet/vnet/l2/l2_efp_filter.c b/vnet/vnet/l2/l2_efp_filter.c
index 221db9ab..2038dce2 100644
--- a/vnet/vnet/l2/l2_efp_filter.c
+++ b/vnet/vnet/l2/l2_efp_filter.c
@@ -97,7 +97,7 @@ typedef enum
 
 /**
  *  Extract fields from the packet that will be used in interface
- *  classification
+ *  classification.
  */
 static_always_inline void
 extract_keys (vnet_main_t * vnet_main,
@@ -524,7 +524,7 @@ VLIB_NODE_FUNCTION_MULTIARCH (l2_efp_filter_node, l2_efp_filter_node_fn)
 VLIB_INIT_FUNCTION (l2_efp_filter_init);
 
 
-/** Enable/disable the EFP Filter check on the subinterface */
+/** Enable/disable the EFP Filter check on the subinterface. */
 void
 l2_efp_filter_configure (vnet_main_t * vnet_main, u32 sw_if_index, u32 enable)
 {
@@ -534,7 +534,7 @@ l2_efp_filter_configure (vnet_main_t * vnet_main, u32 sw_if_index, u32 enable)
 
 
 /**
- * set subinterface egress efp filter enable/disable
+ * Set subinterface egress efp filter enable/disable.
  * The CLI format is:
 *    set interface l2 efp-filter <interface> [disable]]
  */
diff --git a/vnet/vnet/l2/l2_fib.c b/vnet/vnet/l2/l2_fib.c
index 4275e884..97620bfb 100644
--- a/vnet/vnet/l2/l2_fib.c
+++ b/vnet/vnet/l2/l2_fib.c
@@ -97,7 +97,7 @@ l2fib_table_dump (u32 bd_index, l2fib_entry_key_t ** l2fe_key,
     }
 }
 
-/** Display the contents of the l2fib */
+/** Display the contents of the l2fib. */
 static clib_error_t *
 show_l2fib (vlib_main_t * vm,
 	    unformat_input_t * input, vlib_cli_command_t * cmd)
@@ -228,8 +228,8 @@ l2fib_clear_table (uint keep_static)
   l2learn_main.global_learn_count = 0;
 }
 
-/** Clear all entries in L2FIB
- * TODO: Later we may want a way to remove only the non-static entries
+/** Clear all entries in L2FIB.
+ * @TODO: Later we may want a way to remove only the non-static entries
  */
 static clib_error_t *
 clear_l2fib (vlib_main_t * vm,
@@ -286,7 +286,7 @@ l2fib_add_entry (u64 mac,
 }
 
 /**
- * Add an entry to the L2FIB
+ * Add an entry to the L2FIB.
  * The CLI format is:
 *    l2fib add <mac> <bd-id> <intf> [static] [bvi]
 *    l2fib add <mac> <bd-id> filter
@@ -517,7 +517,7 @@ l2fib_del_entry (u64 mac, u32 bd_index)
 }
 
 /**
- * Delete an entry from the L2FIB
+ * Delete an entry from the L2FIB.
  * The CLI format is:
 *    l2fib del <mac> <bd-id>
  */
diff --git a/vnet/vnet/l2/l2_flood.c b/vnet/vnet/l2/l2_flood.c
index 0654fe29..05df2a01 100644
--- a/vnet/vnet/l2/l2_flood.c
+++ b/vnet/vnet/l2/l2_flood.c
@@ -490,7 +490,7 @@ VLIB_INIT_FUNCTION (l2flood_init);
 
 
 
-/** Add the L3 input node for this ethertype to the next nodes structure */
+/** Add the L3 input node for this ethertype to the next nodes structure. */
 void
 l2flood_register_input_type (vlib_main_t * vm,
 			     ethernet_type_t type, u32 node_index)
@@ -505,7 +505,7 @@ l2flood_register_input_type (vlib_main_t * vm,
 
 
 /**
- * set subinterface flood enable/disable
+ * Set subinterface flood enable/disable.
  * The CLI format is:
 * set interface l2 flood <interface> [disable]
  */
diff --git a/vnet/vnet/l2/l2_fwd.c b/vnet/vnet/l2/l2_fwd.c
index 4950b23a..8fa355e0 100644
--- a/vnet/vnet/l2/l2_fwd.c
+++ b/vnet/vnet/l2/l2_fwd.c
@@ -104,7 +104,7 @@ typedef enum
   L2FWD_N_NEXT,
 } l2fwd_next_t;
 
-/** Forward one packet based on the mac table lookup result */
+/** Forward one packet based on the mac table lookup result. */
 
 static_always_inline void
 l2fwd_process (vlib_main_t * vm,
@@ -400,7 +400,7 @@ VLIB_NODE_FUNCTION_MULTIARCH (l2fwd_node, l2fwd_node_fn)
 VLIB_INIT_FUNCTION (l2fwd_init);
 
 
-/** Add the L3 input node for this ethertype to the next nodes structure */
+/** Add the L3 input node for this ethertype to the next nodes structure. */
 void
 l2fwd_register_input_type (vlib_main_t * vm,
 			   ethernet_type_t type, u32 node_index)
@@ -415,7 +415,7 @@ l2fwd_register_input_type (vlib_main_t * vm,
 
 
 /**
- * set subinterface forward enable/disable
+ * Set subinterface forward enable/disable.
  * The CLI format is:
 *   set interface l2 forward <interface> [disable]
  */
diff --git a/vnet/vnet/l2/l2_input.c b/vnet/vnet/l2/l2_input.c
index 5c39b797..24cb31fb 100644
--- a/vnet/vnet/l2/l2_input.c
+++ b/vnet/vnet/l2/l2_input.c
@@ -490,7 +490,7 @@ VLIB_NODE_FUNCTION_MULTIARCH (l2input_node, l2input_node_fn)
 VLIB_INIT_FUNCTION (l2input_init);
 
 
-/** Get a pointer to the config for the given interface */
+/** Get a pointer to the config for the given interface. */
 l2_input_config_t *
 l2input_intf_config (u32 sw_if_index)
 {
@@ -500,7 +500,7 @@ l2input_intf_config (u32 sw_if_index)
   return vec_elt_at_index (mp->configs, sw_if_index);
 }
 
-/** Enable (or disable) the feature in the bitmap for the given interface */
+/** Enable (or disable) the feature in the bitmap for the given interface. */
 u32
 l2input_intf_bitmap_enable (u32 sw_if_index, u32 feature_bitmap, u32 enable)
 {
@@ -536,10 +536,10 @@ l2input_set_bridge_features (u32 bd_index, u32 feat_mask, u32 feat_value)
 
 /**
  * Set the subinterface to run in l2 or l3 mode.
- * for L3 mode, just the sw_if_index is specified
- * for bridged mode, the bd id and bvi flag are also specified
- * for xconnect mode, the peer sw_if_index is also specified
- * Return 0 if ok, or non-0 if there was an error
+ * For L3 mode, just the sw_if_index is specified.
+ * For bridged mode, the bd id and bvi flag are also specified.
+ * For xconnect mode, the peer sw_if_index is also specified.
+ * Return 0 if ok, or non-0 if there was an error.
  */
 
 u32
@@ -771,7 +771,7 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, u32 mode, u32 sw_if_
 }
 
 /**
- * set subinterface in bridging mode with a bridge-domain ID
+ * Set subinterface in bridging mode with a bridge-domain ID.
  * The CLI format is:
 *   set interface l2 bridge <interface> <bridge-domain-id> [bvi] [split-horizon-group]
  */
@@ -847,7 +847,7 @@ VLIB_CLI_COMMAND (int_l2_bridge_cli, static) = {
 /* *INDENT-ON* */
 
 /**
- * set subinterface in xconnect mode with another interface
+ * Set subinterface in xconnect mode with another interface.
  * The CLI format is:
 *   set interface l2 xconnect <interface> <peer interface>
  */
@@ -897,7 +897,7 @@ VLIB_CLI_COMMAND (int_l2_xc_cli, static) = {
 /* *INDENT-ON* */
 
 /**
- * set subinterface in L3 mode
+ * Set subinterface in L3 mode.
  * The CLI format is:
 *   set interface l3 <interface>
  */
@@ -936,6 +936,7 @@ VLIB_CLI_COMMAND (int_l3_cli, static) = {
 /* *INDENT-ON* */
 
 /**
+ * Show interface mode.
  * The CLI format is:
 *    show mode [<if-name1> <if-name2> ...]
  */
diff --git a/vnet/vnet/l2/l2_input_classify.c b/vnet/vnet/l2/l2_input_classify.c
index 1b9f911c..69cd113d 100644
--- a/vnet/vnet/l2/l2_input_classify.c
+++ b/vnet/vnet/l2/l2_input_classify.c
@@ -21,13 +21,14 @@
 
 /**
  * @file
- * @brief L2 input classifier
+ * @brief L2 input classifier.
  *
- * See also .../vnet/vnet/classify/vnet_classify.[ch]
+ * @sa @ref vnet/vnet/classify/vnet_classify.c
+ * @sa @ref vnet/vnet/classify/vnet_classify.h
  */
 
 /**
- * @brief l2_input_classifier packet trace record
+ * @brief l2_input_classifier packet trace record.
  */
 typedef struct
 {
@@ -42,7 +43,7 @@ typedef struct
 } l2_input_classify_trace_t;
 
 /**
- * @brief vlib node runtime
+ * @brief vlib node runtime.
  */
 typedef struct
 {
@@ -52,7 +53,7 @@ typedef struct
   l2_input_classify_main_t *l2cm;
 } l2_input_classify_runtime_t;
 
-/** packet trace format function */
+/** Packet trace format function. */
 static u8 *
 format_l2_input_classify_trace (u8 * s, va_list * args)
 {
@@ -66,7 +67,7 @@ format_l2_input_classify_trace (u8 * s, va_list * args)
   return s;
 }
 
-/** l2 input classifier main data structure */
+/** l2 input classifier main data structure. */
 l2_input_classify_main_t l2_input_classify_main;
 
 vlib_node_registration_t l2_input_classify_node;
@@ -106,19 +107,19 @@ static char *l2_input_classify_error_strings[] = {
  * @em Uses:
  * - (l2_input_classify_runtime_t *)
  *         rt->classify_table_index_by_sw_if_index
- *	   Head of the per-interface, perprotocol classifier table chain
- * 	   for a specific interface. ~0 => send pkts to the next
- * 	   feature in the L2 feature chain.
+ *	- Head of the per-interface, per-protocol classifier table chain
+ * 	  for a specific interface.
+ *      - @c ~0 => send pkts to the next feature in the L2 feature chain.
  * - vnet_buffer(b)->sw_if_index[VLIB_RX]
  * 	- Indicates the @c sw_if_index value of the interface that the
- * 	packet was received on.
- * - vnet_buffer (b0)->l2.feature_bitmap
+ * 	  packet was received on.
+ * - vnet_buffer(b0)->l2.feature_bitmap
  * 	- Used to steer packets across l2 features enabled on the interface
  * - (vnet_classify_entry_t) e0->next_index
  *	- Used to steer traffic when the classifier hits on a session
  * - (vnet_classify_entry_t) e0->advance
  *	- Signed quantity applied via vlib_buffer_advance
- * 	when the classifier hits on a session
+ * 	  when the classifier hits on a session
  * - (vnet_classify_table_t) t0->miss_next_index
  *	- Used to steer traffic when the classifier misses
  *
@@ -477,7 +478,7 @@ VLIB_REGISTER_NODE (l2_input_classify_node) = {
 VLIB_NODE_FUNCTION_MULTIARCH (l2_input_classify_node,
 			      l2_input_classify_node_fn);
 
-/** l2 input classsifier feature initialization */
+/** l2 input classifier feature initialization. */
 clib_error_t *
 l2_input_classify_init (vlib_main_t * vm)
 {
@@ -505,7 +506,7 @@ l2_input_classify_init (vlib_main_t * vm)
 VLIB_INIT_FUNCTION (l2_input_classify_init);
 
 
-/** enable/disable l2 input classification on a specific interface */
+/** Enable/disable l2 input classification on a specific interface. */
 void
 vnet_l2_input_classify_enable_disable (u32 sw_if_index, int enable_disable)
 {
@@ -513,7 +514,7 @@ vnet_l2_input_classify_enable_disable (u32 sw_if_index, int enable_disable)
 			      (u32) enable_disable);
 }
 
-/** @brief Set l2 per-protocol, per-interface input classification tables
+/** @brief Set l2 per-protocol, per-interface input classification tables.
  *
  *  @param sw_if_index  interface handle
  *  @param ip4_table_index  ip4 classification table index, or ~0
diff --git a/vnet/vnet/l2/l2_learn.c b/vnet/vnet/l2/l2_learn.c
index 30f5617f..96d4816e 100644
--- a/vnet/vnet/l2/l2_learn.c
+++ b/vnet/vnet/l2/l2_learn.c
@@ -29,8 +29,9 @@
 #include 
 #include 
 
-/*
- * Ethernet bridge learning
+/**
+ * @file
+ * Ethernet bridge learning.
  *
  * Populate the mac table with entries mapping the packet's source mac + bridge
  * domain ID to the input sw_if_index.
@@ -102,7 +103,7 @@ typedef enum
 } l2learn_next_t;
 
 
-/** Perform learning on one packet based on the mac table lookup result */
+/** Perform learning on one packet based on the mac table lookup result. */
 
 static_always_inline void
 l2learn_process (vlib_node_runtime_t * node,
@@ -462,7 +463,7 @@ VLIB_INIT_FUNCTION (l2learn_init);
 
 
 /**
- * set subinterface learn enable/disable
+ * Set subinterface learn enable/disable.
  * The CLI format is:
 *    set interface l2 learn <interface> [disable]
  */
diff --git a/vnet/vnet/l2/l2_output.c b/vnet/vnet/l2/l2_output.c
index 8bc43744..85678caf 100644
--- a/vnet/vnet/l2/l2_output.c
+++ b/vnet/vnet/l2/l2_output.c
@@ -73,9 +73,10 @@ static char *l2output_error_strings[] = {
 };
 
 /**
- * Return 0 if split horizon check passes, otherwise return non-zero
+ * Check for split horizon violations.
+ * Return 0 if split horizon check passes, otherwise return non-zero.
  * Packets should not be transmitted out an interface with the same
- * split-horizon group as the input interface, except if the shg is 0
+ * split-horizon group as the input interface, except if the @c shg is 0
  * in which case the check always passes.
  */
 static_always_inline u32
@@ -592,7 +593,7 @@ output_node_mapping_send_rpc (u32 node_index, u32 sw_if_index)
 #endif
 
 
-/** Create a mapping in the next node mapping table for the given sw_if_index */
+/** Create a mapping in the next node mapping table for the given sw_if_index. */
 u32
 l2output_create_output_node_mapping (vlib_main_t * vlib_main, vnet_main_t * vnet_main, u32 node_index,	/* index of current node */
 				     u32 * output_node_index_vec,
@@ -660,7 +661,7 @@ l2output_intf_config (u32 sw_if_index)
   return vec_elt_at_index (mp->configs, sw_if_index);
 }
 
-/** Enable (or disable) the feature in the bitmap for the given interface */
+/** Enable (or disable) the feature in the bitmap for the given interface. */
 void
 l2output_intf_bitmap_enable (u32 sw_if_index, u32 feature_bitmap, u32 enable)
 {
diff --git a/vnet/vnet/l2/l2_output_acl.c b/vnet/vnet/l2/l2_output_acl.c
index 4597d42e..94a4d66b 100644
--- a/vnet/vnet/l2/l2_output_acl.c
+++ b/vnet/vnet/l2/l2_output_acl.c
@@ -306,8 +306,8 @@ VLIB_NODE_FUNCTION_MULTIARCH (l2_outacl_node, l2_outacl_node_fn)
 VLIB_INIT_FUNCTION (l2_outacl_init);
 
 #if 0
-/** @todo maybe someone will add output ACL's in the future
- * set subinterface outacl enable/disable
+/** @todo maybe someone will add output ACL's in the future.
+ * Set subinterface outacl enable/disable.
  * The CLI format is:
 *    set interface acl output <interface> [disable]
  */
diff --git a/vnet/vnet/l2/l2_output_classify.c b/vnet/vnet/l2/l2_output_classify.c
index 1cb8b850..c04df3c2 100644
--- a/vnet/vnet/l2/l2_output_classify.c
+++ b/vnet/vnet/l2/l2_output_classify.c
@@ -12,18 +12,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-/*
- * l2_classify.c
- */
 
 #include 
 #include 
 
 /**
  * @file
- * @brief L2 input classifier
+ * @brief L2 output classifier.
  *
- * See also .../vnet/vnet/classify/vnet_classify.[ch]
+ * @sa @ref vnet/vnet/classify/vnet_classify.c
+ * @sa @ref vnet/vnet/classify/vnet_classify.h
  */
 
 typedef struct
@@ -46,7 +44,7 @@ typedef struct
   l2_output_classify_main_t *l2cm;
 } l2_output_classify_runtime_t;
 
-/** packet trace format function */
+/** Packet trace format function. */
 static u8 *
 format_l2_output_classify_trace (u8 * s, va_list * args)
 {
@@ -61,7 +59,7 @@ format_l2_output_classify_trace (u8 * s, va_list * args)
   return s;
 }
 
-/** l2 output classifier main data structure */
+/** l2 output classifier main data structure. */
 l2_output_classify_main_t l2_output_classify_main;
 
 vlib_node_registration_t l2_output_classify_node;
@@ -477,7 +475,7 @@ VLIB_REGISTER_NODE (l2_output_classify_node) = {
 VLIB_NODE_FUNCTION_MULTIARCH (l2_output_classify_node,
 			      l2_output_classify_node_fn);
 
-/** l2 output classsifier feature initialization */
+/** l2 output classifier feature initialization. */
 clib_error_t *
 l2_output_classify_init (vlib_main_t * vm)
 {
@@ -507,7 +505,7 @@ l2_output_classify_init (vlib_main_t * vm)
 
 VLIB_INIT_FUNCTION (l2_output_classify_init);
 
-/** enable/disable l2 input classification on a specific interface */
+/** Enable/disable l2 output classification on a specific interface. */
 void
 vnet_l2_output_classify_enable_disable (u32 sw_if_index, int enable_disable)
 {
@@ -516,15 +514,15 @@ vnet_l2_output_classify_enable_disable (u32 sw_if_index, int enable_disable)
 			       (u32) enable_disable);
 }
 
-/** @brief Set l2 per-protocol, per-interface output classification tables
+/** @brief Set l2 per-protocol, per-interface output classification tables.
  *
- *  @param sw_if_index  interface handle
- *  @param ip4_table_index  ip4 classification table index, or ~0
- *  @param ip6_table_index  ip6 classification table index, or ~0
+ *  @param sw_if_index        interface handle
+ *  @param ip4_table_index    ip4 classification table index, or ~0
+ *  @param ip6_table_index    ip6 classification table index, or ~0
  *  @param other_table_index  non-ip4, non-ip6 classification table index,
  *         or ~0
  *  @returns 0 on success, VNET_API_ERROR_NO_SUCH_TABLE, TABLE2, TABLE3
- *  if the indicated (non-~0) table does not exist.
+ *           if the indicated (non-~0) table does not exist.
  */
 
 int
diff --git a/vnet/vnet/l2/l2_vtr.c b/vnet/vnet/l2/l2_vtr.c
index 6250074e..3ec8b8a7 100644
--- a/vnet/vnet/l2/l2_vtr.c
+++ b/vnet/vnet/l2/l2_vtr.c
@@ -30,7 +30,7 @@
 #include 
 
 
-/** Just a placeholder. Also ensures file is not eliminated by linker. */
+/** Just a placeholder; ensures file is not eliminated by linker. */
 clib_error_t *
 l2_vtr_init (vlib_main_t * vm)
 {
@@ -254,7 +254,7 @@ done:
 }
 
 /**
- *  Get vtag tag rewrite on the given interface.
+ * Get vtag tag rewrite on the given interface.
  * Return 1 if there is an error, 0 if ok
  */
 u32
@@ -411,7 +411,7 @@ done:
 }
 
 /**
- * set subinterface vtr enable/disable
+ * Set subinterface vtr enable/disable.
  * The CLI format is:
 *    set interface l2 tag-rewrite <interface> [disable | pop 1 | pop 2 | push {dot1q|dot1ad} <tag> [<tag>]]
  *
diff --git a/vnet/vnet/l2/l2_xcrw.c b/vnet/vnet/l2/l2_xcrw.c
index 95219e6e..344a8b96 100644
--- a/vnet/vnet/l2/l2_xcrw.c
+++ b/vnet/vnet/l2/l2_xcrw.c
@@ -14,7 +14,8 @@
  */
 #include 
 
-/*
+/**
+ * @file
  * General L2 / L3 cross-connect, used to set up
  * "L2 interface <--> your-favorite-tunnel-encap" tunnels.
  *
diff --git a/vnet/vnet/unix/tuntap.c b/vnet/vnet/unix/tuntap.c
index 83e7ec4f..b3fbc7f3 100644
--- a/vnet/vnet/unix/tuntap.c
+++ b/vnet/vnet/unix/tuntap.c
@@ -786,9 +786,9 @@ tuntap_ip4_add_del_interface_address (ip4_main_t * im,
 }
 
 /**
- * @brief workaround for a known #include bug
- * #include  causes multiple definitions if
- * netinet/in.h is also included.
+ * @brief workaround for a known include file bug.
+ * including @c  causes multiple definitions if
+ * @c