 -rw-r--r--  .gitignore                   3
 -rw-r--r--  Makefile                    38
 -rw-r--r--  doxygen/Makefile           154
 -rw-r--r--  doxygen/dir.dox.sample       3
 -rw-r--r--  doxygen/doxygen.cfg         22
 -rwxr-xr-x  doxygen/filter_c.py         69
 -rwxr-xr-x  doxygen/filter_h.py         53
 -rwxr-xr-x  doxygen/siphon_generate.py 322
 -rwxr-xr-x  doxygen/siphon_process.py  323
 -rw-r--r--  vlib/vlib/dir.dox            2
 -rw-r--r--  vlib/vlib/unix/cli.c       160
 -rw-r--r--  vlib/vlib/unix/dir.dox      27
 12 files changed, 1095 insertions(+), 81 deletions(-)
diff --git a/.gitignore b/.gitignore
index 425261836aa..07668dafbc8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
*~
+*.sw[op]
/build-root/.ccache
/build-root/build-*/
@@ -66,6 +67,8 @@ GTAGS
# Generated documentation
/build-root/docs
+/build-root/.doxygen-bootstrap.ok
+/build-root/.doxygen-siphon.dep
# indent backup files
*.BAK
diff --git a/Makefile b/Makefile
index 4e3d65bcd02..20d88da4476 100644
--- a/Makefile
+++ b/Makefile
@@ -30,7 +30,7 @@ OS_VERSION_ID= $(shell grep '^VERSION_ID=' /etc/os-release | cut -f2- -d= | sed
DEB_DEPENDS = curl build-essential autoconf automake bison libssl-dev ccache
DEB_DEPENDS += debhelper dkms git libtool libganglia1-dev libapr1-dev dh-systemd
DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope
-DEB_DEPENDS += doxygen graphviz
+DEB_DEPENDS += python-dev
ifeq ($(OS_VERSION_ID),14.04)
DEB_DEPENDS += openjdk-8-jdk-headless
else
@@ -40,7 +40,7 @@ endif
RPM_DEPENDS_GROUPS = 'Development Tools'
RPM_DEPENDS = redhat-lsb glibc-static java-1.8.0-openjdk-devel yum-utils
RPM_DEPENDS += openssl-devel https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm apr-devel
-RPM_DEPENDS += doxygen graphviz
+RPM_DEPENDS += python-devel
EPEL_DEPENDS = libconfuse-devel ganglia-devel
ifneq ($(wildcard $(STARTUP_DIR)/startup.conf),)
@@ -54,7 +54,7 @@ endif
.PHONY: help bootstrap wipe wipe-release build build-release rebuild rebuild-release
.PHONY: run run-release debug debug-release build-vat run-vat pkg-deb pkg-rpm
-.PHONY: ctags cscope doxygen wipe-doxygen plugins plugins-release
+.PHONY: ctags cscope plugins plugins-release build-vpp-api
help:
@echo "Make Targets:"
@@ -80,6 +80,7 @@ help:
@echo " gtags - (re)generate gtags database"
@echo " cscope - (re)generate cscope database"
@echo " doxygen - (re)generate documentation"
+ @echo " bootstrap-doxygen - setup Doxygen dependencies"
@echo " wipe-doxygen - wipe all generated documentation"
@echo ""
@echo "Make Arguments:"
@@ -233,24 +234,21 @@ cscope: cscope.files
# Build the documentation
#
-DOXY_INPUT ?= \
- README.md \
- vppinfra \
- svm \
- vlib \
- vlib-api \
- vnet \
- vpp \
- vpp-api
+# Doxygen configuration and our utility scripts
+export DOXY_DIR ?= $(WS_ROOT)/doxygen
+
+define make-doxy
+ @OS_ID="$(OS_ID)" WS_ROOT="$(WS_ROOT)" BR="$(BR)" make -C $(DOXY_DIR) $@
+endef
+
+.PHONY: bootstrap-doxygen doxygen wipe-doxygen
+
+bootstrap-doxygen:
+ $(call make-doxy)
doxygen:
- @mkdir -p "$(BR)/docs"
- ROOT="$(WS_ROOT)" \
- BUILD_ROOT="$(BR)" \
- INPUT="$(addprefix $(WS_ROOT)/,$(DOXY_INPUT))" \
- HTML=YES \
- VERSION="`git describe --tags --dirty`" \
- doxygen doxygen/doxygen.cfg
+ $(call make-doxy)
wipe-doxygen:
- rm -rf "$(BR)/docs"
+ $(call make-doxy)
+
diff --git a/doxygen/Makefile b/doxygen/Makefile
new file mode 100644
index 00000000000..471b6fd7f15
--- /dev/null
+++ b/doxygen/Makefile
@@ -0,0 +1,154 @@
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Build the documentation
+#
+
+# These should be passed in by the root Makefile
+WS_ROOT ?= $(CURDIR)/..
+BR ?= $(WS_ROOT)/build-root
+OS_ID ?= $(shell grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')
+
+# Package dependencies
+DOC_DEB_DEPENDS = doxygen graphviz python-pyparsing
+DOC_RPM_DEPENDS = doxygen graphviz pyparsing
+
+# Doxygen configuration and our utility scripts
+DOXY_DIR ?= $(WS_ROOT)/doxygen
+
+# Input directories and files
+DOXY_INPUT ?= \
+ README.md \
+ vppinfra \
+ svm \
+ vlib \
+ vlib-api \
+ vnet \
+ vpp \
+ vpp-api \
+ plugins
+
+# Files to exclude from the pre-Doxygen steps, e.g. because they're
+# selectively compiled.
+# Examples would be to exclude non-DPDK related sources when
+# there's a DPDK equivalent that conflicts.
+# This is specifically for the pre-Doxygen steps; Doxygen uses
+# @cond for this instead.
+DOXY_PRE_EXCLUDE ?= \
+ vlib/vlib/buffer.c
+
+# Generate a regexp for filenames to exclude
+DOXY_PRE_EXCLUDE_REGEXP = ($(subst .,\.,$(shell echo '$(strip $(DOXY_PRE_EXCLUDE))' | sed -e 's/ /|/g')))
+
+# Discover all the directories we might, possibly, maybe, have include files in
+DOXY_INCLUDE_PATH = $(shell set -e; cd $(WS_ROOT); for item in $(DOXY_INPUT); do find $$item -type d; done)
+
+# Discover if we have CPP available
+CPP ?= $(shell which cpp)
+ifneq ($(strip $(CPP)),)
+# Add whatever directories CPP normally includes
+DOXY_INCLUDE_PATH += $(shell set -e; $(CPP) -v </dev/null 2>&1 | grep -A 1000 '\#include' | awk '/^ /{print $$1}')
+endif
+
+# Target directory for doxygen output
+DOXY_OUTPUT ?= $(BR)/docs
+
+# Siphoned fragments end up in here
+SIPHON_INPUT ?= $(DOXY_OUTPUT)/siphon_fragments
+
+# Siphoned fragments are processed into here
+SIPHON_OUTPUT ?= $(DOXY_OUTPUT)/siphon_docs
+
+# Extra document inputs that are processed in addition to DOXY_INPUT
+EXTRA_DOXY_INPUT += $(SIPHON_OUTPUT)
+
+# All the siphon types we know about
+SIPHONS ?= clicmd
+
+SIPHON_FILES = $(addprefix $(SIPHON_INPUT)/,$(addsuffix .siphon,$(SIPHONS)))
+SIPHON_DOCS = $(addprefix $(SIPHON_OUTPUT)/,$(addsuffix .md,$(SIPHONS)))
+
+$(BR)/.doxygen-bootstrap.ok:
+ifeq ($(OS_ID),ubuntu)
+ @set -e; inst=; \
+ for i in $(DOC_DEB_DEPENDS); do \
+ dpkg-query --show $$i >/dev/null 2>&1 || inst="$$inst $$i"; \
+ done; \
+ if [ "$$inst" ]; then sudo apt-get $(CONFIRM) $(FORCE) install $$inst; fi
+else ifneq ("$(wildcard /etc/redhat-release)","")
+ @sudo yum install $(CONFIRM) $(DOC_RPM_DEPENDS)
+else
+ $(error "This option currently works only on Ubuntu or CentOS systems")
+endif
+ @touch $@
+
+.PHONY: bootstrap-doxygen
+bootstrap-doxygen: $(BR)/.doxygen-bootstrap.ok
+
+.DELETE_ON_ERROR: $(BR)/.doxygen-siphon.dep
+$(BR)/.doxygen-siphon.dep: Makefile
+ set -e; rm -f "$@"; for input in $(DOXY_INPUT); do \
+ find "$(WS_ROOT)/$$input" -type f \
+ \( -name '*.[ch]' -or -name '*.dox' \) -print \
+ | grep -v -E '^$(WS_ROOT)/$(DOXY_PRE_EXCLUDE_REGEXP)$$' \
+ | sed -e "s/^/\$$(SIPHON_FILES): /" \
+ >> $@; \
+ done
+
+# Include the source -> siphon dependencies
+-include $(BR)/.doxygen-siphon.dep
+
+.NOTPARALLEL: $(SIPHON_FILES)
+$(SIPHON_FILES): $(DOXY_DIR)/siphon_generate.py $(BR)/.doxygen-bootstrap.ok
+ @rm -rf "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)"
+ @mkdir -p "$(SIPHON_INPUT)" "$(SIPHON_OUTPUT)"
+ @touch $(SIPHON_INPUT)/files
+ for input in $(DOXY_INPUT); do \
+ cd "$(WS_ROOT)"; \
+ find "$$input" -type f \
+ \( -name '*.[ch]' -or -name '*.dox' \) -print \
+ | grep -v -E '^$(DOXY_PRE_EXCLUDE_REGEXP)$$' \
+ >> $(SIPHON_INPUT)/files; \
+ done
+ set -e; cd "$(WS_ROOT)"; $(DOXY_DIR)/siphon_generate.py \
+ --output="$(SIPHON_INPUT)" \
+ "@$(SIPHON_INPUT)/files"
+
+
+.DELETE_ON_ERROR: $(SIPHON_DOCS)
+$(SIPHON_OUTPUT)/%.md: $(SIPHON_INPUT)/%.siphon $(DOXY_DIR)/siphon_process.py
+ set -e; cd "$(WS_ROOT)"; \
+ $(DOXY_DIR)/siphon_process.py --type=$(basename $(notdir $<)) \
+ --output="$(SIPHON_OUTPUT)" $< > $@
+
+# This target can be used just to generate the siphoned docs
+.PHONY: doxygen-siphon
+doxygen-siphon: $(SIPHON_DOCS)
+
+# Generate the doxygen docs
+doxygen: $(SIPHON_DOCS)
+ @mkdir -p "$(DOXY_OUTPUT)"
+ set -e; cd "$(WS_ROOT)"; \
+ ROOT="$(WS_ROOT)" \
+ BUILD_ROOT="$(BR)" \
+ INPUT="$(addprefix $(WS_ROOT)/,$(DOXY_INPUT)) $(EXTRA_DOXY_INPUT)" \
+ INCLUDE_PATH="$(DOXY_INCLUDE_PATH)" \
+ HTML=YES \
+ VERSION="`git describe --tags --dirty`" \
+ doxygen $(DOXY_DIR)/doxygen.cfg
+
+wipe-doxygen:
+ rm -rf "$(BR)/docs" "$(BR)/.doxygen-siphon.dep"
+
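For reference, here is a minimal Python sketch (not part of the patch) of what the DOXY_PRE_EXCLUDE_REGEXP construction and the grep -v filters above amount to; the candidate paths other than vlib/vlib/buffer.c are hypothetical.

# Illustrative sketch: roughly how DOXY_PRE_EXCLUDE becomes the anchored
# regexp used by the grep -v filters to drop selectively-compiled sources.
import re

doxy_pre_exclude = ["vlib/vlib/buffer.c"]

# Mirror the Makefile: escape the dots, join the names with '|', anchor the path.
exclude_re = re.compile(r"^(%s)$" % "|".join(p.replace(".", r"\.")
                                             for p in doxy_pre_exclude))

candidates = ["vlib/vlib/buffer.c", "vlib/vlib/buffer.h", "vnet/vnet/ip/ip4.c"]
print([f for f in candidates if not exclude_re.match(f)])
# ['vlib/vlib/buffer.h', 'vnet/vnet/ip/ip4.c']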
diff --git a/doxygen/dir.dox.sample b/doxygen/dir.dox.sample
index 41e84d7bee4..500fe595ae5 100644
--- a/doxygen/dir.dox.sample
+++ b/doxygen/dir.dox.sample
@@ -18,7 +18,7 @@
/**
@dir
-@brief Someone please fix this description
+@brief Someone please fix this description.
@todo This directory needs a description.
This is where you would document the contents of a directory.
@@ -26,3 +26,4 @@ This is where you would document the contents of a directory.
This looks like a C file but it is not part of the build; it is purely
for documentation.
*/
+/*? %%clicmd:group_label CLI section description%% ?*/
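The %%clicmd:group_label ...%% marker added above is consumed by the siphon scripts introduced later in this change. A small illustrative Python snippet, reusing the siphon_block_directive regexp from doxygen/siphon_generate.py, shows how the key and value are extracted:

# Illustrative sketch: how a /*? %%key value%% ?*/ marker is picked apart by
# the siphon_block_directive regexp defined in doxygen/siphon_generate.py.
import re

delimiter = "%%"
directive = re.compile(r"(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)" % (delimiter, delimiter))

line = "/*? %%clicmd:group_label CLI section description%% ?*/"
m = directive.search(line)
print(m.group(2), "->", m.group(3).strip())
# clicmd:group_label -> CLI section description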
diff --git a/doxygen/doxygen.cfg b/doxygen/doxygen.cfg
index 0eadbd7397c..971a159502c 100644
--- a/doxygen/doxygen.cfg
+++ b/doxygen/doxygen.cfg
@@ -229,8 +229,22 @@ TAB_SIZE = 8
# newlines.
ALIASES =
+
+## Indexes VPP graph nodes
ALIASES += "node=@xrefitem nodes \"Node Identifier\" \"Node Identifiers\" @c "
+## Formatting for CLI commands and output
+ALIASES += "cli{1}=<code><pre>\1</code></pre>"
+ALIASES += "clistart=<code><pre>"
+ALIASES += "cliend=</pre></code>"
+
+## Formatting for CLI example paragraphs
+ALIASES += "cliexpar=@par Example usage"
+ALIASES += "cliexcmd{1}=@clistart<b>vpp# <em>\1</em></b>@cliend"
+ALIASES += "cliexstart{1}=@cliexcmd{\1}@clistart"
+ALIASES += "cliexend=@cliend"
+
+
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding "class=itcl::class"
# will allow you to use the command class in the itcl::class meaning.
@@ -630,7 +644,7 @@ GENERATE_DEPRECATEDLIST= YES
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
-ENABLED_SECTIONS =
+ENABLED_SECTIONS = DPDK
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
@@ -901,6 +915,7 @@ INPUT_FILTER =
FILTER_PATTERNS = \
*.c=$(ROOT)/doxygen/filter_c.py \
+ *.h=$(ROOT)/doxygen/filter_h.py \
*.api=$(ROOT)/doxygen/filter_api.py
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
@@ -2022,7 +2037,7 @@ SEARCH_INCLUDES = YES
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
-INCLUDE_PATH = $(INPUT)
+INCLUDE_PATH = $(INCLUDE_PATH)
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
@@ -2046,7 +2061,8 @@ PREDEFINED = \
__ORDER_LITTLE_ENDIAN__=1234 \
__BYTE_ORDER__=1234 \
__FLOAT_WORD_ORDER__=1234 \
- DPDK=1
+ DPDK=1 \
+ always_inline:="static inline"
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
diff --git a/doxygen/filter_c.py b/doxygen/filter_c.py
index db1681c9450..30b933ba79e 100755
--- a/doxygen/filter_c.py
+++ b/doxygen/filter_c.py
@@ -15,38 +15,73 @@
# Filter for .c files to make various preprocessor tricks Doxygenish
-import sys, re
+import os, sys, re
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0]))
sys.exit(1)
-patterns = [
- # Search for VLIB_CLI_COMMAND, extract its parameter and add a docblock for it
- ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>"),
+replace_patterns = [
+ # Search for VLIB_CLI_COMMAND, extract its parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>"),
+ ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_cli_command_t \g<name>"),
- # Search for VLIB_REGISTER_NODE, extract its parameter and add a docblock for it
- ( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_node_registration_t \g<name>"),
+ # Search for VLIB_REGISTER_NODE, extract its parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_node_registration_t \g<name>"),
+ ( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_node_registration_t \g<name>"),
# Search for VLIB_INIT_FUNCTION, extract its parameter and add a docblock for it
- ( re.compile("(?P<m>VLIB_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
- ( re.compile("(?P<m>VLIB_DECLARE_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
+ ( re.compile("(?P<m>VLIB_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
+ ( re.compile("(?P<m>VLIB_DECLARE_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
- # Search for VLIB_LOOP_ENTER_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it
- ( re.compile("(?P<m>VLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_enter_\g<name>"),
- ( re.compile("(?P<m>VLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_exit_\g<name>"),
+ # Search for VLIB_MAIN_LOOP_ENTER/EXIT_FUNCTION, extract the parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_enter_\g<name>"),
+ ( re.compile("(?P<m>VLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_exit_\g<name>"),
- # Search for VLIB_CONFIG_FUNCTION, extract the 1st parameter (ignore any others) and add a docblock for it
- ( re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
- ( re.compile("(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"), r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
+ # Search for VLIB_CONFIG_FUNCTION, extract the parameters and add a docblock for it
+ ( re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
+ ( re.compile("(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"),
+ r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
# Search for "format_thing" and "unformat_thing" when used as a function pointer and add parens
- ( re.compile("(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))") , r"\g<pre>\g<name>()\g<post>" ),
+ ( re.compile("(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))"),
+ r"\g<pre>\g<name>()\g<post>" ),
+
+ # Search for CLIB_PAD_FROM_TO(...); and replace with padding
+ # #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
+ ( re.compile("(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]"),
+ r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]" ),
+
]
-with open(sys.argv[1]) as fd:
+
+filename = sys.argv[1]
+cwd = os.getcwd()
+if filename[0:len(cwd)] == cwd:
+ filename = filename[len(cwd):]
+ if filename[0] == "/":
+ filename = filename[1:]
+
+with open(filename) as fd:
+ line_num = 0
+
for line in fd:
+ line_num += 1
str = line[:-1] # filter \n
- for p in patterns:
+
+ # Look for search/replace patterns
+ for p in replace_patterns:
str = p[0].sub(p[1], str)
+
sys.stdout.write(str+"\n")
+
+# All done
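To make the effect of these substitutions concrete, here is an illustrative run (not part of the patch) of the two-argument VLIB_CLI_COMMAND pattern over a registration line like the one added to vlib/vlib/unix/cli.c further below:

# Illustrative sketch: what filter_c.py emits for a typical CLI registration
# line when Doxygen invokes it as an input filter.
import re

pattern = re.compile(r"(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]")
repl = r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_cli_command_t \g<name>"

line = "VLIB_CLI_COMMAND (unix_cli_quit_command, static) = {"
print(pattern.sub(repl, line))
# -> /** @brief (@em constructor) VLIB_CLI_COMMAND (unix_cli_quit_command) */
#    static vlib_cli_command_t unix_cli_quit_command = {   (one line, wrapped here)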
diff --git a/doxygen/filter_h.py b/doxygen/filter_h.py
new file mode 100755
index 00000000000..967388d5743
--- /dev/null
+++ b/doxygen/filter_h.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter for .h files to make various preprocessor tricks Doxygenish
+
+import os, sys, re
+
+if len(sys.argv) < 2:
+ sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0]))
+ sys.exit(1)
+
+replace_patterns = [
+ # Search for CLIB_PAD_FROM_TO(...); and replace with padding
+ # #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
+ ( re.compile("(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]"),
+ r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]" ),
+
+]
+
+
+filename = sys.argv[1]
+cwd = os.getcwd()
+if filename[0:len(cwd)] == cwd:
+ filename = filename[len(cwd):]
+ if filename[0] == "/":
+ filename = filename[1:]
+
+with open(filename) as fd:
+ line_num = 0
+
+ for line in fd:
+ line_num += 1
+ str = line[:-1] # filter \n
+
+ # Look for search/replace patterns
+ for p in replace_patterns:
+ str = p[0].sub(p[1], str)
+
+ sys.stdout.write(str+"\n")
+
+# All done
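As with filter_c.py, a short illustrative example (the struct member shown is hypothetical) of what the CLIB_PAD_FROM_TO substitution produces:

# Illustrative sketch: the macro invocation becomes an ordinary array member
# that Doxygen can document.
import re

pattern = re.compile(r"(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]")
repl = r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]"

print(pattern.sub(repl, "  CLIB_PAD_FROM_TO (0x34, 0x40);"))
#   /** Padding. */ u8 pad_0x34[(0x40) - (0x34)];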
diff --git a/doxygen/siphon_generate.py b/doxygen/siphon_generate.py
new file mode 100755
index 00000000000..8b999114e52
--- /dev/null
+++ b/doxygen/siphon_generate.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Looks for preprocessor macros with struct initializers and siphons them
+# off into another file for later parsing; ostensibly to generate
+# documentation from struct initializer data.
+
+import os, sys, re, argparse, json
+
+DEFAULT_OUTPUT = "build-root/docs/siphons"
+DEFAULT_PREFIX = os.getcwd()
+
+ap = argparse.ArgumentParser()
+ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT,
+ help="Output directory for .siphon files [%s]" % DEFAULT_OUTPUT)
+ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX,
+ help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX)
+ap.add_argument("input", nargs='+', metavar="input_file",
+ help="Input C source files")
+args = ap.parse_args()
+
+"""Patterns that match the start of code blocks we want to siphon"""
+siphon_patterns = [
+ ( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"), "clicmd" ),
+]
+
+"""Matches a siphon comment block start"""
+siphon_block_start = re.compile("^\s*/\*\?\s*(.*)$")
+
+"""Matches a siphon comment block stop"""
+siphon_block_stop = re.compile("^(.*)\s*\?\*/\s*$")
+
+"""Siphon block directive delimiter"""
+siphon_block_delimiter = "%%"
+
+"""Matches a siphon block directive such as '%clicmd:group_label Debug CLI%'"""
+siphon_block_directive = re.compile("(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)" % \
+ (siphon_block_delimiter, siphon_block_delimiter))
+
+"""Matches the start of an initializer block"""
+siphon_initializer = re.compile("\s*=")
+
+"""
+count open and close braces in str
+return (0, index) when braces were found and count becomes 0.
+index indicates the position at which the last closing brace was
+found.
+return (-1, -1) if a closing brace is found before any opening one.
+return (count, -1) if not all opening braces are closed, count is the
+current depth
+"""
+def count_braces(str, count=0, found=False):
+ for index in range(0, len(str)):
+ if str[index] == '{':
+ count += 1;
+ found = True
+ elif str[index] == '}':
+ if count == 0:
+ # means we never found an open brace
+ return (-1, -1)
+ count -= 1;
+
+ if count == 0 and found:
+ return (count, index)
+
+ return (count, -1)
+
+# Collated output for each siphon
+output = {}
+
+# Build a list of known siphons
+known_siphons = []
+for item in siphon_patterns:
+ siphon = item[1]
+ if siphon not in known_siphons:
+ known_siphons.append(siphon)
+
+# Setup information for siphons we know about
+for siphon in known_siphons:
+ output[siphon] = {
+ "file": "%s/%s.siphon" % (args.output, siphon),
+ "global": {},
+ "items": [],
+ }
+
+# Pre-process file names in case they indicate a file with
+# a list of files
+files = []
+for filename in args.input:
+ if filename.startswith('@'):
+ with open(filename[1:], 'r') as fp:
+ lines = fp.readlines()
+ for line in lines:
+ files.append(line.strip())
+ lines = None
+ else:
+ files.append(filename)
+
+# Iterate all the input files we've been given
+for filename in files:
+ # Strip the current directory off the start of the
+ # filename for brevity
+ if filename[0:len(args.input_prefix)] == args.input_prefix:
+ filename = filename[len(args.input_prefix):]
+ if filename[0] == "/":
+ filename = filename[1:]
+
+ # Work out the abbreviated directory name
+ directory = os.path.dirname(filename)
+ if directory[0:2] == "./":
+ directory = directory[2:]
+ elif directory[0:len(args.input_prefix)] == args.input_prefix:
+ directory = directory[len(args.input_prefix):]
+ if directory[0] == "/":
+ directory = directory[1:]
+
+ # Open the file and explore its contents...
+ sys.stderr.write("Siphoning from %s...\n" % filename)
+ directives = {}
+ with open(filename) as fd:
+ siphon = None
+ close_siphon = None
+ siphon_block = ""
+ in_block = False
+ line_num = 0
+ siphon_line = 0
+
+ for line in fd:
+ line_num += 1
+ str = line[:-1] # filter \n
+
+ """See if there is a block directive and if so extract it"""
+ def process_block_directive(str, directives):
+ m = siphon_block_directive.search(str)
+ if m is not None:
+ k = m.group(2)
+ v = m.group(3).strip()
+ directives[k] = v
+ # Return only the parts we did not match
+ return str[0:m.start(1)] + str[m.end(4):]
+
+ return str
+
+ def process_block_prefix(str):
+ if str.startswith(" * "):
+ str = str[3:]
+ elif str == " *":
+ str = ""
+ return str
+
+ if not in_block:
+ # See if the line contains the start of a siphon doc block
+ m = siphon_block_start.search(str)
+ if m is not None:
+ in_block = True
+ t = m.group(1)
+
+ # Now check if the block closes on the same line
+ m = siphon_block_stop.search(t)
+ if m is not None:
+ t = m.group(1)
+ in_block = False
+
+ # Check for directives
+ t = process_block_directive(t, directives)
+
+ # Filter for normal comment prefixes
+ t = process_block_prefix(t)
+
+ # Add what is left
+ siphon_block += t
+
+ # Skip to next line
+ continue
+
+ else:
+ # Check to see if we have an end block marker
+ m = siphon_block_stop.search(str)
+ if m is not None:
+ in_block = False
+ t = m.group(1)
+ else:
+ t = str
+
+ # Check for directives
+ t = process_block_directive(t, directives)
+
+ # Filter for normal comment prefixes
+ t = process_block_prefix(t)
+
+ # Add what is left
+ siphon_block += t + "\n"
+
+ # Skip to next line
+ continue
+
+
+ if siphon is None:
+ # Look for blocks we need to siphon
+ for p in siphon_patterns:
+ if p[0].match(str):
+ siphon = [ p[1], str + "\n", 0 ]
+ siphon_line = line_num
+
+ # see if we have an initializer
+ m = siphon_initializer.search(str)
+ if m is not None:
+ # count the braces on this line
+ (count, index) = count_braces(str[m.start():])
+ siphon[2] = count
+ # TODO - it's possible we have the initializer all on the first line
+ # we should check for it, but also account for the possibility that
+ # the open brace is on the next line
+ #if count == 0:
+ # # braces balanced
+ # close_siphon = siphon
+ # siphon = None
+ else:
+ # no initializer: close the siphon right now
+ close_siphon = siphon
+ siphon = None
+ else:
+ # See if we should end the siphon here - do we have balanced
+ # braces?
+ (count, index) = count_braces(str, count=siphon[2], found=True)
+ if count == 0:
+ # braces balanced - add the substring and close the siphon
+ siphon[1] += str[:index+1] + ";\n"
+ close_siphon = siphon
+ siphon = None
+ else:
+ # add the whole string, move on
+ siphon[2] = count
+ siphon[1] += str + "\n"
+
+ if close_siphon is not None:
+ # Write the siphoned contents to the right place
+ siphon_name = close_siphon[0]
+
+ # Copy directives for the file
+ details = {}
+ for key in directives:
+ if ":" in key:
+ (sn, label) = key.split(":")
+ if sn == siphon_name:
+ details[label] = directives[key]
+ else:
+ details[key] = directives[key]
+
+ # Copy details for this block
+ details['file'] = filename
+ details['line_start'] = siphon_line
+ details['line_end'] = line_num
+ details['siphon_block'] = siphon_block.strip()
+
+ # Some defaults
+ if "group" not in details:
+ if "group_label" in details:
+ # use the filename since group labels are mostly of file scope
+ details['group'] = details['file']
+ else:
+ details['group'] = directory
+
+ if "group_label" not in details:
+ details['group_label'] = details['group']
+
+ details["block"] = close_siphon[1]
+
+ # Store the item
+ output[siphon_name]['items'].append(details)
+
+ # All done
+ close_siphon = None
+ siphon_block = ""
+
+ # Update globals
+ for key in directives.keys():
+ if ':' not in key:
+ continue
+
+ if filename.endswith("/dir.dox"):
+ # very special! use the parent directory name
+ l = directory
+ else:
+ l = filename
+
+ (sn, label) = key.split(":")
+
+ if sn not in output:
+ output[sn] = {}
+ if 'global' not in output[sn]:
+ output[sn]['global'] = {}
+ if l not in output[sn]['global']:
+ output[sn]['global'][l] = {}
+ if 'file' not in output[sn]:
+ output[sn]['file'] = "%s/%s.siphon" % (args.output, sn)
+ if 'items' not in output[sn]:
+ output[sn]['items'] = []
+
+ output[sn]['global'][l][label] = directives[key]
+
+
+# Write out the data
+for siphon in output.keys():
+ sys.stderr.write("Saving siphon %s...\n" % siphon)
+ s = output[siphon]
+ with open(s['file'], "a") as fp:
+ json.dump(s, fp, separators=(',', ': '), indent=4, sort_keys=True)
+
+# All done
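The brace-counting loop above is the core of the siphon extraction. A self-contained illustrative sketch, with count_braces adapted from the script above and a hypothetical initializer, shows how the depth returns to zero on the closing line and ends the siphon:

# Illustrative, self-contained sketch; the initializer lines are an example.
def count_braces(s, count=0, found=False):
    for index in range(0, len(s)):
        if s[index] == '{':
            count += 1
            found = True
        elif s[index] == '}':
            if count == 0:
                # a closing brace before any opening one
                return (-1, -1)
            count -= 1
        if count == 0 and found:
            return (count, index)
    return (count, -1)

lines = [
    'VLIB_CLI_COMMAND (unix_cli_quit_command, static) = {',
    '  .path = "quit",',
    '  .function = unix_cli_quit,',
    '};',
]
depth = 0
for line in lines:
    depth, index = count_braces(line, count=depth, found=(depth > 0))
    print("%-55s depth=%d" % (line, depth))
# depth goes 1, 1, 1, 0; when it reaches 0 the siphon closes and the block,
# up to and including the final brace, is recorded.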
diff --git a/doxygen/siphon_process.py b/doxygen/siphon_process.py
new file mode 100755
index 00000000000..82a166d31b5
--- /dev/null
+++ b/doxygen/siphon_process.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Processes .siphon files produced by siphon_generate.py.
+# The idea is to siphon off certain initializers so that we can better
+# auto-document their contents.
+
+import os, sys, re, argparse, cgi, json
+import pyparsing as pp
+
+import pprint
+
+DEFAULT_SIPHON = "clicmd"
+DEFAULT_OUTPUT = None
+DEFAULT_PREFIX = os.getcwd()
+
+siphon_map = {
+ 'clicmd': "VLIB_CLI_COMMAND",
+}
+
+ap = argparse.ArgumentParser()
+ap.add_argument("--type", '-t', metavar="siphon_type", default=DEFAULT_SIPHON,
+ choices=siphon_map.keys(),
+ help="Siphon type to process [%s]" % DEFAULT_SIPHON)
+ap.add_argument("--output", '-o', metavar="directory", default=DEFAULT_OUTPUT,
+ help="Output directory for .md files [%s]" % DEFAULT_OUTPUT)
+ap.add_argument("--input-prefix", metavar="path", default=DEFAULT_PREFIX,
+ help="Prefix to strip from input pathnames [%s]" % DEFAULT_PREFIX)
+ap.add_argument("input", nargs='+', metavar="input_file",
+ help="Input .siphon files")
+args = ap.parse_args()
+
+if args.output is None:
+ sys.stderr.write("Error: Siphon processor requires --output to be set.")
+ sys.exit(1)
+
+
+def clicmd_index_sort(cfg, group, dec):
+ if group in dec and 'group_label' in dec[group]:
+ return dec[group]['group_label']
+ return group
+
+def clicmd_index_header(cfg):
+ s = "# CLI command index\n"
+ s += "\n[TOC]\n"
+ return s
+
+def clicmd_index_section(cfg, group, md):
+ return "\n@subpage %s\n\n" % md
+
+def clicmd_index_entry(cfg, meta, item):
+ v = item["value"]
+ return "* [%s](@ref %s)\n" % (v["path"], meta["label"])
+
+def clicmd_sort(cfg, meta, item):
+ return item['value']['path']
+
+def clicmd_header(cfg, group, md, dec):
+ if group in dec and 'group_label' in dec[group]:
+ label = dec[group]['group_label']
+ else:
+ label = group
+ return "\n@page %s %s\n" % (md, label)
+
+def clicmd_format(cfg, meta, item):
+ v = item["value"]
+ s = "\n@section %s %s\n" % (meta['label'], v['path'])
+
+ # The text from '.short_help = '.
+ # Later we should split this into short_help and usage_help
+ # since the latter is how it is primarily used but the former
+ # is also needed.
+ if "short_help" in v:
+ tmp = v["short_help"].strip()
+
+ # Bit hacky. Add a trailing period if it doesn't have one.
+ if tmp[-1] != ".":
+ tmp += "."
+
+ s += "### Summary/usage\n %s\n\n" % tmp
+
+ # This is seldom used and will likely be deprecated
+ if "long_help" in v:
+ tmp = v["long_help"]
+
+ s += "### Long help\n %s\n\n" % tmp
+
+ # Extracted from the code in /*? ... ?*/ blocks
+ if "siphon_block" in item["meta"]:
+ sb = item["meta"]["siphon_block"]
+
+ if sb != "":
+ # hack. still needed?
+ sb = sb.replace("\n", "\\n")
+ try:
+ sb = json.loads('"'+sb+'"')
+ s += "### Description\n%s\n\n" % sb
+ except:
+ pass
+
+ # Gives some developer-useful linking
+ if "item" in meta or "function" in v:
+ s += "### Declaration and implementation\n\n"
+
+ if "item" in meta:
+ s += "Declaration: @ref %s (%s:%d)\n\n" % \
+ (meta['item'], meta["file"], int(item["meta"]["line_start"]))
+
+ if "function" in v:
+ s += "Implementation: @ref %s.\n\n" % v["function"]
+
+ return s
+
+
+siphons = {
+ "VLIB_CLI_COMMAND": {
+ "index_sort_key": clicmd_index_sort,
+ "index_header": clicmd_index_header,
+ "index_section": clicmd_index_section,
+ "index_entry": clicmd_index_entry,
+ 'sort_key': clicmd_sort,
+ "header": clicmd_header,
+ "format": clicmd_format,
+ }
+}
+
+
+# PyParsing definition for our struct initializers which look like this:
+# VLIB_CLI_COMMAND (show_sr_tunnel_command, static) = {
+# .path = "show sr tunnel",
+# .short_help = "show sr tunnel [name <sr-tunnel-name>]",
+# .function = show_sr_tunnel_fn,
+#};
+def getMacroInitializerBNF():
+ cs = pp.Forward()
+ ident = pp.Word(pp.alphas + "_", pp.alphas + pp.nums + "_")
+ intNum = pp.Word(pp.nums)
+ hexNum = pp.Literal("0x") + pp.Word(pp.hexnums)
+ octalNum = pp.Literal("0") + pp.Word("01234567")
+ integer = (hexNum | octalNum | intNum) + \
+ pp.Optional(pp.Literal("ULL") | pp.Literal("LL") | pp.Literal("L"))
+ floatNum = pp.Regex(r'\d+(\.\d*)?([eE]\d+)?') + pp.Optional(pp.Literal("f"))
+ char = pp.Literal("'") + pp.Word(pp.printables, exact=1) + pp.Literal("'")
+ arrayIndex = integer | ident
+
+ lbracket = pp.Literal("(").suppress()
+ rbracket = pp.Literal(")").suppress()
+ lbrace = pp.Literal("{").suppress()
+ rbrace = pp.Literal("}").suppress()
+ comma = pp.Literal(",").suppress()
+ equals = pp.Literal("=").suppress()
+ dot = pp.Literal(".").suppress()
+ semicolon = pp.Literal(";").suppress()
+
+ # initializer := { [member = ] (variable | expression | { initializer } ) }
+ typeName = ident
+ varName = ident
+
+ typeSpec = pp.Optional("unsigned") + \
+ pp.oneOf("int long short float double char u8 i8 void") + \
+ pp.Optional(pp.Word("*"), default="")
+ typeCast = pp.Combine( "(" + ( typeSpec | typeName ) + ")" ).suppress()
+
+ string = pp.Combine(pp.OneOrMore(pp.QuotedString(quoteChar='"',
+ escChar='\\', multiline=True)), adjacent=False)
+ literal = pp.Optional(typeCast) + (integer | floatNum | char | string)
+ var = pp.Combine(pp.Optional(typeCast) + varName + pp.Optional("[" + arrayIndex + "]"))
+
+ expr = (literal | var) # TODO
+
+
+ member = pp.Combine(dot + varName + pp.Optional("[" + arrayIndex + "]"), adjacent=False)
+ value = (expr | cs)
+
+ entry = pp.Group(pp.Optional(member + equals, default="") + value)
+ entries = (pp.ZeroOrMore(entry + comma) + entry + pp.Optional(comma)) | \
+ (pp.ZeroOrMore(entry + comma))
+
+ cs << (lbrace + entries + rbrace)
+
+ macroName = ident
+ params = pp.Group(pp.ZeroOrMore(expr + comma) + expr)
+ macroParams = lbracket + params + rbracket
+
+ mi = macroName + pp.Optional(macroParams) + equals + pp.Group(cs) + semicolon
+ mi.ignore(pp.cppStyleComment)
+ return mi
+
+
+mi = getMacroInitializerBNF()
+
+# Parse the input file into a more usable dictionary structure
+cmds = {}
+line_num = 0
+line_start = 0
+for filename in args.input:
+ sys.stderr.write("Parsing items in file \"%s\"...\n" % filename)
+ data = None
+ with open(filename, "r") as fd:
+ data = json.load(fd)
+
+ cmds['_global'] = data['global']
+
+ # iterate the items loaded and regroup it
+ for item in data["items"]:
+ try:
+ o = mi.parseString(item['block']).asList()
+ except:
+ sys.stderr.write("Exception parsing item: %s\n%s\n" \
+ % (json.dumps(item, separators=(',', ': '), indent=4),
+ item['block']))
+ raise
+
+ group = item['group']
+ file = item['file']
+ macro = o[0]
+ param = o[1][0]
+
+ if group not in cmds:
+ cmds[group] = {}
+
+ if file not in cmds[group]:
+ cmds[group][file] = {}
+
+ if macro not in cmds[group][file]:
+ cmds[group][file][macro] = {}
+
+ c = {
+ 'params': o[2],
+ 'meta': {},
+ 'value': {},
+ }
+
+ for key in item:
+ if key == 'block':
+ continue
+ c['meta'][key] = item[key]
+
+ for i in c['params']:
+ c['value'][i[0]] = cgi.escape(i[1])
+
+ cmds[group][file][macro][param] = c
+
+
+# Write the header for this siphon type
+cfg = siphons[siphon_map[args.type]]
+sys.stdout.write(cfg["index_header"](cfg))
+contents = ""
+
+def group_sort_key(item):
+ if "index_sort_key" in cfg:
+ return cfg["index_sort_key"](cfg, item, cmds['_global'])
+ return item
+
+# Iterate the dictionary and process it
+for group in sorted(cmds.keys(), key=group_sort_key):
+ if group.startswith('_'):
+ continue
+
+ sys.stderr.write("Processing items in group \"%s\"...\n" % group)
+
+ cfg = siphons[siphon_map[args.type]]
+ md = group.replace("/", "_").replace(".", "_")
+ sys.stdout.write(cfg["index_section"](cfg, group, md))
+
+ if "header" in cfg:
+ dec = cmds['_global']
+ contents += cfg["header"](cfg, group, md, dec)
+
+ for file in sorted(cmds[group].keys()):
+ if group.startswith('_'):
+ continue
+
+ sys.stderr.write("- Processing items in file \"%s\"...\n" % file)
+
+ for macro in sorted(cmds[group][file].keys()):
+ if macro != siphon_map[args.type]:
+ continue
+ sys.stderr.write("-- Processing items in macro \"%s\"...\n" % macro)
+ cfg = siphons[macro]
+
+ meta = {
+ "group": group,
+ "file": file,
+ "macro": macro,
+ "md": md,
+ }
+
+ def item_sort_key(item):
+ if "sort_key" in cfg:
+ return cfg["sort_key"](cfg, meta, cmds[group][file][macro][item])
+ return item
+
+ for param in sorted(cmds[group][file][macro].keys(), key=item_sort_key):
+ sys.stderr.write("--- Processing item \"%s\"...\n" % param)
+
+ meta["item"] = param
+
+ # mangle "md" and the item to make a reference label
+ meta["label"] = "%s___%s" % (meta["md"], param)
+
+ if "index_entry" in cfg:
+ s = cfg["index_entry"](cfg, meta, cmds[group][file][macro][param])
+ sys.stdout.write(s)
+
+ if "format" in cfg:
+ contents += cfg["format"](cfg, meta, cmds[group][file][macro][param])
+
+sys.stdout.write(contents)
+
+# All done
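For orientation, here is a hand-written sketch (hypothetical field values, not actual parser output) of the list shape that mi.parseString(...).asList() yields and how the script indexes it into cmds[group][file][macro][param]:

# Illustrative only: the rough shape of mi.parseString(item['block']).asList()
# for a clicmd item; the field values here are hypothetical.
o = [
    "VLIB_CLI_COMMAND",                    # o[0]: the macro name
    ["unix_cli_quit_command", "static"],   # o[1]: the macro parameters
    [                                      # o[2]: initializer entries as [member, value]
        ["path", "quit"],
        ["short_help", "Exit CLI"],
        ["function", "unix_cli_quit"],
    ],
]

macro = o[0]        # "VLIB_CLI_COMMAND" - selects the siphons[] config
param = o[1][0]     # "unix_cli_quit_command" - the registration symbol
value = dict(o[2])  # roughly what ends up in c['value'] (after cgi.escape)
print(macro, param, value["path"])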
diff --git a/vlib/vlib/dir.dox b/vlib/vlib/dir.dox
index 8ca47cd79ef..4806e7a91c6 100644
--- a/vlib/vlib/dir.dox
+++ b/vlib/vlib/dir.dox
@@ -19,3 +19,5 @@
@dir
@brief VLIB application library source.
*/
+/*? %%clicmd:group_label VLIB application library%% ?*/
+
diff --git a/vlib/vlib/unix/cli.c b/vlib/vlib/unix/cli.c
index 92bb8bc3945..bf09ee0cce2 100644
--- a/vlib/vlib/unix/cli.c
+++ b/vlib/vlib/unix/cli.c
@@ -42,6 +42,7 @@
* Provides a command line interface so humans can interact with VPP.
* This is predominantly a debugging and testing mechanism.
*/
+/*? %%clicmd:group_label Debug CLI %% ?*/
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
@@ -146,9 +147,13 @@ typedef struct
CLI process. */
u8 *input_vector;
+ /** This session has command history. */
u8 has_history;
+ /** Array of vectors of commands in the history. */
u8 **command_history;
+ /** The command currently pointed at by the history cursor. */
u8 *current_command;
+ /** How far from the end of the history array the user has browsed. */
i32 excursion;
/** Maximum number of history entries this session will store. */
@@ -157,7 +162,12 @@ typedef struct
/** Current command line counter */
u32 command_number;
+ /** The string being searched for in the history. */
u8 *search_key;
+ /** If non-zero then the CLI is searching in the history array.
+ * - @c -1 means search backwards.
+ * - @c 1 means search forwards.
+ */
int search_mode;
/** Position of the insert cursor on the current input line */
@@ -232,41 +242,41 @@ unix_cli_file_free (unix_cli_file_t * f)
/** CLI actions */
typedef enum
{
- UNIX_CLI_PARSE_ACTION_NOACTION = 0, /**< No action */
- UNIX_CLI_PARSE_ACTION_CRLF, /**< Carriage return, newline or enter */
- UNIX_CLI_PARSE_ACTION_TAB, /**< Tab key */
- UNIX_CLI_PARSE_ACTION_ERASE, /**< Erase cursor left */
- UNIX_CLI_PARSE_ACTION_ERASERIGHT, /**< Erase cursor right */
- UNIX_CLI_PARSE_ACTION_UP, /**< Up arrow */
- UNIX_CLI_PARSE_ACTION_DOWN, /**< Down arrow */
- UNIX_CLI_PARSE_ACTION_LEFT,
- UNIX_CLI_PARSE_ACTION_RIGHT,
- UNIX_CLI_PARSE_ACTION_HOME,
- UNIX_CLI_PARSE_ACTION_END,
- UNIX_CLI_PARSE_ACTION_WORDLEFT,
- UNIX_CLI_PARSE_ACTION_WORDRIGHT,
- UNIX_CLI_PARSE_ACTION_ERASELINELEFT,
- UNIX_CLI_PARSE_ACTION_ERASELINERIGHT,
- UNIX_CLI_PARSE_ACTION_CLEAR,
- UNIX_CLI_PARSE_ACTION_REVSEARCH,
- UNIX_CLI_PARSE_ACTION_FWDSEARCH,
- UNIX_CLI_PARSE_ACTION_YANK,
- UNIX_CLI_PARSE_ACTION_TELNETIAC,
-
- UNIX_CLI_PARSE_ACTION_PAGER_CRLF,
- UNIX_CLI_PARSE_ACTION_PAGER_QUIT,
- UNIX_CLI_PARSE_ACTION_PAGER_NEXT,
- UNIX_CLI_PARSE_ACTION_PAGER_DN,
- UNIX_CLI_PARSE_ACTION_PAGER_UP,
- UNIX_CLI_PARSE_ACTION_PAGER_TOP,
- UNIX_CLI_PARSE_ACTION_PAGER_BOTTOM,
- UNIX_CLI_PARSE_ACTION_PAGER_PGDN,
- UNIX_CLI_PARSE_ACTION_PAGER_PGUP,
- UNIX_CLI_PARSE_ACTION_PAGER_REDRAW,
- UNIX_CLI_PARSE_ACTION_PAGER_SEARCH,
-
- UNIX_CLI_PARSE_ACTION_PARTIALMATCH,
- UNIX_CLI_PARSE_ACTION_NOMATCH
+ UNIX_CLI_PARSE_ACTION_NOACTION = 0, /**< No action */
+ UNIX_CLI_PARSE_ACTION_CRLF, /**< Carriage return, newline or enter */
+ UNIX_CLI_PARSE_ACTION_TAB, /**< Tab key */
+ UNIX_CLI_PARSE_ACTION_ERASE, /**< Erase cursor left */
+ UNIX_CLI_PARSE_ACTION_ERASERIGHT, /**< Erase cursor right */
+ UNIX_CLI_PARSE_ACTION_UP, /**< Up arrow */
+ UNIX_CLI_PARSE_ACTION_DOWN, /**< Down arrow */
+ UNIX_CLI_PARSE_ACTION_LEFT, /**< Left arrow */
+ UNIX_CLI_PARSE_ACTION_RIGHT, /**< Right arrow */
+ UNIX_CLI_PARSE_ACTION_HOME, /**< Home key (jump to start of line) */
+ UNIX_CLI_PARSE_ACTION_END, /**< End key (jump to end of line) */
+ UNIX_CLI_PARSE_ACTION_WORDLEFT, /**< Jump cursor to start of left word */
+ UNIX_CLI_PARSE_ACTION_WORDRIGHT, /**< Jump cursor to start of right word */
+ UNIX_CLI_PARSE_ACTION_ERASELINELEFT, /**< Erase line to left of cursor */
+ UNIX_CLI_PARSE_ACTION_ERASELINERIGHT, /**< Erase line to right & including cursor */
+ UNIX_CLI_PARSE_ACTION_CLEAR, /**< Clear the terminal */
+ UNIX_CLI_PARSE_ACTION_REVSEARCH, /**< Search backwards in command history */
+ UNIX_CLI_PARSE_ACTION_FWDSEARCH, /**< Search forwards in command history */
+ UNIX_CLI_PARSE_ACTION_YANK, /**< Undo last erase action */
+ UNIX_CLI_PARSE_ACTION_TELNETIAC, /**< Telnet control code */
+
+ UNIX_CLI_PARSE_ACTION_PAGER_CRLF, /**< Enter pressed (CR, CRLF, LF, etc) */
+ UNIX_CLI_PARSE_ACTION_PAGER_QUIT, /**< Exit the pager session */
+ UNIX_CLI_PARSE_ACTION_PAGER_NEXT, /**< Scroll to next page */
+ UNIX_CLI_PARSE_ACTION_PAGER_DN, /**< Scroll to next line */
+ UNIX_CLI_PARSE_ACTION_PAGER_UP, /**< Scroll to previous line */
+ UNIX_CLI_PARSE_ACTION_PAGER_TOP, /**< Scroll to first line */
+ UNIX_CLI_PARSE_ACTION_PAGER_BOTTOM, /**< Scroll to last line */
+ UNIX_CLI_PARSE_ACTION_PAGER_PGDN, /**< Scroll to next page */
+ UNIX_CLI_PARSE_ACTION_PAGER_PGUP, /**< Scroll to previous page */
+ UNIX_CLI_PARSE_ACTION_PAGER_REDRAW, /**< Clear and redraw the page on the terminal */
+ UNIX_CLI_PARSE_ACTION_PAGER_SEARCH, /**< Search the pager buffer */
+
+ UNIX_CLI_PARSE_ACTION_PARTIALMATCH, /**< Action parser found a partial match */
+ UNIX_CLI_PARSE_ACTION_NOMATCH /**< Action parser did not find any match */
} unix_cli_parse_action_t;
/** @brief Mapping of input buffer strings to action values.
@@ -485,6 +495,9 @@ unix_cli_match_action (unix_cli_parse_actions_t * a,
}
+/** Add bytes to the output vector and then flag the I/O system that bytes
+ * are available to be sent.
+ */
static void
unix_cli_add_pending_output (unix_file_t * uf,
unix_cli_file_t * cf,
@@ -502,6 +515,9 @@ unix_cli_add_pending_output (unix_file_t * uf,
}
}
+/** Delete all bytes from the output vector and flag the I/O system
+ * that no more bytes are available to be sent.
+ */
static void
unix_cli_del_pending_output (unix_file_t * uf,
unix_cli_file_t * cf, uword n_bytes)
@@ -983,13 +999,13 @@ unix_vlib_cli_output (uword cli_file_index, u8 * buffer, uword buffer_bytes)
/** Identify whether a terminal type is ANSI capable.
*
- * Compares the string given in @term with a list of terminal types known
+ * Compares the string given in @c term with a list of terminal types known
* to support ANSI escape sequences.
*
* This list contains, for example, @c xterm, @c screen and @c ansi.
*
* @param term A string with a terminal type in it.
- * @param len The length of the string in @term.
+ * @param len The length of the string in @c term.
*
* @return @c 1 if the terminal type is recognized as supporting ANSI
* terminal sequences; @c 0 otherwise.
@@ -2059,6 +2075,10 @@ done:
goto more;
}
+/** Destroy a CLI session.
+ * @note If we destroy the @c stdin session this additionally signals
+ * the shutdown of VPP.
+ */
static void
unix_cli_kill (unix_cli_main_t * cm, uword cli_file_index)
{
@@ -2088,6 +2108,7 @@ unix_cli_kill (unix_cli_main_t * cm, uword cli_file_index)
pool_put (cm->cli_file_pool, cf);
}
+/** Handle system events. */
static uword
unix_cli_process (vlib_main_t * vm,
vlib_node_runtime_t * rt, vlib_frame_t * f)
@@ -2130,6 +2151,8 @@ done:
return 0;
}
+/** Called when a CLI session file descriptor can be written to without
+ * blocking. */
static clib_error_t *
unix_cli_write_ready (unix_file_t * uf)
{
@@ -2152,6 +2175,7 @@ unix_cli_write_ready (unix_file_t * uf)
return /* no error */ 0;
}
+/** Called when a CLI session file descriptor has data to be read. */
static clib_error_t *
unix_cli_read_ready (unix_file_t * uf)
{
@@ -2482,8 +2506,8 @@ unix_cli_config (vlib_main_t * vm, unformat_input_t * input)
VLIB_CONFIG_FUNCTION (unix_cli_config, "unix-cli");
-/** Called when VPP is shutting down, this resets the system
- * terminal state, if previously saved.
+/** Called when VPP is shutting down, this restores the system
+ * terminal state if previously saved.
*/
static clib_error_t *
unix_cli_exit (vlib_main_t * vm)
@@ -2500,7 +2524,7 @@ unix_cli_exit (vlib_main_t * vm)
VLIB_MAIN_LOOP_EXIT_FUNCTION (unix_cli_exit);
/** Set the CLI prompt.
- * @param The C string to set the prompt to.
+ * @param prompt The C string to set the prompt to.
* @note This setting is global; it impacts all current
* and future CLI sessions.
*/
@@ -2531,6 +2555,12 @@ unix_cli_quit (vlib_main_t * vm,
return 0;
}
+/*?
+ * Terminates the current CLI session.
+ *
+ * If VPP is running in @em interactive mode and this is the console session
+ * (that is, the session on @c stdin) then this will also terminate VPP.
+?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (unix_cli_quit_command, static) = {
.path = "quit",
@@ -2597,6 +2627,13 @@ done:
return error;
}
+/*?
+ * Executes a sequence of CLI commands which are read from a file.
+ *
+ * If a command is unrecognised or otherwise invalid then the usual CLI
+ * feedback will be generated, however execution of subsequent commands
+ * from the file will continue.
+?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cli_exec, static) = {
.path = "exec",
@@ -2706,6 +2743,9 @@ unix_cli_show_history (vlib_main_t * vm,
return 0;
}
+/*?
+ * Displays the command history for the current session, if any.
+?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cli_unix_cli_show_history, static) = {
.path = "history",
@@ -2755,6 +2795,24 @@ unix_cli_show_terminal (vlib_main_t * vm,
return 0;
}
+/*?
+ * Displays various information about the state of the current terminal
+ * session.
+ *
+ * @cliexpar
+ * @cliexstart{show terminal}
+ * Terminal name: unix-cli-stdin
+ * Terminal mode: char-by-char
+ * Terminal width: 123
+ * Terminal height: 48
+ * ANSI capable: yes
+ * History enabled: yes
+ * History limit: 50
+ * Pager enabled: yes
+ * Pager limit: 100000
+ * CRLF mode: LF
+ * @cliexend
+?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cli_unix_cli_show_terminal, static) = {
.path = "show terminal",
@@ -2799,6 +2857,13 @@ unix_cli_set_terminal_pager (vlib_main_t * vm,
return 0;
}
+/*?
+ * Enables or disables the terminal pager for this session. Generally
+ * this defaults to enabled.
+ *
+ * Additionally allows the pager buffer size to be set; though note that
+ * this value is set globally and not per session.
+?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cli_unix_cli_set_terminal_pager, static) = {
.path = "set terminal pager",
@@ -2850,6 +2915,13 @@ unix_cli_set_terminal_history (vlib_main_t * vm,
return 0;
}
+/*?
+ * Enables or disables the command history function of the current
+ * terminal. Generally this defaults to enabled.
+ *
+ * This command also allows the maximum size of the history buffer for
+ * this session to be altered.
+?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cli_unix_cli_set_terminal_history, static) = {
.path = "set terminal history",
@@ -2880,6 +2952,14 @@ unix_cli_set_terminal_ansi (vlib_main_t * vm,
return 0;
}
+/*?
+ * Enables or disables the use of ANSI control sequences by this terminal.
+ * The default will vary based on terminal detection at the start of the
+ * session.
+ *
+ * ANSI control sequences are used in a small number of places to provide,
+ * for example, color text output and to control the cursor in the pager.
+?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cli_unix_cli_set_terminal_ansi, static) = {
.path = "set terminal ansi",
diff --git a/vlib/vlib/unix/dir.dox b/vlib/vlib/unix/dir.dox
new file mode 100644
index 00000000000..cdded0f19d3
--- /dev/null
+++ b/vlib/vlib/unix/dir.dox
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir
+@brief VLIB Unix interface
+
+VLIB application library Unix interface layer.
+
+*/
+/*? %%clicmd:group_label VLIB Unix stuff%% ?*/
+