author     Ed Warnicke <eaw@cisco.com>    2015-12-08 15:45:58 -0700
committer  Ed Warnicke <eaw@cisco.com>    2015-12-08 15:47:27 -0700
commit     cb9cadad578297ffd78fa8a33670bdf1ab669e7e (patch)
tree       6ac2be912482cc7849a26f0ab845561c3d7f4e26 /build-root
parent     fb0815d4ae4bb0fe27bd9313f34b45c8593b907e (diff)

Initial commit of vpp code. (tag: v1.0.0)

Change-Id: Ib246f1fbfce93274020ee93ce461e3d8bd8b9f17
Signed-off-by: Ed Warnicke <eaw@cisco.com>
Diffstat (limited to 'build-root')
-rw-r--r--  build-root/Makefile | 1168
-rwxr-xr-x  build-root/autowank | 308
-rwxr-xr-x  build-root/bootstrap.sh | 59
-rw-r--r--  build-root/build-config.mk.README | 3
-rw-r--r--  build-root/config.site | 105
-rwxr-xr-x  build-root/copyimg | 83
-rw-r--r--  build-root/deb/debian/.gitignore | 10
-rwxr-xr-x  build-root/deb/debian/README.vpp | 56
-rwxr-xr-x  build-root/deb/debian/compat | 1
-rw-r--r--  build-root/deb/debian/control | 54
-rw-r--r--  build-root/deb/debian/copyright | 9
-rwxr-xr-x  build-root/deb/debian/rules | 30
-rwxr-xr-x  build-root/deb/debian/source/format | 1
-rw-r--r--  build-root/deb/debian/vpp-bin.README.Debian | 53
-rw-r--r--  build-root/deb/debian/vpp.postinst | 8
-rw-r--r--  build-root/deb/debian/vpp.upstart | 21
-rw-r--r--  build-root/emacs-lisp/all-skel.el | 11
-rw-r--r--  build-root/emacs-lisp/cli-cmd-skel.el | 32
-rw-r--r--  build-root/emacs-lisp/config-skel.el | 28
-rw-r--r--  build-root/emacs-lisp/dual-loop-skel.el | 290
-rw-r--r--  build-root/emacs-lisp/periodic-skel.el | 86
-rw-r--r--  build-root/emacs-lisp/pipe-skel.el | 132
-rw-r--r--  build-root/emacs-lisp/tunnel-c-skel.el | 441
-rw-r--r--  build-root/emacs-lisp/tunnel-decap-skel.el | 299
-rw-r--r--  build-root/emacs-lisp/tunnel-encap-skel.el | 245
-rw-r--r--  build-root/emacs-lisp/tunnel-h-skel.el | 128
-rw-r--r--  build-root/packages/vppapigen.mk | 5
-rw-r--r--  build-root/packages/vppinfra.mk | 4
-rw-r--r--  build-root/packages/vppversion.mk | 5
-rw-r--r--  build-root/platforms.mk | 50
-rw-r--r--  build-root/rpm/vpp.service | 12
-rw-r--r--  build-root/rpm/vpp.spec | 127
-rwxr-xr-x  build-root/scripts/find-dev-contents | 31
-rwxr-xr-x  build-root/scripts/generate-deb-changelog | 37
-rwxr-xr-x  build-root/scripts/make-plugin-toolkit | 40
-rwxr-xr-x  build-root/scripts/version | 22
-rw-r--r--  build-root/vagrant/.gitignore | 1
-rw-r--r--  build-root/vagrant/README | 54
-rw-r--r--  build-root/vagrant/Vagrantfile | 36
-rw-r--r--  build-root/vagrant/bootstrap.centos7.sh | 45
-rw-r--r--  build-root/vagrant/bootstrap.ubuntu1404.sh | 77
41 files changed, 4207 insertions, 0 deletions
diff --git a/build-root/Makefile b/build-root/Makefile
new file mode 100644
index 00000000000..ca860fa04f6
--- /dev/null
+++ b/build-root/Makefile
@@ -0,0 +1,1168 @@
+# Copyright (c) 2015 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright (c) 2007-2008 Eliot Dresselhaus
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+######################################################################
+# Collect makefile fragments
+######################################################################
+
+# Scripts require non-POSIX parts of bash
+SHELL := /bin/bash
+
+# Where this makefile lives
+MU_BUILD_ROOT_DIR = $(shell pwd)
+MU_BUILD_NAME = $(shell basename $(MU_BUILD_ROOT_DIR))
+
+# Search path (e.g. multiple directories) where sources are found.
+SOURCE_PATH =
+
+# Pick up user's definitions for variables e.g. SOURCE_PATH, etc.
+-include build-config.mk
+
+MU_BUILD_ROOT_NAME = $(shell basename $(MU_BUILD_ROOT_DIR))
+MU_BUILD_DATA_DIR_NAME = build-data
+
+ABSOLUTE_SOURCE_PATH = $(foreach d,$(SOURCE_PATH),$(shell cd $(d) && pwd))
+
+SOURCE_PATH_BUILD_ROOT_DIRS = $(addsuffix /$(MU_BUILD_NAME),$(ABSOLUTE_SOURCE_PATH))
+SOURCE_PATH_BUILD_DATA_DIRS = $(addsuffix /$(MU_BUILD_DATA_DIR_NAME),$(ABSOLUTE_SOURCE_PATH))
+
+# For tools use build-root as source path, otherwise use given source path
+FIND_SOURCE_PATH = \
+ $(if $(is_build_tool), \
+ $(SOURCE_PATH_BUILD_ROOT_DIRS) $(MU_BUILD_ROOT_DIR), \
+ $(SOURCE_PATH_BUILD_DATA_DIRS))
+
+# First search given source path, then default to build-root
+FULL_SOURCE_PATH = $(SOURCE_PATH_BUILD_DATA_DIRS) $(MU_BUILD_ROOT_DIR)
+
+# Misc functions
+is_in_fn = $(strip $(filter $(1),$(2)))
+last_fn = $(lastword $1)
+chop_fn = $(wordlist 2,$(words $1),x $1)
+uniq_fn = $(strip $(if $1,$(call uniq_fn,$(call chop_fn,$1)) \
+ $(if $(filter $(call last_fn,$1),$(call chop_fn,$1)),,$(call last_fn,$1))))
+ifdef3_fn = $(if $(patsubst undefined,,$(origin $(1))),$(3),$(2))
+ifdef_fn = $(call ifdef3_fn,$(1),$(2),$($(1)))
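+
+# For example (illustrative values, not used anywhere in this makefile):
+#   $(call uniq_fn,a b a c)      -> a b c
+#   $(call ifdef_fn,FOO,default) -> value of FOO if FOO is defined, else "default"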
+
+_mu_debug = $(warning "$(1) = $($(1))")
+
+$(foreach d,$(FIND_SOURCE_PATH), \
+ $(eval _mu_package_mk_in_$(d) = $(shell find $(d)/packages/*.mk 2> /dev/null)) \
+ $(eval _mu_srcdirs_in_$(d) = \
+ $(shell find $(d)/.. \
+ -maxdepth 1 \
+ -type d \
+ -and -not -name ".." \
+ -and -not -name $(MU_BUILD_ROOT_NAME) \
+ -and -not -name $(MU_BUILD_DATA_DIR_NAME))) \
+ $(eval _mu_non_package_files_in_$(d) = \
+ $(shell find $(d)/packages \
+ -type f \
+ -and -not -name '*.mk' \
+ -and -not -name '*~' 2> /dev/null)) \
+ $(foreach p,$(patsubst %.mk,%,$(notdir $(_mu_package_mk_in_$(d)))), \
+ $(eval _mu_package_dir_$(p) = $(d)) \
+ $(eval _mu_package_mk_$(p) = $(d)/packages/$(p).mk) \
+ ) \
+ $(foreach p,$(notdir $(_mu_srcdirs_in_$(d))), \
+ $(eval _mu_package_srcdir_$(p) = $(shell cd $(d)/../$(p) && pwd)) \
+ ) \
+)
+
+# Find root directory for package based on presence of package .mk
+# makefile fragment on source path.
+_find_build_data_dir_for_package_fn = $(shell \
+ set -eu$(BUILD_DEBUG) ; \
+ for d in $(FIND_SOURCE_PATH) ; do \
+ f="$${d}/packages/$(1).mk" ; \
+ [[ -f $${f} ]] && echo `cd $${d} && pwd` && exit 0 ; \
+ done ; \
+ echo "")
+find_build_data_dir_for_package_fn = $(call ifdef_fn,_mu_package_dir_$(1),)
+
+# dir/PACKAGE
+_find_source_fn = $(shell \
+ set -eu$(BUILD_DEBUG) ; \
+ d="$(call find_build_data_dir_for_package_fn,$(1))" ; \
+ [[ -n "$${d}" ]] && d="$${d}/../$(1)" ; \
+ echo "$${d}")
+find_source_fn = $(call ifdef3_fn,_mu_package_dir_$(1),,$(_mu_package_dir_$(1))/../$(1))
+
+# Find given FILE in source path as build-data/packages/FILE
+find_package_file_fn = $(shell \
+ set -eu$(BUILD_DEBUG) ; \
+ d="$(call find_build_data_dir_for_package_fn,$(1))" ; \
+ [[ -n "$${d}" ]] && d="$${d}/packages/$(2)" ; \
+ [[ -f "$${d}" ]] && echo "$${d}")
+
+# Find first FILE in source path with name PATH/build-data/FILE
+find_build_data_file_fn = $(shell \
+ set -eu$(BUILD_DEBUG) ; \
+ for d in $(FIND_SOURCE_PATH) ; do \
+ f="$${d}/$(1)" ; \
+ [[ -f $${f} ]] && echo `cd $${d} && pwd`/$(1) && exit 0 ; \
+ done ; \
+ echo "")
+
+######################################################################
+# ARCH, PLATFORM
+######################################################################
+
+NATIVE_ARCH = $(shell gcc -dumpmachine | sed -e 's/\([a-zA-Z_0-9]*\)-.*/\1/')
+
+# Find all platforms.mk that we can, including those from build-root
+$(foreach d,$(FULL_SOURCE_PATH), \
+ $(eval -include $(d)/platforms.mk))
+
+# Platform should be defined somewhere by specifying $($(PLATFORM)_arch)
+ARCH = $(strip $($(PLATFORM)_arch))
+ifeq ($(ARCH),)
+ $(error "Unknown platform `$(PLATFORM)'")
+endif
+
+# map e.g. ppc7450 -> ppc
+BASIC_ARCH = \
+ ${shell case '$(ARCH)' in \
+ (native) echo $(NATIVE_ARCH) ;; \
+ (i*86*) echo i386 ;; \
+ (ppc*|powerpc*) echo ppc ;; \
+ (*) echo '$(ARCH)' ;; \
+ esac }
+
+# x86_64 can be either 32/64. set BIARCH=32 to get 32 bit libraries.
+BIARCH = 64
+
+x86_64_libdir = $(BIARCH)
+native_libdir = $($(NATIVE_ARCH)_libdir)
+
+# lib or lib64 depending
+arch_lib_dir = lib$($(BASIC_ARCH)_libdir)
+
+# OS to configure for. configure --host will be set to $(ARCH)-$(OS)
+OS = mu-linux
+
+spu_target = spu
+native_target =
+
+is_native = $(if $(ARCH:native=),,true)
+not_native = $(if $(ARCH:native=),true,)
+
+ARCH_TARGET_tmp = $(call ifdef_fn,$(ARCH)_target,$(ARCH)-$(OS))
+TARGET = $(call ifdef_fn,$(PLATFORM)_target,$(ARCH_TARGET_tmp))
+TARGET_PREFIX = $(if $(not_native),$(TARGET)-,)
+
+# CPU microarchitecture detection.
+# Either set <platform>_march in build-data/platforms/<platform>.mk,
+# or detect and use the build-host instruction set
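+# For example, a platform fragment might pin the microarchitecture explicitly
+# (illustrative platform name and value):
+#   vpp_march = corei7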
+
+MARCH = $(strip $($(PLATFORM)_march))
+ifeq ($(MARCH),)
+ ifneq ($(wildcard $(TOOL_INSTALL_DIR)/bin/$(TARGET)-gcc),)
+ TARGET_GCC = $(TOOL_INSTALL_DIR)/bin/$(TARGET)-gcc
+ else ifneq ($(wildcard $(MU_BUILD_ROOT_DIR)/tools/bin/$(TARGET)-gcc),)
+ TARGET_GCC = $(MU_BUILD_ROOT_DIR)/tools/bin/$(TARGET)-gcc
+ endif
+ ifneq ($(TARGET_GCC),)
+ MARCH = $(shell $(TARGET_GCC) -Q --help=target -march=native | grep march | sed -e 's/.*march=[[:space:]]*//')
+ else
+ MARCH = native
+ endif
+else
+ ifeq ($(MARCH),nehalem)
+ override MARCH = corei7
+ else ifeq ($(MARCH),westmere)
+ override MARCH = corei7
+ else ifeq ($(MARCH),sandybridge)
+ override MARCH = corei7-avx
+ else ifeq ($(MARCH),ivybridge)
+ override MARCH = core-avx-i
+ else ifeq ($(MARCH),haswell)
+ override MARCH = core-avx2
+ endif
+endif
+export MARCH
+
+######################################################################
+# Generic build stuff
+######################################################################
+
+# The package we are currently working on
+PACKAGE = $*
+
+# Build/install tags. This lets you have different CFLAGS/CPPFLAGS/LDFLAGS
+# for e.g. debug versus optimized compiles. Each tag has its own set of build/install
+# areas.
+TAG =
+TAG_PREFIX = $(if $(TAG),$(TAG)-)
+
+# yes you need the space
+tag_var_with_added_space_fn = $(if $($(TAG)_TAG_$(1)),$($(TAG)_TAG_$(1)) )
+
+# TAG=debug for debugging
+debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -march=$(MARCH)
+debug_TAG_LDFLAGS = -g -O0 -DCLIB_DEBUG -march=$(MARCH)
+
+# TAG=prof for profiling
+prof_TAG_CFLAGS = -g -pg -O2
+prof_TAG_LDFLAGS = -g -pg -O2
+
+# TAG=o0
+o0_TAG_CFLAGS = -g -O0
+o0_TAG_LDFLAGS = -g -O0
+
+# TAG=o1
+o1_TAG_CFLAGS = -g -O1
+o1_TAG_LDFLAGS = -g -O1
+
+# TAG=o2
+o2_TAG_CFLAGS = -g -O2
+o2_TAG_LDFLAGS = -g -O2
+
+# TAG=o3
+o3_TAG_CFLAGS = -g -O3
+o3_TAG_LDFLAGS = -g -O3
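+
+# For example, a debug build of a package might be requested with
+# (illustrative platform and package names):
+#   make PLATFORM=vpp TAG=debug vppinfra-install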
+
+BUILD_PREFIX_package = build-$(TAG_PREFIX)
+BUILD_PREFIX_tool = build-tool-$(TAG_PREFIX)
+INSTALL_PREFIX = install-$(TAG_PREFIX)
+IMAGES_PREFIX = images-$(TAG_PREFIX)
+
+# Whether we are building a tool or not
+tool_or_package_fn = $(if $(is_build_tool),tool,package)
+
+# Directory where packages are built & installed
+BUILD_DIR = $(MU_BUILD_ROOT_DIR)/$(BUILD_PREFIX_$(call tool_or_package_fn))$(ARCH)
+
+## BURT
+# we will deprecate INSTALL_DIR shortly for DFLT_INSTALL_DIR
+INSTALL_DIR = $(MU_BUILD_ROOT_DIR)/$(INSTALL_PREFIX)$(ARCH)
+# DFLT_INSTALL_DIR used in platforms.mk for $(PLATFORM)_DESTDIR_BASE
+DFLT_INSTALL_DIR := $(MU_BUILD_ROOT_DIR)/$(INSTALL_PREFIX)$(ARCH)
+## BURT
+
+PLATFORM_IMAGE_DIR = $(MU_BUILD_ROOT_DIR)/$(IMAGES_PREFIX)$(PLATFORM)
+
+# $(call VAR,DEFAULT)
+override_var_with_default_fn = $(if $($(1)),$($(1)),$(2))
+
+# $(call if_directory_exists_fn,D1,D2) returns D1 if it exists else D2
+define if_directory_exists_fn
+$(shell if test -d $(1); then echo $(1); else echo $(2); fi)
+endef
+
+# $(call if_file_exists_fn,F1,F2) returns F1 if it exists else F2
+define if_file_exists_fn
+$(shell if test -f $(1); then echo $(1); else echo $(2); fi)
+endef
+
+# Default VAR, package specified override of default PACKAGE_VAR
+package_var_fn = $(call override_var_with_default_fn,$(1)_$(2),$(1))
+
+package_build_dir_fn = $(call package_var_fn,$(1),build_dir)
+
+package_install_dir_fn = \
+ $(if $(is_build_tool),$(TOOL_INSTALL_DIR),$(INSTALL_DIR)/$(call package_build_dir_fn,$(1)))
+
+PACKAGE_BUILD_DIR = \
+ $(BUILD_DIR)/$(call package_build_dir_fn,$(PACKAGE))
+PACKAGE_INSTALL_DIR = \
+ $(call package_install_dir_fn,$(PACKAGE))
+
+# Tools (gcc, binutils, glibc...) are installed here
+TOOL_INSTALL_DIR = $(MU_BUILD_ROOT_DIR)/tools
+
+# Target specific tools go here e.g. mu-build/tools/ppc-mu-linux
+TARGET_TOOL_INSTALL_DIR = $(TOOL_INSTALL_DIR)/$(TARGET)
+
+# Set BUILD_DEBUG to vx or x to enable shell command tracing.
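+# e.g. (illustrative) "make ... BUILD_DEBUG=x" runs the recipe shell fragments
+# under "set -eux".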
+BUILD_DEBUG =
+
+# Message from build system itself (as opposed to make or shell commands)
+build_msg_fn = echo "@@@@ $(1) @@@@"
+
+# Always prefer our own tools to those installed on system.
+# Note: ccache-bin must be before tool bin.
+BUILD_ENV = \
+ export CCACHE_DIR=$(MU_BUILD_ROOT_DIR)/.ccache ; \
+ export PATH=$(TOOL_INSTALL_DIR)/ccache-bin:$(TOOL_INSTALL_DIR)/bin:$${PATH} ; \
+ export PATH="`echo $${PATH} | sed -e s/[.]://`" ; \
+ $(if $(not_native),export CONFIG_SITE=$(MU_BUILD_ROOT_DIR)/config.site ;,) \
+ export LD_LIBRARY_PATH=$(TOOL_INSTALL_DIR)/lib64:$(TOOL_INSTALL_DIR)/lib ; \
+ set -eu$(BUILD_DEBUG) ; \
+ set -o pipefail
+
+######################################################################
+# Package build generic definitions
+######################################################################
+
+package_dir_fn = \
+ $(call find_build_data_dir_for_package_fn,$(1))/packages
+
+package_mk_fn = $(call package_dir_fn,$(1))/$(1).mk
+
+### BURT
+
+#next version
+#pkgPhaseDependMacro = $(foreach x,configure build install, \
+ $(eval $(1)_$(x)_depend := $($(1)_depend:%=%-$(x))))
+#version equivalent to original code
+pkgPhaseDependMacro = $(eval $(1)_configure_depend := $($(1)_depend:%=%-install))
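+# A package fragment might use it as follows (illustrative package names):
+#   foo_depend = vppinfra
+#   $(call pkgPhaseDependMacro,foo)
+# which expands to: foo_configure_depend := vppinfra-install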
+
+### BURT
+
+# Pick up build-root/pre-package-include.mk for all source directories
+$(foreach d,$(SOURCE_PATH_BUILD_ROOT_DIRS), \
+ $(eval -include $(d)/pre-package-include.mk))
+
+$(foreach d,$(addsuffix /packages,$(FIND_SOURCE_PATH)), \
+ $(eval -include $(d)/*.mk) \
+ $(eval ALL_PACKAGES += $(patsubst $(d)/%.mk,%,$(wildcard $(d)/*.mk))) \
+)
+
+# Pick up build-root/post-package-include.mk for all source directories
+$(foreach d,$(SOURCE_PATH_BUILD_ROOT_DIRS), \
+ $(eval -include $(d)/post-package-include.mk))
+
+# Linux specific native build tools
+NATIVE_TOOLS_LINUX = \
+ e2fsimage \
+ e2fsprogs \
+ fakeroot \
+ jffs2 \
+ mkimage \
+ zlib \
+ xz \
+ squashfs
+
+IS_LINUX = $(if $(findstring no,$($(PLATFORM)_uses_linux)),no,yes)
+
+NATIVE_TOOLS_$(IS_LINUX) += $(NATIVE_TOOLS_LINUX)
+
+# only build glibc for linux installs
+CROSS_TOOLS_$(IS_LINUX) += glibc gcc
+
+# must be first for bootstrapping
+NATIVE_TOOLS = findutils make
+
+# basic tools needed for build system
+NATIVE_TOOLS += git automake autoconf libtool texinfo bison flex tar
+
+# needed to compile gcc
+NATIVE_TOOLS += mpfr gmp mpc
+
+# Tool to sign binaries
+NATIVE_TOOLS += sign
+
+# ccache
+NATIVE_TOOLS += ccache
+
+# Tools needed on native host to build for platform
+NATIVE_TOOLS += $(call ifdef_fn,$(PLATFORM)_native_tools,)
+
+# Tools for cross-compiling from native -> ARCH
+CROSS_TOOLS = binutils gcc-bootstrap gdb
+
+# Tools needed on native host to build for platform
+CROSS_TOOLS += $(call ifdef_fn,$(PLATFORM)_cross_tools,)
+
+NATIVE_TOOLS += $(NATIVE_TOOLS_yes)
+CROSS_TOOLS += $(CROSS_TOOLS_yes)
+
+timestamp_name_fn = .mu_build_$(1)_timestamp
+CONFIGURE_TIMESTAMP = $(call timestamp_name_fn,configure)
+BUILD_TIMESTAMP = $(call timestamp_name_fn,build)
+INSTALL_TIMESTAMP = $(call timestamp_name_fn,install)
+
+TIMESTAMP_DIR = $(PACKAGE_BUILD_DIR)
+
+find_newer_files_fn = \
+ "`for i in $(2) ; do \
+ [[ -f $$i && $$i -nt $(1) ]] && echo "$$i" && exit 0; \
+ done ; \
+ exit 0;`"
+
+find_filter = -not -name '*~'
+find_filter += -and -not -path '*/.git*'
+find_filter += -and -not -path '*/.svn*'
+find_filter += -and -not -path '*/.CVS*'
+find_filter += -and -not -path '*/manual/*'
+find_filter += -and -not -path '*/autom4te.cache/*'
+find_filter += -and -not -path '*/doc/all-cfg.texi'
+find_filter += -and -not -path '*/.mu_build_*'
+
+find_newer_filtered_fn = \
+ (! -f $(1) \
+ || -n $(call find_newer_files_fn,$(1),$(3)) \
+ || -n "`find -H $(2) \
+ -type f \
+ -and -newer $(1) \
+ -and \( $(4) \) \
+ -print -quit`")
+
+find_newer_fn = \
+ $(call find_newer_filtered_fn,$(1),$(2),$(3),$(find_filter))
+
+######################################################################
+# Package dependencies
+######################################################################
+
+# This must come before %-configure, %-build, %-install pattern rules
+# or else dependencies will not work.
+
+package_dependencies_fn = \
+ $(patsubst %-install, %, \
+ $(filter %-install,$($(1)_configure_depend)))
+
+PACKAGE_DEPENDENCIES = $(call package_dependencies_fn,$(PACKAGE))
+
+# package specific configure, build, install dependencies
+add_package_dependency_fn = \
+ $(if $($(1)_$(2)_depend), \
+ $(eval $(1)-$(2) : $($(1)_$(2)_depend)))
+
+$(foreach p,$(ALL_PACKAGES), \
+ $(call add_package_dependency_fn,$(p),configure) \
+ $(call add_package_dependency_fn,$(p),build) \
+ $(call add_package_dependency_fn,$(p),install))
+
+TARGETS_RESPECTING_DEPENDENCIES = image_install wipe diff push-all pull-all find-source
+
+# carry over packages dependencies to image install, wipe, pull-all, push-all
+$(foreach p,$(ALL_PACKAGES), \
+ $(if $($(p)_configure_depend), \
+ $(foreach s,$(TARGETS_RESPECTING_DEPENDENCIES), \
+ $(eval $(p)-$(s): \
+ $(addsuffix -$(s), $(call package_dependencies_fn,$(p)))))))
+
+# recursively resolve dependencies
+resolve_dependencies2_fn = $(strip \
+ $(eval __added = $(filter-out $(4), \
+ $(call uniq_fn, \
+ $(foreach l,$(3), \
+ $(call ifdef3_fn,$(l)$(1),,$(call $(2),$($(l)$(1)))) \
+ )))) \
+ $(eval __known = $(call uniq_fn,$(4) $(3) $(__added))) \
+ $(if $(__added), \
+ $(call resolve_dependencies2_fn,$(1),$(2),$(__added),$(__known)), \
+ $(__known)) \
+)
+
+resolve_dependencies_null_fn = $(1)
+
+resolve_dependencies_fn = $(call resolve_dependencies2_fn,$(1),resolve_dependencies_null_fn,$(2))
+
+######################################################################
+# Package configure
+######################################################################
+
+# x86_64 can be either 32/64. set BIARCH=32 to get 32 bit libraries.
+BIARCH = 64
+
+x86_64_libdir = $(BIARCH)
+native_libdir = $($(NATIVE_ARCH)_libdir)
+
+# lib or lib64 depending
+arch_lib_dir = lib$($(BASIC_ARCH)_libdir)
+
+# find dynamic linker as absolute path
+TOOL_INSTALL_LIB_DIR=$(TOOL_INSTALL_DIR)/$(TARGET)/$(arch_lib_dir)
+DYNAMIC_LINKER=${shell cd $(TOOL_INSTALL_LIB_DIR); echo ld*.so.*}
+
+# Pad dynamic linker & rpath so elftool will never have to change ELF section sizes.
+# Yes, this is a kludge.
+lots_of_slashes_to_pad_names = "/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////"
+
+# When PLATFORM != native we *always* use our own versions of GLIBC and dynamic linker
+CROSS_LDFLAGS = \
+ -Wl,--dynamic-linker=$(lots_of_slashes_to_pad_names)$(TOOL_INSTALL_LIB_DIR)/$(DYNAMIC_LINKER) \
+ -Wl,-rpath -Wl,$(lots_of_slashes_to_pad_names)$(TOOL_INSTALL_LIB_DIR)
+
+cross_ldflags = $(if $(is_native)$(is_build_tool),,$(CROSS_LDFLAGS) )
+
+# $(call installed_lib_fn,PACKAGE)
+# Return install library directory for given package.
+# Some packages (e.g. openssl) don't install under lib64; instead they use lib
+define installed_lib_fn
+$(call if_directory_exists_fn,
+ $(call package_install_dir_fn,$(1))/$(arch_lib_dir),
+ $(call package_install_dir_fn,$(1))/lib)
+endef
+
+# Set -L and rpath to point to dependent libraries previously built by us.
+installed_libs_fn = \
+ $(foreach i,$(1), \
+ -L$(call installed_lib_fn,$(i)) \
+ -Wl,-rpath -Wl,$(call installed_lib_fn,$(i)))
+
+# As above for include files
+installed_include_fn = $(call package_install_dir_fn,$(1))/include
+
+installed_includes_fn = $(foreach i,$(1),-I$(call installed_include_fn,$(i)))
+
+# By default package CPPFLAGS (to set include path -I) and LDFLAGS (to set link path -L)
+# point at dependent install directories.
+DEFAULT_CPPFLAGS = $(call installed_includes_fn, $(PACKAGE_DEPENDENCIES))
+DEFAULT_LDFLAGS = $(call installed_libs_fn, $(PACKAGE_DEPENDENCIES))
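+
+# e.g. (illustrative) a package that depends on vppinfra is configured with
+# CPPFLAGS containing -I$(INSTALL_DIR)/vppinfra/include and LDFLAGS containing
+# -L$(INSTALL_DIR)/vppinfra/$(arch_lib_dir) (or .../lib) plus a matching -rpath.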
+
+configure_var_fn = \
+ $(call tag_var_with_added_space_fn,$(1))$(call override_var_with_default_fn,$(PACKAGE)_$(1),$(DEFAULT_$(1)))
+configure_ldflags_fn = \
+ $(cross_ldflags)$(call configure_var_fn,LDFLAGS)
+
+# Allow packages to override CPPFLAGS, CFLAGS, and LDFLAGS
+CONFIGURE_ENV = \
+ $(if $(call configure_var_fn,CPPFLAGS), \
+ CPPFLAGS="$(CPPFLAGS) $(call configure_var_fn,CPPFLAGS)") \
+ $(if $(call configure_var_fn,CFLAGS), \
+ CFLAGS="$(CFLAGS) $(call configure_var_fn,CFLAGS)") \
+ $(if $(call configure_var_fn,CCASFLAGS), \
+ CCASFLAGS="$(CCASFLAGS) $(call configure_var_fn,CCASFLAGS)") \
+ $(if $(call configure_ldflags_fn), \
+ LDFLAGS="$(LDFLAGS) $(call configure_ldflags_fn)") \
+ $(if $($(PACKAGE)_configure_env),$($(PACKAGE)_configure_env))
+
+### BURT
+# only partially used now (used in a few .mk files)
+ifeq ($(is_build_tool),yes)
+prefix = $(PACKAGE_INSTALL_DIR)
+libdir = $(PACKAGE_INSTALL_DIR)/$(arch_lib_dir)
+libexecdir = $(PACKAGE_INSTALL_DIR)/usr/libexec
+DESTDIR = /
+else
+# Eventually simplify this with no per package DESTDIR or prefix
+ppdMacro = $(if $(PER_PACKAGE_DESTDIR),$(call package_build_dir_fn,$(1)))
+pppMacro = $(if $(PER_PACKAGE_PREFIX),$(call package_build_dir_fn,$(1)))
+prefixMacro = $($(PLATFORM)_PREFIX_BASE)/$(pppMacro)
+prefix = $(call prefixMacro,$(PACKAGE))
+libdir = $($(PLATFORM)_LIBDIR)
+libexecdir = $($(PLATFORM)_LIBEXECDIR)
+destdirMacro = $($(PLATFORM)_DESTDIR_BASE)$(ppdMacro)
+DESTDIR = $(call destdirMacro,$(PACKAGE))
+endif
+### BURT
+### dbarach
+image_extra_dependencies = $($(PLATFORM)_image_extra_dependencies)
+### dbarach
+
+configure_package_gnu = \
+ s=$(call find_source_fn,$(PACKAGE_SOURCE)) ; \
+ if [ ! -f $$s/configure ] ; then \
+ autoreconf -i -f $$s ; \
+ fi ; \
+ cd $(PACKAGE_BUILD_DIR) ; \
+ env $(CONFIGURE_ENV) \
+ $$s/configure \
+ $(if $($(PACKAGE)_configure_host_and_target), \
+ $($(PACKAGE)_configure_host_and_target), \
+ $(if $(not_native),--host=$(TARGET),)) \
+ $(if $($(PACKAGE)_configure_prefix), \
+ $($(PACKAGE)_configure_prefix), \
+ --libdir=$(PACKAGE_INSTALL_DIR)/$(arch_lib_dir) \
+ --prefix=$(PACKAGE_INSTALL_DIR)) \
+ $($(PACKAGE)_configure_args) \
+ $($(PACKAGE)_configure_args_$(PLATFORM))
+
+configure_package = \
+ $(call build_msg_fn,Configuring $(PACKAGE) in $(PACKAGE_BUILD_DIR)) ; \
+ mkdir -p $(PACKAGE_BUILD_DIR) ; \
+ $(if $($(PACKAGE)_configure), \
+ $($(PACKAGE)_configure), \
+ $(configure_package_gnu))
+
+# Tools (e.g. gcc, binutils, gdb) require a platform to build for
+check_platform = \
+ is_tool="$(is_build_tool)" ; \
+ is_cross_package="$(findstring $(PACKAGE),$(CROSS_TOOLS))" ; \
+ is_arch_native="$(if $(subst native,,$(ARCH)),,yes)" ; \
+ if [ "$${is_tool}" == "yes" \
+ -a "$${is_cross_package}" != "" \
+ -a "$${is_arch_native}" != "" ]; then \
+ $(call build_msg_fn,You must specify PLATFORM for building tools) ; \
+ exit 1 ; \
+ fi ; \
+ : check that platform gcc can be found ; \
+ target_gcc=gcc ; \
+ if [ "$${is_arch_native}" != "yes" ] ; then \
+ target_gcc=$(TARGET)-gcc ; \
+ fi ; \
+ if [ "$${is_tool}" != "yes" \
+ -a "$${is_arch_native}" != "yes" \
+ -a ! -x "`which 2> /dev/null $${target_gcc}`" ] ; then \
+ $(call build_msg_fn, \
+ No cross-compiler found for platform $(PLATFORM) target $(TARGET); \
+ try make PLATFORM=$(PLATFORM) install-tools) ; \
+ exit 1 ; \
+ fi
+
+configure_check_timestamp = \
+ @$(BUILD_ENV) ; \
+ $(check_platform) ; \
+ mkdir -p $(PACKAGE_BUILD_DIR) ; \
+ mkdir -p $(PACKAGE_INSTALL_DIR) ; \
+ conf="$(TIMESTAMP_DIR)/$(CONFIGURE_TIMESTAMP)" ; \
+ dirs="$(call package_mk_fn,$(PACKAGE)) \
+ $(wildcard $(call find_source_fn,$(PACKAGE_SOURCE))/configure) \
+ $(MU_BUILD_ROOT_DIR)/config.site" ; \
+ if [[ $(call find_newer_fn, $${conf}, $${dirs}, $?) ]]; then \
+ $(configure_package) ; \
+ touch $${conf} ; \
+ else \
+ $(call build_msg_fn,Configuring $(PACKAGE): nothing to do) ; \
+ fi
+
+.PHONY: %-configure
+%-configure: %-find-source
+ $(configure_check_timestamp)
+
+######################################################################
+# Package build
+######################################################################
+
+linux_n_cpus = `grep '^processor' /proc/cpuinfo | wc -l`
+
+MAKE_PARALLEL_JOBS = \
+ -j $(shell \
+ if [ -f /proc/cpuinfo ] ; then \
+ expr 4 '*' $(linux_n_cpus) ; \
+ else \
+ echo 1 ; \
+ fi)
+
+MAKE_PARALLEL_FLAGS = $(if $($(PACKAGE)_make_parallel_fails),,$(MAKE_PARALLEL_JOBS))
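+
+# e.g. (illustrative) an 8-cpu build host yields "-j 32"; a package whose build
+# breaks under parallel make can set foo_make_parallel_fails = yes in its fragment.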
+
+# Make command shorthand for packages & tools.
+PACKAGE_MAKE = \
+ $(MAKE) \
+ -C $(PACKAGE_BUILD_DIR) \
+ $($(PACKAGE)_make_args) \
+ $(MAKE_PARALLEL_FLAGS)
+
+build_package = \
+ $(call build_msg_fn,Building $* in $(PACKAGE_BUILD_DIR)) ; \
+ mkdir -p $(PACKAGE_BUILD_DIR) ; \
+ cd $(PACKAGE_BUILD_DIR) ; \
+ $(if $($(PACKAGE)_build), \
+ $($(PACKAGE)_build), \
+ $(PACKAGE_MAKE))
+
+build_check_timestamp = \
+ @$(BUILD_ENV) ; \
+ comp="$(TIMESTAMP_DIR)/$(BUILD_TIMESTAMP)" ; \
+ conf="$(TIMESTAMP_DIR)/$(CONFIGURE_TIMESTAMP)" ; \
+ dirs="$(call find_source_fn,$(PACKAGE_SOURCE)) \
+ $($(PACKAGE)_build_timestamp_depends) \
+ $(if $(is_build_tool),,$(addprefix $(INSTALL_DIR)/,$(PACKAGE_DEPENDENCIES)))" ; \
+ if [[ $${conf} -nt $${comp} \
+ || $(call find_newer_fn, $${comp}, $${dirs}, $?) ]]; then \
+ $(build_package) ; \
+ touch $${comp} ; \
+ else \
+ $(call build_msg_fn,Building $(PACKAGE): nothing to do) ; \
+ fi
+
+.PHONY: %-build
+%-build: %-configure
+ $(build_check_timestamp)
+
+.PHONY: %-rebuild
+%-rebuild: %-wipe %-build
+ @ :
+
+######################################################################
+# Package install
+######################################################################
+
+install_package = \
+ : by default, for non-tools, remove any previously installed bits ; \
+ $(if $(is_build_tool)$($(PACKAGE)_keep_instdir), \
+ true, \
+ rm -rf $(PACKAGE_INSTALL_DIR)); \
+ mkdir -p $(PACKAGE_INSTALL_DIR) ; \
+ $(if $($(PACKAGE)_pre_install),$($(PACKAGE)_pre_install),true); \
+ $(if $($(PACKAGE)_install), \
+ $($(PACKAGE)_install), \
+ $(PACKAGE_MAKE) \
+ $($(PACKAGE)_install_args) \
+ install) ; \
+ $(if $($(PACKAGE)_post_install),$($(PACKAGE)_post_install),true)
+
+install_check_timestamp = \
+ @$(BUILD_ENV) ; \
+ inst=$(TIMESTAMP_DIR)/$(INSTALL_TIMESTAMP) ; \
+ dirs="$(PACKAGE_BUILD_DIR) \
+ $($(PACKAGE)_install_dependencies)" ; \
+ if [[ $(call find_newer_fn, $${inst}, $${dirs}, $?) ]]; then \
+ $(call build_msg_fn,Installing $(PACKAGE)) ; \
+ $(install_package) ; \
+ touch $${inst} ; \
+ else \
+ $(call build_msg_fn,Installing $(PACKAGE): nothing to do) ; \
+ fi
+
+.PHONY: %-install
+%-install: %-build
+ $(install_check_timestamp)
+
+######################################################################
+# Source code management
+######################################################################
+
+GIT = git
+
+# Maps package name to source directory root.
+# Multiple packages may use a single source tree.
+# For example, gcc-bootstrap package shares gcc source.
+PACKAGE_SOURCE = $(if $($(PACKAGE)_source),$($(PACKAGE)_source),$(PACKAGE))
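+
+# e.g. (illustrative) a package fragment might set:
+#   gcc-bootstrap_source = gcc
+# so that gcc-bootstrap builds out of the gcc source tree.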
+
+# Use git to download source if directory is not found
+find_source_for_package = \
+ @$(BUILD_ENV) ; \
+ $(call build_msg_fn,Arch for platform '$(PLATFORM)' is $(ARCH)) ; \
+ $(call build_msg_fn,Finding source for $(PACKAGE)) ; \
+ s="$(call find_source_fn,$(PACKAGE_SOURCE))" ; \
+ [[ -z "$${s}" ]] \
+ && $(call build_msg_fn,Package $(PACKAGE) not found with path $(SOURCE_PATH)) \
+ && exit 1; \
+ mk="$(call find_build_data_dir_for_package_fn,$(PACKAGE_SOURCE))/packages/$(PACKAGE).mk"; \
+ $(call build_msg_fn,Makefile fragment found in $${mk}) ; \
+ if [ ! -d "$${s}" ] ; then \
+ d=`dirname $${mk}` ; \
+ i=`cd $${d}/.. && ($(GIT) config remote.origin.url || \
+ awk '/URL/ { print $$2; }' .git/remotes/origin)`; \
+ g=`dirname $${i}` ; \
+ $(call build_msg_fn,Fetching source: $(GIT) clone $${g}/$(PACKAGE_SOURCE) $$s) ; \
+ if ! $(GIT) clone $${g}/$(PACKAGE_SOURCE) $$s; then \
+ $(call build_msg_fn,No source for $(PACKAGE) in $${g}); \
+ exit 1; \
+ fi ; \
+ $(call build_msg_fn,Autowanking $${g}/$(PACKAGE_SOURCE)) ; \
+ (cd $${s} ; $(MU_BUILD_ROOT_DIR)/autowank --touch) ; \
+ fi ; \
+ s=`cd $${s} && pwd` ; \
+ $(call build_msg_fn,Source found in $${s})
+
+.PHONY: %-find-source
+%-find-source:
+ $(find_source_for_package)
+
+.PHONY: %-push %-pull %-push-all %-pull-all
+%-push %-pull %-push-all %-pull-all:
+ @$(BUILD_ENV) ; \
+ push_or_pull=$(patsubst %-all,%,$(subst $(PACKAGE)-,,$@)) ; \
+ $(call build_msg_fn,Git $${push_or_pull} source for $(PACKAGE)) ; \
+ s=$(call find_source_fn,$(PACKAGE_SOURCE)) ; \
+ if [ "x$$s" = "x" ]; then \
+ $(call build_msg_fn,No source for $(PACKAGE)) ; \
+ exit 1; \
+ fi ; \
+ cd $$s && $(GIT) $${push_or_pull}
+
+# Pull all packages for platform
+.PHONY: pull-all
+pull-all:
+ @$(BUILD_ENV) ; \
+ $(call build_msg_fn,Git pull build system) ; \
+ for d in $(MU_BUILD_ROOT_DIR) \
+ $(SOURCE_PATH_BUILD_ROOT_DIRS) \
+ $(SOURCE_PATH_BUILD_DATA_DIRS); do \
+ $(call build_msg_fn,Git pull $${d}) ; \
+ pushd $${d} >& /dev/null && $(GIT) pull && popd >& /dev/null ; \
+ done ; \
+ $(call build_msg_fn,Git pull build tools) ; \
+ $(call tool_make_target_fn,pull-all) ; \
+ $(call build_msg_fn,Git pull packages for platform $(PLATFORM)) ; \
+ make PLATFORM=$(PLATFORM) $(patsubst %,%-pull-all,$(ROOT_PACKAGES))
+
+.PHONY: %-diff
+%-diff:
+ @$(BUILD_ENV) ; \
+ d=$(call find_source_fn,$(PACKAGE_SOURCE)) ; \
+ $(call build_msg_fn,Git diff $(PACKAGE)) ; \
+ if [ -d $${d}/.git ] ; then \
+ cd $${d} && $(GIT) --no-pager diff 2>/dev/null; \
+ else \
+ $(call build_msg_fn, $(PACKAGE) not a git directory) ; \
+ fi
+
+
+
+# generate diffs for everything in source path
+.PHONY: diff-all
+diff-all:
+ @$(BUILD_ENV) ; \
+ $(call build_msg_fn,Generate diffs) ; \
+ for r in $(ABSOLUTE_SOURCE_PATH); do \
+ for d in $${r}/* ; do \
+ if [ -d $${d} ] ; then \
+ $(call build_msg_fn,Git diff $${d}) ; \
+ if [ -d $${d}/.git ] ; then \
+ cd $${d} && $(GIT) --no-pager diff 2>/dev/null; \
+ else \
+ $(call build_msg_fn, $${d} not a git directory) ; \
+ fi ; \
+ fi ; \
+ done ; \
+ done
+
+######################################################################
+# System images
+######################################################################
+
+IMAGE_DIR = $(MU_BUILD_ROOT_DIR)/image-$(PLATFORM)
+
+# Reports shared libraries in given directory
+find_shared_libs_fn = \
+ find $(1) \
+ -maxdepth 1 \
+ -regex '.*/lib[a-z0-9_]+\+?\+?.so' \
+ -o -regex '.*/lib[a-z0-9_]+-[0-9.]+\+?\+?.so' \
+ -o -regex '.*/lib[a-z0-9_]+\+?\+?.so.[0-9.]+'
+
+# By default pick up files from binary directories and /etc.
+# Also include shared libraries.
+DEFAULT_IMAGE_INCLUDE = \
+ for d in bin sbin libexec \
+ usr/bin usr/sbin usr/libexec \
+ etc; do \
+ [[ -d $$d ]] && echo $$d; \
+ done ; \
+ [[ -d $(arch_lib_dir) ]] \
+ && $(call find_shared_libs_fn,$(arch_lib_dir))
+
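+# A package fragment may override or trim this selection, e.g. (illustrative):
+#   foo_image_include = echo bin/foo
+#   foo_image_exclude = usr/share/doc
+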
+# Define any shell functions needed by install scripts
+image_install_functions = \
+ $(foreach p,$(ALL_PACKAGES), \
+ $(if $($(p)_image_install_functions), \
+ $($(p)_image_install_functions)))
+
+# Should always be over-written by temp dir in %-root-image rule
+IMAGE_INSTALL_DIR = $(error you need to set IMAGE_INSTALL_DIR)
+
+image_install_fn = \
+ @$(BUILD_ENV) ; \
+ $(call build_msg_fn,Image-install $(1) for platform $(PLATFORM)) ; \
+ inst_dir=$(IMAGE_INSTALL_DIR) ; \
+ mkdir -p $${inst_dir} ; \
+ cd $(2) ; \
+ : select files to include in image ; \
+ image_include_files=" \
+ `$(call ifdef_fn,$(1)_image_include,$(DEFAULT_IMAGE_INCLUDE)) ; \
+ echo "" ; \
+ exit 0 ; `"; \
+ : select files regexps to exclude from image ; \
+ image_exclude_files="" ; \
+ if [ ! -z "$($(1)_image_exclude)" ] ; then \
+ image_exclude_files="${image_exclude_files} \
+ $(patsubst %,--exclude=%,$($(1)_image_exclude))" ; \
+ fi ; \
+ [[ -z "$${image_include_files}" || $${image_include_files} == " " ]] \
+ || tar cf - $${image_include_files} $${image_exclude_files} \
+ | tar xf - -C $${inst_dir} ; \
+ : copy files from copyimg directories on source path if present ; \
+ for build_data_dir in $(SOURCE_PATH_BUILD_DATA_DIRS) ; do \
+ d="$${build_data_dir}/packages/$(1).copyimg" ; \
+ if [ -d "$${d}" ] ; then \
+ env $($(PLATFORM)_copyimg_env) \
+ $(MU_BUILD_ROOT_DIR)/copyimg $${d} $${inst_dir} ; \
+ fi ; \
+ done ; \
+ : run package dependent install script ; \
+ $(if $($(1)_image_install), \
+ $(image_install_functions) \
+ cd $${inst_dir} ; \
+ $($(1)_image_install))
+
+.PHONY: %-image_install
+%-image_install: %-install
+ $(call image_install_fn,$(PACKAGE),$(PACKAGE_INSTALL_DIR))
+
+basic_system_image_include = \
+ $(call ifdef_fn,$(PLATFORM)_basic_system_image_include, \
+ echo bin/ldd ; \
+ echo $(arch_lib_dir)/ld*.so* ; \
+ $(call find_shared_libs_fn, $(arch_lib_dir)))
+
+basic_system_image_install = \
+ mkdir -p bin lib mnt proc root sbin sys tmp etc ; \
+ mkdir -p usr usr/{bin,sbin} usr/lib ; \
+ mkdir -p var var/{lib,lock,log,run,tmp} ; \
+ mkdir -p var/lock/subsys var/lib/urandom
+
+.PHONY: basic_system-image_install
+basic_system-image_install: # linuxrc-install
+ $(if $(not_native), \
+ $(call image_install_fn,basic_system,$(TARGET_TOOL_INSTALL_DIR)),)
+
+ROOT_PACKAGES = $(if $($(PLATFORM)_root_packages),$($(PLATFORM)_root_packages),$(default_root_packages))
+
+.PHONY: install-packages
+install-packages: $(patsubst %,%-find-source,$(ROOT_PACKAGES))
+ @$(BUILD_ENV) ; \
+ set -eu$(BUILD_DEBUG) ; \
+ d=$(MU_BUILD_ROOT_DIR)/packages-$(PLATFORM) ; \
+ rm -rf $${d} ; \
+ mkdir -p $${d}; \
+ $(MAKE) -C $(MU_BUILD_ROOT_DIR) IMAGE_INSTALL_DIR=$${d} \
+ $(patsubst %,%-image_install, \
+ basic_system \
+ $(ROOT_PACKAGES)) || exit 1; \
+ $(call build_msg_fn, Relocating ELF executables to run in $${d}) ; \
+ find $${d} -type f \
+ -exec elftool quiet in '{}' out '{}' \
+ set-interpreter \
+ $${d}/$(arch_lib_dir)/$(DYNAMIC_LINKER) \
+ set-rpath $${d}/$(arch_lib_dir):$${d}/lib ';' ; \
+ : strip symbols from files ; \
+ if [ $${strip_symbols:-no} = 'yes' ] ; then \
+ $(call build_msg_fn, Stripping symbols from files) ; \
+ find $${d} -type f \
+ -exec \
+ $(TARGET_PREFIX)strip \
+ --strip-unneeded '{}' ';' \
+ >& /dev/null ; \
+ else \
+ $(call build_msg_fn, NOT stripping symbols) ; \
+ fi
+
+# readonly root squashfs image
+# Note: $(call build_msg_fn) does not seem to work inside of fakeroot so we use echo
+.PHONY: ro-image
+$(PLATFORM_IMAGE_DIR)/ro.img ro-image: $(patsubst %,%-find-source,$(ROOT_PACKAGES))
+ @$(BUILD_ENV) ; \
+ d=$(PLATFORM_IMAGE_DIR) ; \
+ mkdir -p $$d; \
+ ro_image=$$d/ro.img ; \
+ rm -f $${ro_image} ; \
+ tmp_dir="`mktemp -d $$d/ro-image-XXXXXX`" ; \
+ chmod 0755 $${tmp_dir} ; \
+ cd $${tmp_dir} ; \
+ trap "rm -rf $${tmp_dir}" err ; \
+ fakeroot /bin/bash -c "{ \
+ set -eu$(BUILD_DEBUG) ; \
+ $(MAKE) -C $(MU_BUILD_ROOT_DIR) IMAGE_INSTALL_DIR=$${tmp_dir} \
+ $(patsubst %,%-image_install, \
+ basic_system \
+ $(ROOT_PACKAGES)) ; \
+ : make dev directory ; \
+ $(linuxrc_makedev) ; \
+ echo @@@@ Relocating ELF executables to run in / @@@@ ; \
+ find $${d} -type f \
+ -exec elftool quiet in '{}' out '{}' \
+ set-interpreter \
+ /$(arch_lib_dir)/$(DYNAMIC_LINKER) \
+ set-rpath /$(arch_lib_dir):/lib ';' ; \
+ : strip symbols from files ; \
+ if [ '$${strip_symbols:-yes}' = 'yes' ] ; then \
+ echo @@@@ Stripping symbols from files @@@@ ; \
+ find $${tmp_dir} -type f \
+ -exec \
+ $(TARGET_PREFIX)strip \
+ --strip-unneeded '{}' ';' \
+ >& /dev/null ; \
+ else \
+ echo @@@@ NOT stripping symbols @@@@ ; \
+ fi ; \
+ if [ $${sign_executables:-yes} = 'yes' \
+ -a -n "$($(PLATFORM)_public_key)" ] ; then \
+ echo @@@@ Signing executables @@@@ ; \
+ find $${tmp_dir} -type f \
+ | xargs sign $($(PLATFORM)_public_key) \
+ $($(PLATFORM)_private_key_passphrase) ; \
+ fi ; \
+ : make read-only file system ; \
+ mksquashfs \
+ $${tmp_dir} $${ro_image} \
+ -no-exports -no-progress -no-recovery ; \
+ }" ; \
+ : cleanup tmp directory ; \
+ rm -rf $${tmp_dir}
+
+MKFS_JFFS2_BYTE_ORDER_x86_64 = -l
+MKFS_JFFS2_BYTE_ORDER_i686 = -l
+MKFS_JFFS2_BYTE_ORDER_ppc = -b
+MKFS_JFFS2_BYTE_ORDER_mips = -b
+MKFS_JFFS2_BYTE_ORDER_native = $(MKFS_JFFS2_BYTE_ORDER_$(NATIVE_ARCH))
+
+MKFS_JFFS2_SECTOR_SIZE_IN_KBYTES = \
+ $(call ifdef_fn,$(PLATFORM)_jffs2_sector_size_in_kbytes,256)
+
+mkfs_fn_jffs2 = mkfs.jffs2 \
+ --eraseblock=$(MKFS_JFFS2_SECTOR_SIZE_IN_KBYTES)KiB \
+ --root=$(1) --output=$(2) \
+ $(MKFS_JFFS2_BYTE_ORDER_$(BASIC_ARCH))
+
+# As things stand the actual initrd size parameter
+# is set in .../open-repo/build-data/packages/linuxrc.mk.
+EXT2_RW_IMAGE_SIZE=notused
+
+mkfs_fn_ext2 = \
+ e2fsimage -d $(1) -f $(2) -s $(EXT2_RW_IMAGE_SIZE)
+
+RW_IMAGE_TYPE=jffs2
+
+make_rw_image_fn = \
+ $(call mkfs_fn_$(RW_IMAGE_TYPE),$(1),$(2))
+
+rw_image_embed_ro_image_fn = \
+ mkdir -p proc initrd images ro rw union ; \
+ cp $(PLATFORM_IMAGE_DIR)/$(1) images/$(1) ; \
+ md5sum images/$(1) > images/$(1).md5 ; \
+ echo Built by $(LOGNAME) at `date` > images/$(1).stamp ; \
+ mkdir -p changes/$(1)
+
+# make sure RW_IMAGE_TYPE is a type we know how to build
+.PHONY: rw-image-check-type
+rw-image-check-type:
+ @$(BUILD_ENV) ; \
+ if [ -z "$(make_rw_image_fn)" ] ; then \
+ $(call build_msg_fn,Unknown read/write fs image type; \
+ try RW_IMAGE_TYPE=ext2 or RW_IMAGE_TYPE=jffs2) ; \
+ exit 1; \
+ fi
+
+# read write image
+.PHONY: rw-image
+rw-image: rw-image-check-type ro-image
+ @$(BUILD_ENV) ; \
+ d=$(PLATFORM_IMAGE_DIR) ; \
+ mkdir -p $$d ; \
+ rw_image="$$d/rw.$(RW_IMAGE_TYPE)" ; \
+ ro_image="ro.img" ; \
+ rm -f $$rw_image ; \
+ tmp_dir="`mktemp -d $$d/rw-image-XXXXXX`" ; \
+ chmod 0755 $${tmp_dir} ; \
+ cd $${tmp_dir} ; \
+ trap "rm -rf $${tmp_dir}" err ; \
+ fakeroot /bin/bash -c "{ \
+ set -eu$(BUILD_DEBUG) ; \
+ $(linuxrc_makedev) ; \
+ $(call rw_image_embed_ro_image_fn,$${ro_image}) ; \
+ $(call make_rw_image_fn,$${tmp_dir},$${rw_image}) ; \
+ }" ; \
+ : cleanup tmp directory ; \
+ rm -rf $${tmp_dir}
+
+images: linuxrc-install linux-install $(image_extra_dependencies) rw-image
+ @$(BUILD_ENV) ; \
+ d=$(PLATFORM_IMAGE_DIR) ; \
+ cd $(BUILD_DIR)/linux-$(PLATFORM) ; \
+ i="" ; \
+ [[ -z $$i && -f bzImage ]] && i=bzImage ; \
+ [[ -z $$i && -f zImage ]] && i=zImage ; \
+ [[ -z $$i && -f linux ]] && i=linux ; \
+ [[ -z $$i && -f vmlinux ]] && i=vmlinux ; \
+ [[ -z $$i ]] \
+ && $(call build_msg_fn,no linux image to install \
+ in $(BUILD_DIR)/linux-$(PLATFORM)) \
+ && exit 1 ; \
+ cp $$i $$d
+
+######################################################################
+# Tool chain build/install
+######################################################################
+
+.PHONY: ccache-install
+ccache-install:
+ $(MAKE) -C $(MU_BUILD_ROOT_DIR) ccache-build
+ mkdir -p $(TOOL_INSTALL_DIR)/ccache-bin
+ ln -sf $(MU_BUILD_ROOT_DIR)/build-tool-native/ccache/ccache \
+ $(TOOL_INSTALL_DIR)/ccache-bin/$(TARGET_PREFIX)gcc
+
+TOOL_MAKE = $(MAKE) is_build_tool=yes
+
+tool_make_target_fn = \
+ $(if $(strip $(NATIVE_TOOLS)), \
+ $(TOOL_MAKE) $(patsubst %,%-$(1),$(NATIVE_TOOLS)) ARCH=native || exit 1 ;) \
+ $(TOOL_MAKE) $(patsubst %,%-$(1),$(CROSS_TOOLS))
+
+.PHONY: install-tools
+install-tools:
+ $(call tool_make_target_fn,install)
+
+.PHONY: bootstrap-tools
+bootstrap-tools:
+ $(TOOL_MAKE) make-install findutils-install git-install \
+ automake-install autoconf-install libtool-install fakeroot-install
+
+
+######################################################################
+# Clean
+######################################################################
+
+package_clean_script = \
+ @$(call build_msg_fn, Cleaning $* in $(PACKAGE_INSTALL_DIR)) ; \
+ $(BUILD_ENV) ; \
+ $(if $(is_build_tool),,rm -rf $(PACKAGE_INSTALL_DIR) ;) \
+ rm -rf $(TIMESTAMP_DIR)/$(call timestamp_name_fn,*) ; \
+ $(if $($(PACKAGE)_clean), \
+ $($(PACKAGE)_clean), \
+ $(PACKAGE_MAKE) clean)
+
+.PHONY: %-clean
+%-clean:
+ $(package_clean_script)
+
+# Wipe e.g. remove build and install directories for packages.
+package_wipe_script = \
+ @message=$(if $(is_build_tool),"Wiping build $(PACKAGE)","Wiping build/install $(PACKAGE)") ; \
+ $(call build_msg_fn,$$message) ; \
+ $(BUILD_ENV) ; \
+ rm -rf $(if $(is_build_tool),$(PACKAGE_BUILD_DIR),$(PACKAGE_INSTALL_DIR) $(PACKAGE_BUILD_DIR))
+
+.PHONY: %-wipe
+%-wipe:
+ $(package_wipe_script)
+
+# Wipe entire build/install area for TAG and PLATFORM
+.PHONY: wipe-all
+wipe-all:
+ @$(call build_msg_fn, Wiping $(BUILD_DIR) $(INSTALL_DIR)) ; \
+ $(BUILD_ENV) ; \
+ rm -rf $(BUILD_DIR) $(INSTALL_DIR)
+
+# Clean everything
+distclean:
+ rm -rf $(MU_BUILD_ROOT_DIR)/$(BUILD_PREFIX_package)*/
+ rm -rf $(MU_BUILD_ROOT_DIR)/$(BUILD_PREFIX_tool)*
+ rm -rf $(MU_BUILD_ROOT_DIR)/$(INSTALL_PREFIX)*
+ rm -rf $(MU_BUILD_ROOT_DIR)/$(IMAGES_PREFIX)*
+ rm -rf $(TOOL_INSTALL_DIR)
+ rm -rf $(MU_BUILD_ROOT_DIR)/*.deb
+ rm -rf $(MU_BUILD_ROOT_DIR)/*.changes
+ (cd $(MU_BUILD_ROOT_DIR)/deb/;debian/rules clean)
+ rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/*.install
+ rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/*.dkms
+ rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/changelog
diff --git a/build-root/autowank b/build-root/autowank
new file mode 100755
index 00000000000..05897065337
--- /dev/null
+++ b/build-root/autowank
@@ -0,0 +1,308 @@
+#!/bin/bash
+
+# Copyright (c) 2015 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This aptly-named script verifies and fixes time ordering
+# problems with Makefile.{am,in} aclocal.m4 configure* files.
+
+set -eu
+#set -vx
+
+touch=""
+commit=""
+comma_v=""
+aclocal=""
+optimize="yes"
+
+# The old autowank scheme used "touch <foo> ; sleep 1"
+# to ensure differentiable, ordered timestamps. Worked, but
+# took N seconds given N files to fix. We have an example
+# which wastes multiple minutes given the old scheme.
+#
+# This version generates a sequence of timestamps
+# starting an hour ago. That gives us
+# lots to play with, in case some obnoxious program feels the need
+# to complain about timestamps in the future.
+
+# If we're in UTC+N land, generate UTC+(N+1)
+# If we're in UTC-N land, generate UTC-(N-1)
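+#
+# Worked example (illustrative): on a host at UTC-0700 the script computes
+# TZ=UTC+8; in POSIX notation that zone's wall clock runs one hour behind the
+# host's local time, so the generated timestamps start "an hour ago".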
+
+my_tz=`date +%z`
+sign=`echo $my_tz | sed -n -e 's/^\(.\{1\}\).*$/\1/p'`
+t=`echo $my_tz | sed -n -e 's/^\(.\{1\}\)//p'`
+tz_hour=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'`
+tz_hour=`echo $tz_hour | sed 's/^0//'`
+
+if [ $sign = "-" ] ; then
+ sign="+"
+ let tz_hour=$tz_hour+1
+ if [[ $tz_hour -ge "24" ]] ; then
+ tz_hour=0
+ fi
+else
+ sign="-"
+ let tz_hour=$tz_hour-1 || true
+ if [[ $tz_hour -lt "0" ]] ; then
+ tz_hour=23
+ fi
+fi
+
+# Timestamp, an hour ago:
+ts_begin=`TZ=UTC${sign}${tz_hour} date +%Y%m%d%H%M.%S`
+
+# break into constituent parts
+year=`echo $ts_begin | sed -n -e 's/^\(.\{4\}\).*$/\1/p'`
+t=`echo $ts_begin | sed -n -e 's/^\(.\{4\}\)//p'`
+month=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'`
+t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'`
+day=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'`
+t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'`
+hour=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'`
+t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'`
+min=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'`
+t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'`
+sec=`echo $t | sed -n -e 's/\.//p'`
+
+# How many days in the current month?
+# Good until someone changes the calendar rules
+days_in_current_month() {
+ if [[ $month -eq 9 || $month -eq 4 \
+ || $month -eq 6 || $month -eq 11 ]] ; then
+ return 30;
+ fi
+ if [[ $month -eq 2 ]] ; then
+ let t=($year/400)*400
+ if [[ $t -eq $year ]] ; then
+ return 29;
+ fi
+ let t=($year/100)*100
+ if [[ $t -eq $year ]] ; then
+ return 28;
+ fi
+ let t=($year/4)*4
+ if [[ $t -eq $year ]] ; then
+ return 29;
+ fi
+ return 28;
+ fi
+ return 31;
+}
+
+# The next timestamp to issue via touch
+# A real hemorrhoid because bash isn't easily convinced
+# that 08 is a decimal number
+next_ts() {
+ sec=`echo $sec | sed 's/^0//'`
+ let sec=$sec+1
+ if [[ "$sec" -lt "60" ]] ; then
+ if [[ "$sec" -lt "10" ]] ; then
+ sec=0$sec
+ fi
+ return 0;
+ fi
+ sec="00"
+ min=`echo $min | sed 's/^0//'`
+ let min=$min+1
+ if [[ "$min" -lt "60" ]] ; then
+ if [[ "$min" -lt "10" ]] ; then
+ min=0$min
+ fi
+ return 0;
+ fi
+ min="00"
+ hour=`echo $hour | sed 's/^0//'`
+ let hour=$hour+1
+ if [[ "$hour" -lt "24" ]] ; then
+ if [[ "$hour" -lt "10" ]] ; then
+ hour=0$hour
+ fi
+ return 0;
+ fi
+ hour="00"
+ days_in_current_month
+ days_in_month=$?
+ if [[ "$day" -lt "$days_in_month" ]] ; then
+ day=`echo $day | sed 's/^0//'`
+ let day=$day+1
+ if [[ "$day" -lt "10" ]] ; then
+ day=0$day
+ fi
+ return 0;
+ fi
+ day="01"
+ month=`echo $month | sed 's/^0//'`
+ let month=$month+1
+ if [[ "$month" -lt "13" ]] ; then
+ if [[ "$month" -lt "10" ]] ; then
+ month=0$month
+ fi
+ return 0;
+ fi
+ month="01"
+ let year=$year+1
+ return 0;
+}
+
+while [ $# != 0 ] ; do
+ case "$1" in
+ (--commav) comma_v=",v" ;;
+ (--touch) touch=yes ;;
+ (--aclocal) aclocal=yes ;;
+ (--nooptimize) optimize="" ;;
+ (--commit=*) commit="$1" ;;
+    (*) echo "$0: usage [--touch] [--commav] [--aclocal] [--nooptimize] [--commit=MSG]" > /dev/stderr
+ exit 17 ;;
+ esac
+ shift
+done
+
+if [ "${aclocal}" != "" ] ; then
+ if [ -f aclocal.m4 ] ; then
+ echo touching aclocal.m4
+ sleep 1
+ touch aclocal.m4
+ else
+ echo aclocal.m4 not found
+ fi
+fi
+
+if [ "${comma_v}" != "" -a "${commit}" != "" ] ; then
+ echo "No, you may NOT molest ,v files directly. Go away." > /dev/stderr
+ exit 1
+fi
+
+function touchme ()
+{
+ local victim="${1}"
+ shift
+ local touchmebaby=""
+ local sein="is"
+ local newer="no"
+ local older="no"
+
+ if [ ! -r "$victim" ] ; then
+ echo " Possible problem: No $victim exists. " > /dev/stderr
+ return
+ fi
+
+ while [ $# != 0 ] ; do
+ if [ "${1}" -nt "${victim}" ] ; then
+ newer="yes"
+ fi
+ if [ "${1}" -ot "${victim}" ] ; then
+ older="yes"
+ fi
+ if [ "${newer}" = "no" -a "${older}" = "no" ] ; then
+ newer="yes"
+ fi
+
+ if [ "${newer}" = "yes" ] ; then
+ if [ "${touchmebaby}" = "" ] ; then
+ touchmebaby="${1}"
+ else
+ sein="are"
+ touchmebaby="${touchmebaby} ${1}"
+ fi
+ fi
+ shift
+ done
+ if [ -n "${touchmebaby}" ] ; then
+ echo "*** ${touchmebaby} ${sein} newer than ${victim} "
+ if [ -n "${touch}" ] ; then
+ #
+ # This is the old version, in case something backfires...
+ if [ "${optimize}" != "yes" ] ; then
+ echo "Fixing " ;touch -c "$victim" ; sleep 1
+ else
+ echo "Fixing "
+ # echo touch -c -t $year$month$day$hour$min.$sec "$victim"
+ touch -c -t $year$month$day$hour$min.$sec "$victim"
+ next_ts
+ fi
+ fi
+ fi
+}
+
+makefileins="`/usr/bin/find . -name Attic -prune -o -name Makefile.in${comma_v}`"
+
+# aclocal.m4 depends on ***/Makefile.am, configure.ac, acinclude.m4, *.m4 crap
+touchme aclocal.m4${comma_v} \
+ `/usr/bin/find . -name Attic -prune -o -name Makefile.am${comma_v}` \
+ "configure.in${comma_v}" "configure.ac${comma_v}" \
+ "acinclude.m4${comma_v}"
+
+# Makefile.in must be newer than Makefile.am
+for f in $makefileins ; do
+ d="`dirname ${f}`"
+ touchme "${d}/Makefile.in${comma_v}" "${d}/Makefile.am${comma_v}"
+done
+
+# Makefile.in depends on aclocal.m4
+for f in $makefileins ; do
+ d="`dirname $f`"
+ touchme "${d}/Makefile.in${comma_v}" "aclocal.m4${comma_v}"
+done
+
+# config.in must be newer than aclocal.m4 and configure.ac
+if [ -f "config.in${comma_v}" ] ; then
+ touchme "config.in${comma_v}" "aclocal.m4${comma_v}" \
+ "configure.ac${comma_v}" \
+ "configure.in${comma_v}"
+fi
+
+# config.h.in (or More Thoroughly Modern configh.in)
+# must be newer than aclocal.m4 and (obsolete) acconfig.h
+for c_h_in in config.h.in configh.in ; do
+ if [ -f "${c_h_in}${comma_v}" ]; then
+ touchme "${c_h_in}${comma_v}" "aclocal.m4${comma_v}" "acconfig.h${comma_v}"
+ #>>>> WTF? Why? This is nonsensical
+ ## ***/Makefile.in must be newer than config.h.in
+ #for f in $makefileins ; do
+ # touchme "$f" "${c_h_in}${comma_v}"
+ #done
+ fi
+done
+
+# configure must be newer than everything
+# touchme configure $makefileins -- why would this be needed?
+touchme "configure${comma_v}" "aclocal.m4${comma_v}" "acconfig.h${comma_v}" \
+ "config.in${comma_v}" "config.h.in${comma_v}" \
+ "configh.in${comma_v}"
+
+if [ -n "${commit}" ] ; then
+ commit="${commit:9}" # strip off "--commit="
+ # First ***/Makefile.am,
+ # configure.in, configure.ac,
+ # ***/*.m4
+ # acconfig.h
+ cvs commit -m "${commit}" \
+ `for f in ${makefileins} ; do \
+ [ -f "$${f%.in}.am" ] && echo "$${f%.in}.am" ; \
+ done` \
+ `[ -f configure.in ] && echo configure.in` \
+ `[ -f configure.ac ] && echo configure.ac` \
+ `[ -f acconfig.h ] && echo acconfig.h` \
+ `/usr/bin/find . -name '*.m4' -mindepth 2`
+
+ # Next aclocal.m4
+ [ -f "aclocal.m4" ] && cvs commit -m "${commit}" aclocal.m4
+
+ # Next config.in, config.h.in, configh.in
+ [ -f "config.in" ] && cvs commit -m "${commit}" config.in
+ [ -f "config.h.in" ] && cvs commit -m "${commit}" config.h.in
+ [ -f "configh.in" ] && cvs commit -m "${commit}" configh.in
+
+ # Last ***/Makefile.in, configure
+ cvs commit -m "${commit}" ${makefileins} configure
+fi
diff --git a/build-root/bootstrap.sh b/build-root/bootstrap.sh
new file mode 100755
index 00000000000..3ae94098ee2
--- /dev/null
+++ b/build-root/bootstrap.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+build_root=`pwd`
+cd ../
+wsroot=`pwd`
+
+# PATH
+CCACHE_DIR="$build_root/.ccache"
+ADD_TO_PATH="$build_root/tools/ccache-bin:$build_root/tools/bin"
+
+# Construct build-config.mk
+cd $build_root
+echo SOURCE_PATH = $wsroot > build-config.mk
+echo
+echo Saving PATH settings in `pwd`/path_setup
+echo Source this file later, as needed
+cat >path_setup <<EOF
+#!/bin/bash
+
+export PATH=$ADD_TO_PATH:$PATH
+export CCACHE_DIR=$CCACHE_DIR
+EOF
+
+# regenerate tools/ccache-bin
+rm -rf tools/ccache-bin
+mkdir -p tools/ccache-bin
+
+if [ ! -f /usr/bin/ccache ] ; then
+ echo Please install ccache AYEC and re-run this script
+fi
+
+cd tools/ccache-bin
+for c in gcc g++
+ do
+ if [ -f /usr/bin/ccache ] ; then
+ ln -s /usr/bin/ccache $c
+ else
+        ln -s /usr/bin/$c $c
+ fi
+done
+
+cd $wsroot
+
+for dir in vppapigen vppinfra sample-plugin vppversion svm vlib vlib-api vnet \
+ vpp vpp-api-test vpp-japi
+do
+ cd $dir
+ echo "Autowank in $dir"
+ ../build-root/autowank --touch
+ cd $wsroot
+done
+
+cd $build_root
+echo Compile native tools
+for tool in vppapigen vppversion
+do
+ make V=0 is_build_tool=yes $tool-install
+done
+
diff --git a/build-root/build-config.mk.README b/build-root/build-config.mk.README
new file mode 100644
index 00000000000..8b8c87ddb52
--- /dev/null
+++ b/build-root/build-config.mk.README
@@ -0,0 +1,3 @@
+# To specify the directories in which to find sources, build/packages/*.mk,
+# and build/platforms.mk, set:
+# SOURCE_PATH = PATH1 PATH2 ...
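+# e.g. (illustrative): SOURCE_PATH = /home/user/vpp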
diff --git a/build-root/config.site b/build-root/config.site
new file mode 100644
index 00000000000..9f6070860f1
--- /dev/null
+++ b/build-root/config.site
@@ -0,0 +1,105 @@
+######################################################################
+# glibc
+######################################################################
+
+# glibc needs this for cross compiling
+libc_cv_forced_unwind=yes
+libc_cv_c_cleanup=yes
+libc_cv_ssp=no
+# fixes gentoo build; not sure why?
+libc_cv_uname_release=""
+libc_cv_uname_version=""
+ac_cv_header_cpuid_h=yes
+######################################################################
+# bash
+######################################################################
+
+# Bash configure.in uses this to work around an autoconf 2.53 bug
+ac_cv_func_setvbuf_reversed=no
+ac_cv_rl_version=5.1
+bash_cv_termcap_lib=libncurses
+
+# These mostly come from debian bash-2.05b changes
+# They are needed to make a functioning bash. Without these
+# settings gdbserver exiting would cause the invoking bash to
+# exit also.
+bash_cv_have_mbstate_t=yes
+bash_cv_dup2_broken=no
+bash_cv_pgrp_pipe=no
+bash_cv_sys_siglist=yes
+bash_cv_under_sys_siglist=yes
+bash_cv_opendir_not_robust=no
+bash_cv_printf_declared=yes
+bash_cv_ulimit_maxfds=yes
+bash_cv_getenv_redef=yes
+bash_cv_getcwd_calls_popen=no
+bash_cv_func_strcoll_broken=no
+bash_cv_must_reinstall_sighandlers=no
+bash_cv_type_quad_t=yes
+bash_cv_func_sigsetjmp=present
+bash_cv_job_control_missing=present
+bash_cv_sys_named_pipes=present
+bash_cv_type_rlimit=long
+bash_cv_printf_a_format=yes
+bash_cv_unusable_rtsigs=no
+
+######################################################################
+# Apache
+######################################################################
+ac_cv_func_setpgrp_void=yes
+apr_cv_process_shared_works=yes
+apr_cv_tcp_nodelay_with_cork=yes
+ap_void_ptr_lt_long=no
+
+case ${host_cpu} in
+x86_64 | alpha)
+ ac_cv_sizeof_ssize_t=8
+ ac_cv_sizeof_size_t=8
+ ac_cv_sizeof_pid_t=4
+ ;;
+*)
+ ac_cv_sizeof_ssize_t=4
+ ac_cv_sizeof_size_t=4
+ ac_cv_sizeof_pid_t=4
+ ;;
+esac
+
+######################################################################
+# gdb
+######################################################################
+gdb_cv_func_ptrace_args=int,int,long,long
+
+######################################################################
+# dpkg
+######################################################################
+dpkg_cv_va_copy=yes
+
+######################################################################
+# coreutils
+######################################################################
+ac_cv_search_clock_gettime=no
+gl_cv_fs_space=yes
+
+######################################################################
+# valgrind
+######################################################################
+ac_cv_file__proc_self_fd=yes
+ac_cv_file__proc_self_exe=yes
+ac_cv_file__proc_self_maps=yes
+
+######################################################################
+# tcpdump
+######################################################################
+ac_cv_linux_vers=2
+ac_cv_func_pcap_findalldevs=no
+
+######################################################################
+# flex
+######################################################################
+ac_cv_func_malloc_0_nonnull=yes
+ac_cv_func_realloc_0_nonnull=yes
+
+######################################################################
+# tar
+######################################################################
+tar_gl_cv_func_mknod_works=yes
diff --git a/build-root/copyimg b/build-root/copyimg
new file mode 100755
index 00000000000..e5e3fc265ee
--- /dev/null
+++ b/build-root/copyimg
@@ -0,0 +1,83 @@
+#!/bin/sh
+
+if [ $# -lt 2 ]; then
+ cat - <<EOF
+$0 FROM-DIR TO-DIR ENVIRONMENT
+
+Copies files from one directory to another with possible
+transformations.
+
+Files named FILE.copyimgspp will be transformed via the spp preprocessor,
+subject to environment definitions: source FILE.copyimgspp results in
+destination file FILE in the corresponding destination directory.
+
+Files named FILE.copyimgsh are run as shell scripts in (i.e. via chdir)
+the corresponding destination directory (and not copied).
+
+First, regular files are copied. Then transformations are performed.
+Finally, shell scripts are run.
+EOF
+ exit 1;
+fi
+
+FROM_DIR=$1
+TO_DIR=$2
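+
+# Example invocation (illustrative paths; the build-root Makefile calls this as
+# "copyimg <build-data-dir>/packages/<pkg>.copyimg <image-install-dir>"):
+#   ./copyimg ../build-data/packages/vpp.copyimg /tmp/image-root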
+
+FILTER=" -and -not -name '*~'";
+FILTER="${FILTER} -and -not -name '.*~'";
+FILTER="$FILTER -and -not -path '*/.git*'";
+FILTER="$FILTER -and -not -path '*/.svn*'";
+FILTER="$FILTER -and -not -path '*/.CVS*'";
+
+FROM_FILES=`(cd $FROM_DIR; eval "find . -not -type d $FILTER")`;
+ FROM_DIRS=`(cd $FROM_DIR; eval "find . -type d $FILTER")`;
+
+COPY_FILES=
+SPP_FILES=
+SH_FILES=
+for f in $FROM_FILES; do
+ case $f in
+ *.copyimgspp) SPP_FILES="$SPP_FILES $f" ;;
+ *.copyimgsh) SH_FILES="$SH_FILES $f" ;;
+ *) COPY_FILES="$COPY_FILES $f";;
+ esac
+done
+
+# Make destination directories.
+mkdir -p $TO_DIR;
+if [ "$FROM_DIRS" != "" ]; then
+ for d in $FROM_DIRS; do
+ mkdir -p $TO_DIR/$d;
+ done
+fi
+
+# Copy files
+if [ "$COPY_FILES" != "" ]; then
+ tar -cf - -C $FROM_DIR $COPY_FILES | tar --preserve-permissions -xf - -C $TO_DIR;
+fi
+
+# Use spp to transform any spp files
+if [ "$SPP_FILES" != "" ]; then
+ for f in $SPP_FILES; do
+ d=`dirname $f`;
+ b=`basename $f .copyimgspp`;
+ mkdir -p $TO_DIR/$d;
+    t=$TO_DIR/$d/$b;
+    spp -o $t $FROM_DIR/$f || exit 1;
+ done;
+fi
+
+# Now that all files have been copied/created we run any shell scripts
+ABS_FROM_DIR=`(cd $FROM_DIR; pwd)`;
+if [ "$SH_FILES" != "" ]; then
+ # Allow directory to define some functions
+ if [ -f $FROM_DIR/copyimgsh-functions.sh ]; then
+ . $FROM_DIR/copyimgsh-functions.sh ;
+ fi ;
+ for f in $SH_FILES; do
+ d=`dirname $f`;
+ b=`basename $f`;
+ mkdir -p $TO_DIR/$d;
+ (cd $TO_DIR/$d; . $ABS_FROM_DIR/$d/$b) || exit 1;
+ done;
+fi;
diff --git a/build-root/deb/debian/.gitignore b/build-root/deb/debian/.gitignore
new file mode 100644
index 00000000000..6c9b412e875
--- /dev/null
+++ b/build-root/deb/debian/.gitignore
@@ -0,0 +1,10 @@
+changelog
+files
+*debhelper*
+*.substvars
+*.install
+vpp-dpdk-dkms*
+vpp/
+vpp-dev/
+vpp-lib/
+vpp-dpdk-dkms/
diff --git a/build-root/deb/debian/README.vpp b/build-root/deb/debian/README.vpp
new file mode 100755
index 00000000000..b343c786823
--- /dev/null
+++ b/build-root/deb/debian/README.vpp
@@ -0,0 +1,56 @@
+Building DEB packages
+=====================
+
+REQUIREMENTS:
+ You will need a working Internet connection to execute the build, because
+ the build procedure for the included "dpdk" project attempts to contact the
+ Internet host "dpdk.org".
+
+There are three main parts to the process:
+ a) Stage the source tree so that dpkg-source will recognize its organization
+ and create a valid DSC source package for you;
+ b) Ensure that the tools required for building DEB packages are installed;
+ and
+ c) Launch the build.
+
+1) Create, or have on hand, a local clone of the git repository, with no
+untracked files or local modifications pending, up-to-date with the branch or
+commit reference from which you wish to construct the source release.
+
+The branch and repository origins will differ based on local conditions.
+
+Example:
+$ git clone -b master ssh://git@example.com:7999/~username/open-vpp
+
+("-b master" can be omitted since master is the default branch)
+
+2) Rename the checkout with a version number embedded in its name as is
+conventional for code releases. Again, your version number may vary.
+
+Example:
+$ mv open-vpp open-vpp-0.0.0
+
+3) Ensure that the dpkg-buildpackage program is installed.
+
+E.g.,
+
+# apt-get install dpkg-dev
+
+4) From the PARENT directory of the debian/ directory, run:
+
+$ cd open-vpp-0.0.0
+$ dpkg-buildpackage -I .git -us -uc
+
+(The -us and -uc flags omit GPG signatures from the .dsc and .changes files,
+respectively. You can add them later, or if you are preparing a signed release
+and have the signing key on hand, leave off the flags.)
+
+5) Remove the source directory. Either the build succeeded and you now have a
+source package from which the tree can be re-created at any time, or the build
+failed and you should return to your git checkout to fix the problems.
+
+$ rm -r open-vpp-0.0.0
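+
+(Should you need the tree again, it can be re-created from the source package,
+e.g. with "dpkg-source -x vpp_<version>.dsc"; the exact .dsc file name depends
+on the version you built.)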
+
+END
+
+vim:set ai et sw=4 ts=4 tw=80:
diff --git a/build-root/deb/debian/compat b/build-root/deb/debian/compat
new file mode 100755
index 00000000000..ec635144f60
--- /dev/null
+++ b/build-root/deb/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/build-root/deb/debian/control b/build-root/deb/debian/control
new file mode 100644
index 00000000000..0278286af9d
--- /dev/null
+++ b/build-root/deb/debian/control
@@ -0,0 +1,54 @@
+Source: vpp
+Section: net
+Priority: extra
+Maintainer: Cisco OpenVPP Packaging Team <bogus.address@cisco.com>
+Build-Depends: debhelper (>= 9), dkms
+Standards-Version: 3.9.4
+
+Package: vpp
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: Vector Packet Processing--executables
+ This package provides VPP executables: vpe, vpe_api_test, vpe_json_test
+ vpe - the vector packet engine
+ vpe_api_test - vector packet engine API test tool
+ vpe_json_test - vector packet engine JSON test tool
+
+Package: vpp-dbg
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: Vector Packet Processing--debug symbols
+
+Package: vpp-dev
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: Vector Packet Processing--development support
+ This package contains development support files for the VPP libraries, including:
+ .
+ dynamic vectors (vec.c), dynamic bitmaps (bitmap.h), allocation heap of
+ objects (heap.c), allocation pool (pool.h), dynamic hash tables (hash.c), memory
+ allocator (mheap.c), extendable printf-like interface built on top of vectors
+ (format.c), formats for data structures (std-formats.c), and support for clock
+ time-based function calls (timer.c).
+
+Package: vpp-lib
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: Vector Packet Processing--runtime libraries
+ This package contains the VPP shared libraries, including:
+ .
+ vppinfra - foundation library supporting vectors, hashes, bitmaps, pools, and string formatting.
+ dpdk - Intel DPDK library
+ svm - shared virtual memory library
+ vlib - vector processing library
+ vlib-api - binary API library
+ vnet - network stack library
+
+Package: vpp-dpdk-dkms
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: DPDK 2.1 igb_uio driver
+ This package contains Linux kernel modules distributed with DPDK.
diff --git a/build-root/deb/debian/copyright b/build-root/deb/debian/copyright
new file mode 100644
index 00000000000..f9775c158f9
--- /dev/null
+++ b/build-root/deb/debian/copyright
@@ -0,0 +1,9 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: optional.
+Upstream-Contact: optional.
+Source: optional.
+Disclaimer: optional.
+Comment: optional.
+License: Apache-2.0
+Copyright: 2015 Cisco and/or its affiliates and others.
+
diff --git a/build-root/deb/debian/rules b/build-root/deb/debian/rules
new file mode 100755
index 00000000000..bcee0121ff6
--- /dev/null
+++ b/build-root/deb/debian/rules
@@ -0,0 +1,30 @@
+#!/usr/bin/make -f
+# See debhelper(7).
+# DH_VERBOSE = 1 makes debhelper output every command that modifies files
+# on the build system.
+DH_VERBOSE = 1
+
+# see EXAMPLES in dpkg-buildflags(1) and read /usr/share/dpkg/*
+DPKG_EXPORT_BUILDFLAGS = 1
+include /usr/share/dpkg/default.mk
+
+# see FEATURE AREAS in dpkg-buildflags(1)
+#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
+
+# see ENVIRONMENT in dpkg-buildflags(1)
+# package maintainers to append CFLAGS
+#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
+# package maintainers to append LDFLAGS
+#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
+
+# main packaging script based on dh7 syntax
+%:
+ dh $@ --with dkms
+
+override_dh_install:
+ dh_install --exclude .git
+
+override_dh_strip:
+ dh_strip --dbg-package=vpp-dbg
+
+override_dh_dkms:
+ dh_dkms -pvpp-dpdk-dkms
diff --git a/build-root/deb/debian/source/format b/build-root/deb/debian/source/format
new file mode 100755
index 00000000000..89ae9db8f88
--- /dev/null
+++ b/build-root/deb/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/build-root/deb/debian/vpp-bin.README.Debian b/build-root/deb/debian/vpp-bin.README.Debian
new file mode 100644
index 00000000000..25ecd38ef66
--- /dev/null
+++ b/build-root/deb/debian/vpp-bin.README.Debian
@@ -0,0 +1,53 @@
+To run vpp with the debug shell:
+
+sudo vpe unix interactive
+
+which will result in a prompt that looks like:
+
+DBGvpd#
+
+To give it a spin, we can create a tap interface and try a simple ping
+(with trace).
+
+To create the tap:
+
+DBGvpd# tap connect foobar
+Created tap-0 for Linux tap 'foobar'
+DBGvpd# show int
+
+To assign it an ip address (and 'up' the interface):
+
+DBGvpd# set int ip address tap-0 192.168.1.1/24
+DBGvpd# set int state tap-0 up
+
+To turn on packet tracing for the tap interface:
+DBGvpd# trace add tapcli-rx 10
+
+Now, to set up and try the other end from the unix prompt:
+vagrant@vagrant-ubuntu-trusty-64:~$ sudo ip addr add 192.168.1.2/24 dev foobar
+vagrant@vagrant-ubuntu-trusty-64:~$ ping -c 3 192.168.1.1
+
+To look at the trace, back in the vpp CLI:
+DBGvpd# show trace
+
+And to stop tracing:
+
+DBGvpd# clear trace
+
+Other fun things to look at:
+
+The vlib packet processing graph:
+DBGvpd# show vlib graph
+
+which will produce output like:
+
+ Name Next Previous
+ip4-icmp-input error-punt [0] ip4-local
+ ip4-icmp-echo-request [1]
+ vpe-icmp4-oam [2]
+
+To read this, the first column (Name) is the name of the node.
+The second column (Next) lists the children of that node.
+The third column (Previous) lists the parents of that node.
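+
+In the sample above, for instance, ip4-icmp-input has three children
+(error-punt, ip4-icmp-echo-request and vpe-icmp4-oam) and one parent
+(ip4-local).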
+
+END
diff --git a/build-root/deb/debian/vpp.postinst b/build-root/deb/debian/vpp.postinst
new file mode 100644
index 00000000000..78fcac226a8
--- /dev/null
+++ b/build-root/deb/debian/vpp.postinst
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+
+# try to set the required values now. This may or may not work.
+sysctl --system
+
+#DEBHELPER#
+
+exit 0
diff --git a/build-root/deb/debian/vpp.upstart b/build-root/deb/debian/vpp.upstart
new file mode 100644
index 00000000000..ec731d896f5
--- /dev/null
+++ b/build-root/deb/debian/vpp.upstart
@@ -0,0 +1,21 @@
+description "vector packet processing engine"
+author "Cisco Systems, Inc <listname@cisco.com>"
+
+manual
+
+respawn
+
+pre-start script
+ rm -f /dev/shm/* || true
+ # should be there via dkms, but if not, start anyway
+ modprobe igb_uio || true
+end script
+
+
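+# The startup file is passed verbatim (with #-comments stripped) as arguments
+# to vpe, so /etc/vpp/startup.conf might contain, for example, a single line
+# such as "unix interactive".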
+script
+ exec vpe $(cat /etc/vpp/startup.conf | sed -e 's/#.*//')
+end script
+
+post-stop script
+ rm -f /dev/shm/* || true
+end script
diff --git a/build-root/emacs-lisp/all-skel.el b/build-root/emacs-lisp/all-skel.el
new file mode 100644
index 00000000000..afc2d37f6e3
--- /dev/null
+++ b/build-root/emacs-lisp/all-skel.el
@@ -0,0 +1,11 @@
+;; list of clib / vlib / vnet / vpp skeleton files
+
+(load-file "./cli-cmd-skel.el")
+(load-file "./pipe-skel.el")
+(load-file "./dual-loop-skel.el")
+(load-file "./periodic-skel.el")
+(load-file "./config-skel.el")
+(load-file "./tunnel-c-skel.el")
+(load-file "./tunnel-h-skel.el")
+(load-file "./tunnel-encap-skel.el")
+(load-file "./tunnel-decap-skel.el")
diff --git a/build-root/emacs-lisp/cli-cmd-skel.el b/build-root/emacs-lisp/cli-cmd-skel.el
new file mode 100644
index 00000000000..8a83cd1f8b3
--- /dev/null
+++ b/build-root/emacs-lisp/cli-cmd-skel.el
@@ -0,0 +1,32 @@
+;;; cli-cmd-skel.el - cli command skeleton
+
+(require 'skeleton)
+
+(define-skeleton cli-cmd-skel
+"Insert a CLI command "
+nil
+'(setq cmd-name (skeleton-read "Command Name: "))
+'(setq path (skeleton-read "Path: "))
+
+"
+static clib_error_t *
+" cmd-name "_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+  u32 whatever;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, \"whatever %d\", &whatever))
+ ;
+ else
+ return clib_error_return (0, \"unknown input `%U'\",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (" cmd-name "_command, static) = {
+ .path = \"" path "\",
+ .short_help = \"" path "\",
+ .function = " cmd-name "_command_fn,
+};
+")
diff --git a/build-root/emacs-lisp/config-skel.el b/build-root/emacs-lisp/config-skel.el
new file mode 100644
index 00000000000..c9a1eb6cdb6
--- /dev/null
+++ b/build-root/emacs-lisp/config-skel.el
@@ -0,0 +1,28 @@
+;;; config-skel.el - config function command skeleton
+
+(require 'skeleton)
+
+(define-skeleton config-skel
+"Insert a vlib config skeleton "
+nil
+'(setq cfg-name (skeleton-read "Config Class Name: "))
+
+"
+static clib_error_t *
+" cfg-name "_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ u32 whatever;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, \"whatever %d\", &whatever))
+ ;
+ else
+ return clib_error_return (0, \"unknown input `%U'\",
+ format_unformat_error, input);
+ }
+
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (" cfg-name "_config, \"" cfg-name "\");
+")
diff --git a/build-root/emacs-lisp/dual-loop-skel.el b/build-root/emacs-lisp/dual-loop-skel.el
new file mode 100644
index 00000000000..fafd21aeb06
--- /dev/null
+++ b/build-root/emacs-lisp/dual-loop-skel.el
@@ -0,0 +1,290 @@
+;;; dual-loop-skel.el - Eliotic dual-loop node skeleton
+
+(require 'skeleton)
+
+(define-skeleton dual-loop-skel
+"Insert a skeleton dual-loop graph node"
+nil
+'(setq node-name (skeleton-read "Node Name: "))
+'(setq uc-node-name (upcase node-name))
+"
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <clib/error.h>
+
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <clib/hash.h>
+#include <clib/error.h>
+#include <clib/elog.h>
+
+typedef struct {
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+ ethernet_main_t * ethernet_main;
+} " node-name "_main_t;
+
+" node-name "_main_t " node-name "_main;
+
+vlib_node_registration_t " node-name "_node;
+
+typedef struct {
+ u32 next_index;
+ u32 sw_if_index;
+} " node-name "_trace_t;
+
+/* packet trace format function */
+static u8 * format_" node-name "_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ " node-name "_trace_t * t = va_arg (*args, " node-name "_trace_t *);
+
+ s = format (s, \"" uc-node-name ": sw_if_index %d, next index %d\",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+vlib_node_registration_t " node-name "_node;
+
+#define foreach_" node-name "_error \\
+_(SWAPPED, \"Mac swap packets processed\")
+
+typedef enum {
+#define _(sym,str) " uc-node-name "_ERROR_##sym,
+ foreach_" node-name "_error
+#undef _
+ " uc-node-name "_N_ERROR,
+} " node-name "_error_t;
+
+static char * " node-name "_error_strings[] = {
+#define _(sym,string) string,
+ foreach_" node-name "_error
+#undef _
+};
+
+typedef enum {
+ " uc-node-name "_NEXT_INTERFACE_OUTPUT,
+ " uc-node-name "_N_NEXT,
+} " node-name "_next_t;
+
+#define foreach_mac_address_offset \\
+_(0) \\
+_(1) \\
+_(2) \\
+_(3) \\
+_(4) \\
+_(5)
+
+static uword
+" node-name "_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ " node-name "_next_t next_index;
+ u32 pkts_swapped = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 next0 = " uc-node-name "_NEXT_INTERFACE_OUTPUT;
+ u32 next1 = " uc-node-name "_NEXT_INTERFACE_OUTPUT;
+ u32 sw_if_index0, sw_if_index1;
+ u8 tmp0[6], tmp1[6];
+ ethernet_header_t *en0, *en1;
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* $$$$$ Dual loop: process 2 x packets here $$$$$ */
+ ASSERT (b0->current_data == 0);
+ ASSERT (b1->current_data == 0);
+
+ en0 = vlib_buffer_get_current (b0);
+ en1 = vlib_buffer_get_current (b1);
+
+ /* This is not the fastest way to swap src + dst mac addresses */
+#define _(a) tmp0[a] = en0->src_address[a];
+ foreach_mac_address_offset;
+#undef _
+#define _(a) en0->src_address[a] = en0->dst_address[a];
+ foreach_mac_address_offset;
+#undef _
+#define _(a) en0->dst_address[a] = tmp0[a];
+ foreach_mac_address_offset;
+#undef _
+
+#define _(a) tmp1[a] = en1->src_address[a];
+ foreach_mac_address_offset;
+#undef _
+#define _(a) en1->src_address[a] = en1->dst_address[a];
+ foreach_mac_address_offset;
+#undef _
+#define _(a) en1->dst_address[a] = tmp1[a];
+ foreach_mac_address_offset;
+#undef _
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+
+ /* Send pkt back out the RX interface */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = sw_if_index0;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = sw_if_index1;
+
+ pkts_swapped += 2;
+ /* $$$$$ End of processing 2 x packets $$$$$ */
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ " node-name "_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ " node-name "_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = " uc-node-name "_NEXT_INTERFACE_OUTPUT;
+ u32 sw_if_index0;
+ u8 tmp0[6];
+ ethernet_header_t *en0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* $$$$$ Single loop: process 1 packet here $$$$$ */
+
+ /*
+ * Direct from the driver, we should be at offset 0
+ * aka at &b0->data[0]
+ */
+ ASSERT (b0->current_data == 0);
+
+ en0 = vlib_buffer_get_current (b0);
+
+ /* This is not the fastest way to swap src + dst mac addresses */
+#define _(a) tmp0[a] = en0->src_address[a];
+ foreach_mac_address_offset;
+#undef _
+#define _(a) en0->src_address[a] = en0->dst_address[a];
+ foreach_mac_address_offset;
+#undef _
+#define _(a) en0->dst_address[a] = tmp0[a];
+ foreach_mac_address_offset;
+#undef _
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+
+ /* Send pkt back out the RX interface */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = sw_if_index0;
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ " node-name "_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ pkts_swapped += 1;
+
+ /* $$$$$ Done processing 1 packet here $$$$$ */
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, " node-name "_node.index,
+ " uc-node-name "_ERROR_SWAPPED, pkts_swapped);
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (" node-name "_node) = {
+ .function = " node-name "_node_fn,
+ .name = \"" node-name "\",
+ .vector_size = sizeof (u32),
+ .format_trace = format_" node-name "_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(" node-name "_error_strings),
+ .error_strings = " node-name "_error_strings,
+
+ .n_next_nodes = " uc-node-name "_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [" uc-node-name "_NEXT_INTERFACE_OUTPUT] = \"interface-output\",
+ },
+};
+
+")
+
diff --git a/build-root/emacs-lisp/periodic-skel.el b/build-root/emacs-lisp/periodic-skel.el
new file mode 100644
index 00000000000..37c20856c64
--- /dev/null
+++ b/build-root/emacs-lisp/periodic-skel.el
@@ -0,0 +1,86 @@
+;;; periodic-skel.el - periodic process node skeleton
+
+(require 'skeleton)
+
+(define-skeleton periodic-skel
+"Insert a skeleton periodic process node"
+nil
+'(setq node-name (skeleton-read "Name: "))
+'(setq uc-node-name (upcase node-name))
+'(setq poll-period (skeleton-read "Poll period (f64 seconds, e.g. 10.0): "))
+
+"
+#define " uc-node-name "_POLL_PERIOD " poll-period "
+
+static uword
+" node-name "_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt,
+ vlib_frame_t * f)
+{
+ f64 poll_time_remaining;
+ uword event_type, * event_data = 0;
+
+ poll_time_remaining = " uc-node-name "_POLL_PERIOD;
+ while (1) {
+ int i;
+
+ /*
+ * Sleep until next periodic call due, or until we receive event(s)
+ */
+ poll_time_remaining =
+ vlib_process_wait_for_event_or_clock (vm, poll_time_remaining);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+ switch (event_type) {
+ case ~0: /* no events => timeout */
+ break;
+
+ /*
+ * $$$$ FIXME: add cases / handlers for each event type
+ */
+ case EVENT1:
+ for (i = 0; i < vec_len (event_data); i++)
+ handle_event1 (mm, event_data[i]);
+ break;
+
+ case EVENT2:
+ for (i = 0; i < vec_len (event_data); i++)
+ handle_event2 (vm, event_data[i]);
+ break;
+
+ /* ... and so forth for each event type */
+
+ default:
+ /* This should never happen... */
+ clib_warning (\"BUG: unhandled event type %d\", event_type);
+ break;
+ }
+ if (event_data)
+ _vec_len (event_data) = 0;
+
+ /* Timer expired, call periodic function */
+ if (vlib_process_suspend_time_is_zero (poll_time_remaining)) {
+ " node-name "_periodic (vm);
+ poll_time_remaining = " uc-node-name "_POLL_PERIOD;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * " node-name " periodic node declaration
+ */
+static VLIB_REGISTER_NODE (" node-name "_node) = {
+ .function = " node-name "_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = \"" node-name "-process\",
+};
+
+/*
+ * To signal an event:
+ *
+ * vlib_process_signal_event (vm, " node-name "_node.index, EVENTn, datum);
+ *
+ */
+")
diff --git a/build-root/emacs-lisp/pipe-skel.el b/build-root/emacs-lisp/pipe-skel.el
new file mode 100644
index 00000000000..8ad25c0482d
--- /dev/null
+++ b/build-root/emacs-lisp/pipe-skel.el
@@ -0,0 +1,132 @@
+;;; pipe-skel.el - pipelined graph node skeleton
+
+(require 'skeleton)
+
+(define-skeleton pipeline-node-skel
+"Insert a skeleton pipelined graph node"
+nil
+'(setq node-name (skeleton-read "Node Name: "))
+'(setq uc-node-name (upcase node-name))
+'(setq nstages (skeleton-read "Number of pipeline stages: "))
+"
+#include <vlib/vlib.h>
+#include <clib/error.h>
+
+/*
+ * Dump these counters via the \"show error\" CLI command
+ * FIXME: Add packet counter / error strings as desired
+ */
+
+#define foreach_" node-name "_error \\
+_(ERROR1, \"sample counter/ error string\")
+
+static char * " node-name "_error_strings[] = {
+#define _(sym,string) string,
+ foreach_" node-name "_error
+#undef _
+};
+
+/*
+ * packet error / counter enumeration
+ *
+ * To count and drop a vlib_buffer_t *b:
+ *
+ * Set b->error = node->errors[" uc-node-name "_ERROR_xxx];
+ * last_stage returns a disposition index bound to \"error-drop\"
+ *
+ * To manually increment the specific counter " uc-node-name "_ERROR1
+ *
+ * vlib_node_t *n = vlib_get_node (vm, " node-name ".index);
+ * u32 node_counter_base_index = n->error_heap_index;
+ * vlib_error_main_t * em = &vm->error_main;
+ * em->counters[node_counter_base_index + " uc-node-name "_ERROR1] += 1;
+ *
+ */
+
+typedef enum {
+#define _(sym,str) " uc-node-name "_ERROR_##sym,
+ foreach_" node-name "_error
+#undef _
+ " uc-node-name "_N_ERROR,
+} " node-name "_error_t;
+
+/*
+ * enumeration of per-packet dispositions
+ * FIXME: add dispositions as desired
+ */
+
+typedef enum { \n"
+" " uc-node-name "_NEXT_NORMAL,\n"
+" " uc-node-name "_N_NEXT,
+} " node-name "_next_t;
+
+#define NSTAGES " nstages "
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+/*
+ * FIXME: add stage functions. Here is the function prototype:
+ *
+ * static inline void stageN (vlib_main_t * vm,
+ * vlib_node_runtime_t * node,
+ * u32 buffer_index)
+ */
+
+/*
+ * FIXME: the last pipeline stage returns the desired pkt next node index,
+ * from the " node-name "_next_t enum above
+ */
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+
+  b->error = node->errors[" uc-node-name "_ERROR_ERROR1];
+
+ return " uc-node-name "_NEXT_NORMAL;
+}
+
+#include <api/pipeline.h>
+
+static uword " node-name "_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+static VLIB_REGISTER_NODE (" node-name "_node) = {
+ .function = " node-name "_node_fn,
+ .name = \"" node-name "-node\",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(" node-name "_error_strings),
+ .error_strings = " node-name "_error_strings,
+
+ .n_next_nodes = " uc-node-name "_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [" uc-node-name "_NEXT_NORMAL] = \"error-drop\",
+ },
+};
+
+/*
+ * packet generator definition to push superframes of data into the
+ * new graph node. Cut and paste into <file>, then
+ * \"exec <file>\", \"pa enable test\" at the QVNET prompt...
+ *
+packet-generator new {
+ name test
+ limit 100
+ node " node-name "-node
+ size 374-374
+ data { hex 0x02b46b96000100096978676265000500bf436973636f20494f5320536f6674776172652c2043333735304520536f66747761726520284333373530452d554e4956455253414c2d4d292c2056657273696f6e2031322e32283335295345352c2052454c4541534520534f4654574152452028666331290a436f707972696768742028632920313938362d3230303720627920436973636f2053797374656d732c20496e632e0a436f6d70696c6564205468752031392d4a756c2d30372031363a3137206279206e616368656e00060018636973636f2057532d4333373530452d3234544400020011000000010101cc0004000000000003001b54656e4769676162697445746865726e6574312f302f3100040008000000280008002400000c011200000000ffffffff010221ff000000000000001e7a50f000ff000000090004000a00060001000b0005010012000500001300050000160011000000010101cc000400000000001a00100000000100000000ffffffff }
+}
+ */
+")
diff --git a/build-root/emacs-lisp/tunnel-c-skel.el b/build-root/emacs-lisp/tunnel-c-skel.el
new file mode 100644
index 00000000000..6f1096caa88
--- /dev/null
+++ b/build-root/emacs-lisp/tunnel-c-skel.el
@@ -0,0 +1,441 @@
+;;; tunnel-c-skel.el - tunnel encap cli / api
+
+(require 'skeleton)
+
+(define-skeleton tunnel-c-skel
+"Insert a tunnel cli/api implementation"
+nil
+'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): "))
+'(setq ENCAP_STACK (upcase encap_stack))
+'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack))
+"
+#include <vnet/" encap-stack "/" encap_stack" .h>
+
+" encap_stack "_main_t " encap_stack "_main;
+
+static u8 * format_decap_next (u8 * s, va_list * args)
+{
+ u32 next_index = va_arg (*args, u32);
+
+ switch (next_index)
+ {
+ case " ENCAP_STACK "_INPUT_NEXT_DROP:
+ return format (s, \"drop\");
+ case " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT:
+ return format (s, \"ip4\");
+ case " ENCAP_STACK "_INPUT_NEXT_IP6_INPUT:
+ return format (s, \"ip6\");
+ case " ENCAP_STACK "_INPUT_NEXT_" ENCAP_STACK "_ENCAP:
+ return format (s, \"" encap-stack "\");
+ default:
+ return format (s, \"unknown %d\", next_index);
+ }
+ return s;
+}
+
+u8 * format_" encap_stack "_tunnel (u8 * s, va_list * args)
+{
+ " encap_stack "_tunnel_t * t = va_arg (*args, " encap_stack "_tunnel_t *);
+ " encap_stack "_main_t * ngm = &" encap_stack "_main;
+
+ s = format (s,
+ \"[%d] %U (src) %U (dst) fibs: encap %d, decap %d\",
+ t - ngm->tunnels,
+ format_ip4_address, &t->src,
+ format_ip4_address, &t->dst,
+ t->encap_fib_index,
+ t->decap_fib_index);
+
+ s = format (s, \" decap next %U\\n\", format_decap_next, t->decap_next_index);
+ /* FIXME: add protocol details */
+ return s;
+}
+
+static u8 * format_" encap_stack "_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, \"" encap_stack "_tunnel%d\", dev_instance);
+}
+
+static uword dummy_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ clib_warning (\"you shouldn't be here, leaking buffers...\");
+ return frame->n_vectors;
+}
+
+VNET_DEVICE_CLASS (" encap_stack "_device_class,static) = {
+  .name = \"" ENCAP_STACK "\",
+ .format_device_name = format_" encap_stack "_name,
+ .format_tx_trace = format_" encap_stack "_encap_trace,
+ .tx_function = dummy_interface_tx,
+};
+
+static uword dummy_set_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 l3_type,
+ void * dst_address,
+ void * rewrite,
+ uword max_rewrite_bytes)
+{
+ return 0;
+}
+
+u8 * format_" encap_stack "_header_with_length (u8 * s, va_list * args)
+{
+ " encap_stack "_header_t * h = va_arg (*args, " encap_stack "_header_t *);
+ u32 max_header_bytes = va_arg (*args, u32);
+ u32 header_bytes;
+
+ header_bytes = sizeof (h[0]);
+ if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+    return format (s, \"" encap-stack " header truncated\");
+
+ /* FIXME: pretty-print an " encap_stack " header */
+
+ return s;
+}
+
+VNET_HW_INTERFACE_CLASS (" encap_stack "_hw_class) = {
+ .name = \"" ENCAP_STACK "\",
+ .format_header = format_" encap_stack "_header_with_length,
+ .set_rewrite = dummy_set_rewrite,
+};
+
+#define foreach_copy_field \\
+_(src.as_u32) \\
+_(dst.as_u32) \\
+_(encap_fib_index) \\
+_(decap_fib_index) \\
+_(decap_next_index) \\
+_(FIXME_ADD_ALL_COPY_FIELDS)
+
+static int " encap_stack "_rewrite (" encap_stack "_tunnel_t * t)
+{
+ u8 *rw = 0;
+ ip4_header_t * ip0;
+ " encap_stack "_header_t * h0;
+ int len;
+
+ len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+  h0 = (" encap_stack "_header_t *) rw;
+
+ /* FIXME: build the actual header here... */
+
+ /* Fixed portion of the (outer) ip4 header */
+ ip0 = &h0->ip4;
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 254;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ /* we'll fix up the ip4 header length and checksum after-the-fact */
+ ip0->src_address.as_u32 = t->src.as_u32;
+ ip0->dst_address.as_u32 = t->dst.as_u32;
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4341);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_" encap_stack ");
+
+ /* $$$ build a <mumble> tunnel header here */
+
+ t->rewrite = rw;
+ return (0);
+}
+
+int vnet_" encap_stack "_add_del_tunnel
+(vnet_" encap_stack "_add_del_tunnel_args_t *a, u32 * hw_if_indexp)
+{
+ " encap_stack "_main_t * ngm = &" encap_stack "_main;
+ " encap_stack "_tunnel_t *t = 0;
+ vnet_main_t * vnm = ngm->vnet_main;
+ vnet_hw_interface_t * hi;
+ uword * p;
+ u32 hw_if_index = ~0;
+ int rv;
+ " encap_stack "_tunnel_key_t key, *key_copy;
+ hash_pair_t *hp;
+
+ key.FIXME = clib_host_to_net_XXX(FIXME);
+
+ p = hash_get_mem (ngm->" encap_stack "_tunnel_by_key, &key);
+
+ if (a->is_add)
+ {
+ /* adding a tunnel: tunnel must not already exist */
+ if (p)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ memset (t, 0, sizeof (*t));
+
+ /* copy from arg structure */
+#define _(x) t->x = a->x;
+ foreach_copy_field;
+#undef _
+
+ rv = " encap_stack "_rewrite (t);
+
+ if (rv)
+ {
+ pool_put (ngm->tunnels, t);
+ return rv;
+ }
+
+ /* $$$$ use a simple hash if you can ... */
+ key_copy = clib_mem_alloc (sizeof (*key_copy));
+ memcpy (key_copy, &key, sizeof (*key_copy));
+
+ hash_set_mem (ngm->" encap_stack "_tunnel_by_key, key_copy,
+ t - ngm->tunnels);
+
+ /*
+ * interface freelist / recycle shtik
+ * This simple implementation rapidly reuses freed tunnel interfaces.
+ * Consider whether to refcount, etc. etc.
+ */
+ if (vec_len (ngm->free_" encap_stack "_tunnel_hw_if_indices) > 0)
+ {
+ hw_if_index = ngm->free_" encap_stack "_tunnel_hw_if_indices
+ [vec_len (ngm->free_" encap_stack "_tunnel_hw_if_indices)-1];
+ _vec_len (ngm->free_" encap_stack "_tunnel_hw_if_indices) -= 1;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->dev_instance = t - ngm->tunnels;
+ hi->hw_instance = hi->dev_instance;
+ }
+ else
+ {
+ hw_if_index = vnet_register_interface
+ (vnm, " encap_stack "_device_class.index, t - ngm->tunnels,
+ " encap_stack "_hw_class.index, t - ngm->tunnels);
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+ hi->output_node_index = " encap_stack "_encap_node.index;
+ }
+
+ t->hw_if_index = hw_if_index;
+
+ vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ }
+ else
+ {
+ /* deleting a tunnel: tunnel must exist */
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ t = pool_elt_at_index (ngm->tunnels, p[0]);
+
+ vnet_sw_interface_set_flags (vnm, t->hw_if_index, 0 /* down */);
+ vec_add1 (ngm->free_" encap_stack "_tunnel_hw_if_indices, t->hw_if_index);
+
+ hp = hash_get_pair (ngm->" encap_stack "_tunnel_by_key, &key);
+ key_copy = (void *)(hp->key);
+ hash_unset_mem (ngm->" encap_stack "_tunnel_by_key, &key);
+ clib_mem_free (key_copy);
+
+ vec_free (t->rewrite);
+ pool_put (ngm->tunnels, t);
+ }
+
+ if (hw_if_indexp)
+ *hw_if_indexp = hw_if_index;
+
+ return 0;
+}
+
+static u32 fib_index_from_fib_id (u32 fib_id)
+{
+ ip4_main_t * im = &ip4_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
+static uword unformat_decap_next (unformat_input_t * input, va_list * args)
+{
+ u32 * result = va_arg (*args, u32 *);
+ u32 tmp;
+
+ if (unformat (input, \"drop\"))
+ *result = " ENCAP_STACK "_INPUT_NEXT_DROP;
+ else if (unformat (input, \"ip4\"))
+ *result = " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT;
+ else if (unformat (input, \"ip6\"))
+ *result = " ENCAP_STACK "_INPUT_NEXT_IP6_INPUT;
+ else if (unformat (input, \"ethernet\"))
+ *result = " ENCAP_STACK "_INPUT_NEXT_IP6_INPUT;
+ else if (unformat (input, \"" encap-stack "\"))
+ *result = " ENCAP_STACK "_INPUT_NEXT_" ENCAP_STACK "_ENCAP;
+ else if (unformat (input, \"%d\", &tmp))
+ *result = tmp;
+ else
+ return 0;
+ return 1;
+}
+
+static clib_error_t *
+" encap_stack "_add_del_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ ip4_address_t src, dst;
+ u8 is_add = 1;
+ u8 src_set = 0;
+ u8 dst_set = 0;
+ u32 encap_fib_index = 0;
+ u32 decap_fib_index = 0;
+ u8 next_protocol = " ENCAP_STACK "_NEXT_PROTOCOL_IP4;
+ u32 decap_next_index = " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT;
+ u8 flags = " ENCAP_STACK "_FLAGS_P;
+ u8 ver_res = 0;
+ u8 res = 0;
+ u32 iid = 0;
+ u8 iid_set = 0;
+ u32 tmp;
+ int rv;
+ vnet_" encap_stack "_add_del_tunnel_args_t _a, * a = &_a;
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (line_input, \"del\"))
+ is_add = 0;
+ else if (unformat (line_input, \"src %U\",
+ unformat_ip4_address, &src))
+ src_set = 1;
+ else if (unformat (line_input, \"dst %U\",
+ unformat_ip4_address, &dst))
+ dst_set = 1;
+ else if (unformat (line_input, \"encap-vrf-id %d\", &tmp))
+ {
+ encap_fib_index = fib_index_from_fib_id (tmp);
+ if (encap_fib_index == ~0)
+ return clib_error_return (0, \"nonexistent encap fib id %d\", tmp);
+ }
+ else if (unformat (line_input, \"decap-vrf-id %d\", &tmp))
+ {
+ decap_fib_index = fib_index_from_fib_id (tmp);
+ if (decap_fib_index == ~0)
+ return clib_error_return (0, \"nonexistent decap fib id %d\", tmp);
+ }
+ else if (unformat (line_input, \"decap-next %U\", unformat_decap_next,
+ &decap_next_index))
+ ;
+ else if (unformat(line_input, \"next-ip4\"))
+ next_protocol = 1;
+ else if (unformat(line_input, \"next-ip6\"))
+ next_protocol = 2;
+ else if (unformat(line_input, \"next-ethernet\"))
+ next_protocol = 3;
+ else if (unformat(line_input, \"next-nsh\"))
+ next_protocol = 4;
+ /*
+ * $$$ allow the user to specify anything they want
+ * in the " ENCAP_STACK " header
+ */
+ else
+ return clib_error_return (0, \"parse error: '%U'\",
+ format_unformat_error, line_input);
+ }
+
+ unformat_free (line_input);
+
+ if (src_set == 0)
+ return clib_error_return (0, \"tunnel src address not specified\");
+
+ if (dst_set == 0)
+ return clib_error_return (0, \"tunnel dst address not specified\");
+
+ memset (a, 0, sizeof (*a));
+
+ a->is_add = is_add;
+
+#define _(x) a->x = x;
+ foreach_copy_field;
+#undef _
+
+ rv = vnet_" encap_stack "_add_del_tunnel (a, 0 /* hw_if_indexp */);
+
+ switch(rv)
+ {
+ case 0:
+ break;
+ case VNET_API_ERROR_INVALID_VALUE:
+ return clib_error_return (0, \"tunnel already exists...\");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, \"tunnel does not exist...\");
+
+ default:
+ return clib_error_return
+ (0, \"vnet_" encap_stack "_add_del_tunnel returned %d\", rv);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (create_" encap_stack "_tunnel_command, static) = {
+ .path = \"lisp gpe tunnel\",
+ .short_help =
+ \"<mumble> tunnel src <ip4-addr> dst <ip4-addr>\\n\"
+ \" [encap-fib-id <nn>] [decap-fib-id <nn>]\\n\"
+ \" [decap-next [ip4|ip6|ethernet|nsh-encap|<nn>]][del]\\n\",
+ .function = " encap_stack "_add_del_tunnel_command_fn,
+};
+
+static clib_error_t *
+show_" encap_stack "_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ " encap_stack "_main_t * ngm = &" encap_stack "_main;
+ " encap_stack "_tunnel_t * t;
+
+ if (pool_elts (ngm->tunnels) == 0)
+ vlib_cli_output (vm, \"No lisp-gpe tunnels configured...\");
+
+ pool_foreach (t, ngm->tunnels,
+ ({
+    vlib_cli_output (vm, \"%U\", format_" encap_stack "_tunnel, t);
+ }));
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_" encap_stack "_tunnel_command, static) = {
+ .path = \"show lisp gpe tunnel\",
+ .function = show_" encap_stack "_tunnel_command_fn,
+};
+
+clib_error_t *" encap_stack "_init (vlib_main_t *vm)
+{
+ " encap_stack "_main_t *ngm = &" encap_stack "_main;
+
+ ngm->vnet_main = vnet_get_main();
+ ngm->vlib_main = vm;
+
+ ngm->" encap_stack "_tunnel_by_key
+ = hash_create_mem (0, sizeof(" encap_stack "_tunnel_key_t), sizeof (uword));
+
+ /* YMMV, register with the local netstack */
+ udp_register_dst_port (vm, UDP_DST_PORT_" encap_stack ",
+ " encap_stack "_input_node.index, 1 /* is_ip4 */);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(" encap_stack "_init);
+
+")
+
diff --git a/build-root/emacs-lisp/tunnel-decap-skel.el b/build-root/emacs-lisp/tunnel-decap-skel.el
new file mode 100644
index 00000000000..fc6f3a1474b
--- /dev/null
+++ b/build-root/emacs-lisp/tunnel-decap-skel.el
@@ -0,0 +1,299 @@
+;;; tunnel-decap-skel.el - tunnel decapsulation skeleton
+
+(require 'skeleton)
+
+(define-skeleton tunnel-decap-skel
+"Insert a tunnel decap implementation"
+nil
+'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): "))
+'(setq ENCAP_STACK (upcase encap_stack))
+'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack))
+'(setq ENCAP-STACK (upcase encap-stack))
+"
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/" encap-stack "/" encap_stack ".h>
+
+typedef struct {
+ u32 next_index;
+ u32 tunnel_index;
+ u32 error;
+ " encap_stack "_header_t h;
+} " encap_stack "_rx_trace_t;
+
+static u8 * format_" encap_stack "_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ " encap_stack "_rx_trace_t * t = va_arg (*args, " encap_stack "_rx_trace_t *);
+
+ if (t->tunnel_index != ~0)
+ {
+ s = format (s, \"" ENCAP-STACK ": tunnel %d next %d error %d\",
+ t->tunnel_index, t->next_index, t->error);
+ }
+ else
+ {
+ s = format (s, \"" ENCAP-STACK ": no tunnel next %d error %d\\n\",
+ t->next_index, t->error);
+ }
+ s = format (s, \"\\n %U\", format_" encap_stack "_header_with_length, &t->h,
+ (u32) sizeof (t->h) /* max size */);
+ return s;
+}
+
+static uword
+" encap_stack "_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ " encap_stack "_main_t * ngm = &" encap_stack "_main;
+ u32 last_tunnel_index = ~0;
+ " encap_stack "_tunnel_key_t last_key;
+ u32 pkts_decapsulated = 0;
+
+ memset (&last_key, 0xff, sizeof (last_key));
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+#if 0 /* $$$ dual loop when the single loop works */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ nsh_unicast_header_t * h0, * h1;
+ u32 label0, label1;
+ u32 next0, next1;
+ uword * p0, * p1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ next0 = next1 = " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT;
+
+ label0 = clib_net_to_host_u32 (h0->label_exp_s_ttl);
+ label1 = clib_net_to_host_u32 (h1->label_exp_s_ttl);
+
+ /*
+ * Translate label contents into a fib index.
+ * This is a decent sanity check, and guarantees
+ * a sane FIB for the downstream lookup
+ */
+ label0 = vnet_nsh_uc_get_label (label0);
+ label1 = vnet_nsh_uc_get_label (label1);
+
+ /* If 2xlabels match, and match the 1-wide cache, use it */
+ if (label0 == label1 && rt->last_label == label0)
+ {
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = rt->last_fib_index;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = rt->last_fib_index;
+ }
+ else
+ {
+ p0 = hash_get (rt->mm->fib_index_by_nsh_label, label0);
+ if (PREDICT_FALSE (p0 == 0))
+ {
+ next0 = " ENCAP_STACK "_INPUT_NEXT_DROP;
+ b0->error = node->errors[NSH_ERROR_BAD_LABEL];
+ }
+ else
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = p0[0];
+
+ p1 = hash_get (rt->mm->fib_index_by_nsh_label, label1);
+ if (PREDICT_FALSE (p1 == 0))
+ {
+ next1 = " ENCAP_STACK "_INPUT_NEXT_DROP;
+ b1->error = node->errors[NSH_ERROR_BAD_LABEL];
+ }
+ else
+ {
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = p1[0];
+ rt->last_fib_index = p1[0];
+ rt->last_label = label1;
+ }
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->label_exp_s_ttl = label0;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->label_exp_s_ttl = label1;
+ }
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+ vlib_buffer_advance (b1, sizeof (*h1));
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ " encap_stack "_header_t * iuX0;
+ uword * p0;
+ u32 tunnel_index0;
+ " encap_stack "_tunnel_t * t0;
+ " encap_stack "_tunnel_key_t key0;
+ u32 error0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /*
+ * udp leaves current_data pointing at the tunnel header
+ * $$$$ FIXME
+ */
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
+
+ iuX0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, lisp-gpe) */
+ vlib_buffer_advance (b0, sizeof (*iuX0));
+
+ tunnel_index0 = ~0;
+ error0 = 0;
+ next0 = " ENCAP_STACK "_INPUT_NEXT_DROP;
+
+ key0.src = iuX0->ip4.src_address.as_u32;
+ key0.iid = iuX0->lisp.iid;
+
+ /* $$$ validate key comparison */
+ if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])))
+ {
+ p0 = hash_get_mem (ngm->" encap_stack "_tunnel_by_key, &key0);
+
+ if (p0 == 0)
+ {
+ error0 = " ENCAP_STACK "_ERROR_NO_SUCH_TUNNEL;
+ goto trace0;
+ }
+
+ last_key.as_u64[0] = key0.as_u64[0];
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+
+ t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
+
+ next0 = t0->decap_next_index;
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ /*
+ * ip[46] lookup in the configured FIB
+ * " encap-stack ", here's the encap tunnel sw_if_index
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+ pkts_decapsulated ++;
+
+ trace0:
+ b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ " encap_stack "_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = tunnel_index0;
+ tr->h = iuX0->lisp;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, " encap_stack "_input_node.index,
+ " ENCAP_STACK "_ERROR_DECAPSULATED,
+ pkts_decapsulated);
+ return from_frame->n_vectors;
+}
+
+static char * " encap_stack "_error_strings[] = {
+#define " encap_stack "_error(n,s) s,
+#include <vnet/" encap-stack "/" encap_stack "_error.def>
+#undef " encap_stack "_error
+#undef _
+};
+
+VLIB_REGISTER_NODE (" encap_stack "_input_node) = {
+  .function = " encap_stack "_input,
+ .name = \"" encap-stack "-input\",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = " ENCAP_STACK "_N_ERROR,
+ .error_strings = " encap_stack "_error_strings,
+
+ .n_next_nodes = " ENCAP_STACK "_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [" ENCAP_STACK "_INPUT_NEXT_##s] = n,
+ foreach_" encap_stack "_input_next
+#undef _
+ },
+
+ .format_buffer = format_" encap_stack "_header_with_length,
+ .format_trace = format_" encap_stack "_rx_trace,
+ // $$$$ .unformat_buffer = unformat_" encap_stack "_header,
+};
+
+")
diff --git a/build-root/emacs-lisp/tunnel-encap-skel.el b/build-root/emacs-lisp/tunnel-encap-skel.el
new file mode 100644
index 00000000000..5933153e16c
--- /dev/null
+++ b/build-root/emacs-lisp/tunnel-encap-skel.el
@@ -0,0 +1,245 @@
+;;; tunnel-encap-skel.el - tunnel interface output skeleton
+
+(require 'skeleton)
+
+(define-skeleton tunnel-encap-skel
+"Insert a tunnel encap implementation"
+nil
+'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): "))
+'(setq ENCAP_STACK (upcase encap_stack))
+'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack))
+'(setq ENCAP-STACK (upcase encap-stack))
+"
+#include <clib/error.h>
+#include <clib/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/" encap-stack "/" encap_stack ".h>
+
+/* Statistics (not really errors) */
+#define foreach_" encap_stack "_encap_error \\
+_(ENCAPSULATED, \"good packets encapsulated\")
+
+static char * " encap_stack "_encap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_" encap_stack "_encap_error
+#undef _
+};
+
+typedef enum {
+#define _(sym,str) " ENCAP_STACK "_ENCAP_ERROR_##sym,
+ foreach_" encap_stack "_encap_error
+#undef _
+ " ENCAP_STACK "_ENCAP_N_ERROR,
+} " encap_stack "_encap_error_t;
+
+typedef enum {
+ " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP,
+ " ENCAP_STACK "_ENCAP_NEXT_DROP,
+ " ENCAP_STACK "_ENCAP_N_NEXT,
+} " encap_stack "_encap_next_t;
+
+typedef struct {
+ u32 tunnel_index;
+} " encap_stack "_encap_trace_t;
+
+u8 * format_" encap_stack "_encap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ " encap_stack "_encap_trace_t * t
+ = va_arg (*args, " encap_stack "_encap_trace_t *);
+
+ s = format (s, \"" ENCAP-STACK ": tunnel %d\", t->tunnel_index);
+ return s;
+}
+
+/* $$$$ FIXME adjust to match the rewrite string */
+#define foreach_fixed_header_offset \\
+_(0) _(1) _(2) _(3) _(FIXME)
+
+static uword
+" encap_stack "_encap (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ " encap_stack "_main_t * ngm = &" encap_stack "_main;
+ vnet_main_t * vnm = ngm->vnet_main;
+ u32 pkts_encapsulated = 0;
+ u16 old_l0 = 0, old_l1 = 0;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+#if 0 /* $$$ dual loop when the single loop works */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ nsh_unicast_header_t * h0, * h1;
+ u32 label0, label1;
+ u32 next0, next1;
+ uword * p0, * p1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ next0 = next1 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP;
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+ vlib_buffer_advance (b1, sizeof (*h1));
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+#endif
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP;
+ vnet_hw_interface_t * hi0;
+ ip4_header_t * ip0;
+ udp_header_t * udp0;
+ u64 * copy_src0, * copy_dst0;
+ u32 * copy_src_last0, * copy_dst_last0;
+ " encap_stack "_tunnel_t * t0;
+ u16 new_l0;
+ ip_csum_t sum0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* 1-wide cache? */
+ hi0 = vnet_get_sup_hw_interface
+ (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
+
+ t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
+
+ ASSERT(vec_len(t0->rewrite) >= 24);
+
+ /* Apply the rewrite string. $$$$ vnet_rewrite? */
+ vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
+
+ ip0 = vlib_buffer_get_current(b0);
+ /* Copy the fixed header */
+ copy_dst0 = (u64 *) ip0;
+ copy_src0 = (u64 *) t0->rewrite;
+
+        ASSERT (sizeof (" encap_stack "_header_t) == FIXME);
+
+ /* Copy first N octets 8-bytes at a time */
+#define _(offs) copy_dst0[offs] = copy_src0[offs];
+ foreach_fixed_header_offset;
+#undef _
+#if 0 /* needed if encap not a multiple of 8 bytes */
+ /* Last 4 octets. Hopefully gcc will be our friend */
+ copy_dst_last0 = (u32 *)(&copy_dst0[FIXME]);
+ copy_src_last0 = (u32 *)(&copy_src0[FIXME]);
+ copy_dst_last0[0] = copy_src_last0[0];
+
+#endif
+ /* fix the <bleep>ing outer-IP checksum */
+ sum0 = ip0->checksum;
+ /* old_l0 always 0, see the rewrite setup */
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */);
+ ip0->checksum = ip_csum_fold (sum0);
+ ip0->length = new_l0;
+
+ /* Fix UDP length */
+ udp0 = (udp_header_t *)(ip0+1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip0));
+
+ udp0->length = new_l0;
+
+ /* Reset to look up tunnel partner in the configured FIB */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ pkts_encapsulated ++;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ " encap_stack "_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->tunnel_index = t0 - ngm->tunnels;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, node->node_index,
+ " ENCAP_STACK "_ENCAP_ERROR_ENCAPSULATED,
+ pkts_encapsulated);
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (" encap_stack "_encap_node) = {
+ .function = " encap_stack "_encap,
+ .name = \"" encap-stack "-encap\",
+ .vector_size = sizeof (u32),
+ .format_trace = format_" encap_stack "_encap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(" encap_stack "_encap_error_strings),
+ .error_strings = " encap_stack "_encap_error_strings,
+
+ .n_next_nodes = " ENCAP_STACK "_ENCAP_N_NEXT,
+
+ .next_nodes = {
+ [" ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP] = \"ip4-lookup\",
+ [" ENCAP_STACK "_ENCAP_NEXT_DROP] = \"error-drop\",
+ },
+};
+")
diff --git a/build-root/emacs-lisp/tunnel-h-skel.el b/build-root/emacs-lisp/tunnel-h-skel.el
new file mode 100644
index 00000000000..7e33309b3b1
--- /dev/null
+++ b/build-root/emacs-lisp/tunnel-h-skel.el
@@ -0,0 +1,128 @@
+;;; tunnel-h-skel.el - tunnel encap header file skeleton
+
+(require 'skeleton)
+
+(define-skeleton tunnel-h-skel
+"Insert a tunnel encap header file"
+nil
+'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): "))
+'(setq ENCAP_STACK (upcase encap_stack))
+'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack))
+"
+#ifndef included_vnet_" encap_stack "_h
+#define included_vnet_" encap_stack "_h
+
+#include <clib/error.h>
+#include <clib/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/" encap-stack "/" encap_stack "_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/udp.h>
+
+/* Encap stack built in encap.c */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4; /* 20 bytes */
+ udp_header_t udp; /* 8 bytes */
+ " encap_stack "_header_t lisp; /* 8 bytes */
+}) " encap_stack "_header_t;
+
+typedef CLIB_PACKED(struct {
+ /*
+ * Key fields:
+ * all fields in NET byte order
+ */
+ union {
+ struct {
+ u32 FIXME_NET_BYTE_ORDER;
+ };
+ u64 as_u64[1];
+ };
+}) " encap_stack "_tunnel_key_t;
+
+typedef struct {
+ /* Rewrite string. $$$$ maybe: embed vnet_rewrite header */
+ u8 * rewrite;
+
+ /* decap next index */
+ u32 decap_next_index;
+
+ /* tunnel src and dst addresses */
+ ip4_address_t src;
+ ip4_address_t dst;
+
+ /* FIB indices */
+ u32 encap_fib_index; /* tunnel partner lookup here */
+ u32 decap_fib_index; /* inner IP lookup here */
+
+ /* vnet intfc hw/sw_if_index */
+ u32 hw_if_index;
+
+ /* encap header fields in HOST byte order */
+ u32 FIXME;
+} " encap_stack "_tunnel_t;
+
+#define foreach_" encap_stack "_input_next \\
+_(DROP, \"error-drop\") \\
+_(IP4_INPUT, \"ip4-input\") \\
+_(IP6_INPUT, \"ip6-input\") \\
+_(ETHERNET_INPUT, \"ethernet-input\") \\
+_(" ENCAP_STACK "_ENCAP, \"" encap-stack "-encap\")
+
+typedef enum {
+#define _(s,n) " ENCAP_STACK "_INPUT_NEXT_##s,
+ foreach_" encap_stack "_input_next
+#undef _
+ " ENCAP_STACK "_INPUT_N_NEXT,
+} " encap_stack "_input_next_t;
+
+typedef enum {
+#define " encap_stack "_error(n,s) " ENCAP_STACK "_ERROR_##n,
+#include <vnet/" encap-stack "/" encap_stack "_error.def>
+#undef " encap_stack "_error
+ " ENCAP_STACK "_N_ERROR,
+} " encap_stack "_input_error_t;
+
+typedef struct {
+ /* vector of encap tunnel instances */
+ " encap_stack "_tunnel_t *tunnels;
+
+ /* lookup tunnel by key */
+ uword * " encap_stack "_tunnel_by_key;
+
+ /* Free vlib hw_if_indices */
+ u32 * free_" encap_stack "_tunnel_hw_if_indices;
+
+ /* convenience */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} " encap_stack "_main_t;
+
+" encap_stack "_main_t " encap_stack "_main;
+
+vlib_node_registration_t " encap_stack "_input_node;
+vlib_node_registration_t " encap_stack "_encap_node;
+
+u8 * format_" encap_stack "_encap_trace (u8 * s, va_list * args);
+u8 * format_" encap_stack "_header_with_length (u8 * s, va_list * args);
+
+typedef struct {
+ u8 is_add;
+ ip4_address_t src, dst;
+ u32 encap_fib_index;
+ u32 decap_fib_index;
+ u32 decap_next_index;
+ /* encap fields in HOST byte order */
+ u8 FIXME_HOST_BYTE_ORDER;
+} vnet_" encap_stack "_add_del_tunnel_args_t;
+
+int vnet_" encap_stack "_add_del_tunnel
+(vnet_" encap_stack "_add_del_tunnel_args_t *a, u32 * hw_if_indexp);
+
+u8 * format_" encap_stack "_header_with_length (u8 * s, va_list * args);
+
+#endif /* included_vnet_" encap_stack "_h */
+
+")
diff --git a/build-root/packages/vppapigen.mk b/build-root/packages/vppapigen.mk
new file mode 100644
index 00000000000..0d284631979
--- /dev/null
+++ b/build-root/packages/vppapigen.mk
@@ -0,0 +1,5 @@
+vppapigen_configure_depend = vppinfra-install
+
+vppapigen_CPPFLAGS = $(call installed_includes_fn, vppinfra)
+
+vppapigen_LDFLAGS = $(call installed_libs_fn, vppinfra)
diff --git a/build-root/packages/vppinfra.mk b/build-root/packages/vppinfra.mk
new file mode 100644
index 00000000000..db48ed58f03
--- /dev/null
+++ b/build-root/packages/vppinfra.mk
@@ -0,0 +1,4 @@
+# nothing
+
+
+
diff --git a/build-root/packages/vppversion.mk b/build-root/packages/vppversion.mk
new file mode 100644
index 00000000000..3ccde62b3f5
--- /dev/null
+++ b/build-root/packages/vppversion.mk
@@ -0,0 +1,5 @@
+vppversion_configure_depend = vppinfra-install
+
+vppversion_CPPFLAGS = $(call installed_includes_fn, vppinfra)
+
+vppversion_LDFLAGS = $(call installed_libs_fn, vppinfra)
diff --git a/build-root/platforms.mk b/build-root/platforms.mk
new file mode 100644
index 00000000000..cb36d1bfac7
--- /dev/null
+++ b/build-root/platforms.mk
@@ -0,0 +1,50 @@
+# Copyright (c) 2015 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright (c) 2007-2008 Eliot Dresselhaus
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+# Platform selects e.g. Linux config file
+PLATFORM = native
+
+native_arch = native
+
+# Default for which packages go into read-only image
+# used to have pam
+default_root_packages = bash coreutils sysvinit util-linux mingetty procps
+
+# Linux based platforms (PLATFORM=i686 PLATFORM=ppc etc.)
+i686_arch = i686
+x86_64_arch = x86_64
+ppc_arch = ppc
+
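Note: PLATFORM here is only a default; real builds override it on the make
command line. As a point of reference, the vagrant and plugin-toolkit scripts
later in this patch drive it roughly like this:

  # debug build and packaging for the "vpp" platform
  make PLATFORM=vpp TAG=vpp_debug install-packages
  # production build used by the plugin-toolkit script
  make PLATFORM=vpp TAG=vpp V=0 install-packages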
diff --git a/build-root/rpm/vpp.service b/build-root/rpm/vpp.service
new file mode 100644
index 00000000000..dd8ee36bd73
--- /dev/null
+++ b/build-root/rpm/vpp.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Vector Packet Processing Process
+After=syslog.target network.target auditd.service
+
+[Service]
+ExecStart=/bin/sh -c '/usr/bin/vpe $(cat /etc/vpp/startup.conf | sed -e "s/#.*//")'
+Type=simple
+Restart=on-failure
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
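The ExecStart line strips "#" comments out of /etc/vpp/startup.conf with sed
and hands whatever is left to vpe as command-line arguments. A minimal sketch
of that expansion, assuming a hypothetical one-line startup.conf:

  echo 'unix { interactive }   # hypothetical startup option' > /tmp/startup.conf
  /usr/bin/vpe $(cat /tmp/startup.conf | sed -e "s/#.*//")
  # expands to: /usr/bin/vpe unix { interactive }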
diff --git a/build-root/rpm/vpp.spec b/build-root/rpm/vpp.spec
new file mode 100644
index 00000000000..ddcdeb9f1ab
--- /dev/null
+++ b/build-root/rpm/vpp.spec
@@ -0,0 +1,127 @@
+%define _vpp_install_dir ../install-vpp-native
+%define _vpp_build_dir ../build-tool-native
+%define _unitdir /lib/systemd/system
+%define _topdir %(pwd)
+%define _builddir %{_topdir}
+%define _version %(../scripts/version rpm-version)
+%define _release %(../scripts/version rpm-release)
+
+Name: vpp
+Summary: Vector Packet Processing
+License: MIT
+Version: %{_version}
+Release: %{_release}
+Requires: vpp-lib = %{_version}-%{_release}, net-tools, pciutils
+
+%description
+This package provides VPP executables: vpe, vpe_api_test, vpe_json_test
+vpe - the vector packet engine
+vpe_api_test - vector packet engine API test tool
+vpe_json_test - vector packet engine JSON test tool
+
+%package lib
+Summary: VPP libraries
+Group: System Environment/Libraries
+
+%description lib
+This package contains the VPP shared libraries, including:
+vppinfra - foundation library supporting vectors, hashes, bitmaps, pools, and string formatting.
+dpdk - Intel DPDK library
+svm - shared virtual memory library
+vlib - vector processing library
+vlib-api - binary API library
+vnet - network stack library
+
+%package devel
+Summary: VPP header files, static libraries
+Group: Development/Libraries
+Requires: vpp-lib
+
+%description devel
+This package contains the header files and static libraries for
+vppinfra. Install this package if you want to write or compile a
+program that needs vpp. The vppinfra components include:
+dynamic vectors (vec.c), dynamic bitmaps (bitmap.h), allocation heaps of
+objects (heap.c), allocation pools (pool.h), dynamic hash tables (hash.c),
+the memory allocator (mheap.c), an extendable printf-like interface built on
+top of vectors (format.c), formats for data structures (std-formats.c), and
+support for clock time-based function calls (timer.c).
+TODO: reference and describe only the .h files
+
+%install
+#
+# binaries
+#
+mkdir -p -m755 %{buildroot}%{_bindir}
+mkdir -p -m755 %{buildroot}%{_unitdir}
+install -p -m 755 %{_vpp_install_dir}/*/bin/* %{buildroot}%{_bindir}
+install -p -m 755 %{_vpp_build_dir}/vppapigen/vppapigen %{buildroot}%{_bindir}
+#
+# configs
+#
+mkdir -p -m755 %{buildroot}/etc/vpp
+mkdir -p -m755 %{buildroot}/etc/sysctl.d
+install -p -m 644 vpp.service %{buildroot}%{_unitdir}
+install -p -m 644 ../../vpp/conf/startup.conf %{buildroot}/etc/vpp
+install -p -m 644 ../../vpp/conf/80-vpp.conf %{buildroot}/etc/sysctl.d
+#
+# libraries
+#
+mkdir -p -m755 %{buildroot}%{_libdir}
+for file in $(find %{_vpp_install_dir}/*/lib* -type f -name '*.so.*.*.*' -print )
+do
+ install -p -m 755 $file %{buildroot}%{_libdir}
+done
+for file in $(cd %{buildroot}%{_libdir} && find . -type f -print | sed -e 's/^\.\///')
+do
+ # make lib symlinks
+ ( cd %{buildroot}%{_libdir} &&
+ ln -fs $file $(echo $file | sed -e 's/\(\.so\.[0-9]\+\).*/\1/') )
+done
+#
+# devel
+#
+for dir in $(find %{_vpp_install_dir}/*/include/ -maxdepth 0 -type d -print | grep -v dpdk)
+do
+ for subdir in $(cd ${dir} && find . -type d -print)
+ do
+ mkdir -p -m755 %{buildroot}/usr/include/${subdir}
+ done
+ for file in $(cd ${dir} && find . -type f -print)
+ do
+ install -p -m 644 $dir/$file %{buildroot}%{_includedir}/$file
+ done
+done
+# sample plugin
+mkdir -p -m755 %{buildroot}/usr/share/doc/vpp/examples/sample-plugin/sample
+for file in $(cd %{_vpp_install_dir}/../../sample-plugin && find -type f -print)
+do
+ install -p -m 644 %{_vpp_install_dir}/../../sample-plugin/$file \
+ %{buildroot}/usr/share/doc/vpp/examples/sample-plugin/$file
+done
+
+%post
+sysctl --system
+%systemd_post vpp.service
+
+%postun
+%systemd_postun_with_restart vpp.service
+
+%files
+%defattr(-,bin,bin)
+%{_unitdir}/vpp.service
+/usr/bin/vpe*
+/usr/bin/svm*
+%config /etc/sysctl.d/80-vpp.conf
+%config /etc/vpp/startup.conf
+
+%files lib
+%defattr(-,bin,bin)
+%{_libdir}/*
+
+%files devel
+%defattr(-,bin,bin)
+/usr/bin/vppapigen
+%{_includedir}/*
+/usr/share/doc/vpp/examples/sample-plugin
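One detail from %install above: the symlink loop derives each link name by
trimming the library version down to ".so.<major>". A small sketch of that
transformation, using a hypothetical file name:

  file=libvnet.so.0.0.0                                     # hypothetical library
  link=$(echo $file | sed -e 's/\(\.so\.[0-9]\+\).*/\1/')   # -> libvnet.so.0
  ln -fs $file $link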
diff --git a/build-root/scripts/find-dev-contents b/build-root/scripts/find-dev-contents
new file mode 100755
index 00000000000..77028af0051
--- /dev/null
+++ b/build-root/scripts/find-dev-contents
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# includes
+paths=`find $1/*/include -type f -print | grep -v '/dpdk/include/'`
+rm -f $2
+
+for path in $paths
+do
+ relpath=`echo $path | sed -e 's:.*/include/::'`
+ dir=`dirname $relpath`
+ if [ $dir = "." ] ; then
+ echo ../$path /usr/include >> $2
+ else
+ echo ../$path /usr/include/$dir >> $2
+ fi
+done
+
+# sample plugin
+paths=`(cd ..; find sample-plugin -type f -print | grep -v autom4te)`
+
+for path in $paths
+do
+ relpath=`echo $path | sed -e 's:.*/sample-plugin/::'`
+ dir=`dirname $relpath`
+ if [ $dir = "sample-plugin" ] ; then
+ echo ../../$path /usr/share/doc/vpp/examples/sample-plugin >> $2
+ else
+ echo ../../$path \
+ /usr/share/doc/vpp/examples/$dir >> $2
+ fi
+done
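Each line written to $2 is a "source destination" pair, presumably consumed
when the -dev package contents are assembled. A sketch tracing one hypothetical
header through the sed/dirname steps above:

  path=install-vpp-native/vppinfra/include/vppinfra/vec.h   # hypothetical input
  relpath=$(echo $path | sed -e 's:.*/include/::')           # vppinfra/vec.h
  dir=$(dirname $relpath)                                     # vppinfra
  echo ../$path /usr/include/$dir
  # -> ../install-vpp-native/vppinfra/include/vppinfra/vec.h /usr/include/vppinfra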
diff --git a/build-root/scripts/generate-deb-changelog b/build-root/scripts/generate-deb-changelog
new file mode 100755
index 00000000000..a143431257f
--- /dev/null
+++ b/build-root/scripts/generate-deb-changelog
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+CHANGELOG=deb/debian/changelog
+DIST=unstable
+FIRST=1
+
+print_changelog_item() {
+ DATE=$(git log -1 --format=%cD ${TAG})
+ DEBFULLNAME=$(git log -1 --format=%an ${TAG})
+ DEBEMAIL=$(git log -1 --format=%ae ${TAG})
+
+ if [ ${FIRST} = 0 ]; then echo >> ${CHANGELOG}; fi
+ FIRST=0
+
+ echo "vpp (${VER}) ${DIST}; urgency=low" >> ${CHANGELOG}
+ echo >> ${CHANGELOG}
+ echo "${DESC}" >> ${CHANGELOG}
+ echo >> ${CHANGELOG}
+ echo " -- ${DEBFULLNAME} <${DEBEMAIL}> ${DATE}" >> ${CHANGELOG}
+}
+
+VER=$(scripts/version)
+TAG=HEAD
+ADDS=$(echo ${VER} | sed -e 's/~.*//'| cut -s -d- -f2)
+
+rm -f ${CHANGELOG}
+
+if [ -n "${ADDS}" ]; then
+ DESC=" * includes ${ADDS} commits after $(echo ${VER}| cut -d- -f1) release"
+ print_changelog_item
+fi
+
+for TAG in $(git tag -l 'v[0-9].[0-9].[0-9]' | sort -r ); do
+ VER=$(echo ${TAG}| sed -e 's/^v//')
+ DESC=$(git tag -l -n20 ${TAG} | tail -n+2 | sed -e 's/^ */ /')
+ print_changelog_item
+done
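print_changelog_item emits one Debian changelog stanza per version, built from
the echo lines above. A hedged sketch of the resulting shape, with hypothetical
values throughout:

  vpp (1.0.0-14~gdeadbee) unstable; urgency=low

   * includes 14 commits after 1.0.0 release

   -- Jane Developer <jane@example.com> Tue, 8 Dec 2015 12:00:00 -0700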
diff --git a/build-root/scripts/make-plugin-toolkit b/build-root/scripts/make-plugin-toolkit
new file mode 100755
index 00000000000..14e9eda26fe
--- /dev/null
+++ b/build-root/scripts/make-plugin-toolkit
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+set -eux
+
+build_tarball() {
+ for dir in vppinfra dpdk svm vlib-api vlib vnet vpp vpp-api-test
+ do
+ tar -C install-$1/$dir/include -cf - . | tar -C $tmp_dir/include -xf -
+ done
+ tar -C ../sample-plugin -cf - . \
+ | tar -C $tmp_dir/src/sample-plugin -xf -
+ cp tools/bin/vppapigen $tmp_dir/tools/bin
+ echo Created by `id -u -n` on `hostname` at `date` > \
+ $tmp_dir/toolkit-version-stamp
+ cp scripts/vpp-plugin-toolkit-readme $tmp_dir/README
+ tar -C $tmp_dir -zcf $PWD/vpp-plugin-toolkit-$1.tar.gz .
+}
+
+if [ `basename $PWD` != "build-root" ] ; then
+ echo Please run this script from build-root
+ exit 1
+fi
+
+echo Pull additional toolkit repos
+make PLATFORM=vpp sample-plugin-find-source
+
+make PLATFORM=vpp TAG=vpp wipe-all
+echo Build vpp forwarder production package
+make PLATFORM=vpp TAG=vpp V=0 strip_sumbols=yes install-packages
+
+tmp_dir="`mktemp -d /tmp/plugin-XXXXXX`"
+trap "rm -rf $tmp_dir" err
+
+echo Create vpp forwarder production plugin toolkit tarball
+mkdir -p $tmp_dir/tools/bin $tmp_dir/include $tmp_dir/lib64 \
+ $tmp_dir/src/sample-plugin
+build_tarball vpp-native
+rm -rf $tmp_dir
+
+exit 0
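A hedged sketch of unpacking the result: the tarball name follows from
build_tarball's $1 argument ("vpp-native"), and the top-level contents follow
from the mkdir/cp/tar calls above.

  mkdir /tmp/toolkit
  tar -C /tmp/toolkit -zxf vpp-plugin-toolkit-vpp-native.tar.gz
  ls /tmp/toolkit   # expect: README include lib64 src tools toolkit-version-stamp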
diff --git a/build-root/scripts/version b/build-root/scripts/version
new file mode 100755
index 00000000000..9d236bc227a
--- /dev/null
+++ b/build-root/scripts/version
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+TAG=$(git describe | cut -d- -f1 | sed -e 's/^v//')
+ADD=$(git describe | cut -s -d- -f2)
+CMT=$(git describe --dirty --match 'v*'| cut -s -d- -f3,4)
+
+if [ "$1" = "rpm-version" ]; then
+ echo ${TAG}
+ exit
+fi
+
+if [ "$1" = "rpm-release" ]; then
+ [ -z "${ADD}" ] && echo release && exit
+ echo ${ADD}~${CMT}
+ exit
+fi
+
+if [ -n "${ADD}" ]; then
+ echo ${TAG}-${ADD}~${CMT}
+else
+ echo ${TAG}
+fi
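The script slices a "git describe" string into tag, commits-since-tag and
abbreviated hash. A worked example, assuming git describe prints the
hypothetical string v1.0.0-14-gdeadbee:

  scripts/version              # -> 1.0.0-14~gdeadbee
  scripts/version rpm-version  # -> 1.0.0
  scripts/version rpm-release  # -> 14~gdeadbee

On an exact tag (no -N-g suffix) the default and rpm-release cases print
1.0.0 and "release" respectively.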
diff --git a/build-root/vagrant/.gitignore b/build-root/vagrant/.gitignore
new file mode 100644
index 00000000000..a977916f658
--- /dev/null
+++ b/build-root/vagrant/.gitignore
@@ -0,0 +1 @@
+.vagrant/
diff --git a/build-root/vagrant/README b/build-root/vagrant/README
new file mode 100644
index 00000000000..0498615473e
--- /dev/null
+++ b/build-root/vagrant/README
@@ -0,0 +1,54 @@
+To run vpp with the debug shell:
+
+sudo ~vagrant/git/vpp/build-root/install-vpp_debug-native/vpp/bin/vpe unix interactive
+
+which will result in a prompt that looks like:
+
+DBGvpd#
+
+To give it a spin, we can create a tap interface and try a simple ping
+(with trace).
+
+To create the tap:
+
+DBGvpd# tap connect foobar
+Created tap-0 for Linux tap 'foobar'
+DBGvpd# show int
+
+To assign it an ip address (and 'up' the interface):
+
+DBGvpd# set int ip address tap-0 192.168.1.1/24
+DBGvpd# set int state tap-0 up
+
+To turn on packet tracing for the tap interface:
+DBGvpd# trace add tapcli-rx 10
+
+Now, to set up and try the other end from the unix prompt:
+vagrant@vagrant-ubuntu-trusty-64:~$ sudo ip addr add 192.168.1.2/24 dev foobar
+vagrant@vagrant-ubuntu-trusty-64:~$ ping -c 3 192.168.1.1
+
+To look at the trace, back in the vpp CLI:
+DBGvpd# show trace
+
+And to stop tracing:
+
+DBGvpd# clear trace
+
+Other fun things to look at:
+
+The vlib packet processing graph:
+DBGvpd# show vlib graph
+
+which will produce output like:
+
+ Name Next Previous
+ip4-icmp-input error-punt [0] ip4-local
+ ip4-icmp-echo-request [1]
+ vpe-icmp4-oam [2]
+
+To read this: the first column (Name) is the name of the node, the second
+column (Next) lists that node's children, and the third column (Previous)
+lists its parents.
+
+To see this README again:
+cat /vagrant/README
\ No newline at end of file
diff --git a/build-root/vagrant/Vagrantfile b/build-root/vagrant/Vagrantfile
new file mode 100644
index 00000000000..9c61dd33af2
--- /dev/null
+++ b/build-root/vagrant/Vagrantfile
@@ -0,0 +1,36 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+
+ # Pick the right distro and bootstrap, default is ubuntu1404
+ distro = ENV['VPP_VAGRANT_DISTRO']
+ if distro == 'centos7'
+ config.vm.box = "puppetlabs/centos-7.0-64-nocm"
+ config.vm.provision 'shell', path: 'bootstrap.centos7.sh'
+ else
+ config.vm.box = "puppetlabs/ubuntu-14.04-64-nocm"
+ config.vm.provision 'shell', path: 'bootstrap.ubuntu1404.sh'
+ end
+
+ # vagrant-cachier caches apt/yum etc to speed subsequent
+ # vagrant up
+ # to enable, run
+ # vagrant plugin install vagrant-cachier
+ #
+ if Vagrant.has_plugin?("vagrant-cachier")
+ config.cache.scope = :box
+ end
+
+ config.vm.synced_folder "../../", "/vpp", disabled: false
+ config.vm.provider "virtualbox" do |vb|
+ vb.memory = "4096"
+ end
+ config.vm.provider "vmware_fusion" do |fusion,override|
+ fusion.vmx["memsize"] = "4096"
+ end
+ config.vm.provider "vmware_workstation" do |vws,override|
+ vws.vmx["memsize"] = "8192"
+ vws.vmx["numvcpus"] = "4"
+ end
+end
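The box and bootstrap script are selected from the VPP_VAGRANT_DISTRO
environment variable, so bringing up either flavor is just:

  vagrant up                              # default: ubuntu1404 box
  VPP_VAGRANT_DISTRO=centos7 vagrant up   # CentOS 7 box and bootstrap instead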
diff --git a/build-root/vagrant/bootstrap.centos7.sh b/build-root/vagrant/bootstrap.centos7.sh
new file mode 100644
index 00000000000..1d811e45599
--- /dev/null
+++ b/build-root/vagrant/bootstrap.centos7.sh
@@ -0,0 +1,45 @@
+
+# Standard update + upgrade dance
+yum check-update
+yum update -y
+
+# Install build tools
+yum groupinstall 'Development Tools' -y
+yum install openssl-devel -y
+yum install glibc-static -y
+
+# Install jdk and maven
+yum install -y java-1.8.0-openjdk-devel
+
+# Load the uio kernel module
+modprobe uio_pci_generic
+
+echo uio_pci_generic >> /etc/modules-load.d/uio_pci_generic.conf
+
+# Setup for hugepages so it persists across reboots
+sysctl -w vm.nr_hugepages=1024
+echo "vm.nr_hugepages=1024" >> /etc/sysctl.conf
+mkdir -p /mnt/huge
+echo "hugetlbfs /mnt/huge hugetlbfs defaults 0 0" >> /etc/fstab
+mount /mnt/huge
+
+# Setup the vpp code
+cd ~vagrant/
+sudo -u vagrant mkdir git
+cd git/
+
+# You will need to alter this line to reflect reality.
+sudo -H -u vagrant git clone /vpp
+cd vpp
+
+# Initial vpp build
+if [ -d build-root ]; then
+ # Bootstrap vpp
+ cd build-root/
+ sudo -H -u vagrant ./bootstrap.sh
+
+ # Build vpp
+ sudo -H -u vagrant make PLATFORM=vpp TAG=vpp_debug install-packages
+ cd ~vagrant/
+ cat /vagrant/README
+fi
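A quick, hedged way to check that the hugepage setup above took effect inside
the VM (sample session; exact counts depend on available memory):

  grep HugePages_Total /proc/meminfo   # expect on the order of: HugePages_Total: 1024
  mount | grep /mnt/huge               # hugetlbfs mounted per the fstab entry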
diff --git a/build-root/vagrant/bootstrap.ubuntu1404.sh b/build-root/vagrant/bootstrap.ubuntu1404.sh
new file mode 100644
index 00000000000..c89f098ce25
--- /dev/null
+++ b/build-root/vagrant/bootstrap.ubuntu1404.sh
@@ -0,0 +1,77 @@
+# Standard update + upgrade dance
+apt-get update
+apt-get upgrade -y
+
+# Fix the silly notion that /bin/sh should point to dash by pointing it to bash
+
+sudo update-alternatives --install /bin/sh sh /bin/bash 100
+
+# Install build tools
+apt-get install -y build-essential autoconf automake bison libssl-dev ccache libtool git dkms debhelper
+
+# Install other stuff
+# apt-get install -y qemu-kvm libvirt-bin ubuntu-vm-builder bridge-utils
+
+# Install uio
+apt-get install -y linux-image-extra-`uname -r`
+
+# Install jdk and maven
+apt-get install -y openjdk-7-jdk
+# $$$ comment out for the moment
+# apt-get install -y --force-yes maven3
+
+# Install debian packaging tools
+apt-get install -y debhelper dkms
+
+# Setup for hugepages using upstart so it persists across reboots
+echo "vm.nr_hugepages=1024" >> /etc/sysctl.d/20-hugepages.conf
+sysctl --system
+
+cat << EOF > /etc/init/hugepages.conf
+start on runlevel [2345]
+
+task
+
+script
+ mkdir -p /run/hugepages/kvm || true
+ rm -f /run/hugepages/kvm/* || true
+ rm -f /dev/shm/* || true
+ mount -t hugetlbfs nodev /run/hugepages/kvm
+end script
+EOF
+
+# Make sure we run that hugepages.conf right now
+start hugepages
+
+# Setup the vpp code
+cd ~vagrant/
+sudo -u vagrant mkdir git
+cd git/
+
+# You will need to alter this line to reflect reality.
+sudo -H -u vagrant git clone /vpp
+cd vpp/
+
+# Initial vpp build
+if [ -d build-root ]; then
+ # Bootstrap vpp
+ cd build-root/
+ sudo -H -u vagrant ./bootstrap.sh
+
+ # Build vpp
+ sudo -H -u vagrant make PLATFORM=vpp TAG=vpp_debug install-deb
+
+ # Stick the dpdk module in the canonical place
+ cp ./install-vpp_debug-native/dpdk/kmod/igb_uio.ko /lib/modules/`uname -r`/kernel/drivers/uio/
+ depmod
+
+ # Load igb_uio into the kernel
+ modprobe igb_uio
+
+  # Make sure igb_uio loads at boot time
+  echo igb_uio >> /etc/modules
+ cd ~vagrant/
+ cat /vagrant/README
+
+fi
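A similarly hedged check that the upstart hugepages job and sysctl settings
took effect (sample session):

  status hugepages                     # upstart shows the one-shot task's state
  mount | grep /run/hugepages/kvm      # hugetlbfs mount created by the job
  grep HugePages_Total /proc/meminfo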