Diffstat (limited to 'vpp/build-root')
79 files changed, 6583 insertions, 0 deletions
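The files below add a self-contained build system under vpp/build-root. As a rough sketch of how it is normally driven, assuming a companion build-data tree on SOURCE_PATH that defines a platform and a package both named "vpp" (those names are illustrative; this diff covers only build-root):

$ cd vpp/build-root
$ ./bootstrap.sh                            # writes build-config.mk and path_setup, builds the vppapigen tool
$ source ./path_setup                       # prepends tools/ccache-bin and tools/bin to PATH
$ make PLATFORM=vpp TAG=debug vpp-install   # vpp-find-source -> vpp-configure -> vpp-build -> vpp-install
$ make PLATFORM=vpp TAG=debug vpp-wipe      # removes build-debug-<ARCH>/vpp and install-debug-<ARCH>/vpp
$ make PLATFORM=vpp install-packages        # stages basic_system plus ROOT_PACKAGES into packages-vpp/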
diff --git a/vpp/build-root/Makefile b/vpp/build-root/Makefile new file mode 100644 index 00000000..6e26e90e --- /dev/null +++ b/vpp/build-root/Makefile @@ -0,0 +1,1171 @@ +# Copyright (c) 2015 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright (c) 2007-2008 Eliot Dresselhaus +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +###################################################################### +# Collect makefile fragments +###################################################################### + +# Scripts require non-POSIX parts of bash +SHELL := /bin/bash + +# Where this makefile lives +MU_BUILD_ROOT_DIR = $(shell pwd) +MU_BUILD_NAME = $(shell basename $(MU_BUILD_ROOT_DIR)) + +# Search path (e.g. multiple directories) where sources are found. +SOURCE_PATH = + +# Pick up user's definitions for variables e.g. SOURCE_PATH, etc. 
+-include build-config.mk + +MU_BUILD_ROOT_NAME = $(shell basename $(MU_BUILD_ROOT_DIR)) +MU_BUILD_DATA_DIR_NAME = build-data + +ABSOLUTE_SOURCE_PATH = $(foreach d,$(SOURCE_PATH),$(shell cd $(d) && pwd)) + +SOURCE_PATH_BUILD_ROOT_DIRS = $(addsuffix /$(MU_BUILD_NAME),$(ABSOLUTE_SOURCE_PATH)) +SOURCE_PATH_BUILD_DATA_DIRS = $(addsuffix /$(MU_BUILD_DATA_DIR_NAME),$(ABSOLUTE_SOURCE_PATH)) + +# For tools use build-root as source path, otherwise use given source path +FIND_SOURCE_PATH = \ + $(if $(is_build_tool), \ + $(SOURCE_PATH_BUILD_ROOT_DIRS) $(MU_BUILD_ROOT_DIR), \ + $(SOURCE_PATH_BUILD_DATA_DIRS)) + +# First search given source path, then default to build-root +FULL_SOURCE_PATH = $(SOURCE_PATH_BUILD_DATA_DIRS) $(MU_BUILD_ROOT_DIR) + +# Misc functions +is_in_fn = $(strip $(filter $(1),$(2))) +last_fn = $(lastword $1) +chop_fn = $(wordlist 2,$(words $1),x $1) +uniq_fn = $(strip $(if $1,$(call uniq_fn,$(call chop_fn,$1)) \ + $(if $(filter $(call last_fn,$1),$(call chop_fn,$1)),,$(call last_fn,$1)))) +ifdef3_fn = $(if $(patsubst undefined,,$(origin $(1))),$(3),$(2)) +ifdef_fn = $(call ifdef3_fn,$(1),$(2),$($(1))) + +_mu_debug = $(warning "$(1) = $($(1))") + +$(foreach d,$(FIND_SOURCE_PATH), \ + $(eval _mu_package_mk_in_$(d) = $(shell find $(d)/packages/*.mk 2> /dev/null)) \ + $(eval _mu_srcdirs_in_$(d) = \ + $(shell find $(d)/.. \ + -maxdepth 1 \ + -type d \ + -and -not -name ".." \ + -and -not -name $(MU_BUILD_ROOT_NAME) \ + -and -not -name $(MU_BUILD_DATA_DIR_NAME))) \ + $(eval _mu_non_package_files_in_$(d) = \ + $(shell find $(d)/packages \ + -type f \ + -and -not -name '*.mk' \ + -and -not -name '*~' 2> /dev/null)) \ + $(foreach p,$(patsubst %.mk,%,$(notdir $(_mu_package_mk_in_$(d)))), \ + $(eval _mu_package_dir_$(p) = $(d)) \ + $(eval _mu_package_mk_$(p) = $(d)/packages/$(p).mk) \ + ) \ + $(foreach p,$(notdir $(_mu_srcdirs_in_$(d))), \ + $(eval _mu_package_srcdir_$(p) = $(shell cd $(d)/../$(p) && pwd)) \ + ) \ +) + +# Find root directory for package based on presence of package .mk +# makefile fragment on source path. 
+_find_build_data_dir_for_package_fn = $(shell \ + set -eu$(BUILD_DEBUG) ; \ + for d in $(FIND_SOURCE_PATH) ; do \ + f="$${d}/packages/$(1).mk" ; \ + [[ -f $${f} ]] && echo `cd $${d} && pwd` && exit 0 ; \ + done ; \ + echo "") +find_build_data_dir_for_package_fn = $(call ifdef_fn,_mu_package_dir_$(1),) + +# dir/PACKAGE +_find_source_fn = $(shell \ + set -eu$(BUILD_DEBUG) ; \ + d="$(call find_build_data_dir_for_package_fn,$(1))" ; \ + [[ -n "$${d}" ]] && d="$${d}/../$(1)" ; \ + echo "$${d}") +find_source_fn = $(call ifdef3_fn,_mu_package_dir_$(1),,$(_mu_package_dir_$(1))/../$(1)) + +# Find given FILE in source path as build-data/packages/FILE +find_package_file_fn = $(shell \ + set -eu$(BUILD_DEBUG) ; \ + d="$(call find_build_data_dir_for_package_fn,$(1))" ; \ + [[ -n "$${d}" ]] && d="$${d}/packages/$(2)" ; \ + [[ -f "$${d}" ]] && echo "$${d}") + +# Find first FILE in source path with name PATH/build-data/FILE +find_build_data_file_fn = $(shell \ + set -eu$(BUILD_DEBUG) ; \ + for d in $(FIND_SOURCE_PATH) ; do \ + f="$${d}/$(1)" ; \ + [[ -f $${f} ]] && echo `cd $${d} && pwd`/$(1) && exit 0 ; \ + done ; \ + echo "") + +###################################################################### +# ARCH, PLATFORM +###################################################################### + +NATIVE_ARCH = $(shell gcc -dumpmachine | sed -e 's/\([a-zA-Z_0-9]*\)-.*/\1/') + +# Find all platforms.mk that we can, including those from build-root +$(foreach d,$(FULL_SOURCE_PATH), \ + $(eval -include $(d)/platforms.mk)) + +# Platform should be defined somewhere by specifying $($(PLATFORM)_arch) +ARCH = $(strip $($(PLATFORM)_arch)) +ifeq ($(ARCH),) + $(error "Unknown platform `$(PLATFORM)'") +endif + +# map e.g. ppc7450 -> ppc +BASIC_ARCH = \ + ${shell case '$(ARCH)' in \ + (native) echo $(NATIVE_ARCH) ;; \ + (i*86*) echo i386 ;; \ + (ppc*|powerpc*) echo ppc ;; \ + (*) echo '$(ARCH)' ;; \ + esac } + +# x86_64 can be either 32/64. set BIACH=32 to get 32 bit libraries. +BIARCH = 64 + +x86_64_libdir = $(BIARCH) +native_libdir = $($(NATIVE_ARCH)_libdir) + +# lib or lib64 depending +arch_lib_dir = lib$($(BASIC_ARCH)_libdir) + +# OS to configure for. configure --host will be set to $(ARCH)-$(OS) +# Allow per-platform overrides + +OS = $(strip $($(PLATFORM)_os)) +ifeq ($(OS),) + OS = mu-linux +endif + +spu_target = spu +native_target = + +is_native = $(if $(ARCH:native=),,true) +not_native = $(if $(ARCH:native=),true,) + +ARCH_TARGET_tmp = $(call ifdef_fn,$(ARCH)_target,$(ARCH)-$(OS)) +TARGET = $(call ifdef_fn,$(PLATFORM)_target,$(ARCH_TARGET_tmp)) +TARGET_PREFIX = $(if $(not_native),$(TARGET)-,) + +# CPU microarchitecture detection. 
+# Either set <platform>_march in build-data/platforms/<platform>.mk, +# or detect and use the build-host instruction set + +MARCH = $(strip $($(PLATFORM)_march)) +ifeq ($(MARCH),) + ifneq ($(wildcard $(TOOL_INSTALL_DIR)/bin/$(TARGET)-gcc),) + TARGET_GCC = $(TOOL_INSTALL_DIR)/bin/$(TARGET)-gcc + else ifneq ($(wildcard $(MU_BUILD_ROOT_DIR)/tools/bin/$(TARGET)-gcc),) + TARGET_GCC = $(MU_BUILD_ROOT_DIR)/tools/bin/$(TARGET)-gcc + endif + ifneq ($(TARGET_GCC),) + MARCH = $(shell $(TARGET_GCC) -Q --help=target -march=native | grep march | sed -e 's/.*march=[[:space:]]*//') + else + MARCH = native + endif +else + ifeq ($(MARCH),nehalem) + override MARCH = corei7 + else ifeq ($(MARCH),westmere) + override MARCH = corei7 + else ifeq ($(MARCH),sandybridge) + override MARCH = corei7-avx + else ifeq ($(MARCH),ivybridge) + override MARCH = core-avx-i + else ifeq ($(MARCH),haswell) + override MARCH = core-avx2 + endif +endif +export MARCH + +MTUNE = $(strip $($(PLATFORM)_mtune)) +ifeq ($(MTUNE),) + MTUNE = generic +endif + +###################################################################### +# Generic build stuff +###################################################################### + +# The package we are currently working on +PACKAGE = $* + +# Build/install tags. This lets you have different CFLAGS/CPPFLAGS/LDFLAGS +# for e.g. debug versus optimized compiles. Each tag has its own set of build/install +# areas. +TAG = +TAG_PREFIX = $(if $(TAG),$(TAG)-) + +# yes you need the space +tag_var_with_added_space_fn = $(if $($(TAG)_TAG_$(1)),$($(TAG)_TAG_$(1)) ) + +# TAG=debug for debugging +debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ + -fstack-protector-all -fPIC +debug_TAG_LDFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ + -fstack-protector-all -fPIC + +BUILD_PREFIX_package = build-$(TAG_PREFIX) +BUILD_PREFIX_tool = build-tool-$(TAG_PREFIX) +INSTALL_PREFIX = install-$(TAG_PREFIX) +IMAGES_PREFIX = images-$(TAG_PREFIX) + +# Whether we are building a tool or not +tool_or_package_fn = $(if $(is_build_tool),tool,package) + +# Directory where packages are built & installed +BUILD_DIR = $(MU_BUILD_ROOT_DIR)/$(BUILD_PREFIX_$(call tool_or_package_fn))$(ARCH) + +## BURT +# we will deprecate INSTALL_DIR shortly for DFLT_INSTALL_DIR +INSTALL_DIR = $(MU_BUILD_ROOT_DIR)/$(INSTALL_PREFIX)$(ARCH) +# DFLT_INSTALL_DIR used in platforms.mk for $(PLATFORM)_DESTDIR_BASE +DFLT_INSTALL_DIR := $(MU_BUILD_ROOT_DIR)/$(INSTALL_PREFIX)$(ARCH) +## BURT + +PLATFORM_IMAGE_DIR = $(MU_BUILD_ROOT_DIR)/$(IMAGES_PREFIX)$(PLATFORM) + +# $(call VAR,DEFAULT) +override_var_with_default_fn = $(if $($(1)),$($(1)),$(2)) + +# $(call if_directory_exists_fn,D1,D2) returns D1 if it exists else D2 +define if_directory_exists_fn +$(shell if test -d $(1); then echo $(1); else echo $(2); fi) +endef + +# $(call if_file_exists_fn,F1,F2) returns F1 if it exists else F2 +define if_file_exists_fn +$(shell if test -f $(1); then echo $(1); else echo $(2); fi) +endef + +# Default VAR, package specified override of default PACKAGE_VAR +package_var_fn = $(call override_var_with_default_fn,$(1)_$(2),$(1)) + +package_build_dir_fn = $(call package_var_fn,$(1),build_dir) + +package_install_dir_fn = \ + $(if $(is_build_tool),$(TOOL_INSTALL_DIR),$(INSTALL_DIR)/$(call package_build_dir_fn,$(1))) + +PACKAGE_BUILD_DIR = \ + $(BUILD_DIR)/$(call package_build_dir_fn,$(PACKAGE)) +PACKAGE_INSTALL_DIR = \ + $(call package_install_dir_fn,$(PACKAGE)) + +# Tools (gcc, binutils, glibc...) 
are installed here +TOOL_INSTALL_DIR = $(MU_BUILD_ROOT_DIR)/tools + +# Target specific tools go here e.g. mu-build/tools/ppc-mu-linux +TARGET_TOOL_INSTALL_DIR = $(TOOL_INSTALL_DIR)/$(TARGET) + +# Set BUILD_DEBUG to vx or x enable shell command tracing. +BUILD_DEBUG = + +# Message from build system itself (as opposed to make or shell commands) +build_msg_fn = echo "@@@@ $(1) @@@@" + +# Allow CCACHE_DIR to be overridden, e.g. in .../build-root/build-config.mk +ifeq ($(CCACHE_DIR),) + CCACHE_DIR=$(MU_BUILD_ROOT_DIR)/.ccache +endif + +# Always prefer our own tools to those installed on system. +# Note: ccache-bin must be before tool bin. +BUILD_ENV = \ + export CCACHE_DIR=$(CCACHE_DIR) ; \ + export PATH=$(TOOL_INSTALL_DIR)/ccache-bin:$(TOOL_INSTALL_DIR)/bin:$${PATH} ; \ + export PATH="`echo $${PATH} | sed -e s/[.]://`" ; \ + $(if $(not_native),export CONFIG_SITE=$(MU_BUILD_ROOT_DIR)/config.site ;,) \ + export LD_LIBRARY_PATH=$(TOOL_INSTALL_DIR)/lib64:$(TOOL_INSTALL_DIR)/lib ; \ + set -eu$(BUILD_DEBUG) ; \ + set -o pipefail + +###################################################################### +# Package build generic definitions +###################################################################### + +package_dir_fn = \ + $(call find_build_data_dir_for_package_fn,$(1))/packages + +package_mk_fn = $(call package_dir_fn,$(1))/$(1).mk + +### BURT + +#next version +#pkgPhaseDependMacro = $(foreach x,configure build install, \ + $(eval $(1)_$(x)_depend := $($(1)_depend:%=%-$(x)))) +#version equivalent to original code +pkgPhaseDependMacro = $(eval $(1)_configure_depend := $($(1)_depend:%=%-install)) + +### BURT + +# Pick up built-root/pre-package-include.mk for all source directories +$(foreach d,$(SOURCE_PATH_BUILD_ROOT_DIRS), \ + $(eval -include $(d)/pre-package-include.mk)) + +$(foreach d,$(addsuffix /packages,$(FIND_SOURCE_PATH)), \ + $(eval -include $(d)/*.mk) \ + $(eval ALL_PACKAGES += $(patsubst $(d)/%.mk,%,$(wildcard $(d)/*.mk))) \ +) + +# Pick up built-root/post-package-include.mk for all source directories +$(foreach d,$(SOURCE_PATH_BUILD_ROOT_DIRS), \ + $(eval -include $(d)/post-package-include.mk)) + +# Linux specific native build tools +NATIVE_TOOLS_LINUX = \ + e2fsimage \ + e2fsprogs \ + fakeroot \ + jffs2 \ + mkimage \ + zlib \ + xz \ + squashfs + +IS_LINUX = $(if $(findstring no,$($(PLATFORM)_uses_linux)),no,yes) + +NATIVE_TOOLS_$(IS_LINUX) += $(NATIVE_TOOLS_LINUX) + +# only build glibc for linux installs +CROSS_TOOLS_$(IS_LINUX) += glibc gcc + +# must be first for bootstrapping +NATIVE_TOOLS = findutils make + +# basic tools needed for build system +NATIVE_TOOLS += git automake autoconf libtool texinfo bison flex tar + +# needed to compile gcc +NATIVE_TOOLS += mpfr gmp mpc + +# Tool to sign binaries +NATIVE_TOOLS += sign + +# ccache +NATIVE_TOOLS += ccache + +# Tools needed on native host to build for platform +NATIVE_TOOLS += $(call ifdef_fn,$(PLATFORM)_native_tools,) + +# Tools for cross-compiling from native -> ARCH +CROSS_TOOLS = binutils gcc-bootstrap gdb + +# Tools needed on native host to build for platform +CROSS_TOOLS += $(call ifdef_fn,$(PLATFORM)_cross_tools,) + +NATIVE_TOOLS += $(NATIVE_TOOLS_yes) +CROSS_TOOLS += $(CROSS_TOOLS_yes) + +timestamp_name_fn = .mu_build_$(1)_timestamp +CONFIGURE_TIMESTAMP = $(call timestamp_name_fn,configure) +BUILD_TIMESTAMP = $(call timestamp_name_fn,build) +INSTALL_TIMESTAMP = $(call timestamp_name_fn,install) + +TIMESTAMP_DIR = $(PACKAGE_BUILD_DIR) + +find_newer_files_fn = \ + "`for i in $(2) ; do \ + [[ -f $$i && $$i -nt $(1) ]] && 
echo "$$i" && exit 0; \ + done ; \ + exit 0;`" + +find_filter = -not -name '*~' +find_filter += -and -not -path '*/.git*' +find_filter += -and -not -path '*/.svn*' +find_filter += -and -not -path '*/.CVS*' +find_filter += -and -not -path '*/manual/*' +find_filter += -and -not -path '*/autom4te.cache/*' +find_filter += -and -not -path '*/doc/all-cfg.texi' +find_filter += -and -not -path '*/.mu_build_*' + +find_newer_filtered_fn = \ + (! -f $(1) \ + || -n $(call find_newer_files_fn,$(1),$(3)) \ + || -n "`find -H $(2) \ + -type f \ + -and -newer $(1) \ + -and \( $(4) \) \ + -print -quit`") + +find_newer_fn = \ + $(call find_newer_filtered_fn,$(1),$(2),$(3),$(find_filter)) + +###################################################################### +# Package dependencies +###################################################################### + +# This must come before %-configure, %-build, %-install pattern rules +# or else dependencies will not work. + +package_dependencies_fn = \ + $(patsubst %-install, %, \ + $(filter %-install,$($(1)_configure_depend))) + +PACKAGE_DEPENDENCIES = $(call package_dependencies_fn,$(PACKAGE)) + +# package specific configure, build, install dependencies +add_package_dependency_fn = \ + $(if $($(1)_$(2)_depend), \ + $(eval $(1)-$(2) : $($(1)_$(2)_depend))) + +$(foreach p,$(ALL_PACKAGES), \ + $(call add_package_dependency_fn,$(p),configure) \ + $(call add_package_dependency_fn,$(p),build) \ + $(call add_package_dependency_fn,$(p),install)) + +TARGETS_RESPECTING_DEPENDENCIES = image_install wipe diff push-all pull-all find-source + +# carry over packages dependencies to image install, wipe, pull-all, push-all +$(foreach p,$(ALL_PACKAGES), \ + $(if $($(p)_configure_depend), \ + $(foreach s,$(TARGETS_RESPECTING_DEPENDENCIES), \ + $(eval $(p)-$(s): \ + $(addsuffix -$(s), $(call package_dependencies_fn,$(p))))))) + +# recursively resolve dependencies +resolve_dependencies2_fn = $(strip \ + $(eval __added = $(filter-out $(4), \ + $(call uniq_fn, \ + $(foreach l,$(3), \ + $(call ifdef3_fn,$(l)$(1),,$(call $(2),$($(l)$(1)))) \ + )))) \ + $(eval __known = $(call uniq_fn,$(4) $(3) $(__added))) \ + $(if $(__added), \ + $(call resolve_dependencies2_fn,$(1),$(2),$(__added),$(__known)), \ + $(__known)) \ +) + +resolve_dependencies_null_fn = $(1) + +resolve_dependencies_fn = $(call resolve_dependencies2_fn,$(1),resolve_dependencies_null_fn,$(2)) + +###################################################################### +# Package configure +###################################################################### + +# x86_64 can be either 32/64. set BIACH=32 to get 32 bit libraries. +BIARCH = 64 + +x86_64_libdir = $(BIARCH) +native_libdir = $($(NATIVE_ARCH)_libdir) + +# lib or lib64 depending +arch_lib_dir = lib$($(BASIC_ARCH)_libdir) + +# find dynamic linker as absolute path +TOOL_INSTALL_LIB_DIR=$(TOOL_INSTALL_DIR)/$(TARGET)/$(arch_lib_dir) +DYNAMIC_LINKER=${shell cd $(TOOL_INSTALL_LIB_DIR); echo ld*.so.*} + +# Pad dynamic linker & rpath so elftool will never have to change ELF section sizes. +# Yes, this is a kludge. 
+lots_of_slashes_to_pad_names = "/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////" + +# When PLATFORM != native we *always* use our own versions of GLIBC and dynamic linker +# Allow per-platform overrides +CROSS_LDFLAGS = $(strip $($(PLATFORM)_cross_ldflags)) +ifeq ($(CROSS_LDFLAGS),) + CROSS_LDFLAGS = \ + -Wl,--dynamic-linker=$(lots_of_slashes_to_pad_names)$(TOOL_INSTALL_LIB_DIR)/$(DYNAMIC_LINKER) \ + -Wl,-rpath -Wl,$(lots_of_slashes_to_pad_names)$(TOOL_INSTALL_LIB_DIR) +endif + +cross_ldflags = $(if $(is_native)$(is_build_tool),,$(CROSS_LDFLAGS) ) + +# $(call installed_libs_fn,PACKAGE) +# Return install library directory for given package. +# Some packages (e.g. openssl) don't install under lib64; instead they use lib +define installed_lib_fn +$(call if_directory_exists_fn, + $(call package_install_dir_fn,$(1))/$(arch_lib_dir), + $(call package_install_dir_fn,$(1))/lib) +endef + +# Set -L and rpath to point to dependent libraries previously built by us. +installed_libs_fn = \ + $(foreach i,$(1), \ + -L$(call installed_lib_fn,$(i)) \ + -Wl,-rpath -Wl,$(call installed_lib_fn,$(i))) + +# As above for include files +installed_include_fn = $(call package_install_dir_fn,$(1))/include + +installed_includes_fn = $(foreach i,$(1),-I$(call installed_include_fn,$(i))) + +# By default package CPPFLAGS (to set include path -I) and LDFLAGS (to set link path -L) +# point at dependent install directories. +DEFAULT_CPPFLAGS = $(call installed_includes_fn, $(PACKAGE_DEPENDENCIES)) +DEFAULT_LDFLAGS = $(call installed_libs_fn, $(PACKAGE_DEPENDENCIES)) + +configure_var_fn = \ + $(call tag_var_with_added_space_fn,$(1))$(call override_var_with_default_fn,$(PACKAGE)_$(1),$(DEFAULT_$(1))) +configure_ldflags_fn = \ + $(cross_ldflags)$(call configure_var_fn,LDFLAGS) + +# Allow packages to override CPPFLAGS, CFLAGS, and LDFLAGS +CONFIGURE_ENV = \ + $(if $(call configure_var_fn,CPPFLAGS), \ + CPPFLAGS="$(CPPFLAGS) $(call configure_var_fn,CPPFLAGS)") \ + $(if $(call configure_var_fn,CFLAGS), \ + CFLAGS="$(CFLAGS) $(call configure_var_fn,CFLAGS)") \ + $(if $(call configure_var_fn,CCASFLAGS), \ + CCASFLAGS="$(CCASFLAGS) $(call configure_var_fn,CCASFLAGS)") \ + $(if $(call configure_ldflags_fn), \ + LDFLAGS="$(LDFLAGS) $(call configure_ldflags_fn)") \ + $(if $($(PACKAGE)_configure_env),$($(PACKAGE)_configure_env)) + +### BURT +# only partially used now (used in a few .mk files) +ifeq ($(is_build_tool),yes) +prefix = $(PACKAGE_INSTALL_DIR) +libdir = $(PACKAGE_INSTALL_DIR)/$(arch_lib_dir) +libexecdir = $(PACKAGE_INSTALL_DIR)/usr/libexec +DESTDIR = / +else +# Eventually simplify this with no per package DESTDIR or prefix +ppdMacro = $(if $(PER_PACKAGE_DESTDIR),$(call package_build_dir_fn,$(1))) +pppMacro = $(if $(PER_PACKAGE_PREFIX),$(call package_build_dir_fn,$(1))) +prefixMacro = $($(PLATFORM)_PREFIX_BASE)/$(pppMacro) +prefix = $(call prefixMacro,$(PACKAGE)) +libdir = $($(PLATFORM)_LIBDIR) +libexecdir = $($(PLATFORM)_LIBEXECDIR) +destdirMacro = $($(PLATFORM)_DESTDIR_BASE)$(ppdMacro) +DESTDIR = $(call destdirMacro,$(PACKAGE)) +endif +### BURT +### dbarach +image_extra_dependencies = $($(PLATFORM)_image_extra_dependencies) +### dbarach + +configure_package_gnu = \ + s=$(call find_source_fn,$(PACKAGE_SOURCE)) ; \ + if [ ! 
-f $$s/configure ] ; then \ + autoreconf -i -f $$s ; \ + fi ; \ + cd $(PACKAGE_BUILD_DIR) ; \ + env $(CONFIGURE_ENV) \ + $$s/configure \ + $(if $($(PACKAGE)_configure_host_and_target), \ + $($(PACKAGE)_configure_host_and_target), \ + $(if $(not_native),--host=$(TARGET),)) \ + $(if $($(PACKAGE)_configure_prefix), \ + $($(PACKAGE)_configure_prefix), \ + --libdir=$(PACKAGE_INSTALL_DIR)/$(arch_lib_dir) \ + --prefix=$(PACKAGE_INSTALL_DIR)) \ + $($(PACKAGE)_configure_args) \ + $($(PACKAGE)_configure_args_$(PLATFORM)) + +configure_package = \ + $(call build_msg_fn,Configuring $(PACKAGE) in $(PACKAGE_BUILD_DIR)) ; \ + mkdir -p $(PACKAGE_BUILD_DIR) ; \ + $(if $($(PACKAGE)_configure), \ + $($(PACKAGE)_configure), \ + $(configure_package_gnu)) + +# Tools (e.g. gcc, binutils, gdb) required a platform to build for +check_platform = \ + is_tool="$(is_build_tool)" ; \ + is_cross_package="$(findstring $(PACKAGE),$(CROSS_TOOLS))" ; \ + is_arch_native="$(if $(subst native,,$(ARCH)),,yes)" ; \ + if [ "$${is_tool}" == "yes" \ + -a "$${is_cross_package}" != "" \ + -a "$${is_arch_native}" != "" ]; then \ + $(call build_msg_fn,You must specify PLATFORM for building tools) ; \ + exit 1 ; \ + fi ; \ + : check that platform gcc can be found ; \ + target_gcc=gcc ; \ + if [ "$${is_arch_native}" != "yes" ] ; then \ + target_gcc=$(TARGET)-gcc ; \ + fi ; \ + if [ "$${is_tool}" != "yes" \ + -a "$${is_arch_native}" != "yes" \ + -a ! -x "`which 2> /dev/null $${target_gcc}`" ] ; then \ + $(call build_msg_fn, \ + No cross-compiler found for platform $(PLATFORM) target $(TARGET); \ + try make PLATFORM=$(PLATFORM) install-tools) ; \ + exit 1 ; \ + fi + +configure_check_timestamp = \ + @$(BUILD_ENV) ; \ + $(check_platform) ; \ + mkdir -p $(PACKAGE_BUILD_DIR) ; \ + mkdir -p $(PACKAGE_INSTALL_DIR) ; \ + conf="$(TIMESTAMP_DIR)/$(CONFIGURE_TIMESTAMP)" ; \ + dirs="$(call package_mk_fn,$(PACKAGE)) \ + $(wildcard $(call find_source_fn,$(PACKAGE_SOURCE))/configure) \ + $(MU_BUILD_ROOT_DIR)/config.site" ; \ + if [[ $(call find_newer_fn, $${conf}, $${dirs}, $?) ]]; then \ + $(configure_package) ; \ + touch $${conf} ; \ + else \ + $(call build_msg_fn,Configuring $(PACKAGE): nothing to do) ; \ + fi + +.PHONY: %-configure +%-configure: %-find-source + $(configure_check_timestamp) + +###################################################################### +# Package build +###################################################################### + +linux_n_cpus = `grep '^processor' /proc/cpuinfo | wc -l` + +MAKE_PARALLEL_JOBS = \ + -j $(shell \ + if [ -f /proc/cpuinfo ] ; then \ + expr 2 '*' $(linux_n_cpus) ; \ + else \ + echo 1 ; \ + fi) + +MAKE_PARALLEL_FLAGS = $(if $($(PACKAGE)_make_parallel_fails),,$(MAKE_PARALLEL_JOBS)) + +# Make command shorthand for packages & tools. +PACKAGE_MAKE = \ + $(MAKE) \ + -C $(PACKAGE_BUILD_DIR) \ + $($(PACKAGE)_make_args) \ + $(MAKE_PARALLEL_FLAGS) + +build_package = \ + $(call build_msg_fn,Building $* in $(PACKAGE_BUILD_DIR)) ; \ + mkdir -p $(PACKAGE_BUILD_DIR) ; \ + cd $(PACKAGE_BUILD_DIR) ; \ + $(if $($(PACKAGE)_build), \ + $($(PACKAGE)_build), \ + $(PACKAGE_MAKE)) + +build_check_timestamp = \ + @$(BUILD_ENV) ; \ + comp="$(TIMESTAMP_DIR)/$(BUILD_TIMESTAMP)" ; \ + conf="$(TIMESTAMP_DIR)/$(CONFIGURE_TIMESTAMP)" ; \ + dirs="$(call find_source_fn,$(PACKAGE_SOURCE)) \ + $($(PACKAGE)_build_timestamp_depends) \ + $(if $(is_build_tool),,$(addprefix $(INSTALL_DIR)/,$(PACKAGE_DEPENDENCIES)))" ; \ + if [[ $${conf} -nt $${comp} \ + || $(call find_newer_fn, $${comp}, $${dirs}, $?) 
]]; then \ + $(build_package) ; \ + touch $${comp} ; \ + else \ + $(call build_msg_fn,Building $(PACKAGE): nothing to do) ; \ + fi + +.PHONY: %-build +%-build: %-configure + $(build_check_timestamp) + +.PHONY: %-rebuild +%-rebuild: %-wipe %-build + @ : + +###################################################################### +# Package install +###################################################################### + +install_package = \ + : by default, for non-tools, remove any previously installed bits ; \ + $(if $(is_build_tool)$($(PACKAGE)_keep_instdir), \ + true, \ + rm -rf $(PACKAGE_INSTALL_DIR)); \ + mkdir -p $(PACKAGE_INSTALL_DIR) ; \ + $(if $($(PACKAGE)_pre_install),$($(PACKAGE)_pre_install),true); \ + $(if $($(PACKAGE)_install), \ + $($(PACKAGE)_install), \ + $(PACKAGE_MAKE) \ + $($(PACKAGE)_install_args) \ + install) ; \ + $(if $($(PACKAGE)_post_install),$($(PACKAGE)_post_install),true) + +install_check_timestamp = \ + @$(BUILD_ENV) ; \ + inst=$(TIMESTAMP_DIR)/$(INSTALL_TIMESTAMP) ; \ + dirs="$(PACKAGE_BUILD_DIR) \ + $($(PACKAGE)_install_dependencies)" ; \ + if [[ $(call find_newer_fn, $${inst}, $${dirs}, $?) ]]; then \ + $(call build_msg_fn,Installing $(PACKAGE)) ; \ + $(install_package) ; \ + touch $${inst} ; \ + else \ + $(call build_msg_fn,Installing $(PACKAGE): nothing to do) ; \ + fi + +.PHONY: %-install +%-install: %-build + $(install_check_timestamp) + +###################################################################### +# Source code managment +###################################################################### + +GIT = git + +# Maps package name to source directory root. +# Multiple packages may use a single source tree. +# For example, gcc-bootstrap package shares gcc source. +PACKAGE_SOURCE = $(if $($(PACKAGE)_source),$($(PACKAGE)_source),$(PACKAGE)) + +# Use git to download source if directory is not found +find_source_for_package = \ + @$(BUILD_ENV) ; \ + $(call build_msg_fn,Arch for platform '$(PLATFORM)' is $(ARCH)) ; \ + $(call build_msg_fn,Finding source for $(PACKAGE)) ; \ + s="$(call find_source_fn,$(PACKAGE_SOURCE))" ; \ + [[ -z "$${s}" ]] \ + && $(call build_msg_fn,Package $(PACKAGE) not found with path $(SOURCE_PATH)) \ + && exit 1; \ + mk="$(call find_build_data_dir_for_package_fn,$(PACKAGE_SOURCE))/packages/$(PACKAGE).mk"; \ + $(call build_msg_fn,Makefile fragment found in $${mk}) ; \ + if [ ! -d "$${s}" ] ; then \ + d=`dirname $${mk}` ; \ + i=`cd $${d}/.. && ($(GIT) config remote.origin.url || \ + awk '/URL/ { print $$2; }' .git/remotes/origin)`; \ + g=`dirname $${i}` ; \ + $(call build_msg_fn,Fetching source: $(GIT) clone $${g}/$(PACKAGE_SOURCE) $$s) ; \ + if ! 
$(GIT) clone $${g}/$(PACKAGE_SOURCE) $$s; then \ + $(call build_msg_fn,No source for $(PACKAGE) in $${g}); \ + exit 1; \ + fi ; \ + $(call build_msg_fn,Fix file dates in $${g}/$(PACKAGE_SOURCE)) ; \ + (cd $${s} ; $(MU_BUILD_ROOT_DIR)/autowank --touch) ; \ + fi ; \ + s=`cd $${s} && pwd` ; \ + $(call build_msg_fn,Source found in $${s}) + +.PHONY: %-find-source +%-find-source: + $(find_source_for_package) + +.PHONY: %-push %-pull %-push-all %-pull-all +%-push %-pull %-push-all %-pull-all: + @$(BUILD_ENV) ; \ + push_or_pull=$(patsubst %-all,%,$(subst $(PACKAGE)-,,$@)) ; \ + $(call build_msg_fn,Git $${push_or_pull} source for $(PACKAGE)) ; \ + s=$(call find_source_fn,$(PACKAGE_SOURCE)) ; \ + if [ "x$$s" = "x" ]; then \ + $(call build_msg_fn,No source for $(PACKAGE)) ; \ + exit 1; \ + fi ; \ + cd $$s && $(GIT) $${push_or_pull} + +# Pull all packages for platform +.PHONY: pull-all +pull-all: + @$(BUILD_ENV) ; \ + $(call build_msg_fn,Git pull build system) ; \ + for d in $(MU_BUILD_ROOT_DIR) \ + $(SOURCE_PATH_BUILD_ROOT_DIRS) \ + $(SOURCE_PATH_BUILD_DATA_DIRS); do \ + $(call build_msg_fn,Git pull $${d}) ; \ + pushd $${d} >& /dev/null && $(GIT) pull && popd >& /dev/null ; \ + done ; \ + $(call build_msg_fn,Git pull build tools) ; \ + $(call tool_make_target_fn,pull-all) ; \ + $(call build_msg_fn,Git pull packages for platform $(PLATFORM)) ; \ + make PLATFORM=$(PLATFORM) $(patsubst %,%-pull-all,$(ROOT_PACKAGES)) + +.PHONY: %-diff +%-diff: + @$(BUILD_ENV) ; \ + d=$(call find_source_fn,$(PACKAGE_SOURCE)) ; \ + $(call build_msg_fn,Git diff $(PACKAGE)) ; \ + if [ -d $${d}/.git ] ; then \ + cd $${d} && $(GIT) --no-pager diff 2>/dev/null; \ + else \ + $(call build_msg_fn, $(PACKAGE) not a git directory) ; \ + fi + + + +# generate diffs for everything in source path +.PHONY: diff-all +diff-all: + @$(BUILD_ENV) ; \ + $(call build_msg_fn,Generate diffs) ; \ + for r in $(ABSOLUTE_SOURCE_PATH); do \ + for d in $${r}/* ; do \ + if [ -d $${d} ] ; then \ + $(call build_msg_fn,Git diff $${d}) ; \ + if [ -d $${d}/.git ] ; then \ + cd $${d} && $(GIT) --no-pager diff 2>/dev/null; \ + else \ + $(call build_msg_fn, $${d} not a git directory) ; \ + fi ; \ + fi ; \ + done ; \ + done + +###################################################################### +# System images +###################################################################### + +IMAGE_DIR = $(MU_BUILD_ROOT_DIR)/image-$(PLATFORM) + +# Reports shared libraries in given directory +find_shared_libs_fn = \ + find $(1) \ + -maxdepth 1 \ + -regex '.*/lib[a-z0-9_]+\+?\+?.so' \ + -o -regex '.*/lib[a-z0-9_]+-[0-9.]+\+?\+?.so' \ + -o -regex '.*/lib[a-z0-9_]+\+?\+?.so.[0-9.]+' + +# By default pick up files from binary directories and /etc. +# Also include shared libraries. 
+DEFAULT_IMAGE_INCLUDE = \ + for d in bin sbin libexec \ + usr/bin usr/sbin usr/libexec \ + etc; do \ + [[ -d $$d ]] && echo $$d; \ + done ; \ + [[ -d $(arch_lib_dir) ]] \ + && $(call find_shared_libs_fn,$(arch_lib_dir)) + +# Define any shell functions needed by install scripts +image_install_functions = \ + $(foreach p,$(ALL_PACKAGES), \ + $(if $($(p)_image_install_functions), \ + $($(p)_image_install_functions))) + +# Should always be over-written by temp dir in %-root-image rule +IMAGE_INSTALL_DIR = $(error you need to set IMAGE_INSTALL_DIR) + +image_install_fn = \ + @$(BUILD_ENV) ; \ + $(call build_msg_fn,Image-install $(1) for platform $(PLATFORM)) ; \ + inst_dir=$(IMAGE_INSTALL_DIR) ; \ + mkdir -p $${inst_dir} ; \ + cd $(2) ; \ + : select files to include in image ; \ + image_include_files=" \ + `$(call ifdef_fn,$(1)_image_include,$(DEFAULT_IMAGE_INCLUDE)) ; \ + echo "" ; \ + exit 0 ; `"; \ + : select files regexps to exclude from image ; \ + image_exclude_files="" ; \ + if [ ! -z "$($(1)_image_exclude)" ] ; then \ + image_exclude_files="${image_exclude_files} \ + $(patsubst %,--exclude=%,$($(1)_image_exclude))" ; \ + fi ; \ + [[ -z "$${image_include_files}" || $${image_include_files} == " " ]] \ + || tar cf - $${image_include_files} $${image_exclude_files} \ + | tar xf - -C $${inst_dir} ; \ + : copy files from copyimg directories on source path if present ; \ + for build_data_dir in $(SOURCE_PATH_BUILD_DATA_DIRS) ; do \ + d="$${build_data_dir}/packages/$(1).copyimg" ; \ + if [ -d "$${d}" ] ; then \ + env $($(PLATFORM)_copyimg_env) \ + $(MU_BUILD_ROOT_DIR)/copyimg $${d} $${inst_dir} ; \ + fi ; \ + done ; \ + : run package dependent install script ; \ + $(if $($(1)_image_install), \ + $(image_install_functions) \ + cd $${inst_dir} ; \ + $($(1)_image_install)) + +.PHONY: %-image_install +%-image_install: %-install + $(call image_install_fn,$(PACKAGE),$(PACKAGE_INSTALL_DIR)) + +basic_system_image_include = \ + $(call ifdef_fn,$(PLATFORM)_basic_system_image_include, \ + echo bin/ldd ; \ + echo $(arch_lib_dir)/ld*.so* ; \ + $(call find_shared_libs_fn, $(arch_lib_dir))) + +basic_system_image_install = \ + mkdir -p bin lib mnt proc root sbin sys tmp etc ; \ + mkdir -p usr usr/{bin,sbin} usr/lib ; \ + mkdir -p var var/{lib,lock,log,run,tmp} ; \ + mkdir -p var/lock/subsys var/lib/urandom + +.PHONY: basic_system-image_install +basic_system-image_install: # linuxrc-install + $(if $(not_native), \ + $(call image_install_fn,basic_system,$(TARGET_TOOL_INSTALL_DIR)),) + +ROOT_PACKAGES = $(if $($(PLATFORM)_root_packages),$($(PLATFORM)_root_packages),$(default_root_packages)) + +.PHONY: install-packages +install-packages: $(patsubst %,%-find-source,$(ROOT_PACKAGES)) + @$(BUILD_ENV) ; \ + set -eu$(BUILD_DEBUG) ; \ + d=$(MU_BUILD_ROOT_DIR)/packages-$(PLATFORM) ; \ + rm -rf $${d} ; \ + mkdir -p $${d}; \ + $(MAKE) -C $(MU_BUILD_ROOT_DIR) IMAGE_INSTALL_DIR=$${d} \ + $(patsubst %,%-image_install, \ + basic_system \ + $(ROOT_PACKAGES)) || exit 1; \ + $(call build_msg_fn, Relocating ELF executables to run in $${d}) ; \ + find $${d} -type f \ + -exec elftool quiet in '{}' out '{}' \ + set-interpreter \ + $${d}/$(arch_lib_dir)/$(DYNAMIC_LINKER) \ + set-rpath $${d}/$(arch_lib_dir):$${d}/lib ';' ; \ + : strip symbols from files ; \ + if [ $${strip_symbols:-no} = 'yes' ] ; then \ + $(call build_msg_fn, Stripping symbols from files) ; \ + find $${d} -type f \ + -exec \ + $(TARGET_PREFIX)strip \ + --strip-unneeded '{}' ';' \ + >& /dev/null ; \ + else \ + $(call build_msg_fn, NOT stripping symbols) ; \ + fi + +# 
readonly root squashfs image +# Note: $(call build_msg_fn) does not seem to work inside of fakeroot so we use echo +.PHONY: ro-image +$(PLATFORM_IMAGE_DIR)/ro.img ro-image: $(patsubst %,%-find-source,$(ROOT_PACKAGES)) + @$(BUILD_ENV) ; \ + d=$(PLATFORM_IMAGE_DIR) ; \ + mkdir -p $$d; \ + ro_image=$$d/ro.img ; \ + rm -f $${ro_image} ; \ + tmp_dir="`mktemp -d $$d/ro-image-XXXXXX`" ; \ + chmod 0755 $${tmp_dir} ; \ + cd $${tmp_dir} ; \ + trap "rm -rf $${tmp_dir}" err ; \ + fakeroot /bin/bash -c "{ \ + set -eu$(BUILD_DEBUG) ; \ + $(MAKE) -C $(MU_BUILD_ROOT_DIR) IMAGE_INSTALL_DIR=$${tmp_dir} \ + $(patsubst %,%-image_install, \ + basic_system \ + $(ROOT_PACKAGES)) ; \ + : make dev directory ; \ + $(linuxrc_makedev) ; \ + echo @@@@ Relocating ELF executables to run in / @@@@ ; \ + find $${d} -type f \ + -exec elftool quiet in '{}' out '{}' \ + set-interpreter \ + /$(arch_lib_dir)/$(DYNAMIC_LINKER) \ + set-rpath /$(arch_lib_dir):/lib ';' ; \ + : strip symbols from files ; \ + if [ '$${strip_symbols:-yes}' = 'yes' ] ; then \ + echo @@@@ Stripping symbols from files @@@@ ; \ + find $${tmp_dir} -type f \ + -exec \ + $(TARGET_PREFIX)strip \ + --strip-unneeded '{}' ';' \ + >& /dev/null ; \ + else \ + echo @@@@ NOT stripping symbols @@@@ ; \ + fi ; \ + if [ $${sign_executables:-yes} = 'yes' \ + -a -n "$($(PLATFORM)_public_key)" ] ; then \ + echo @@@@ Signing executables @@@@ ; \ + find $${tmp_dir} -type f \ + | xargs sign $($(PLATFORM)_public_key) \ + $($(PLATFORM)_private_key_passphrase) ; \ + fi ; \ + : make read-only file system ; \ + mksquashfs \ + $${tmp_dir} $${ro_image} \ + -no-exports -no-progress -no-recovery ; \ + }" ; \ + : cleanup tmp directory ; \ + rm -rf $${tmp_dir} + +MKFS_JFFS2_BYTE_ORDER_x86_64 = -l +MKFS_JFFS2_BYTE_ORDER_i686 = -l +MKFS_JFFS2_BYTE_ORDER_ppc = -b +MKFS_JFFS2_BYTE_ORDER_mips = -b +MKFS_JFFS2_BYTE_ORDER_native = $(MKFS_JFFS2_BYTE_ORDER_$(NATIVE_ARCH)) + +MKFS_JFFS2_SECTOR_SIZE_IN_KBYTES = \ + $(call ifdef_fn,$(PLATFORM)_jffs2_sector_size_in_kbytes,256) + +mkfs_fn_jffs2 = mkfs.jffs2 \ + --eraseblock=$(MKFS_JFFS2_SECTOR_SIZE_IN_KBYTES)KiB \ + --root=$(1) --output=$(2) \ + $(MKFS_JFFS2_BYTE_ORDER_$(BASIC_ARCH)) + +# As things stand the actual initrd size parameter +# is set in .../open-repo/build-data/packages/linuxrc.mk. 
+EXT2_RW_IMAGE_SIZE=notused + +mkfs_fn_ext2 = \ + e2fsimage -d $(1) -f $(2) -s $(EXT2_RW_IMAGE_SIZE) + +RW_IMAGE_TYPE=jffs2 + +make_rw_image_fn = \ + $(call mkfs_fn_$(RW_IMAGE_TYPE),$(1),$(2)) + +rw_image_embed_ro_image_fn = \ + mkdir -p proc initrd images ro rw union ; \ + cp $(PLATFORM_IMAGE_DIR)/$(1) images/$(1) ; \ + md5sum images/$(1) > images/$(1).md5 ; \ + echo Built by $(LOGNAME) at `date` > images/$(1).stamp ; \ + mkdir -p changes/$(1) + +# make sure RW_IMAGE_TYPE is a type we know how to build +.PHONY: rw-image-check-type +rw-image-check-type: + @$(BUILD_ENV) ; \ + if [ -z "$(make_rw_image_fn)" ] ; then \ + $(call build_msg_fn,Unknown read/write fs image type; \ + try RW_IMAGE_TYPE=ext2 or RW_IMAGE_TYPE=jffs2) ; \ + exit 1; \ + fi + +# read write image +.PHONY: rw-image +rw-image: rw-image-check-type ro-image + @$(BUILD_ENV) ; \ + d=$(PLATFORM_IMAGE_DIR) ; \ + mkdir -p $$d ; \ + rw_image="$$d/rw.$(RW_IMAGE_TYPE)" ; \ + ro_image="ro.img" ; \ + rm -f $$rw_image ; \ + tmp_dir="`mktemp -d $$d/rw-image-XXXXXX`" ; \ + chmod 0755 $${tmp_dir} ; \ + cd $${tmp_dir} ; \ + trap "rm -rf $${tmp_dir}" err ; \ + fakeroot /bin/bash -c "{ \ + set -eu$(BUILD_DEBUG) ; \ + $(linuxrc_makedev) ; \ + $(call rw_image_embed_ro_image_fn,$${ro_image}) ; \ + $(call make_rw_image_fn,$${tmp_dir},$${rw_image}) ; \ + }" ; \ + : cleanup tmp directory ; \ + rm -rf $${tmp_dir} + +images: linuxrc-install linux-install $(image_extra_dependencies) rw-image + @$(BUILD_ENV) ; \ + d=$(PLATFORM_IMAGE_DIR) ; \ + cd $(BUILD_DIR)/linux-$(PLATFORM) ; \ + i="" ; \ + [[ -z $$i && -f bzImage ]] && i=bzImage ; \ + [[ -z $$i && -f zImage ]] && i=zImage ; \ + [[ -z $$i && -f linux ]] && i=linux ; \ + [[ -z $$i && -f vmlinux ]] && i=vmlinux ; \ + [[ -z $$i ]] \ + && $(call build_msg_fn,no linux image to install \ + in $(BUILD_DIR)/linux-$(PLATFORM)) \ + && exit 1 ; \ + cp $$i $$d + +###################################################################### +# Tool chain build/install +###################################################################### + +.PHONY: ccache-install +ccache-install: + $(MAKE) -C $(MU_BUILD_ROOT_DIR) ccache-build + mkdir -p $(TOOL_INSTALL_DIR)/ccache-bin + ln -sf $(MU_BUILD_ROOT_DIR)/build-tool-native/ccache/ccache \ + $(TOOL_INSTALL_DIR)/ccache-bin/$(TARGET_PREFIX)gcc + +TOOL_MAKE = $(MAKE) is_build_tool=yes + +tool_make_target_fn = \ + $(if $(strip $(NATIVE_TOOLS)), \ + $(TOOL_MAKE) $(patsubst %,%-$(1),$(NATIVE_TOOLS)) ARCH=native || exit 1 ;) \ + $(TOOL_MAKE) $(patsubst %,%-$(1),$(CROSS_TOOLS)) + +.PHONY: install-tools +install-tools: + $(call tool_make_target_fn,install) + +.PHONY: bootstrap-tools +bootstrap-tools: + $(TOOL_MAKE) make-install findutils-install git-install \ + automake-install autoconf-install libtool-install fakeroot-install + + +###################################################################### +# Clean +###################################################################### + +package_clean_script = \ + @$(call build_msg_fn, Cleaning $* in $(PACKAGE_INSTALL_DIR)) ; \ + $(BUILD_ENV) ; \ + $(if $(is_build_tool),,rm -rf $(PACKAGE_INSTALL_DIR) ;) \ + rm -rf $(TIMESTAMP_DIR)/$(call timestamp_name_fn,*) ; \ + $(if $($(PACKAGE)_clean), \ + $($(PACKAGE)_clean), \ + $(PACKAGE_MAKE) clean) + +.PHONY: %-clean +%-clean: + $(package_clean_script) + +# Wipe e.g. remove build and install directories for packages. 
+package_wipe_script = \ + @message=$(if $(is_build_tool),"Wiping build $(PACKAGE)","Wiping build/install $(PACKAGE)") ; \ + $(call build_msg_fn,$$message) ; \ + $(BUILD_ENV) ; \ + rm -rf $(if $(is_build_tool),$(PACKAGE_BUILD_DIR),$(PACKAGE_INSTALL_DIR) $(PACKAGE_BUILD_DIR)) + +.PHONY: %-wipe +%-wipe: + $(package_wipe_script) + +# Wipe entire build/install area for TAG and PLATFORM +.PHONY: wipe-all +wipe-all: + @$(call build_msg_fn, Wiping $(BUILD_DIR) $(INSTALL_DIR)) ; \ + $(BUILD_ENV) ; \ + rm -rf $(BUILD_DIR) $(INSTALL_DIR) + +# Clean everything +distclean: + rm -rf $(MU_BUILD_ROOT_DIR)/$(BUILD_PREFIX_package)*/ + rm -rf $(MU_BUILD_ROOT_DIR)/$(BUILD_PREFIX_tool)* + rm -rf $(MU_BUILD_ROOT_DIR)/$(INSTALL_PREFIX)* + rm -rf $(MU_BUILD_ROOT_DIR)/$(IMAGES_PREFIX)* + rm -rf $(TOOL_INSTALL_DIR) + rm -rf $(MU_BUILD_ROOT_DIR)/*.deb + rm -rf $(MU_BUILD_ROOT_DIR)/*.rpm + rm -rf $(MU_BUILD_ROOT_DIR)/*.changes + rm -rf $(MU_BUILD_ROOT_DIR)/python + if [ -e /usr/bin/dh ];then (cd $(MU_BUILD_ROOT_DIR)/deb/;debian/rules clean); fi + rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/*.install + rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/*.dkms + rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/changelog diff --git a/vpp/build-root/autowank b/vpp/build-root/autowank new file mode 100755 index 00000000..21a440df --- /dev/null +++ b/vpp/build-root/autowank @@ -0,0 +1,307 @@ +#!/bin/bash + +# Copyright (c) 2015 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This aptly-named script verifies and fixes time ordering +# problems with Makefile.{am,in} aclocal.m4 configure* files. + +set -eu +#set -vx + +touch="" +commit="" +comma_v="" +aclocal="" +optimize="" + +# The old autowank scheme used "touch <foo> ; sleep 1" +# to ensure differentiable, ordered timestamps. Worked, but +# took N seconds given N files to fix. We have an example +# which wastes multiple minutes given the old scheme. +# +# This version generates a sequence of timestamps +# starting an hour ago. That gives us +# lots to play with, in case some obnoxious program feels the need +# to complain about timestamps in the future. 
+ +# If we're in UTC+N land, generate UTC+(N+1) +# If we're in UTC-N land, generate UTC-(N-1) + +my_tz=`date +%z` +sign=`echo $my_tz | sed -n -e 's/^\(.\{1\}\).*$/\1/p'` +t=`echo $my_tz | sed -n -e 's/^\(.\{1\}\)//p'` +tz_hour=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'` +tz_hour=`echo $tz_hour | sed 's/^0//'` + +if [ $sign = "-" ] ; then + sign="+" + let tz_hour=$tz_hour+1 + if [[ $tz_hour -ge "24" ]] ; then + tz_hour=0 + fi +else + sign="-" + let tz_hour=$tz_hour-1 || true + if [[ $tz_hour -lt "0" ]] ; then + tz_hour=23 + fi +fi + +# Timestamp, an hour ago: +ts_begin=`TZ=UTC${sign}${tz_hour} date +%Y%m%d%H%M.%S` + +# break into constituent parts +year=`echo $ts_begin | sed -n -e 's/^\(.\{4\}\).*$/\1/p'` +t=`echo $ts_begin | sed -n -e 's/^\(.\{4\}\)//p'` +month=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'` +t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'` +day=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'` +t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'` +hour=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'` +t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'` +min=`echo $t | sed -n -e 's/^\(.\{2\}\).*$/\1/p'` +t=`echo $t | sed -n -e 's/^\(.\{2\}\)//p'` +sec=`echo $t | sed -n -e 's/\.//p'` + +# How many days in the current month? +# Good until someone changes the calendar rules +days_in_current_month() { + if [[ $month -eq 9 || $month -eq 4 \ + || $month -eq 6 || $month -eq 11 ]] ; then + return 30; + fi + if [[ $month -eq 2 ]] ; then + let t=($year/400)*400 + if [[ $t -eq $year ]] ; then + return 29; + fi + let t=($year/100)*100 + if [[ $t -eq $year ]] ; then + return 28; + fi + let t=($year/4)*4 + if [[ $t -eq $year ]] ; then + return 29; + fi + return 28; + fi + return 31; +} + +# The next timestamp to issue via touch +# A real hemorrhoid because bash isnt easily convinced +# that 08 is a decimal number +next_ts() { + sec=`echo $sec | sed 's/^0//'` + let sec=$sec+1 + if [[ "$sec" -lt "60" ]] ; then + if [[ "$sec" -lt "10" ]] ; then + sec=0$sec + fi + return 0; + fi + sec="00" + min=`echo $min | sed 's/^0//'` + let min=$min+1 + if [[ "$min" -lt "60" ]] ; then + if [[ "$min" -lt "10" ]] ; then + min=0$min + fi + return 0; + fi + min="00" + hour=`echo $hour | sed 's/^0//'` + let hour=$hour+1 + if [[ "$hour" -lt "24" ]] ; then + if [[ "$hour" -lt "10" ]] ; then + hour=0$hour + fi + return 0; + fi + hour="00" + days_in_current_month + days_in_month=$? + if [[ "$day" -lt "$days_in_month" ]] ; then + day=`echo $day | sed 's/^0//'` + let day=$day+1 + if [[ "$day" -lt "10" ]] ; then + day=0$day + fi + return 0; + fi + day="01" + month=`echo $month | sed 's/^0//'` + let month=$month+1 + if [[ "$month" -lt "13" ]] ; then + if [[ "$month" -lt "10" ]] ; then + month=0$month + fi + return 0; + fi + month="01" + let year=$year+1 + return 0; +} + +while [ $# != 0 ] ; do + case "$1" in + (--commav) comma_v=",v" ;; + (--touch) touch=yes ;; + (--aclocal) aclocal=yes ;; + (--nooptimize) optimize="" ;; + (--commit=*) commit="$1" ;; + (*) echo "$0: usage [--touch|--commit|]" > /dev/stderr + exit 17 ;; + esac + shift +done + +if [ "${aclocal}" != "" ] ; then + if [ -f aclocal.m4 ] ; then + echo touching aclocal.m4 + sleep 1 + touch aclocal.m4 + else + echo aclocal.m4 not found + fi +fi + +if [ "${comma_v}" != "" -a "${commit}" != "" ] ; then + echo "No, you may NOT molest ,v files directly. Go away." > /dev/stderr + exit 1 +fi + +function touchme () +{ + local victim="${1}" + shift + local touchmebaby="" + local sein="is" + local newer="no" + local older="no" + + if [ ! 
-r "$victim" ] ; then + return + fi + + while [ $# != 0 ] ; do + if [ "${1}" -nt "${victim}" ] ; then + newer="yes" + fi + if [ "${1}" -ot "${victim}" ] ; then + older="yes" + fi + if [ "${newer}" = "no" -a "${older}" = "no" ] ; then + newer="yes" + fi + + if [ "${newer}" = "yes" ] ; then + if [ "${touchmebaby}" = "" ] ; then + touchmebaby="${1}" + else + sein="are" + touchmebaby="${touchmebaby} ${1}" + fi + fi + shift + done + if [ -n "${touchmebaby}" ] ; then + echo "*** ${touchmebaby} ${sein} newer than ${victim} " + if [ -n "${touch}" ] ; then + # + # This is the old version, in case something backfires... + if [ "${optimize}" != "yes" ] ; then + echo "Fixing " ;touch -c "$victim" ; sleep 1 + else + echo "Fixing " + # echo touch -c -t $year$month$day$hour$min.$sec "$victim" + touch -c -t $year$month$day$hour$min.$sec "$victim" + next_ts + fi + fi + fi +} + +makefileins="`/usr/bin/find . -name Attic -prune -o -name Makefile.in${comma_v}`" + +# aclocal.m4 depends on ***/Makefile.am, configure.ac, acinclude.m4, *.m4 crap +touchme aclocal.m4${comma_v} \ + `/usr/bin/find . -name Attic -prune -o -name Makefile.am${comma_v}` \ + "configure.in${comma_v}" "configure.ac${comma_v}" \ + "acinclude.m4${comma_v}" + +# Makefile.in must be newer than Makefile.am +for f in $makefileins ; do + d="`dirname ${f}`" + touchme "${d}/Makefile.in${comma_v}" "${d}/Makefile.am${comma_v}" +done + +# Makefile.in depends on aclocal.m4 +for f in $makefileins ; do + d="`dirname $f`" + touchme "${d}/Makefile.in${comma_v}" "aclocal.m4${comma_v}" +done + +# config.in must be newer than aclocal.m4 and configure.ac +if [ -f "config.in${comma_v}" ] ; then + touchme "config.in${comma_v}" "aclocal.m4${comma_v}" \ + "configure.ac${comma_v}" \ + "configure.in${comma_v}" +fi + +# config.h.in (or More Thoroughly Modern configh.in) +# must be newer than aclocal.m4 and (obsolete) acconfig.h +for c_h_in in config.h.in configh.in ; do + if [ -f "${c_h_in}${comma_v}" ]; then + touchme "${c_h_in}${comma_v}" "aclocal.m4${comma_v}" "acconfig.h${comma_v}" + #>>>> WTF? Why? This is nonsensical + ## ***/Makefile.in must be newer than config.h.in + #for f in $makefileins ; do + # touchme "$f" "${c_h_in}${comma_v}" + #done + fi +done + +# configure must be newer than everything +# touchme configure $makefileins -- why would this be needed? +touchme "configure${comma_v}" "aclocal.m4${comma_v}" "acconfig.h${comma_v}" \ + "config.in${comma_v}" "config.h.in${comma_v}" \ + "configh.in${comma_v}" + +if [ -n "${commit}" ] ; then + commit="${commit:9}" # strip off "--commit=" + # First ***/Makefile.am, + # configure.in, configure.ac, + # ***/*.m4 + # acconfig.h + cvs commit -m "${commit}" \ + `for f in ${makefileins} ; do \ + [ -f "$${f%.in}.am" ] && echo "$${f%.in}.am" ; \ + done` \ + `[ -f configure.in ] && echo configure.in` \ + `[ -f configure.ac ] && echo configure.ac` \ + `[ -f acconfig.h ] && echo acconfig.h` \ + `/usr/bin/find . 
-name '*.m4' -mindepth 2` + + # Next aclocal.m4 + [ -f "aclocal.m4" ] && cvs commit -m "${commit}" aclocal.m4 + + # Next config.in, config.h.in, configh.in + [ -f "config.in" ] && cvs commit -m "${commit}" config.in + [ -f "config.h.in" ] && cvs commit -m "${commit}" config.h.in + [ -f "configh.in" ] && cvs commit -m "${commit}" configh.in + + # Last ***/Makefile.in, configure + cvs commit -m "${commit}" ${makefileins} configure +fi diff --git a/vpp/build-root/bootstrap.sh b/vpp/build-root/bootstrap.sh new file mode 100755 index 00000000..f83734fd --- /dev/null +++ b/vpp/build-root/bootstrap.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +build_root=`pwd` +cd ../ +wsroot=`pwd` + +# PATH +if [[ ! $CCACHE_DIR ]];then + CCACHE_DIR="$build_root/.ccache" +fi +ADD_TO_PATH="$build_root/tools/ccache-bin:$build_root/tools/bin" + +# Construct build-config.mk +cd $build_root +echo SOURCE_PATH = $wsroot > build-config.mk +echo +echo Saving PATH settings in `pwd`/path_setup +echo Source this file later, as needed +cat >path_setup <<EOF +#!/bin/bash + +export PATH=$ADD_TO_PATH:$PATH +export CCACHE_DIR=$CCACHE_DIR +EOF + +# regenerate tools/ccache-bin +rm -rf tools/ccache-bin +mkdir -p tools/ccache-bin + +if [ ! -f /usr/bin/ccache ] ; then + echo Please install ccache AYEC and re-run this script +fi + +cd tools/ccache-bin +for c in gcc g++ + do + if [ -f /usr/bin/ccache ] ; then + ln -s /usr/bin/ccache $c + else + ln -s /usr/bin/gcc + fi +done + +cd $wsroot + +cd $build_root +echo Compile native tools +for tool in vppapigen +do + make V=0 is_build_tool=yes $tool-install +done + diff --git a/vpp/build-root/build-config.mk.README b/vpp/build-root/build-config.mk.README new file mode 100644 index 00000000..8b8c87dd --- /dev/null +++ b/vpp/build-root/build-config.mk.README @@ -0,0 +1,3 @@ +# To specify directories to find sources, build/packages/*.mk +# and build/platforms.mk +# SOURCE_PATH = PATH1 PATH2 ... diff --git a/vpp/build-root/config.site b/vpp/build-root/config.site new file mode 100644 index 00000000..9f607086 --- /dev/null +++ b/vpp/build-root/config.site @@ -0,0 +1,105 @@ +###################################################################### +# glibc +###################################################################### + +# glibc needs this for cross compiling +libc_cv_forced_unwind=yes +libc_cv_c_cleanup=yes +libc_cv_ssp=no +# fixes gentoo build; not sure why? +libc_cv_uname_release="" +libc_cv_uname_version="" +ac_cv_header_cpuid_h=yes +###################################################################### +# bash +###################################################################### + +# Bash configure.in uses this to work around an autoconf 2.53 bug +ac_cv_func_setvbuf_reversed=no +ac_cv_rl_version=5.1 +bash_cv_termcap_lib=libncurses + +# These mostly come from debian bash-2.05b changes +# They are needed to make a functioning bash. Without these +# settings gdbserver exiting would cause the invoking bash to +# exit also. 
+bash_cv_have_mbstate_t=yes +bash_cv_dup2_broken=no +bash_cv_pgrp_pipe=no +bash_cv_sys_siglist=yes +bash_cv_under_sys_siglist=yes +bash_cv_opendir_not_robust=no +bash_cv_printf_declared=yes +bash_cv_ulimit_maxfds=yes +bash_cv_getenv_redef=yes +bash_cv_getcwd_calls_popen=no +bash_cv_func_strcoll_broken=no +bash_cv_must_reinstall_sighandlers=no +bash_cv_type_quad_t=yes +bash_cv_func_sigsetjmp=present +bash_cv_job_control_missing=present +bash_cv_sys_named_pipes=present +bash_cv_type_rlimit=long +bash_cv_printf_a_format=yes +bash_cv_unusable_rtsigs=no + +###################################################################### +# Apache +###################################################################### +ac_cv_func_setpgrp_void=yes +apr_cv_process_shared_works=yes +apr_cv_tcp_nodelay_with_cork=yes +ap_void_ptr_lt_long=no + +case ${host_cpu} in +x86_64 | alpha) + ac_cv_sizeof_ssize_t=8 + ac_cv_sizeof_size_t=8 + ac_cv_sizeof_pid_t=4 + ;; +*) + ac_cv_sizeof_ssize_t=4 + ac_cv_sizeof_size_t=4 + ac_cv_sizeof_pid_t=4 + ;; +esac + +###################################################################### +# gdb +###################################################################### +gdb_cv_func_ptrace_args=int,int,long,long + +###################################################################### +# dpkg +###################################################################### +dpkg_cv_va_copy=yes + +###################################################################### +# coreutils +###################################################################### +ac_cv_search_clock_gettime=no +gl_cv_fs_space=yes + +###################################################################### +# valgrind +###################################################################### +ac_cv_file__proc_self_fd=yes +ac_cv_file__proc_self_exe=yes +ac_cv_file__proc_self_maps=yes + +###################################################################### +# tcpdump +###################################################################### +ac_cv_linux_vers=2 +ac_cv_func_pcap_findalldevs=no + +###################################################################### +# flex +###################################################################### +ac_cv_func_malloc_0_nonnull=yes +ac_cv_func_realloc_0_nonnull=yes + +###################################################################### +# tar +###################################################################### +tar_gl_cv_func_mknod_works=yes diff --git a/vpp/build-root/copyimg b/vpp/build-root/copyimg new file mode 100755 index 00000000..e5e3fc26 --- /dev/null +++ b/vpp/build-root/copyimg @@ -0,0 +1,83 @@ +#!/bin/sh + +if [ $# -lt 2 ]; then + cat - <<EOF +$0 FROM-DIR TO-DIR ENVIRONMENT + +Copies files from one directory to another with possible +transformations. + +Files named FILE.spp will be transformed via the spp preprocessor +subject to environment definitions. Source FILE.copyimgspp results in +destination file FILE in the corresponding destination directory. + +Files named FILE.copyimgsh are run as shell scripts in (i.e. via chdir) +the corresponding destination directory (and not copied). + +First regular files are copied. Then transformations are preformed. +Finally, shell scripts are run. +EOF + exit 1; +fi + +FROM_DIR=$1 +TO_DIR=$2 + +FILTER=" -and -not -name '*~'"; +FILTER="${FILTER} -and -not -name '.*~'"; +FILTER="$FILTER -and -not -path '*/.git*'"; +FILTER="$FILTER -and -not -path '*/.svn*'"; +FILTER="$FILTER -and -not -path '*/.CVS*'"; + +FROM_FILES=`(cd $FROM_DIR; eval "find . 
-not -type d $FILTER")`; + FROM_DIRS=`(cd $FROM_DIR; eval "find . -type d $FILTER")`; + +COPY_FILES= +SPP_FILES= +SH_FILES= +for f in $FROM_FILES; do + case $f in + *.copyimgspp) SPP_FILES="$SPP_FILES $f" ;; + *.copyimgsh) SH_FILES="$SH_FILES $f" ;; + *) COPY_FILES="$COPY_FILES $f";; + esac +done + +# Make destination directories. +mkdir -p $TO_DIR; +if [ "$FROM_DIRS" != "" ]; then + for d in $FROM_DIRS; do + mkdir -p $TO_DIR/$d; + done +fi + +# Copy files +if [ "$COPY_FILES" != "" ]; then + tar -cf - -C $FROM_DIR $COPY_FILES | tar --preserve-permissions -xf - -C $TO_DIR; +fi + +# Use spp to transform any spp files +if [ "$SPP_FILES" != "" ]; then + for f in $SPP_FILES; do + d=`dirname $f`; + b=`basename $f .copyimgspp`; + mkdir -p $TO_DIR/$d; + t=$TO_DIR/$d/$b; + spp -o $TO_DIR/$d/$b $FROM_DIR/$f || exit 1; + done; +fi + +# Now that all files have been copied/created we run any shell scripts +ABS_FROM_DIR=`(cd $FROM_DIR; pwd)`; +if [ "$SH_FILES" != "" ]; then + # Allow directory to define some functions + if [ -f $FROM_DIR/copyimgsh-functions.sh ]; then + . $FROM_DIR/copyimgsh-functions.sh ; + fi ; + for f in $SH_FILES; do + d=`dirname $f`; + b=`basename $f`; + mkdir -p $TO_DIR/$d; + (cd $TO_DIR/$d; . $ABS_FROM_DIR/$d/$b) || exit 1; + done; +fi; diff --git a/vpp/build-root/deb/debian/.gitignore b/vpp/build-root/deb/debian/.gitignore new file mode 100644 index 00000000..75d8fbbc --- /dev/null +++ b/vpp/build-root/deb/debian/.gitignore @@ -0,0 +1,14 @@ +changelog +files +*debhelper* +*.substvars +*.install +vpp-dpdk-dkms* +vpp/ +vpp-dev/ +vpp-lib/ +vpp-dpdk-dev/ +vpp-dpdk-dkms/ +vpp-dbg/ +vppctl/ +vpp-python-api/ diff --git a/vpp/build-root/deb/debian/README.vpp b/vpp/build-root/deb/debian/README.vpp new file mode 100755 index 00000000..b343c786 --- /dev/null +++ b/vpp/build-root/deb/debian/README.vpp @@ -0,0 +1,56 @@ +Building DEB packages +===================== + +REQUIREMENTS: + You will need a working Internet connection to execute the build, because + the build procedure for the included "dpdk" project attempts to contact the + Internet host "dpdk.org". + +There are three main parts to the process: + a) Stage the source tree so that dpkg-source will recognize its organization + and create a valid DSC source package for you; + b) Ensure that the tools required for building DEB packages are installed; + and + c) Launch the build. + +1) Create, or have on hand, a local clone of the git repository, with no +untracked files or local modifications pending, up-to-date with the branch or +commit reference from which you wish to construct the source release. + +The branch and repository origins will differ based on local conditions. + +Example: +$ git clone -b master ssh://git@example.com:7999/~username/open-vpp + +("-b master" can be omitted since master is the default branch) + +2) Rename the checkout with a version number embedded in its name as is +conventional for code releases. Again, your version number may vary. + +Example: +$ mv open-vpp open-vpp-0.0.0 + +3) Ensure that the dpkg-buildpackage program is installed. + +E.g., + +# apt-get install dpkg-dev + +4) From the PARENT directory of the debian/ directory, run: + +$ cd open-vpp-0.0.0 +$ dpkg-buildpackage -I .git -us -uc + +(The -us and -uc flags omit GPG signatures from the .dsc and .changes files, +respectively. You can add them later, or if you are preparing a signed release +and have the signing key on hand, leave off the flags.) 
+ +5) Get rid of the source directory; you now either have a source package with +which you can re-create it at any time, or there were problems with the build, +and you should go back to your git checkout to fix them. + +$ rm -r open-vpp-0.0.0 + +END + +vim:set ai et sw=4 ts=4 tw=80: diff --git a/vpp/build-root/deb/debian/compat b/vpp/build-root/deb/debian/compat new file mode 100755 index 00000000..ec635144 --- /dev/null +++ b/vpp/build-root/deb/debian/compat @@ -0,0 +1 @@ +9 diff --git a/vpp/build-root/deb/debian/control b/vpp/build-root/deb/debian/control new file mode 100644 index 00000000..643774e3 --- /dev/null +++ b/vpp/build-root/deb/debian/control @@ -0,0 +1,68 @@ +Source: vpp +Section: net +Priority: extra +Maintainer: Cisco OpenVPP Packaging Team <bogus.address@cisco.com> +Build-Depends: debhelper (>= 9), dkms, dh-systemd, chrpath +Standards-Version: 3.9.4 + +Package: vpp +Architecture: any +Depends: vpp-lib (= ${source:Version}), ${shlibs:Depends}, ${misc:Depends}, ${python:Depends} +Description: Vector Packet Processing--executables + This package provides VPP executables: vpp, vpp_api_test, vpp_json_test + vpp - the vector packet engine + vpp_api_test - vector packet engine API test tool + vpp_json_test - vector packet engine JSON test tool + +Package: vpp-dbg +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends} +Description: Vector Packet Processing--debug symbols + +Package: vpp-dev +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends} +Description: Vector Packet Processing--development support + This package contains development support files for the VPP libraries + . + +Package: vpp-dpdk-dev +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends} +Description: Vector Packet Processing--development support + This package contains dpdk header files which match the dpdk version + compiled into the vpp executable + . + +Package: vpp-lib +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends} +Description: Vector Packet Processing--runtime libraries + This package contains the VPP shared libraries, including: + . + vppinfra - foundation library supporting vectors, hashes, bitmaps, pools, and string formatting. + dpdk - DPDK library + svm - vm library + vlib - vector processing library + vlib-api - binary API library + vnet - network stack library + +Package: vpp-plugins +Architecture: any +Depends: vpp (= ${source:Version}), ${shlibs:Depends}, ${misc:Depends} +Description: Vector Packet Processing--runtime plugins + This package contains VPP plugins + . + +Package: vpp-dpdk-dkms +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends} +Description: DPDK 2.1 igb_uio_driver + This package contains Linux kernel modules distributed with DPDK. + +Package: vpp-python-api +Architecture: any +Depends: ${python:Depends}, ${misc:Depends}, vpp (= ${source:Version}) +Description: VPP Python API bindings + This package contains VPP python api bindings + . diff --git a/vpp/build-root/deb/debian/copyright b/vpp/build-root/deb/debian/copyright new file mode 100644 index 00000000..f9775c15 --- /dev/null +++ b/vpp/build-root/deb/debian/copyright @@ -0,0 +1,9 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: optional. +Upstream-Contact: optional. +Source: optional. +Disclaimer: optional. +Comment: optional. +License: Apache-2.0 +Copyright: 2015 Cisco and/or its affiliates and others. 
+ diff --git a/vpp/build-root/deb/debian/rules b/vpp/build-root/deb/debian/rules new file mode 100755 index 00000000..186fa840 --- /dev/null +++ b/vpp/build-root/deb/debian/rules @@ -0,0 +1,31 @@ +#!/usr/bin/make -f +# See debhelper(7) (uncomment to enable) +# output every command that modifies files on the build system. +DH_VERBOSE = 1 + +# see EXAMPLES in dpkg-buildflags(1) and read /usr/share/dpkg/* +DPKG_EXPORT_BUILDFLAGS = 1 +include /usr/share/dpkg/default.mk + +# see FEATURE AREAS in dpkg-buildflags(1) +#export DEB_BUILD_MAINT_OPTIONS = hardening=+all + +# see ENVIRONMENT in dpkg-buildflags(1) +# package maintainers to append CFLAGS +#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic +# package maintainers to append LDFLAGS +#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed + +# main packaging script based on dh7 syntax +%: + dh $@ --with dkms --with systemd,python2 + +override_dh_install: + dh_install --exclude .git + ../scripts/remove-rpath . + +override_dh_strip: + dh_strip --dbg-package=vpp-dbg + +override_dh_dkms: + dh_dkms -pvpp-dpdk-dkms diff --git a/vpp/build-root/deb/debian/source/format b/vpp/build-root/deb/debian/source/format new file mode 100755 index 00000000..89ae9db8 --- /dev/null +++ b/vpp/build-root/deb/debian/source/format @@ -0,0 +1 @@ +3.0 (native) diff --git a/vpp/build-root/deb/debian/vpp-bin.README.Debian b/vpp/build-root/deb/debian/vpp-bin.README.Debian new file mode 100644 index 00000000..19228969 --- /dev/null +++ b/vpp/build-root/deb/debian/vpp-bin.README.Debian @@ -0,0 +1,53 @@ +To run vpp with the debug shell: + +sudo vpp unix interactive + +which will result in a prompt that looks like: + +DBGvpd# + +To give it a spin, we can create a tap interface and try a simple ping +(with trace). + +To create the tap: + +DBGvpd# tap connect foobar +Created tap-0 for Linux tap 'foobar' +DBGvpd# show int + +To assign it an ip address (and 'up' the interface): + +DBGvpd# set int ip address tap-0 192.168.1.1/24 +DBGvpd# set int state tap-0 up + +To turn on packet tracing for the tap interface: +DBGvpd# trace add tapcli-rx 10 + +Now, to set up and try the other end from the unix prompt: +vagrant@vagrant-ubuntu-trusty-64:~$ sudo ip addr add 192.168.1.2/24 dev foobar +vagrant@vagrant-ubuntu-trusty-64:~$ ping -c 3 192.168.1.1 + +To look at the trace, back in the vpp CLI: +DBGvpd# show trace + +And to stop tracing: + +DBGvpd# clear trace + +Other fun things to look at: + +The vlib packet processing graph: +DBGvpd# show vlib graph + +which will produce output like: + + Name Next Previous +ip4-icmp-input error-punt [0] ip4-local + ip4-icmp-echo-request [1] + vpe-icmp4-oam [2] + +To read this, the first column (Name) is the name of the node. +The second column (Next) is the name of the children of that node. +The third column (Previous) is the name of the parents of this node. 
+ +END diff --git a/vpp/build-root/deb/debian/vpp-python-api.postinst b/vpp/build-root/deb/debian/vpp-python-api.postinst new file mode 100644 index 00000000..ca1c856f --- /dev/null +++ b/vpp/build-root/deb/debian/vpp-python-api.postinst @@ -0,0 +1,5 @@ +#!/bin/sh -e + +# after installing python-api files +python2_sitedir=$(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") +easy_install --install-dir=$python2_sitedir -z $python2_sitedir/vpp_papi/vpp_papi-*.egg diff --git a/vpp/build-root/deb/debian/vpp-python-api.prerm b/vpp/build-root/deb/debian/vpp-python-api.prerm new file mode 100644 index 00000000..e6d92df9 --- /dev/null +++ b/vpp/build-root/deb/debian/vpp-python-api.prerm @@ -0,0 +1,8 @@ +#!/bin/sh -e + +# before removing python-api files +python2_sitedir=$(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") +easy_install --install-dir=$python2_sitedir -mxNq vpp_papi + +# the egg has been copied during install +rm $python2_sitedir/vpp_papi-*.egg diff --git a/vpp/build-root/deb/debian/vpp.postinst b/vpp/build-root/deb/debian/vpp.postinst new file mode 100644 index 00000000..78fcac22 --- /dev/null +++ b/vpp/build-root/deb/debian/vpp.postinst @@ -0,0 +1,8 @@ +#!/bin/sh -e + +# try to set the required values now. This may or may not work. +sysctl --system + +#DEBHELPER# + +exit 0 diff --git a/vpp/build-root/deb/debian/vpp.postrm b/vpp/build-root/deb/debian/vpp.postrm new file mode 100644 index 00000000..ac16a459 --- /dev/null +++ b/vpp/build-root/deb/debian/vpp.postrm @@ -0,0 +1,21 @@ +#!/bin/sh -e + +removed= + +# Unbind user-mode PCI drivers +pci_dirs=`find /sys/bus/pci/drivers -type d -name igb_uio -o -name uio_pci_generic -o -name vfio-pci` +for d in $pci_dirs; do + for f in ${d}/*; do + [ -e "${f}/config" ] || continue + echo 1 > ${f}/remove + basename `dirname ${f}` | xargs echo -n "Removing driver"; echo " for PCI ID" `basename ${f}` + removed=y + done +done +if [ -n "${removed}" ]; then + echo "There are changes in PCI drivers, rescaning" + echo 1 > /sys/bus/pci/rescan +else + echo "There weren't PCI devices binded" +fi + diff --git a/vpp/build-root/deb/debian/vpp.preinst b/vpp/build-root/deb/debian/vpp.preinst new file mode 100644 index 00000000..d33cacfc --- /dev/null +++ b/vpp/build-root/deb/debian/vpp.preinst @@ -0,0 +1,4 @@ +#!/bin/sh -e + +# Add the vpp group +groupadd -f -r vpp diff --git a/vpp/build-root/deb/debian/vpp.service b/vpp/build-root/deb/debian/vpp.service new file mode 100644 index 00000000..40549856 --- /dev/null +++ b/vpp/build-root/deb/debian/vpp.service @@ -0,0 +1,14 @@ +[Unit] +Description=vector packet processing engine +After=network.target + +[Service] +Type=simple +ExecStartPre=-/bin/rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api +ExecStartPre=-/sbin/modprobe igb_uio +ExecStart=/usr/bin/vpp -c /etc/vpp/startup.conf +ExecStopPost=/bin/rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/vpp/build-root/deb/debian/vpp.upstart b/vpp/build-root/deb/debian/vpp.upstart new file mode 100644 index 00000000..f5908783 --- /dev/null +++ b/vpp/build-root/deb/debian/vpp.upstart @@ -0,0 +1,21 @@ +description "vector packet processing engine" +author "Cisco Systems, Inc <listname@cisco.com>" + +manual + +respawn + +pre-start script + rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api || true + # should be there via dkms, but if not, start anyway + modprobe igb_uio || true +end script + + +script + exec vpp -c 
/etc/vpp/startup.conf +end script + +post-stop script + rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api || true +end script diff --git a/vpp/build-root/emacs-lisp/README b/vpp/build-root/emacs-lisp/README new file mode 100644 index 00000000..483e1c39 --- /dev/null +++ b/vpp/build-root/emacs-lisp/README @@ -0,0 +1,86 @@ +How to construct a complete plugin using the emacs skeletons + +0. Install open-vpp, including the development package. + +1. Load emacs skeletons + + M-x find-file all-skel.el + M-x eval-buffer + +2. Pick a single-word, lower-case name for your plugin. For example: macswap. +Hereafter, we'll refer to the selected name as <plugin-name>. + +3. Generate the entire plugin: + + M-x make-plugin + Plugin-name: <plugin-name> + +Or, generate each file individually: + +3. Create the required directories, e.g. under .../vpp + + $ mkdir -p <plugin-name>-plugin/<plugin-name> + +4. Create <plugin-name>-plugin/{configure.ac,Makefile.am} + + M-x find-file <plugin-name>-plugin/configure.ac + M-x plugin-configure-skel + + M-x find-file <plugin-name>-plugin/Makefile.am + M-x skel-plugin-makefile + +5. Create the api skeleton + M-x find-file <plugin-name>-plugin/<plugin-name>/<plugin-name>.api + M-x skel-plugin-api + +6. Create the api message enumeration header file + M-x find-file <plugin-name>-plugin/<plugin-name>/<plugin-name>_msg_enum.h + M-x skel-plugin-msg-enum + +7. Create the "all-api" header file + M-x find-file <plugin-name>-plugin/<plugin-name>/<plugin-name>_all_api_h.h + M-x skel-plugin-all-apih + +8. Create the main data structure definition header file + M-x find-file <plugin-name>-plugin/<plugin-name>/<plugin-name>.h + M-x skel-plugin-h + +9. Create the plugin main C file + M-x find-file <plugin-name>-plugin/<plugin-name>/<plugin-name>.c + M-x skel-plugin-main + +10. Create the vpp-api-test plugin main C file + M-x find-file <plugin-name>-plugin/<plugin-name>/<plugin-name>_test.c + M-x skel-plugin-test + +11. Create the data plane packet processing node + M-x find-file <plugin-name>-plugin/<plugin-name>/node.c + M-x skel-plugin-node + +12. Process autotools input files + + $ cd <plugin-name>-plugin + $ autoreconf -i -f + +13. Build the plugin skeleton + + $ mkdir build + $ cd build + $ ../configure --with-plugin-toolkit + $ make + $ sudo make install + + + + + + + + + + + + + + + diff --git a/vpp/build-root/emacs-lisp/all-skel.el b/vpp/build-root/emacs-lisp/all-skel.el new file mode 100644 index 00000000..2bf15b24 --- /dev/null +++ b/vpp/build-root/emacs-lisp/all-skel.el @@ -0,0 +1,41 @@ +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. 
+ +;; plugin all-in-1 program + +(load-file "./plugin.el") + +;; list of clib / vlib / vnet / vpp skeleton files + +(load-file "./cli-cmd-skel.el") +(load-file "./config-skel.el") +(load-file "./dual-loop-skel.el") +(load-file "./periodic-skel.el") +(load-file "./pipe-skel.el") +(load-file "./plugin-all-apih-skel.el") +(load-file "./plugin-api-skel.el") +(load-file "./plugin-configure-skel.el") +(load-file "./plugin-h-skel.el") +(load-file "./plugin-main-skel.el") +(load-file "./plugin-makefile-skel.el") +(load-file "./plugin-msg-enum-skel.el") +(load-file "./plugin-node-skel.el") +(load-file "./plugin-test-skel.el") +(load-file "./tunnel-c-skel.el") +(load-file "./tunnel-decap-skel.el") +(load-file "./tunnel-encap-skel.el") +(load-file "./tunnel-h-skel.el") +(load-file "./elog-4-int-skel.el") +(load-file "./elog-4-int-track-skel.el") +(load-file "./elog-enum-skel.el") +(load-file "./elog-one-datum-skel.el") diff --git a/vpp/build-root/emacs-lisp/cli-cmd-skel.el b/vpp/build-root/emacs-lisp/cli-cmd-skel.el new file mode 100644 index 00000000..cf8658c4 --- /dev/null +++ b/vpp/build-root/emacs-lisp/cli-cmd-skel.el @@ -0,0 +1,32 @@ +;;; cli-cmd-skel.el - cli command skeleton + +(require 'skeleton) + +(define-skeleton skel-cli-cmd +"Insert a CLI command " +nil +'(setq cmd-name (skeleton-read "Command Name: ")) +'(setq path (skeleton-read "Path: ")) + +" +static clib_error_t * +" cmd-name "_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { + if (unformat (input, \"whatever %d\", &whatever)) + ; + else + return clib_error_return (0, \"unknown input `%U'\", + format_unformat_error, input); + } + return 0; +} + +VLIB_CLI_COMMAND (" cmd-name "_command, static) = { + .path = \"" path "\", + .short_help = \"" path "\", + .function = " cmd-name "_command_fn, +}; +") diff --git a/vpp/build-root/emacs-lisp/config-skel.el b/vpp/build-root/emacs-lisp/config-skel.el new file mode 100644 index 00000000..dc2ec380 --- /dev/null +++ b/vpp/build-root/emacs-lisp/config-skel.el @@ -0,0 +1,28 @@ +;;; config-skel.el - config function command skeleton + +(require 'skeleton) + +(define-skeleton skel-config +"Insert a vlib config skeleton " +nil +'(setq cfg-name (skeleton-read "Config Class Name: ")) + +" +static clib_error_t * +" cfg-name "_config (vlib_main_t * vm, unformat_input_t * input) +{ + u32 whatever; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { + if (unformat (input, \"whatever %d\", &whatever)) + ; + else + return clib_error_return (0, \"unknown input `%U'\", + format_unformat_error, input); + } + + return 0; +} + +VLIB_CONFIG_FUNCTION (" cfg-name "_config, \"" cfg-name "\"); +") diff --git a/vpp/build-root/emacs-lisp/dual-loop-skel.el b/vpp/build-root/emacs-lisp/dual-loop-skel.el new file mode 100644 index 00000000..b23d65ac --- /dev/null +++ b/vpp/build-root/emacs-lisp/dual-loop-skel.el @@ -0,0 +1,302 @@ +;;; dual-loop-skel.el - Eliotic dual-loop node skeleton + +(require 'skeleton) + +(define-skeleton skel-dual-loop +"Insert a skeleton dual-loop graph node" +nil +'(setq node-name (skeleton-read "Node Name: ")) +'(setq uc-node-name (upcase node-name)) +" +#include <vlib/vlib.h> +#include <vnet/vnet.h> +#include <vnet/pg/pg.h> + +#include <vnet/ip/ip.h> +#include <vnet/ethernet/ethernet.h> + +#include <vppinfra/hash.h> +#include <vppinfra/error.h> +#include <vppinfra/elog.h> + +typedef struct { + /* convenience */ + vlib_main_t * vlib_main; + vnet_main_t * vnet_main; + 
ethernet_main_t * ethernet_main; +} " node-name "_main_t; + +" node-name "_main_t " node-name "_main; + +vlib_node_registration_t " node-name "_node; + +typedef struct { + u32 next_index; + u32 sw_if_index; +} " node-name "_trace_t; + +/* packet trace format function */ +static u8 * format_" node-name "_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + " node-name "_trace_t * t = va_arg (*args, " node-name "_trace_t *); + + s = format (s, \"" uc-node-name ": sw_if_index %d, next index %d\", + t->sw_if_index, t->next_index); + return s; +} + +vlib_node_registration_t " node-name "_node; + +#define foreach_" node-name "_error \\ +_(SWAPPED, \"Mac swap packets processed\") + +typedef enum { +#define _(sym,str) " uc-node-name "_ERROR_##sym, + foreach_" node-name "_error +#undef _ + " uc-node-name "_N_ERROR, +} " node-name "_error_t; + +static char * " node-name "_error_strings[] = { +#define _(sym,string) string, + foreach_" node-name "_error +#undef _ +}; + +typedef enum { + " uc-node-name "_NEXT_INTERFACE_OUTPUT, + " uc-node-name "_N_NEXT, +} " node-name "_next_t; + +#define foreach_mac_address_offset \\ +_(0) \\ +_(1) \\ +_(2) \\ +_(3) \\ +_(4) \\ +_(5) + +static uword +" node-name "_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + u32 n_left_from, * from, * to_next; + " node-name "_next_t next_index; + u32 pkts_swapped = 0; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 next0 = " uc-node-name "_NEXT_INTERFACE_OUTPUT; + u32 next1 = " uc-node-name "_NEXT_INTERFACE_OUTPUT; + u32 sw_if_index0, sw_if_index1; + u8 tmp0[6], tmp1[6]; + ethernet_header_t *en0, *en1; + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + /* speculatively enqueue b0 and b1 to the current next frame */ + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + from += 2; + to_next += 2; + n_left_from -= 2; + n_left_to_next -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + /* $$$$$ Dual loop: process 2 x packets here $$$$$ */ + ASSERT (b0->current_data == 0); + ASSERT (b1->current_data == 0); + + en0 = vlib_buffer_get_current (b0); + en1 = vlib_buffer_get_current (b1); + + /* This is not the fastest way to swap src + dst mac addresses */ +#define _(a) tmp0[a] = en0->src_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->src_address[a] = en0->dst_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->dst_address[a] = tmp0[a]; + foreach_mac_address_offset; +#undef _ + +#define _(a) tmp1[a] = en1->src_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en1->src_address[a] = en1->dst_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en1->dst_address[a] = tmp1[a]; + foreach_mac_address_offset; +#undef _ + + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; + sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX]; + + /* Send pkt back out the RX interface */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = sw_if_index0; + vnet_buffer(b1)->sw_if_index[VLIB_TX] = sw_if_index1; + + pkts_swapped += 2; + /* $$$$$ End of processing 2 x packets $$$$$ */ + + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if (b0->flags & VLIB_BUFFER_IS_TRACED) + { + " node-name "_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->sw_if_index = sw_if_index0; + t->next_index = next0; + } + if (b1->flags & VLIB_BUFFER_IS_TRACED) + { + " node-name "_trace_t *t = + vlib_add_trace (vm, node, b1, sizeof (*t)); + t->sw_if_index = sw_if_index1; + t->next_index = next1; + } + } + + /* verify speculative enqueues, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0 = " uc-node-name "_NEXT_INTERFACE_OUTPUT; + u32 sw_if_index0; + u8 tmp0[6]; + ethernet_header_t *en0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* $$$$$ Single loop: process 1 packet here $$$$$ */ + + /* + * Direct from the driver, we should be at offset 0 + * aka at &b0->data[0] + */ + ASSERT (b0->current_data == 0); + + en0 = vlib_buffer_get_current (b0); + + /* This is not the fastest way to swap src + dst mac addresses */ +#define _(a) tmp0[a] = en0->src_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->src_address[a] = en0->dst_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->dst_address[a] = tmp0[a]; + foreach_mac_address_offset; +#undef _ + + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; + + /* Send pkt back out the RX interface */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = sw_if_index0; + + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) + && 
(b0->flags & VLIB_BUFFER_IS_TRACED))) + { + " node-name "_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->sw_if_index = sw_if_index0; + t->next_index = next0; + } + + pkts_swapped += 1; + + /* $$$$$ Done processing 1 packet here $$$$$ */ + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, " node-name "_node.index, + " uc-node-name "_ERROR_SWAPPED, pkts_swapped); + return frame->n_vectors; +} + +VLIB_REGISTER_NODE (" node-name "_node) = { + .function = " node-name "_node_fn, + .name = \"" node-name "\", + .vector_size = sizeof (u32), + .format_trace = format_" node-name "_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(" node-name "_error_strings), + .error_strings = " node-name "_error_strings, + + .n_next_nodes = " uc-node-name "_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [" uc-node-name "_NEXT_INTERFACE_OUTPUT] = \"interface-output\", + }, +}; + +clib_error_t *" node-name "_init (vlib_main_t *vm) +{ + " node-name "_main_t *msm = &" node-name "_main; + + /* $$$$$ Initialize " node-name "_main_t structure here. $$$$$ */ + msm->vlib_main = vm; + msm->vnet_main = vnet_get_main(); + msm->ethernet_main = ethernet_get_main(vm); + + return 0; +} + +VLIB_INIT_FUNCTION(" node-name "_init); +") + diff --git a/vpp/build-root/emacs-lisp/elog-4-int-skel.el b/vpp/build-root/emacs-lisp/elog-4-int-skel.el new file mode 100644 index 00000000..fda412a7 --- /dev/null +++ b/vpp/build-root/emacs-lisp/elog-4-int-skel.el @@ -0,0 +1,31 @@ +;;; elog-4-int-skel.el - 4 integer elog skeleton + +(require 'skeleton) + +(define-skeleton skel-elog-4-int +"Insert a skeleton 4-integer event definition" +nil +'(setq function-name (skeleton-read "Function: ")) +'(setq label (skeleton-read "Label: ")) + +" + +/* $$$ May or may not be needed */ +#include <vlib/vlib.h> +#include <vppinfra/elog.h> + +static inline void " function-name " (u32 *data) +{ + ELOG_TYPE_DECLARE(e) = + { + .format = \"" label ": first %d second %d third %d fourth %d\", + .format_args = \"i4i4i4i4\", + }; + struct { u32 data[4];} * ed; + ed = ELOG_DATA (&vlib_global_main.elog_main, e); + ed->data[0] = data[0]; + ed->data[1] = data[1]; + ed->data[2] = data[2]; + ed->data[3] = data[3]; +} +") diff --git a/vpp/build-root/emacs-lisp/elog-4-int-track-skel.el b/vpp/build-root/emacs-lisp/elog-4-int-track-skel.el new file mode 100644 index 00000000..506cc3a2 --- /dev/null +++ b/vpp/build-root/emacs-lisp/elog-4-int-track-skel.el @@ -0,0 +1,34 @@ +;;; elog-4-int-skel.el - 4 integer elog skeleton + +(require 'skeleton) + +(define-skeleton skel-elog-4-int-track +"Insert a skeleton 4-integer-with-track event definition" +nil +'(setq function-name (skeleton-read "Function: ")) +'(setq track-label (skeleton-read "Track Label: ")) +'(setq label (skeleton-read "Label: ")) + +" + +/* $$$ May or may not be needed */ +#include <vlib/vlib.h> +#include <vppinfra/elog.h> + +static inline void " function-name " (u32 *data) +{ + ELOG_TYPE_DECLARE(e) = + { + .format = \"" label ": first %d second %d third %d fourth %d\", + .format_args = \"i4i4i4i4\", + }; + struct { u32 data[4];} * ed; + ELOG_TRACK(" track-label "); + ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, " track-label "); + ed->data[0] = data[0]; + ed->data[1] = data[1]; + ed->data[2] = data[2]; + ed->data[3] = data[3]; +} + +") 
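
The elog skeletons above (skel-elog-4-int and skel-elog-4-int-track) emit a small inline helper that records one fixed-size event in the global event log. A minimal usage sketch follows; the helper name log_counters and the caller record_rx_stats are placeholders standing in for whatever is entered at the skeleton's "Function:" prompt, not identifiers that exist in this tree.

#include <vlib/vlib.h>
#include <vppinfra/elog.h>

/* As produced by skel-elog-4-int; "log_counters" is a placeholder name. */
static inline void log_counters (u32 * data)
{
  ELOG_TYPE_DECLARE (e) =
  {
    .format = "demo: first %d second %d third %d fourth %d",
    .format_args = "i4i4i4i4",
  };
  struct { u32 data[4]; } *ed;
  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->data[0] = data[0];
  ed->data[1] = data[1];
  ed->data[2] = data[2];
  ed->data[3] = data[3];
}

/* Hypothetical call site: pack four counters and log a single event. */
static void record_rx_stats (u32 pkts, u32 bytes, u32 drops, u32 errors)
{
  u32 data[4] = { pkts, bytes, drops, errors };
  log_counters (data);
}
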
diff --git a/vpp/build-root/emacs-lisp/elog-enum-skel.el b/vpp/build-root/emacs-lisp/elog-enum-skel.el new file mode 100644 index 00000000..836ce86a --- /dev/null +++ b/vpp/build-root/emacs-lisp/elog-enum-skel.el @@ -0,0 +1,35 @@ +;;; elog-enum-skel.el - enum elog skeleton + +(require 'skeleton) + +(define-skeleton skel-elog-enum +"Insert a skeleton enum event definition" +nil +'(setq function-name (skeleton-read "Function: ")) +'(setq label (skeleton-read "Label: ")) + +" + +/* $$$ May or may not be needed */ +#include <vlib/vlib.h> +#include <vppinfra/elog.h> + +static inline void " function-name " (u8 which) +{ + ELOG_TYPE_DECLARE (e) = + { + .format = \"" label ": %s\", + .format_args = \"t1\", + .n_enum_strings = 2, + .enum_strings = + { + \"string 1\", + \"string 2\", + }, + }; + struct { u8 which;} * ed; + ed = ELOG_DATA (&vlib_global_main.elog_main, e); + ed->which = which; +} + +") diff --git a/vpp/build-root/emacs-lisp/elog-one-datum-skel.el b/vpp/build-root/emacs-lisp/elog-one-datum-skel.el new file mode 100644 index 00000000..11f83896 --- /dev/null +++ b/vpp/build-root/emacs-lisp/elog-one-datum-skel.el @@ -0,0 +1,28 @@ +;;; elog-one-datum-skel.el - single u32 datum elog skeleton + +(require 'skeleton) + +(define-skeleton skel-elog-one-datum +"Insert a skeleton single datum event definition" +nil +'(setq function-name (skeleton-read "Function: ")) +'(setq label (skeleton-read "Label: ")) + +" + +/* $$$ May or may not be needed */ +#include <vlib/vlib.h> +#include <vppinfra/elog.h> + +static inline void " function-name " (u32 data) +{ + ELOG_TYPE_DECLARE (e) = + { + .format = \"" label ": %d\", + .format_args = \"i4\", + }; + + elog (&vlib_global_main.elog_main, &e, data); +} + +") diff --git a/vpp/build-root/emacs-lisp/fix-coding-style.el b/vpp/build-root/emacs-lisp/fix-coding-style.el new file mode 100755 index 00000000..0bb63483 --- /dev/null +++ b/vpp/build-root/emacs-lisp/fix-coding-style.el @@ -0,0 +1,162 @@ +#!/usr/bin/emacs --script + +;; Insert style boilerplate if it's not already there +;; +;; Breaking the string in half keeps emacs +;; from trying to interpret the local variable +;; settings e.g. when it reads the lisp source code + +(defun insert-style-boilerplate () (interactive) + (save-excursion + (goto-char (point-min)) + (if (eq nil (search-forward "coding-style-patch-verification" + (point-max) t)) + (let ((junk 0)) (goto-char (point-max)) + (insert " +/* + * fd.io coding-style-patch-verification: ON + * + * Local Var" "iables: + * eval: (c-set-style \"gnu\") + * End: + */"))))) + +;; (cons xxx <list>) means insert xxx at the head of <list> +;; Build a sorted list of *INDENT-OFF* lines, by searching +;; backwards. The initial (setq indent-offset-list nil) +;; results in (cdr <last-cell>) nil, which makes it a proper list + +(defun find-indent-offs () (interactive) + (save-excursion + (if (boundp 'indent-offset-list) + (makunbound 'indent-offset-list)) + (setq indent-offset-list nil) + (goto-char (point-max)) + (while (search-backward "*INDENT-OFF*" (point-min) t) + (move-beginning-of-line nil) + (setq indent-offset-list (cons (point) indent-offset-list)) + (previous-line)))) + +;; Insert indent-off ... indent-on brackets around +;; a certain xxx_foreach macro, etc. which "indent" +;; completely screws up. Doesn't handle nesting, of which there +;; are few examples (fortunately). 
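
The comment above describes wrapping foreach-style macros that GNU indent mishandles in *INDENT-OFF* / *INDENT-ON* comment brackets; fix-initializer (defined next) performs that rewrite. A hedged illustration of the intended result is shown below; the pool name my_pool, element elt, and process_one are placeholder identifiers, not code from this tree.

  /* *INDENT-OFF* */
  pool_foreach (elt, my_pool,
  ({
    /* per-element work; identifiers here are illustrative only */
    process_one (elt);
  }));
  /* *INDENT-ON* */
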
+ +(defun fix-initializer (what) (interactive) + (find-indent-offs) + (save-excursion + (goto-char (point-min)) + (while (search-forward-regexp what (point-max) t) + (move-beginning-of-line nil) + (previous-line) + (let ((index 0)(pointval 0)) + (while (and (< pointval (point))(elt indent-offset-list index)) + (setq pointval (elt indent-offset-list index)) + (setq index (1+ index))) + (if (not (eq pointval (point))) + (let ((junk 0)) + (next-line) + (open-line 1) + (c-indent-line-or-region) + (insert "/* *INDENT-OFF* */") + (search-forward "{") + (backward-char) + (forward-sexp) + (move-end-of-line nil) + (newline 1) + (c-indent-line-or-region) + (insert "/* *INDENT-ON* */") + (find-indent-offs)) + (search-forward "*INDENT-ON*")))))) + +(defun fix-pool-foreach () (interactive) + (fix-initializer "pool_foreach *(")) + +(defun fix-pool-foreach-index () (interactive) + (fix-initializer "pool_foreach_index *(")) + +(defun fix-hash-foreach () (interactive) + (fix-initializer "hash_foreach *(")) + +(defun fix-hash-foreach-pair () (interactive) + (fix-initializer "hash_foreach_pair *(")) + +(defun fix-hash-foreach-mem () (interactive) + (fix-initializer "hash_foreach_mem *(")) + +(defun fix-clib-fifo-foreach () (interactive) + (fix-initializer "clib_fifo_foreach *(")) + +(defun fix-clib-bitmap-foreach () (interactive) + (fix-initializer "clib_bitmap_foreach *(")) + +(defun fix-foreach-ip-interface-address () (interactive) + (fix-initializer "foreach_ip_interface_address *(")) + +(defun fix-vlib-register-thread () (interactive) + (fix-initializer "VLIB_REGISTER_THREAD *(")) + +(defun fix-vlib-cli-command () (interactive) + (fix-initializer "VLIB_CLI_COMMAND *(")) + +(defun fix-vlib-register-node () (interactive) + (fix-initializer "VLIB_REGISTER_NODE *(")) + +(defun fix-reply-macro2 () (interactive) + (fix-initializer "REPLY_MACRO2 *(")) + +(defun fix-vnet-device-class () (interactive) + (fix-initializer "VNET_DEVICE_CLASS *(")) + +(defun fix-vnet-hw-interface-class () (interactive) + (fix-initializer "VNET_HW_INTERFACE_CLASS *(")) + +(defun fix-clib-packed () (interactive) + (fix-initializer "CLIB_PACKED *(")) +(defun fix-vl-api-packed () (interactive) + (fix-initializer "VL_API_PACKED *(")) + +;; Driver routine which runs the set of functions +;; defined above, as well as the bottom boilerplate function + +(defun fd-io-styleify () (interactive) + (fix-pool-foreach) + (fix-pool-foreach-index) + (fix-hash-foreach) + (fix-hash-foreach-pair) + (fix-hash-foreach-mem) + (fix-foreach-ip-interface-address) + (fix-clib-fifo-foreach) + (fix-clib-bitmap-foreach) + (fix-vlib-register-thread) + (fix-vlib-cli-command) + (fix-vlib-register-node) + (fix-reply-macro2) + (fix-vnet-device-class) + (fix-vnet-hw-interface-class) + (fix-clib-packed) + (fix-vl-api-packed) + (insert-style-boilerplate) + (if (boundp 'indent-offset-list) + (makunbound 'indent-offset-list))) + +;; When run as a script, this sexp +;; walks the list of files supplied on the command line. +;; +;; (elt argv index) returns nil if you M-x eval-buffer +;; or M-x load-file the file, so we won't accidentally +;; evaluate (save-buffers-kill-emacs)... + +(let ((file-index 0)) + (if (elt argv file-index) + (while (elt argv file-index) + (find-file (elt argv file-index)) + (fd-io-styleify) + (message "Done %s..." 
(elt argv file-index)) + (setq file-index (1+ file-index)))) + (if (> file-index 0) + (let ((junk 0)) + (message "Save and quit...") + (save-buffers-kill-emacs t)))) + + diff --git a/vpp/build-root/emacs-lisp/make-plugin.sh b/vpp/build-root/emacs-lisp/make-plugin.sh new file mode 100755 index 00000000..4985974c --- /dev/null +++ b/vpp/build-root/emacs-lisp/make-plugin.sh @@ -0,0 +1,4 @@ +#!/usr/bin/emacs --script +(load-file "./all-skel.el") +(make-plugin) +(save-some-buffers t) diff --git a/vpp/build-root/emacs-lisp/periodic-skel.el b/vpp/build-root/emacs-lisp/periodic-skel.el new file mode 100644 index 00000000..a8f3ef6d --- /dev/null +++ b/vpp/build-root/emacs-lisp/periodic-skel.el @@ -0,0 +1,86 @@ +;;; pipe-skel.el - pipelined graph node skeleton + +(require 'skeleton) + +(define-skeleton skel-periodic +"Insert a skeleton periodic process node" +nil +'(setq node-name (skeleton-read "Name: ")) +'(setq uc-node-name (upcase node-name)) +'(setq poll-period (skeleton-read "Poll period (f64 seconds, e.g. 10.0): ")) + +" +#define " uc-node-name "_POLL_PERIOD " poll-period " + +static uword +" node-name "_process (vlib_main_t * vm, + vlib_node_runtime_t * rt, + vlib_frame_t * f) +{ + f64 poll_time_remaining; + uword event_type, * event_data = 0; + + poll_time_remaining = " uc-node-name "_POLL_PERIOD; + while (1) { + int i; + + /* + * Sleep until next periodic call due, or until we receive event(s) + */ + poll_time_remaining = + vlib_process_wait_for_event_or_clock (vm, poll_time_remaining); + + event_type = vlib_process_get_events (vm, &event_data); + switch (event_type) { + case ~0: /* no events => timeout */ + break; + + /* + * $$$$ FIXME: add cases / handlers for each event type + */ + case EVENT1: + for (i = 0; i < vec_len (event_data); i++) + handle_event1 (mm, event_data[i]); + break; + + case EVENT2: + for (i = 0; i < vec_len (event_data); i++) + handle_event2 (vm, event_data[i]); + break; + + /* ... and so forth for each event type */ + + default: + /* This should never happen... 
*/ + clib_warning (\"BUG: unhandled event type %d\", event_type); + break; + } + if (event_data) + _vec_len (event_data) = 0; + + /* Timer expired, call periodic function */ + if (vlib_process_suspend_time_is_zero (poll_time_remaining)) { + " node-name "_periodic (vm); + poll_time_remaining = " uc-node-name "_POLL_PERIOD; + } + } + + return 0; +} + +/* + * " node-name " periodic node declaration + */ +static VLIB_REGISTER_NODE (" node-name "_node) = { + .function = " node-name "_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = \"" node-name "-process\", +}; + +/* + * To signal an event: + * + * vlib_process_signal_event (vm, " node-name "_node.index, EVENTn, datum); + * + */ +") diff --git a/vpp/build-root/emacs-lisp/pipe-skel.el b/vpp/build-root/emacs-lisp/pipe-skel.el new file mode 100644 index 00000000..911e0d07 --- /dev/null +++ b/vpp/build-root/emacs-lisp/pipe-skel.el @@ -0,0 +1,132 @@ +;;; pipe-skel.el - pipelined graph node skeleton + +(require 'skeleton) + +(define-skeleton skel-pipeline-node +"Insert a skeleton pipelined graph node" +nil +'(setq node-name (skeleton-read "Node Name: ")) +'(setq uc-node-name (upcase node-name)) +'(setq nstages (skeleton-read "Number of pipeline stages: ")) +" +#include <vlib/vlib.h> +#include <vppinfra/error.h> + +/* + * Dump these counters via the \"show error\" CLI command + * FIXME: Add packet counter / error strings as desired + */ + +#define foreach_" node-name "_error \\ +_(ERROR1, \"sample counter/ error string\") + +static char * " node-name "_error_strings[] = { +#define _(sym,string) string, + foreach_" node-name "_error +#undef _ +}; + +/* + * packet error / counter enumeration + * + * To count and drop a vlib_buffer_t *b: + * + * Set b->error = node->errors[" uc-node-name "_ERROR_xxx]; + * last_stage returns a disposition index bound to \"error-drop\" + * + * To manually increment the specific counter " uc-node-name "_ERROR1 + * + * vlib_node_t *n = vlib_get_node (vm, " node-name ".index); + * u32 node_counter_base_index = n->error_heap_index; + * vlib_error_main_t * em = &vm->error_main; + * em->counters[node_counter_base_index + " uc-node-name "_ERROR1] += 1; + * + */ + +typedef enum { +#define _(sym,str) " uc-node-name "_ERROR_##sym, + foreach_" node-name "_error +#undef _ + " uc-node-name "_N_ERROR, +} " node-name "_error_t; + +/* + * enumeration of per-packet dispositions + * FIXME: add dispositions as desired + */ + +typedef enum { \n" +" " uc-node-name "_NEXT_NORMAL,\n" +" " uc-node-name "_N_NEXT, +} " node-name "_next_t; + +#define NSTAGES " nstages " + +/* + * Use the generic buffer metadata + first line of packet data prefetch + * stage function from <api/pipeline.h>. This is usually a Good Idea. + */ +#define stage0 generic_stage0 + +/* + * FIXME: add stage functions. 
Here is the function prototype: + * + * static inline void stageN (vlib_main_t * vm, + * vlib_node_runtime_t * node, + * u32 buffer_index) + */ + +/* + * FIXME: the last pipeline stage returns the desired pkt next node index, + * from the " node-name "_next_t enum above + */ +static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node, + u32 bi) +{ + vlib_buffer_t *b = vlib_get_buffer (vm, bi); + + b->error = node->errors[EXAMPLE_ERROR_ERROR1]; + + return " uc-node-name "_NEXT_NORMAL; +} + +#include <api/pipeline.h> + +static uword " node-name "_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return dispatch_pipeline (vm, node, frame); +} + +static VLIB_REGISTER_NODE (example_node) = { + .function = " node-name "_node_fn, + .name = \"" node-name "-node\", + .vector_size = sizeof (u32), + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(" node-name "_error_strings), + .error_strings = " node-name "_error_strings, + + .n_next_nodes = " uc-node-name "_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [" uc-node-name "_NEXT_NORMAL] = \"error-drop\", + }, +}; + +/* + * packet generator definition to push superframes of data into the + * new graph node. Cut and paste into <file>, then + * \"exec <file>\", \"pa enable test\" at the QVNET prompt... + * +packet-generator new { + name test + limit 100 + node " node-name "-node + size 374-374 + data { hex 0x02b46b96000100096978676265000500bf436973636f20494f5320536f6674776172652c2043333735304520536f66747761726520284333373530452d554e4956455253414c2d4d292c2056657273696f6e2031322e32283335295345352c2052454c4541534520534f4654574152452028666331290a436f707972696768742028632920313938362d3230303720627920436973636f2053797374656d732c20496e632e0a436f6d70696c6564205468752031392d4a756c2d30372031363a3137206279206e616368656e00060018636973636f2057532d4333373530452d3234544400020011000000010101cc0004000000000003001b54656e4769676162697445746865726e6574312f302f3100040008000000280008002400000c011200000000ffffffff010221ff000000000000001e7a50f000ff000000090004000a00060001000b0005010012000500001300050000160011000000010101cc000400000000001a00100000000100000000ffffffff } +} + */ +") diff --git a/vpp/build-root/emacs-lisp/plugin-all-apih-skel.el b/vpp/build-root/emacs-lisp/plugin-all-apih-skel.el new file mode 100644 index 00000000..0f073f9c --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-all-apih-skel.el @@ -0,0 +1,43 @@ +;;; plugin-all-apih-skel.el - vpp engine plug-in "all-apih.c" skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. 
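
Referring back to the pipelined node skeleton in pipe-skel.el above: the generated node leaves the middle stages as FIXMEs and only gives their prototype, while last_stage picks the next-node disposition. A minimal sketch of one such stage follows, assuming the node rewrites Ethernet headers the way the dual-loop skeleton does; the name stage1 and the MAC swap are illustrative, not part of the generated file.

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>

/* Illustrative middle stage matching the stageN prototype from pipe-skel.el. */
static inline void stage1 (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           u32 buffer_index)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
  ethernet_header_t *en = vlib_buffer_get_current (b);
  u8 tmp[6];
  int i;

  /* Per-packet work: swap src/dst MAC addresses, as the dual-loop
   * skeleton does; the last stage still returns the next-node index. */
  for (i = 0; i < 6; i++)
    tmp[i] = en->src_address[i];
  for (i = 0; i < 6; i++)
    en->src_address[i] = en->dst_address[i];
  for (i = 0; i < 6; i++)
    en->dst_address[i] = tmp[i];
}
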
+ +(require 'skeleton) + +(define-skeleton skel-plugin-all-apih +"Insert a plug-in 'all_api_h.h' skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +/* + * " plugin-name "_all_api_h.h - skeleton vpp engine plug-in api #include file + * + * Copyright (c) <current-year> <your-organization> + * Licensed under the Apache License, Version 2.0 (the \"License\"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an \"AS IS\" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* Include the generated file, see BUILT_SOURCES in Makefile.am */ +#include <" plugin-name "/" plugin-name ".api.h> +") diff --git a/vpp/build-root/emacs-lisp/plugin-api-skel.el b/vpp/build-root/emacs-lisp/plugin-api-skel.el new file mode 100644 index 00000000..74519e70 --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-api-skel.el @@ -0,0 +1,48 @@ +;;; plugin-api-skel.el - vpp engine plug-in "all-apih.c" skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-api +"Insert a plug-in '<name>.api' skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +/* Define a simple enable-disable binary API to control the feature */ + +define " plugin-name "_enable_disable { + /* Client identifier, set from api_main.my_client_index */ + u32 client_index; + + /* Arbitrary context, so client can match reply to request */ + u32 context; + + /* Enable / disable the feature */ + u8 enable_disable; + + /* Interface handle */ + u32 sw_if_index; +}; + +define " plugin-name "_enable_disable_reply { + /* From the request */ + u32 context; + + /* Return value, zero means all OK */ + i32 retval; +}; +") diff --git a/vpp/build-root/emacs-lisp/plugin-configure-skel.el b/vpp/build-root/emacs-lisp/plugin-configure-skel.el new file mode 100644 index 00000000..ebf0bf69 --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-configure-skel.el @@ -0,0 +1,33 @@ +;;; plugin-configure-skel.el - vpp engine plug-in "main.c" skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. 
+;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-configure +"Insert a plug-in 'configure.ac' skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +AC_INIT(" plugin-name "_plugin, 1.0) +AM_INIT_AUTOMAKE +AM_SILENT_RULES([yes]) + +AC_PROG_LIBTOOL +AC_PROG_CC + +AC_OUTPUT([Makefile]) +") diff --git a/vpp/build-root/emacs-lisp/plugin-h-skel.el b/vpp/build-root/emacs-lisp/plugin-h-skel.el new file mode 100644 index 00000000..8bf9b6fe --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-h-skel.el @@ -0,0 +1,66 @@ +;;; plugin-h-skel.el - vpp engine plug-in "main.c" skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-h +"Insert a plug-in 'main.c' skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +/* + * " plugin-name ".h - skeleton vpp engine plug-in header file + * + * Copyright (c) <current-year> <your-organization> + * Licensed under the Apache License, Version 2.0 (the \"License\"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an \"AS IS\" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef __included_" plugin-name "_h__ +#define __included_" plugin-name "_h__ + +#include <vnet/vnet.h> +#include <vnet/ip/ip.h> +#include <vnet/ethernet/ethernet.h> + +#include <vppinfra/hash.h> +#include <vppinfra/error.h> + +typedef struct { + /* API message ID base */ + u16 msg_id_base; + + /* convenience */ + vlib_main_t * vlib_main; + vnet_main_t * vnet_main; + ethernet_main_t * ethernet_main; +} " plugin-name "_main_t; + +" plugin-name "_main_t " plugin-name "_main; + +vlib_node_registration_t " plugin-name "_node; + +#endif /* __included_" plugin-name "_h__ */ +") diff --git a/vpp/build-root/emacs-lisp/plugin-main-skel.el b/vpp/build-root/emacs-lisp/plugin-main-skel.el new file mode 100644 index 00000000..47240695 --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-main-skel.el @@ -0,0 +1,275 @@ +;;; plugin-main-skel.el - vpp engine plug-in "main.c" skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-main +"Insert a plug-in 'main.c' skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +/* + * " plugin-name ".c - skeleton vpp engine plug-in + * + * Copyright (c) <current-year> <your-organization> + * Licensed under the Apache License, Version 2.0 (the \"License\"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an \"AS IS\" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <vnet/vnet.h> +#include <vnet/plugin/plugin.h> +#include <" plugin-name "/" plugin-name ".h> + +#include <vlibapi/api.h> +#include <vlibmemory/api.h> +#include <vlibsocket/api.h> + +/* define message IDs */ +#include <" plugin-name "/" plugin-name "_msg_enum.h> + +/* define message structures */ +#define vl_typedefs +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_typedefs + +/* define generated endian-swappers */ +#define vl_endianfun +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_printfun + +/* Get the API version number */ +#define vl_api_version(n,v) static u32 api_version=(v); +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_api_version + +/* + * A handy macro to set up a message reply. 
+ * Assumes that the following variables are available: + * mp - pointer to request message + * rmp - pointer to reply message type + * rv - return value + */ + +#define REPLY_MACRO(t) \\ +do { \\ + unix_shared_memory_queue_t * q = \\ + vl_api_client_index_to_input_queue (mp->client_index); \\ + if (!q) \\ + return; \\ + \\ + rmp = vl_msg_api_alloc (sizeof (*rmp)); \\ + rmp->_vl_msg_id = ntohs((t)+sm->msg_id_base); \\ + rmp->context = mp->context; \\ + rmp->retval = ntohl(rv); \\ + \\ + vl_msg_api_send_shmem (q, (u8 *)&rmp); \\ +} while(0); + + +/* List of message types that this plugin understands */ + +#define foreach_" plugin-name "_plugin_api_msg \\ +_(" PLUGIN-NAME "_ENABLE_DISABLE, " plugin-name "_enable_disable) + +/* + * This routine exists to convince the vlib plugin framework that + * we haven't accidentally copied a random .dll into the plugin directory. + * + * Also collects global variable pointers passed from the vpp engine + */ + +clib_error_t * +vlib_plugin_register (vlib_main_t * vm, vnet_plugin_handoff_t * h, + int from_early_init) +{ + " plugin-name "_main_t * sm = &" plugin-name "_main; + clib_error_t * error = 0; + + sm->vlib_main = vm; + sm->vnet_main = h->vnet_main; + sm->ethernet_main = h->ethernet_main; + + return error; +} + +/* Action function shared between message handler and debug CLI */ + +int " plugin-name "_enable_disable (" plugin-name "_main_t * sm, u32 sw_if_index, + int enable_disable) +{ + vnet_sw_interface_t * sw; + int rv = 0; + + /* Utterly wrong? */ + if (pool_is_free_index (sm->vnet_main->interface_main.sw_interfaces, + sw_if_index)) + return VNET_API_ERROR_INVALID_SW_IF_INDEX; + + /* Not a physical port? */ + sw = vnet_get_sw_interface (sm->vnet_main, sw_if_index); + if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE) + return VNET_API_ERROR_INVALID_SW_IF_INDEX; + + vnet_feature_enable_disable (\"device-input\", \"" plugin-name "\", + sw_if_index, enable_disable, 0, 0); + + return rv; +} + +static clib_error_t * +" plugin-name "_enable_disable_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + " plugin-name "_main_t * sm = &" plugin-name "_main; + u32 sw_if_index = ~0; + int enable_disable = 1; + + int rv; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { + if (unformat (input, \"disable\")) + enable_disable = 0; + else if (unformat (input, \"%U\", unformat_vnet_sw_interface, + sm->vnet_main, &sw_if_index)) + ; + else + break; + } + + if (sw_if_index == ~0) + return clib_error_return (0, \"Please specify an interface...\"); + + rv = " plugin-name "_enable_disable (sm, sw_if_index, enable_disable); + + switch(rv) { + case 0: + break; + + case VNET_API_ERROR_INVALID_SW_IF_INDEX: + return clib_error_return + (0, \"Invalid interface, only works on physical ports\"); + break; + + case VNET_API_ERROR_UNIMPLEMENTED: + return clib_error_return (0, \"Device driver doesn't support redirection\"); + break; + + default: + return clib_error_return (0, \"" plugin-name "_enable_disable returned %d\", + rv); + } + return 0; +} + +VLIB_CLI_COMMAND (" plugin-name "_enable_disable_command, static) = { + .path = \"" plugin-name " enable-disable\", + .short_help = + \"" plugin-name " enable-disable <interface-name> [disable]\", + .function = " plugin-name "_enable_disable_command_fn, +}; + +/* API message handler */ +static void vl_api_" plugin-name "_enable_disable_t_handler +(vl_api_" plugin-name "_enable_disable_t * mp) +{ + vl_api_" plugin-name "_enable_disable_reply_t * rmp; + " plugin-name "_main_t * sm = 
&" plugin-name "_main; + int rv; + + rv = " plugin-name "_enable_disable (sm, ntohl(mp->sw_if_index), + (int) (mp->enable_disable)); + + REPLY_MACRO(VL_API_" PLUGIN-NAME "_ENABLE_DISABLE_REPLY); +} + +/* Set up the API message handling tables */ +static clib_error_t * +" plugin-name "_plugin_api_hookup (vlib_main_t *vm) +{ + " plugin-name "_main_t * sm = &" plugin-name "_main; +#define _(N,n) \\ + vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base), \\ + #n, \\ + vl_api_##n##_t_handler, \\ + vl_noop_handler, \\ + vl_api_##n##_t_endian, \\ + vl_api_##n##_t_print, \\ + sizeof(vl_api_##n##_t), 1); + foreach_" plugin-name "_plugin_api_msg; +#undef _ + + return 0; +} + +#define vl_msg_name_crc_list +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (" plugin-name "_main_t * sm, api_main_t * am) +{ +#define _(id,n,crc) \ + vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id + sm->msg_id_base); + foreach_vl_msg_name_crc_" plugin-name" ; +#undef _ +} + +static clib_error_t * " plugin-name "_init (vlib_main_t * vm) +{ + " plugin-name "_main_t * sm = &" plugin-name "_main; + clib_error_t * error = 0; + u8 * name; + + name = format (0, \"" plugin-name "_%08x%c\", api_version, 0); + + /* Ask for a correctly-sized block of API message decode slots */ + sm->msg_id_base = vl_msg_api_get_msg_ids + ((char *) name, VL_MSG_FIRST_AVAILABLE); + + error = " plugin-name "_plugin_api_hookup (vm); + + vec_free(name); + + return error; +} + +VLIB_INIT_FUNCTION (" plugin-name "_init); + +VNET_FEATURE_INIT (" plugin-name ", static) = +{ + .arc_name = \"device-input\", + .node_name = \"" plugin-name "\", + .runs_before = VNET_FEATURES (\"ethernet-input\"), +}; +") + diff --git a/vpp/build-root/emacs-lisp/plugin-makefile-skel.el b/vpp/build-root/emacs-lisp/plugin-makefile-skel.el new file mode 100644 index 00000000..7cb6cbfd --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-makefile-skel.el @@ -0,0 +1,75 @@ +;;; plugin-makefile-skel.el - vpp engine plug-in "main.c" skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-makefile +"Insert a plug-in 'Makefile.am' skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +# Copyright (c) <current-year> <your-organization> +# Licensed under the Apache License, Version 2.0 (the \"License\"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an \"AS IS\" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +AUTOMAKE_OPTIONS = foreign subdir-objects + +AM_CFLAGS = -Wall +AM_LDFLAGS = -module -shared -avoid-version + +vppapitestpluginsdir = ${libdir}/vpp_api_test_plugins +vpppluginsdir = ${libdir}/vpp_plugins + +vppplugins_LTLIBRARIES = " plugin-name "_plugin.la +vppapitestplugins_LTLIBRARIES = " plugin-name "_test_plugin.la + +" plugin-name "_plugin_la_SOURCES = " plugin-name "/" plugin-name ".c \\ + " plugin-name "/node.c \\ + " plugin-name "/" plugin-name "_plugin.api.h +" plugin-name "_plugin_la_LDFLAGS = -module + +BUILT_SOURCES = " plugin-name "/" plugin-name ".api.h + +SUFFIXES = .api.h .api + +%.api.h: %.api + mkdir -p `dirname $@` ; \\ + $(CC) $(CPPFLAGS) -E -P -C -x c $^ \\ + | vppapigen --input - --output $@ --show-name $@ + +noinst_HEADERS = \\ + " plugin-name "/" plugin-name "_all_api_h.h \\ + " plugin-name "/" plugin-name "_msg_enum.h \\ + " plugin-name "/" plugin-name ".api.h + +" plugin-name "_test_plugin_la_SOURCES = \\ + " plugin-name "/" plugin-name "_test.c " plugin-name "/" plugin-name "_plugin.api.h + +# Remove *.la files +install-data-hook: + @(cd $(vpppluginsdir) && $(RM) $(vppplugins_LTLIBRARIES)) + @(cd $(vppapitestpluginsdir) && $(RM) $(vppapitestplugins_LTLIBRARIES)) +") diff --git a/vpp/build-root/emacs-lisp/plugin-msg-enum-skel.el b/vpp/build-root/emacs-lisp/plugin-msg-enum-skel.el new file mode 100644 index 00000000..c44af051 --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-msg-enum-skel.el @@ -0,0 +1,55 @@ +;;; plugin-msg-enum-skel.el - vpp engine plug-in message enum skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-msg-enum +"Insert a plug-in message enumeration skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +/* + * " plugin-name "_msg_enum.h - skeleton vpp engine plug-in message enumeration + * + * Copyright (c) <current-year> <your-organization> + * Licensed under the Apache License, Version 2.0 (the \"License\"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an \"AS IS\" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef included_" plugin-name "_msg_enum_h +#define included_" plugin-name "_msg_enum_h + +#include <vppinfra/byte_order.h> + +#define vl_msg_id(n,h) n, +typedef enum { +#include <" plugin-name "/" plugin-name "_all_api_h.h> + /* We'll want to know how many messages IDs we need... 
*/ + VL_MSG_FIRST_AVAILABLE, +} vl_msg_id_t; +#undef vl_msg_id + +#endif /* included_" plugin-name "_msg_enum_h */ +") diff --git a/vpp/build-root/emacs-lisp/plugin-node-skel.el b/vpp/build-root/emacs-lisp/plugin-node-skel.el new file mode 100644 index 00000000..ee745c35 --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-node-skel.el @@ -0,0 +1,320 @@ +;;; plugin-node-skel.el - vpp engine plug-in "node.c" skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-node +"Insert a plug-in 'node.c' skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +/* + * node.c - skeleton vpp engine plug-in dual-loop node skeleton + * + * Copyright (c) <current-year> <your-organization> + * Licensed under the Apache License, Version 2.0 (the \"License\"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an \"AS IS\" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include <vlib/vlib.h> +#include <vnet/vnet.h> +#include <vnet/pg/pg.h> +#include <vppinfra/error.h> +#include <" plugin-name "/" plugin-name ".h> + +typedef struct { + u32 next_index; + u32 sw_if_index; + u8 new_src_mac[6]; + u8 new_dst_mac[6]; +} " plugin-name "_trace_t; + +static u8 * +format_mac_address (u8 * s, va_list * args) +{ + u8 *a = va_arg (*args, u8 *); + return format (s, \"%02x:%02x:%02x:%02x:%02x:%02x\", + a[0], a[1], a[2], a[3], a[4], a[5]); +} + +/* packet trace format function */ +static u8 * format_" plugin-name "_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + " plugin-name "_trace_t * t = va_arg (*args, " plugin-name "_trace_t *); + + s = format (s, \"" PLUGIN-NAME ": sw_if_index %d, next index %d\\n\", + t->sw_if_index, t->next_index); + s = format (s, \" new src %U -> new dst %U\", + format_mac_address, t->new_src_mac, + format_mac_address, t->new_dst_mac); + return s; +} + +vlib_node_registration_t " plugin-name "_node; + +#define foreach_" plugin-name "_error \\ +_(SWAPPED, \"Mac swap packets processed\") + +typedef enum { +#define _(sym,str) " PLUGIN-NAME "_ERROR_##sym, + foreach_" plugin-name "_error +#undef _ + " PLUGIN-NAME "_N_ERROR, +} " plugin-name "_error_t; + +static char * " plugin-name "_error_strings[] = { +#define _(sym,string) string, + foreach_" plugin-name "_error +#undef _ +}; + +typedef enum { + " PLUGIN-NAME "_NEXT_INTERFACE_OUTPUT, + " PLUGIN-NAME "_N_NEXT, +} " plugin-name "_next_t; + +#define foreach_mac_address_offset \\ +_(0) \\ +_(1) \\ +_(2) \\ +_(3) \\ +_(4) \\ +_(5) + +static uword +" plugin-name "_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + u32 n_left_from, * from, * to_next; + " plugin-name "_next_t next_index; + u32 pkts_swapped = 0; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 next0 = " PLUGIN-NAME "_NEXT_INTERFACE_OUTPUT; + u32 next1 = " PLUGIN-NAME "_NEXT_INTERFACE_OUTPUT; + u32 sw_if_index0, sw_if_index1; + u8 tmp0[6], tmp1[6]; + ethernet_header_t *en0, *en1; + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + /* speculatively enqueue b0 and b1 to the current next frame */ + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + from += 2; + to_next += 2; + n_left_from -= 2; + n_left_to_next -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + ASSERT (b0->current_data == 0); + ASSERT (b1->current_data == 0); + + en0 = vlib_buffer_get_current (b0); + en1 = vlib_buffer_get_current (b1); + + /* This is not the fastest way to swap src + dst mac addresses */ +#define _(a) tmp0[a] = en0->src_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->src_address[a] = en0->dst_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->dst_address[a] = tmp0[a]; + foreach_mac_address_offset; +#undef _ + +#define _(a) tmp1[a] = en1->src_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en1->src_address[a] = en1->dst_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en1->dst_address[a] = tmp1[a]; + foreach_mac_address_offset; +#undef _ + + + + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; + sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX]; + + /* Send pkt back out the RX interface */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = sw_if_index0; + vnet_buffer(b1)->sw_if_index[VLIB_TX] = sw_if_index1; + + pkts_swapped += 2; + + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if (b0->flags & VLIB_BUFFER_IS_TRACED) + { + " plugin-name "_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->sw_if_index = sw_if_index0; + t->next_index = next0; + clib_memcpy (t->new_src_mac, en0->src_address, + sizeof (t->new_src_mac)); + clib_memcpy (t->new_dst_mac, en0->dst_address, + sizeof (t->new_dst_mac)); + } + if (b1->flags & VLIB_BUFFER_IS_TRACED) + { + " plugin-name "_trace_t *t = + vlib_add_trace (vm, node, b1, sizeof (*t)); + t->sw_if_index = sw_if_index1; + t->next_index = next1; + clib_memcpy (t->new_src_mac, en1->src_address, + sizeof (t->new_src_mac)); + clib_memcpy (t->new_dst_mac, en1->dst_address, + sizeof (t->new_dst_mac)); + } + } + + /* verify speculative enqueues, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0 = " PLUGIN-NAME "_NEXT_INTERFACE_OUTPUT; + u32 sw_if_index0; + u8 tmp0[6]; + ethernet_header_t *en0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + /* + * Direct from the driver, we should be at offset 0 + * aka at &b0->data[0] + */ + ASSERT (b0->current_data == 0); + + en0 = vlib_buffer_get_current (b0); + + /* This is not the fastest way to swap src + dst mac addresses */ +#define _(a) tmp0[a] = en0->src_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->src_address[a] = en0->dst_address[a]; + foreach_mac_address_offset; +#undef _ +#define _(a) en0->dst_address[a] = tmp0[a]; + foreach_mac_address_offset; +#undef _ + + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; + + /* Send pkt 
back out the RX interface */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = sw_if_index0; + + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) + && (b0->flags & VLIB_BUFFER_IS_TRACED))) { + " plugin-name "_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->sw_if_index = sw_if_index0; + t->next_index = next0; + clib_memcpy (t->new_src_mac, en0->src_address, + sizeof (t->new_src_mac)); + clib_memcpy (t->new_dst_mac, en0->dst_address, + sizeof (t->new_dst_mac)); + } + + pkts_swapped += 1; + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, " plugin-name "_node.index, + " PLUGIN-NAME "_ERROR_SWAPPED, pkts_swapped); + return frame->n_vectors; +} + +VLIB_REGISTER_NODE (" plugin-name "_node) = { + .function = " plugin-name "_node_fn, + .name = \"" plugin-name "\", + .vector_size = sizeof (u32), + .format_trace = format_" plugin-name "_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(" plugin-name "_error_strings), + .error_strings = " plugin-name "_error_strings, + + .n_next_nodes = " PLUGIN-NAME "_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [" PLUGIN-NAME "_NEXT_INTERFACE_OUTPUT] = \"interface-output\", + }, +}; +") diff --git a/vpp/build-root/emacs-lisp/plugin-test-skel.el b/vpp/build-root/emacs-lisp/plugin-test-skel.el new file mode 100644 index 00000000..5231a236 --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin-test-skel.el @@ -0,0 +1,235 @@ +;;; plugin-test-skel.el - vpp-api-test plug-in skeleton +;;; +;;; Copyright (c) 2016 Cisco and/or its affiliates. +;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;; you may not use this file except in compliance with the License. +;;; You may obtain a copy of the License at: +;;; +;;; http://www.apache.org/licenses/LICENSE-2.0 +;;; +;;; Unless required by applicable law or agreed to in writing, software +;;; distributed under the License is distributed on an "AS IS" BASIS, +;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;; See the License for the specific language governing permissions and +;;; limitations under the License. + +(require 'skeleton) + +(define-skeleton skel-plugin-test +"Insert a plug-in vpp-api-test skeleton " +nil +'(if (not (boundp 'plugin-name)) + (setq plugin-name (read-string "Plugin name: "))) +'(setq PLUGIN-NAME (upcase plugin-name)) +" +/* + * " plugin-name ".c - skeleton vpp-api-test plug-in + * + * Copyright (c) <current-year> <your-organization> + * Licensed under the Apache License, Version 2.0 (the \"License\"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an \"AS IS\" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include <vat/vat.h> +#include <vlibapi/api.h> +#include <vlibmemory/api.h> +#include <vlibsocket/api.h> +#include <vppinfra/error.h> + +uword unformat_sw_if_index (unformat_input_t * input, va_list * args); + +/* Declare message IDs */ +#include <" plugin-name "/" plugin-name "_msg_enum.h> + +/* define message structures */ +#define vl_typedefs +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_typedefs + +/* declare message handlers for each api */ + +#define vl_endianfun /* define message structures */ +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) +#define vl_printfun +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_printfun + +/* Get the API version number. */ +#define vl_api_version(n,v) static u32 api_version=(v); +#include <" plugin-name "/" plugin-name "_all_api_h.h> +#undef vl_api_version + + +typedef struct { + /* API message ID base */ + u16 msg_id_base; + vat_main_t *vat_main; +} " plugin-name "_test_main_t; + +" plugin-name "_test_main_t " plugin-name "_test_main; + +#define foreach_standard_reply_retval_handler \\ +_(" plugin-name "_enable_disable_reply) + +#define _(n) \\ + static void vl_api_##n##_t_handler \\ + (vl_api_##n##_t * mp) \\ + { \\ + vat_main_t * vam = " plugin-name "_test_main.vat_main; \\ + i32 retval = ntohl(mp->retval); \\ + if (vam->async_mode) { \\ + vam->async_errors += (retval < 0); \\ + } else { \\ + vam->retval = retval; \\ + vam->result_ready = 1; \\ + } \\ + } +foreach_standard_reply_retval_handler; +#undef _ + +/* + * Table of message reply handlers, must include boilerplate handlers + * we just generated + */ +#define foreach_vpe_api_reply_msg \\ +_(" PLUGIN-NAME "_ENABLE_DISABLE_REPLY, " plugin-name "_enable_disable_reply) + + +/* M: construct, but don't yet send a message */ + +#define M(T,t) \\ +do { \\ + vam->result_ready = 0; \\ + mp = vl_msg_api_alloc(sizeof(*mp)); \\ + memset (mp, 0, sizeof (*mp)); \\ + mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \\ + mp->client_index = vam->my_client_index; \\ +} while(0); + +#define M2(T,t,n) \\ +do { \\ + vam->result_ready = 0; \\ + mp = vl_msg_api_alloc(sizeof(*mp)+(n)); \\ + memset (mp, 0, sizeof (*mp)); \\ + mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \\ + mp->client_index = vam->my_client_index; \\ +} while(0); + +/* S: send a message */ +#define S (vl_msg_api_send_shmem (vam->vl_input_queue, (u8 *)&mp)) + +/* W: wait for results, with timeout */ +#define W \\ +do { \\ + timeout = vat_time_now (vam) + 1.0; \\ + \\ + while (vat_time_now (vam) < timeout) { \\ + if (vam->result_ready == 1) { \\ + return (vam->retval); \\ + } \\ + } \\ + return -99; \\ +} while(0); + +static int api_" plugin-name "_enable_disable (vat_main_t * vam) +{ + " plugin-name "_test_main_t * sm = &" plugin-name "_test_main; + unformat_input_t * i = vam->input; + f64 timeout; + int enable_disable = 1; + u32 sw_if_index = ~0; + vl_api_" plugin-name "_enable_disable_t * mp; + + /* Parse args required to build the message */ + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) { + if (unformat (i, \"%U\", unformat_sw_if_index, vam, &sw_if_index)) + ; + else if (unformat (i, \"sw_if_index %d\", &sw_if_index)) + ; + else if (unformat (i, \"disable\")) + enable_disable = 0; + else + break; + } + + if (sw_if_index == ~0) { + errmsg (\"missing interface name / explicit sw_if_index number \\n\"); + return -99; + } + + /* Construct the API message */ + 
M(" PLUGIN-NAME "_ENABLE_DISABLE, " plugin-name "_enable_disable); + mp->sw_if_index = ntohl (sw_if_index); + mp->enable_disable = enable_disable; + + /* send it... */ + S; + + /* Wait for a reply... */ + W; +} + +/* + * List of messages that the api test plugin sends, + * and that the data plane plugin processes + */ +#define foreach_vpe_api_msg \\ +_(" plugin-name "_enable_disable, \"<intfc> [disable]\") + +void vat_api_hookup (vat_main_t *vam) +{ + " plugin-name "_test_main_t * sm = &" plugin-name "_test_main; + /* Hook up handlers for replies from the data plane plug-in */ +#define _(N,n) \\ + vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base), \\ + #n, \\ + vl_api_##n##_t_handler, \\ + vl_noop_handler, \\ + vl_api_##n##_t_endian, \\ + vl_api_##n##_t_print, \\ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_reply_msg; +#undef _ + + /* API messages we can send */ +#define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n); + foreach_vpe_api_msg; +#undef _ + + /* Help strings */ +#define _(n,h) hash_set_mem (vam->help_by_name, #n, h); + foreach_vpe_api_msg; +#undef _ +} + +clib_error_t * vat_plugin_register (vat_main_t *vam) +{ + " plugin-name "_test_main_t * sm = &" plugin-name "_test_main; + u8 * name; + + sm->vat_main = vam; + + /* Ask the vpp engine for the first assigned message-id */ + name = format (0, \"" plugin-name "_%08x%c\", api_version, 0); + sm->msg_id_base = vl_client_get_first_plugin_msg_id ((char *) name); + + if (sm->msg_id_base != (u16) ~0) + vat_api_hookup (vam); + + vec_free(name); + + return 0; +} +") diff --git a/vpp/build-root/emacs-lisp/plugin.el b/vpp/build-root/emacs-lisp/plugin.el new file mode 100644 index 00000000..006ae54d --- /dev/null +++ b/vpp/build-root/emacs-lisp/plugin.el @@ -0,0 +1,35 @@ +(defun make-plugin () + "Create a plugin" + (interactive) + (save-excursion + (let (cd-args cmd-args start-dir) + (setq start-dir default-directory) + (makunbound 'plugin-name) + (makunbound 'PLUGIN-NAME) + (setq plugin-name (read-string "Plugin name: ")) + (setq PLUGIN-NAME (upcase plugin-name)) + (setq cmd-args (concat "mkdir -p " plugin-name "-plugin/" plugin-name)) + (shell-command cmd-args) + (setq cd-args (concat start-dir plugin-name "-plugin")) + (setq default-directory cd-args) + (find-file "Makefile.am") + (skel-plugin-makefile) + (find-file "configure.ac") + (skel-plugin-configure) + (setq default-directory (concat cd-args "/" plugin-name)) + (find-file (concat plugin-name ".api")) + (skel-plugin-api) + (find-file (concat plugin-name "_all_api_h.h")) + (skel-plugin-all-apih) + (find-file (concat plugin-name ".h")) + (skel-plugin-h) + (find-file (concat plugin-name ".c")) + (skel-plugin-main) + (find-file (concat plugin-name "_msg_enum.h")) + (skel-plugin-msg-enum) + (find-file "node.c") + (skel-plugin-node) + (find-file (concat plugin-name "_test.c")) + (skel-plugin-test) + (cd start-dir)))) + diff --git a/vpp/build-root/emacs-lisp/tunnel-c-skel.el b/vpp/build-root/emacs-lisp/tunnel-c-skel.el new file mode 100644 index 00000000..aa260e53 --- /dev/null +++ b/vpp/build-root/emacs-lisp/tunnel-c-skel.el @@ -0,0 +1,441 @@ +;;; tunnel-c-skel.el - tunnel encap cli / api + +(require 'skeleton) + +(define-skeleton skel-tunnel-c +"Insert a tunnel cli/api implementation" +nil +'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): ")) +'(setq ENCAP_STACK (upcase encap_stack)) +'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack)) +" +#include <vnet/" encap-stack "/" encap_stack" .h> + +" encap_stack "_main_t " encap_stack "_main; + 
+static u8 * format_decap_next (u8 * s, va_list * args) +{ + u32 next_index = va_arg (*args, u32); + + switch (next_index) + { + case " ENCAP_STACK "_INPUT_NEXT_DROP: + return format (s, \"drop\"); + case " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT: + return format (s, \"ip4\"); + case " ENCAP_STACK "_INPUT_NEXT_IP6_INPUT: + return format (s, \"ip6\"); + case " ENCAP_STACK "_INPUT_NEXT_" ENCAP_STACK "_ENCAP: + return format (s, \"" encap-stack "\"); + default: + return format (s, \"unknown %d\", next_index); + } + return s; +} + +u8 * format_" encap_stack "_tunnel (u8 * s, va_list * args) +{ + " encap_stack "_tunnel_t * t = va_arg (*args, " encap_stack "_tunnel_t *); + " encap_stack "_main_t * ngm = &" encap_stack "_main; + + s = format (s, + \"[%d] %U (src) %U (dst) fibs: encap %d, decap %d\", + t - ngm->tunnels, + format_ip4_address, &t->src, + format_ip4_address, &t->dst, + t->encap_fib_index, + t->decap_fib_index); + + s = format (s, \" decap next %U\\n\", format_decap_next, t->decap_next_index); + /* FIXME: add protocol details */ + return s; +} + +static u8 * format_" encap_stack "_name (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + return format (s, \"" encap_stack "_tunnel%d\", dev_instance); +} + +static uword dummy_interface_tx (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + clib_warning (\"you shouldn't be here, leaking buffers...\"); + return frame->n_vectors; +} + +VNET_DEVICE_CLASS (" encap_stack "_device_class,static) = { + .name = "" ENCAP_STACK "", + .format_device_name = format_" encap_stack "_name, + .format_tx_trace = format_" encap_stack "_encap_trace, + .tx_function = dummy_interface_tx, +}; + +static uword dummy_set_rewrite (vnet_main_t * vnm, + u32 sw_if_index, + u32 l3_type, + void * dst_address, + void * rewrite, + uword max_rewrite_bytes) +{ + return 0; +} + +u8 * format_" encap_stack "_header_with_length (u8 * s, va_list * args) +{ + " encap_stack "_header_t * h = va_arg (*args, " encap_stack "_header_t *); + u32 max_header_bytes = va_arg (*args, u32); + u32 header_bytes; + + header_bytes = sizeof (h[0]); + if (max_header_bytes != 0 && header_bytes > max_header_bytes) + return format (s, \"" encap-stack "header truncated\"); + + /* FIXME: pretty-print an " encap_stack " header */ + + return s; +} + +VNET_HW_INTERFACE_CLASS (" encap_stack "_hw_class) = { + .name = \"" ENCAP_STACK "\", + .format_header = format_" encap_stack "_header_with_length, + .set_rewrite = dummy_set_rewrite, +}; + +#define foreach_copy_field \ +_(src.as_u32) \ +_(dst.as_u32) \ +_(encap_fib_index) \ +_(decap_fib_index) \ +_(decap_next_index) \ +_(FIXME_ADD_ALL_COPY_FIELDS ) + +static int " encap_stack "_rewrite (" encap_stack "_tunnel_t * t) +{ + u8 *rw = 0; + ip4_header_t * ip0; + " encap_stack "_header_t * h0; + int len; + + len = sizeof (*h0); + + vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES); + + h0 = (ip4_udp_" encap_stack "_header_t *) rw; + + /* FIXME: build the actual header here... */ + + /* Fixed portion of the (outer) ip4 header */ + ip0 = &h0->ip4; + ip0->ip_version_and_header_length = 0x45; + ip0->ttl = 254; + ip0->protocol = IP_PROTOCOL_UDP; + + /* we'll fix up the ip4 header length and checksum after-the-fact */ + ip0->src_address.as_u32 = t->src.as_u32; + ip0->dst_address.as_u32 = t->dst.as_u32; + ip0->checksum = ip4_header_checksum (ip0); + + /* UDP header, randomize src port on something, maybe? 
*/ + h0->udp.src_port = clib_host_to_net_u16 (4341); + h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_" encap_stack "); + + /* $$$ build a <mumble> tunnel header here */ + + t->rewrite = rw; + return (0); +} + +int vnet_" encap_stack "_add_del_tunnel +(vnet_" encap_stack "_add_del_tunnel_args_t *a, u32 * hw_if_indexp) +{ + " encap_stack "_main_t * ngm = &" encap_stack "_main; + " encap_stack "_tunnel_t *t = 0; + vnet_main_t * vnm = ngm->vnet_main; + vnet_hw_interface_t * hi; + uword * p; + u32 hw_if_index = ~0; + int rv; + " encap_stack "_tunnel_key_t key, *key_copy; + hash_pair_t *hp; + + key.FIXME = clib_host_to_net_XXX(FIXME); + + p = hash_get_mem (ngm->" encap_stack "_tunnel_by_key, &key); + + if (a->is_add) + { + /* adding a tunnel: tunnel must not already exist */ + if (p) + return VNET_API_ERROR_INVALID_VALUE; + + pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES); + memset (t, 0, sizeof (*t)); + + /* copy from arg structure */ +#define _(x) t->x = a->x; + foreach_copy_field; +#undef _ + + rv = " encap_stack "_rewrite (t); + + if (rv) + { + pool_put (ngm->tunnels, t); + return rv; + } + + /* $$$$ use a simple hash if you can ... */ + key_copy = clib_mem_alloc (sizeof (*key_copy)); + clib_memcpy (key_copy, &key, sizeof (*key_copy)); + + hash_set_mem (ngm->" encap_stack "_tunnel_by_key, key_copy, + t - ngm->tunnels); + + /* + * interface freelist / recycle shtik + * This simple implementation rapidly reuses freed tunnel interfaces. + * Consider whether to refcount, etc. etc. + */ + if (vec_len (ngm->free_" encap_stack "_tunnel_hw_if_indices) > 0) + { + hw_if_index = ngm->free_" encap_stack "_tunnel_hw_if_indices + [vec_len (ngm->free_" encap_stack "_tunnel_hw_if_indices)-1]; + _vec_len (ngm->free_" encap_stack "_tunnel_hw_if_indices) -= 1; + + hi = vnet_get_hw_interface (vnm, hw_if_index); + hi->dev_instance = t - ngm->tunnels; + hi->hw_instance = hi->dev_instance; + } + else + { + hw_if_index = vnet_register_interface + (vnm, " encap_stack "_device_class.index, t - ngm->tunnels, + " encap_stack "_hw_class.index, t - ngm->tunnels); + hi = vnet_get_hw_interface (vnm, hw_if_index); + hi->output_node_index = " encap_stack "_encap_node.index; + } + + t->hw_if_index = hw_if_index; + + vnet_sw_interface_set_flags (vnm, hi->sw_if_index, + VNET_SW_INTERFACE_FLAG_ADMIN_UP); + } + else + { + /* deleting a tunnel: tunnel must exist */ + if (!p) + return VNET_API_ERROR_NO_SUCH_ENTRY; + + t = pool_elt_at_index (ngm->tunnels, p[0]); + + vnet_sw_interface_set_flags (vnm, t->hw_if_index, 0 /* down */); + vec_add1 (ngm->free_" encap_stack "_tunnel_hw_if_indices, t->hw_if_index); + + hp = hash_get_pair (ngm->" encap_stack "_tunnel_by_key, &key); + key_copy = (void *)(hp->key); + hash_unset_mem (ngm->" encap_stack "_tunnel_by_key, &key); + clib_mem_free (key_copy); + + vec_free (t->rewrite); + pool_put (ngm->tunnels, t); + } + + if (hw_if_indexp) + *hw_if_indexp = hw_if_index; + + return 0; +} + +static u32 fib_index_from_fib_id (u32 fib_id) +{ + ip4_main_t * im = &ip4_main; + uword * p; + + p = hash_get (im->fib_index_by_table_id, fib_id); + if (!p) + return ~0; + + return p[0]; +} + +static uword unformat_decap_next (unformat_input_t * input, va_list * args) +{ + u32 * result = va_arg (*args, u32 *); + u32 tmp; + + if (unformat (input, \"drop\")) + *result = " ENCAP_STACK "_INPUT_NEXT_DROP; + else if (unformat (input, \"ip4\")) + *result = " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT; + else if (unformat (input, \"ip6\")) + *result = " ENCAP_STACK "_INPUT_NEXT_IP6_INPUT; + else if (unformat 
(input, \"ethernet\")) + *result = " ENCAP_STACK "_INPUT_NEXT_IP6_INPUT; + else if (unformat (input, \"" encap-stack "\")) + *result = " ENCAP_STACK "_INPUT_NEXT_" ENCAP_STACK "_ENCAP; + else if (unformat (input, \"%d\", &tmp)) + *result = tmp; + else + return 0; + return 1; +} + +static clib_error_t * +" encap_stack "_add_del_tunnel_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, * line_input = &_line_input; + ip4_address_t src, dst; + u8 is_add = 1; + u8 src_set = 0; + u8 dst_set = 0; + u32 encap_fib_index = 0; + u32 decap_fib_index = 0; + u8 next_protocol = " ENCAP_STACK "_NEXT_PROTOCOL_IP4; + u32 decap_next_index = " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT; + u8 flags = " ENCAP_STACK "_FLAGS_P; + u8 ver_res = 0; + u8 res = 0; + u32 iid = 0; + u8 iid_set = 0; + u32 tmp; + int rv; + vnet_" encap_stack "_add_del_tunnel_args_t _a, * a = &_a; + + /* Get a line of input. */ + if (! unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { + if (unformat (line_input, \"del\")) + is_add = 0; + else if (unformat (line_input, \"src %U\", + unformat_ip4_address, &src)) + src_set = 1; + else if (unformat (line_input, \"dst %U\", + unformat_ip4_address, &dst)) + dst_set = 1; + else if (unformat (line_input, \"encap-vrf-id %d\", &tmp)) + { + encap_fib_index = fib_index_from_fib_id (tmp); + if (encap_fib_index == ~0) + return clib_error_return (0, \"nonexistent encap fib id %d\", tmp); + } + else if (unformat (line_input, \"decap-vrf-id %d\", &tmp)) + { + decap_fib_index = fib_index_from_fib_id (tmp); + if (decap_fib_index == ~0) + return clib_error_return (0, \"nonexistent decap fib id %d\", tmp); + } + else if (unformat (line_input, \"decap-next %U\", unformat_decap_next, + &decap_next_index)) + ; + else if (unformat(line_input, \"next-ip4\")) + next_protocol = 1; + else if (unformat(line_input, \"next-ip6\")) + next_protocol = 2; + else if (unformat(line_input, \"next-ethernet\")) + next_protocol = 3; + else if (unformat(line_input, \"next-nsh\")) + next_protocol = 4; + /* + * $$$ allow the user to specify anything they want + * in the " ENCAP_STACK " header + */ + else + return clib_error_return (0, \"parse error: '%U'\", + format_unformat_error, line_input); + } + + unformat_free (line_input); + + if (src_set == 0) + return clib_error_return (0, \"tunnel src address not specified\"); + + if (dst_set == 0) + return clib_error_return (0, \"tunnel dst address not specified\"); + + memset (a, 0, sizeof (*a)); + + a->is_add = is_add; + +#define _(x) a->x = x; + foreach_copy_field; +#undef _ + + rv = vnet_" encap_stack "_add_del_tunnel (a, 0 /* hw_if_indexp */); + + switch(rv) + { + case 0: + break; + case VNET_API_ERROR_INVALID_VALUE: + return clib_error_return (0, \"tunnel already exists...\"); + + case VNET_API_ERROR_NO_SUCH_ENTRY: + return clib_error_return (0, \"tunnel does not exist...\"); + + default: + return clib_error_return + (0, \"vnet_" encap_stack "_add_del_tunnel returned %d\", rv); + } + + return 0; +} + +VLIB_CLI_COMMAND (create_" encap_stack "_tunnel_command, static) = { + .path = \"lisp gpe tunnel\", + .short_help = + \"<mumble> tunnel src <ip4-addr> dst <ip4-addr>\\n\" + \" [encap-fib-id <nn>] [decap-fib-id <nn>]\\n\" + \" [decap-next [ip4|ip6|ethernet|nsh-encap|<nn>]][del]\\n\", + .function = " encap_stack "_add_del_tunnel_command_fn, +}; + +static clib_error_t * +show_" encap_stack "_tunnel_command_fn (vlib_main_t * vm, + 
unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + " encap_stack "_main_t * ngm = &" encap_stack "_main; + " encap_stack "_tunnel_t * t; + + if (pool_elts (ngm->tunnels) == 0) + vlib_cli_output (vm, \"No lisp-gpe tunnels configured...\"); + + pool_foreach (t, ngm->tunnels, + ({ + vlib_cli_output (vm, \"%U\", format_" encap_stack "_tunnel); + })); + + return 0; +} + +VLIB_CLI_COMMAND (show_" encap_stack "_tunnel_command, static) = { + .path = \"show lisp gpe tunnel\", + .function = show_" encap_stack "_tunnel_command_fn, +}; + +clib_error_t *" encap_stack "_init (vlib_main_t *vm) +{ + " encap_stack "_main_t *ngm = &" encap_stack "_main; + + ngm->vnet_main = vnet_get_main(); + ngm->vlib_main = vm; + + ngm->" encap_stack "_tunnel_by_key + = hash_create_mem (0, sizeof(" encap_stack "_tunnel_key_t), sizeof (uword)); + + /* YMMV, register with the local netstack */ + udp_register_dst_port (vm, UDP_DST_PORT_" encap_stack ", + " encap_stack "_input_node.index, 1 /* is_ip4 */); + return 0; +} + +VLIB_INIT_FUNCTION(" encap_stack "_init); + +") + diff --git a/vpp/build-root/emacs-lisp/tunnel-decap-skel.el b/vpp/build-root/emacs-lisp/tunnel-decap-skel.el new file mode 100644 index 00000000..380273b6 --- /dev/null +++ b/vpp/build-root/emacs-lisp/tunnel-decap-skel.el @@ -0,0 +1,299 @@ +;;; tunnel-decap-skel.el - tunnel decapsulation skeleton + +(require 'skeleton) + +(define-skeleton skel-tunnel-decap +"Insert a tunnel decap implementation" +nil +'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): ")) +'(setq ENCAP_STACK (upcase encap_stack)) +'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack)) +'(setq ENCAP-STACK (upcase encap-stack)) +" +#include <vlib/vlib.h> +#include <vnet/pg/pg.h> +#include <vnet/" encap-stack "/" encap_stack ".h> + +typedef struct { + u32 next_index; + u32 tunnel_index; + u32 error; + " encap_stack "_header_t h; +} " encap_stack "_rx_trace_t; + +static u8 * format_" encap_stack "_rx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + " encap_stack "_rx_trace_t * t = va_arg (*args, " encap_stack "_rx_trace_t *); + + if (t->tunnel_index != ~0) + { + s = format (s, \"" ENCAP-STACK ": tunnel %d next %d error %d\", + t->tunnel_index, t->next_index, t->error); + } + else + { + s = format (s, \"" ENCAP-STACK ": no tunnel next %d error %d\\n\", + t->next_index, t->error); + } + s = format (s, \"\\n %U\", format_" encap_stack "_header_with_length, &t->h, + (u32) sizeof (t->h) /* max size */); + return s; +} + +static uword +" encap_stack "_input (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, next_index, * from, * to_next; + " encap_stack "_main_t * ngm = &" encap_stack "_main; + u32 last_tunnel_index = ~0; + " encap_stack "_tunnel_key_t last_key; + u32 pkts_decapsulated = 0; + + memset (&last_key, 0xff, sizeof (last_key)); + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + +#if 0 /* $$$ dual loop when the single loop works */ + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + nsh_unicast_header_t * h0, * h1; + u32 label0, label1; + u32 next0, next1; + uword * p0, * p1; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + h0 = vlib_buffer_get_current (b0); + h1 = vlib_buffer_get_current (b1); + + next0 = next1 = " ENCAP_STACK "_INPUT_NEXT_IP4_INPUT; + + label0 = clib_net_to_host_u32 (h0->label_exp_s_ttl); + label1 = clib_net_to_host_u32 (h1->label_exp_s_ttl); + + /* + * Translate label contents into a fib index. + * This is a decent sanity check, and guarantees + * a sane FIB for the downstream lookup + */ + label0 = vnet_nsh_uc_get_label (label0); + label1 = vnet_nsh_uc_get_label (label1); + + /* If 2xlabels match, and match the 1-wide cache, use it */ + if (label0 == label1 && rt->last_label == label0) + { + vnet_buffer(b0)->sw_if_index[VLIB_TX] = rt->last_fib_index; + vnet_buffer(b1)->sw_if_index[VLIB_TX] = rt->last_fib_index; + } + else + { + p0 = hash_get (rt->mm->fib_index_by_nsh_label, label0); + if (PREDICT_FALSE (p0 == 0)) + { + next0 = " ENCAP_STACK "_INPUT_NEXT_DROP; + b0->error = node->errors[NSH_ERROR_BAD_LABEL]; + } + else + vnet_buffer(b0)->sw_if_index[VLIB_TX] = p0[0]; + + p1 = hash_get (rt->mm->fib_index_by_nsh_label, label1); + if (PREDICT_FALSE (p1 == 0)) + { + next1 = " ENCAP_STACK "_INPUT_NEXT_DROP; + b1->error = node->errors[NSH_ERROR_BAD_LABEL]; + } + else + { + vnet_buffer(b1)->sw_if_index[VLIB_TX] = p1[0]; + rt->last_fib_index = p1[0]; + rt->last_label = label1; + } + } + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + nsh_rx_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->label_exp_s_ttl = label0; + } + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + nsh_rx_trace_t *tr = vlib_add_trace (vm, node, + b1, sizeof (*tr)); + tr->label_exp_s_ttl = label1; + } + + vlib_buffer_advance (b0, sizeof (*h0)); + vlib_buffer_advance (b1, sizeof (*h1)); + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } +#endif + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0; + " encap_stack "_header_t * iuX0; + uword * p0; + u32 tunnel_index0; + " encap_stack "_tunnel_t * t0; + " encap_stack "_tunnel_key_t key0; + u32 error0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* + * udp leaves current_data pointing at the tunnel header + * $$$$ FIXME + */ + vlib_buffer_advance + (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t))); + + iuX0 = vlib_buffer_get_current (b0); + + /* pop (ip, udp, lisp-gpe) */ + vlib_buffer_advance (b0, sizeof (*iuX0)); + + tunnel_index0 = ~0; + error0 = 0; + next0 = " ENCAP_STACK "_INPUT_NEXT_DROP; + + key0.src = iuX0->ip4.src_address.as_u32; + key0.iid = iuX0->lisp.iid; + + /* $$$ validate key comparison */ + if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0]))) + { + p0 = hash_get_mem (ngm->" encap_stack "_tunnel_by_key, &key0); + + if (p0 == 0) + { + error0 = " ENCAP_STACK "_ERROR_NO_SUCH_TUNNEL; + goto trace0; + } + + last_key.as_u64[0] = key0.as_u64[0]; + tunnel_index0 = 
last_tunnel_index = p0[0]; + } + else + tunnel_index0 = last_tunnel_index; + + t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0); + + next0 = t0->decap_next_index; + + /* Required to make the l2 tag push / pop code work on l2 subifs */ + vnet_update_l2_len (b0); + + /* + * ip[46] lookup in the configured FIB + * " encap-stack ", here's the encap tunnel sw_if_index + */ + vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index; + pkts_decapsulated ++; + + trace0: + b0->error = error0 ? node->errors[error0] : 0; + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + " encap_stack "_rx_trace_t *tr + = vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->next_index = next0; + tr->error = error0; + tr->tunnel_index = tunnel_index0; + tr->h = iuX0->lisp; + } + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_node_increment_counter (vm, " encap_stack "_input_node.index, + " ENCAP_STACK "_ERROR_DECAPSULATED, + pkts_decapsulated); + return from_frame->n_vectors; +} + +static char * " encap_stack "_error_strings[] = { +#define " encap_stack "_error(n,s) s, +#include <vnet/" encap-stack "/" encap_stack "_error.def> +#undef " encap_stack "_error +#undef _ +}; + +VLIB_REGISTER_NODE (" encap_stack "_input_node) = { + .function = \"" encap_stack "_input\", + .name = \"" encap-stack "-input\", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + + .n_errors = " ENCAP_STACK "_N_ERROR, + .error_strings = " encap_stack "_error_strings, + + .n_next_nodes = " ENCAP_STACK "_INPUT_N_NEXT, + .next_nodes = { +#define _(s,n) [" ENCAP_STACK "_INPUT_NEXT_##s] = n, + foreach_" encap_stack "_input_next +#undef _ + }, + + .format_buffer = format_" encap_stack "_header_with_length, + .format_trace = format_" encap_stack "_rx_trace, + // $$$$ .unformat_buffer = unformat_" encap_stack "_header, +}; + +") diff --git a/vpp/build-root/emacs-lisp/tunnel-encap-skel.el b/vpp/build-root/emacs-lisp/tunnel-encap-skel.el new file mode 100644 index 00000000..9c98a597 --- /dev/null +++ b/vpp/build-root/emacs-lisp/tunnel-encap-skel.el @@ -0,0 +1,245 @@ +;;; tunnel-encap-skel.el - tunnel interface output skeleton + +(require 'skeleton) + +(define-skeleton skel-tunnel-encap +"Insert a tunnel encap implementation" +nil +'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): ")) +'(setq ENCAP_STACK (upcase encap_stack)) +'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack)) +'(setq ENCAP-STACK (upcase encap-stack)) +" +#include <vppinfra/error.h> +#include <vppinfra/hash.h> +#include <vnet/vnet.h> +#include <vnet/ip/ip.h> +#include <vnet/ethernet/ethernet.h> +#include <vnet/" encap-stack "/" encap_stack ".h> + +/* Statistics (not really errors) */ +#define foreach_" encap_stack "_encap_error \\ +_(ENCAPSULATED, \"good packets encapsulated\") + +static char * " encap_stack "_encap_error_strings[] = { +#define _(sym,string) string, + foreach_" encap_stack "_encap_error +#undef _ +}; + +typedef enum { +#define _(sym,str) " ENCAP_STACK "_ENCAP_ERROR_##sym, + foreach_" encap_stack "_encap_error +#undef _ + " ENCAP_STACK "_ENCAP_N_ERROR, +} " encap_stack "_encap_error_t; + +typedef enum { + " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP, + " ENCAP_STACK "_ENCAP_NEXT_DROP, + " ENCAP_STACK "_ENCAP_N_NEXT, +} " encap_stack "_encap_next_t; + +typedef struct { + u32 tunnel_index; +} " encap_stack "_encap_trace_t; + +u8 * format_" encap_stack "_encap_trace (u8 * s, va_list * args) 
+{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + " encap_stack "_encap_trace_t * t + = va_arg (*args, " encap_stack "_encap_trace_t *); + + s = format (s, \"" ENCAP-STACK ": tunnel %d\", t->tunnel_index); + return s; +} + +/* $$$$ FIXME adjust to match the rewrite string */ +#define foreach_fixed_header_offset \\ +_(0) _(1) _(2) _(3) _(FIXME) + +static uword +" encap_stack "_encap (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, next_index, * from, * to_next; + " encap_stack "_main_t * ngm = &" encap_stack "_main; + vnet_main_t * vnm = ngm->vnet_main; + u32 pkts_encapsulated = 0; + u16 old_l0 = 0, old_l1 = 0; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + +#if 0 /* $$$ dual loop when the single loop works */ + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t * b0, * b1; + nsh_unicast_header_t * h0, * h1; + u32 label0, label1; + u32 next0, next1; + uword * p0, * p1; + + /* Prefetch next iteration. */ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + h0 = vlib_buffer_get_current (b0); + h1 = vlib_buffer_get_current (b1); + + next0 = next1 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP; + + vlib_buffer_advance (b0, sizeof (*h0)); + vlib_buffer_advance (b1, sizeof (*h1)); + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } +#endif + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP; + vnet_hw_interface_t * hi0; + ip4_header_t * ip0; + udp_header_t * udp0; + u64 * copy_src0, * copy_dst0; + u32 * copy_src_last0, * copy_dst_last0; + " encap_stack "_tunnel_t * t0; + u16 new_l0; + ip_csum_t sum0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* 1-wide cache? */ + hi0 = vnet_get_sup_hw_interface + (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]); + + t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); + + ASSERT(vec_len(t0->rewrite) >= 24); + + /* Apply the rewrite string. $$$$ vnet_rewrite? */ + vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite)); + + ip0 = vlib_buffer_get_current(b0); + /* Copy the fixed header */ + copy_dst0 = (u64 *) ip0; + copy_src0 = (u64 *) t0->rewrite; + + ASSERT (sizeof (ip4_udp_" encap_stack "_header_t) == FIXME); + + /* Copy first N octets 8-bytes at a time */ +#define _(offs) copy_dst0[offs] = copy_src0[offs]; + foreach_fixed_header_offset; +#undef _ +#if 0 /* needed if encap not a multiple of 8 bytes */ + /* Last 4 octets. 
Hopefully gcc will be our friend */
+          copy_dst_last0 = (u32 *)(&copy_dst0[FIXME]);
+          copy_src_last0 = (u32 *)(&copy_src0[FIXME]);
+          copy_dst_last0[0] = copy_src_last0[0];
+
+#endif
+          /* fix the <bleep>ing outer-IP checksum */
+          sum0 = ip0->checksum;
+          /* old_l0 always 0, see the rewrite setup */
+          new_l0 =
+            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+
+          sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+                                 length /* changed member */);
+          ip0->checksum = ip_csum_fold (sum0);
+          ip0->length = new_l0;
+
+          /* Fix UDP length */
+          udp0 = (udp_header_t *)(ip0+1);
+          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+                                         - sizeof (*ip0));
+
+          udp0->length = new_l0;
+
+          /* Reset to look up tunnel partner in the configured FIB */
+          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+          pkts_encapsulated ++;
+
+          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+            {
+              " encap_stack "_encap_trace_t *tr =
+                vlib_add_trace (vm, node, b0, sizeof (*tr));
+              tr->tunnel_index = t0 - ngm->tunnels;
+            }
+          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                           to_next, n_left_to_next,
+                                           bi0, next0);
+        }
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  vlib_node_increment_counter (vm, node->node_index,
+                               " ENCAP_STACK "_ENCAP_ERROR_ENCAPSULATED,
+                               pkts_encapsulated);
+  return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (" encap_stack "_encap_node) = {
+  .function = " encap_stack "_encap,
+  .name = \"" encap-stack "-encap\",
+  .vector_size = sizeof (u32),
+  .format_trace = format_" encap_stack "_encap_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(" encap_stack "_encap_error_strings),
+  .error_strings = " encap_stack "_encap_error_strings,
+
+  .n_next_nodes = " ENCAP_STACK "_ENCAP_N_NEXT,
+
+  .next_nodes = {
+        [" ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP] = \"ip4-lookup\",
+        [" ENCAP_STACK "_ENCAP_NEXT_DROP] = \"error-drop\",
+  },
+};
+") diff --git a/vpp/build-root/emacs-lisp/tunnel-h-skel.el b/vpp/build-root/emacs-lisp/tunnel-h-skel.el new file mode 100644 index 00000000..067cf134 --- /dev/null +++ b/vpp/build-root/emacs-lisp/tunnel-h-skel.el @@ -0,0 +1,128 @@ +;;; tunnel-h-skel.el - tunnel encap header file skeleton
+
+(require 'skeleton)
+
+(define-skeleton skel-tunnel-h
+"Insert a tunnel encap header file"
+nil
+'(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): "))
+'(setq ENCAP_STACK (upcase encap_stack))
+'(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack))
+"
+#ifndef included_vnet_" encap_stack "_h
+#define included_vnet_" encap_stack "_h
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/" encap-stack "/" encap_stack "_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/udp.h>
+
+/* Encap stack built in encap.c */
+typedef CLIB_PACKED (struct {
+  ip4_header_t ip4;             /* 20 bytes */
+  udp_header_t udp;             /* 8 bytes */
+  " encap_stack "_header_t lisp;       /* 8 bytes */
+}) " encap_stack "_header_t;
+
+typedef CLIB_PACKED(struct {
+  /*
+   * Key fields:
+   *   all fields in NET byte order
+   */
+  union {
+    struct {
+      u32 FIXME_NET_BYTE_ORDER;
+    };
+    u64 as_u64[1];
+  };
+}) " encap_stack "_tunnel_key_t;
+
+typedef struct {
+  /* Rewrite string.
$$$$ maybe: embed vnet_rewrite header */ + u8 * rewrite; + + /* decap next index */ + u32 decap_next_index; + + /* tunnel src and dst addresses */ + ip4_address_t src; + ip4_address_t dst; + + /* FIB indices */ + u32 encap_fib_index; /* tunnel partner lookup here */ + u32 decap_fib_index; /* inner IP lookup here */ + + /* vnet intfc hw/sw_if_index */ + u32 hw_if_index; + + /* encap header fields in HOST byte order */ + u32 FIXME; +} " encap_stack "_tunnel_t; + +#define foreach_" encap_stack "_input_next \\ +_(DROP, \"error-drop\") \\ +_(IP4_INPUT, \"ip4-input\") \\ +_(IP6_INPUT, \"ip6-input\") \\ +_(ETHERNET_INPUT, \"ethernet-input\") \\ +_(" ENCAP_STACK "_ENCAP, \"" encap-stack "-encap\") + +typedef enum { +#define _(s,n) " ENCAP_STACK "_INPUT_NEXT_##s, + foreach_" encap_stack "_input_next +#undef _ + " ENCAP_STACK "_INPUT_N_NEXT, +} " encap_stack "_input_next_t; + +typedef enum { +#define " encap_stack "_error(n,s) " ENCAP_STACK "_ERROR_##n, +#include <vnet/" encap-stack "/" encap_stack "_error.def> +#undef " encap_stack "_error + " ENCAP_STACK "_N_ERROR, +} " encap_stack "_input_error_t; + +typedef struct { + /* vector of encap tunnel instances */ + " encap_stack "_tunnel_t *tunnels; + + /* lookup tunnel by key */ + uword * " encap_stack "_tunnel_by_key; + + /* Free vlib hw_if_indices */ + u32 * free_" encap_stack "_tunnel_hw_if_indices; + + /* convenience */ + vlib_main_t * vlib_main; + vnet_main_t * vnet_main; +} " encap_stack "_main_t; + +" encap_stack "_main_t " encap_stack "_main; + +vlib_node_registration_t " encap_stack "_input_node; +vlib_node_registration_t " encap_stack "_encap_node; + +u8 * format_" encap_stack "_encap_trace (u8 * s, va_list * args); +u8 * format_" encap_stack "_header_with_length (u8 * s, va_list * args); + +typedef struct { + u8 is_add; + ip4_address_t src, dst; + u32 encap_fib_index; + u32 decap_fib_index; + u32 decap_next_index; + /* encap fields in HOST byte order */ + u8 FIXME_HOST_BYTE_ORDER; +} vnet_" encap_stack "_add_del_tunnel_args_t; + +int vnet_" encap_stack "_add_del_tunnel +(vnet_" encap_stack "_add_del_tunnel_args_t *a, u32 * hw_if_indexp); + +u8 * format_" encap_stack "_header_with_length (u8 * s, va_list * args); + +#endif /* included_vnet_" encap_stack "_h */ + +") diff --git a/vpp/build-root/packages/vppapigen.mk b/vpp/build-root/packages/vppapigen.mk new file mode 100644 index 00000000..0d284631 --- /dev/null +++ b/vpp/build-root/packages/vppapigen.mk @@ -0,0 +1,5 @@ +vppapigen_configure_depend = vppinfra-install + +vppapigen_CPPFLAGS = $(call installed_includes_fn, vppinfra) + +vppapigen_LDFLAGS = $(call installed_libs_fn, vppinfra) diff --git a/vpp/build-root/packages/vppinfra.mk b/vpp/build-root/packages/vppinfra.mk new file mode 100644 index 00000000..db48ed58 --- /dev/null +++ b/vpp/build-root/packages/vppinfra.mk @@ -0,0 +1,4 @@ +# nothing + + + diff --git a/vpp/build-root/platforms.mk b/vpp/build-root/platforms.mk new file mode 100644 index 00000000..cb36d1bf --- /dev/null +++ b/vpp/build-root/platforms.mk @@ -0,0 +1,50 @@ +# Copyright (c) 2015 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright (c) 2007-2008 Eliot Dresselhaus +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +# Platform selects e.g. Linux config file +PLATFORM = native + +native_arch = native + +# Default for which packages go into read-only image +# used to have pam +default_root_packages = bash coreutils sysvinit util-linux mingetty procps + +# Linux based platforms (PLATFORM=i686 PLATFORM=ppc etc.) +i686_arch = i686 +x86_64_arch = x86_64 +ppc_arch = ppc + diff --git a/vpp/build-root/rpm/vpp.service b/vpp/build-root/rpm/vpp.service new file mode 100644 index 00000000..40bf9d48 --- /dev/null +++ b/vpp/build-root/rpm/vpp.service @@ -0,0 +1,14 @@ +[Unit] +Description=Vector Packet Processing Process +After=syslog.target network.target auditd.service + +[Service] +ExecStartPre=-/bin/rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api +ExecStartPre=-/sbin/modprobe uio_pci_generic +ExecStart=/usr/bin/vpp -c /etc/vpp/startup.conf +Type=simple +Restart=on-failure +RestartSec=5s + +[Install] +WantedBy=multi-user.target diff --git a/vpp/build-root/rpm/vpp.spec b/vpp/build-root/rpm/vpp.spec new file mode 100644 index 00000000..194d205f --- /dev/null +++ b/vpp/build-root/rpm/vpp.spec @@ -0,0 +1,264 @@ +%define _mu_build_dir %{_mu_build_root_dir} +%define _vpp_install_dir %{_install_dir} +%define _vpp_build_dir build-tool-native +%define _unitdir /lib/systemd/system +%define _topdir %(pwd) +%define _builddir %{_topdir} +%define _version %(../scripts/version rpm-version) +%define _release %(../scripts/version rpm-release) + +# Failsafe backport of Python2-macros for RHEL <= 6 +%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} +%{!?python_version: %global python_version %(%{__python} -c "import sys; sys.stdout.write(sys.version[:3])")} +%{!?__python2: %global __python2 %{__python}} +%{!?python2_sitelib: %global python2_sitelib %{python_sitelib}} +%{!?python2_sitearch: %global python2_sitearch %{python_sitearch}} +%{!?python2_version: %global python2_version %{python_version}} + +%{!?python2_minor_version: %define python2_minor_version %(%{__python} -c "import sys ; print sys.version[2:3]")} + +%{?systemd_requires} + +Name: vpp +Summary: Vector Packet Processing 
+License: MIT +Version: %{_version} +Release: %{_release} +Requires: vpp-lib = %{_version}-%{_release}, net-tools, pciutils, python +BuildRequires: systemd, chrpath + +Source: %{name}-%{_version}-%{_release}.tar.gz + +%description +This package provides VPP executables: vpp, vpp_api_test, vpp_json_test +vpp - the vector packet engine +vpp_api_test - vector packet engine API test tool +vpp_json_test - vector packet engine JSON test tool + +%package lib +Summary: VPP libraries +Group: System Environment/Libraries + +%description lib +This package contains the VPP shared libraries, including: +vppinfra - foundation library supporting vectors, hashes, bitmaps, pools, and string formatting. +dpdk - DPDK library +svm - vm library +vlib - vector processing library +vlib-api - binary API library +vnet - network stack library + +%package devel +Summary: VPP header files, static libraries +Group: Development/Libraries +Requires: vpp-lib + +%description devel +This package contains the header files for VPP. +Install this package if you want to write a +program for compilation and linking with vpp lib. +vlib +vlibmemory +vnet - devices, classify, dhcp, ethernet flow, gre, ip, etc. +vpp-api +vppinfra + +%package plugins +Summary: Vector Packet Processing--runtime plugins +Group: System Environment/Libraries +Requires: vpp = %{_version}-%{_release} +%description plugins +This package contains VPP plugins + +%package python-api +Summary: VPP api python bindings +Group: Development/Libraries +Requires: vpp = %{_version}-%{_release}, vpp-lib = %{_version}-%{_release}, python-setuptools + +%description python-api +This package contains the python bindings for the vpp api + +%prep +%setup -q -n %{name}-%{_version} + +%build +make bootstrap +make build-release + +%pre +# Add the vpp group +groupadd -f -r vpp + +%install +# +# binaries +# +mkdir -p -m755 %{buildroot}%{_bindir} +mkdir -p -m755 %{buildroot}%{_unitdir} +install -p -m 755 %{_mu_build_dir}/%{_vpp_install_dir}/*/bin/* %{buildroot}%{_bindir} +install -p -m 755 %{_mu_build_dir}/%{_vpp_build_dir}/vppapigen/vppapigen %{buildroot}%{_bindir} + +# core api +mkdir -p -m755 %{buildroot}/usr/share/vpp/api +install -p -m 644 %{_mu_build_dir}/%{_vpp_install_dir}/vpp/vpp-api/vpe.api.json %{buildroot}/usr/share/vpp/api +install -p -m 644 %{_mu_build_dir}/%{_vpp_install_dir}/vlib-api/vlibmemory/memclnt.api.json %{buildroot}/usr/share/vpp/api + +# +# configs +# +mkdir -p -m755 %{buildroot}/etc/vpp +mkdir -p -m755 %{buildroot}/etc/sysctl.d +install -p -m 644 %{_mu_build_dir}/rpm/vpp.service %{buildroot}%{_unitdir} +install -p -m 644 %{_mu_build_dir}/../vpp/conf/startup.uiopcigeneric.conf %{buildroot}/etc/vpp/startup.conf +install -p -m 644 %{_mu_build_dir}/../vpp/conf/80-vpp.conf %{buildroot}/etc/sysctl.d +# +# libraries +# +mkdir -p -m755 %{buildroot}%{_libdir} +for file in $(find %{_mu_build_dir}/%{_vpp_install_dir}/*/lib* -type f -name '*.so.*.*.*' -print ) +do + install -p -m 755 $file %{buildroot}%{_libdir} +done +for file in $(cd %{buildroot}%{_libdir} && find . 
-type f -print | sed -e 's/^\.\///')
+do
+	# make lib symlinks
+	( cd %{buildroot}%{_libdir} &&
+	  ln -fs $file $(echo $file | sed -e 's/\(\.so\.[0-9]\+\).*/\1/') )
+	( cd %{buildroot}%{_libdir} &&
+	  ln -fs $file $(echo $file | sed -e 's/\(\.so\)\.[0-9]\+.*/\1/') )
+done
+for file in $(find %{_mu_build_dir}/%{_vpp_install_dir}/vnet -type f -name '*.api.json' -print )
+do
+	install -p -m 644 $file %{buildroot}/usr/share/vpp/api
+done
+
+# Python bindings
+mkdir -p -m755 %{buildroot}%{python2_sitelib}
+install -p -m 666 %{_mu_build_dir}/%{_vpp_install_dir}/*/lib/python2.7/site-packages/vpp_papi-*.egg %{buildroot}%{python2_sitelib}
+
+#
+# devel
+#
+for dir in $(find %{_mu_build_dir}/%{_vpp_install_dir}/*/include/ -maxdepth 0 -type d -print | grep -v dpdk)
+do
+	for subdir in $(cd ${dir} && find . -type d -print)
+	do
+		mkdir -p -m755 %{buildroot}/usr/include/${subdir}
+	done
+	for file in $(cd ${dir} && find . -type f -print)
+	do
+		install -p -m 644 $dir/$file %{buildroot}%{_includedir}/$file
+	done
+done
+
+mkdir -p -m755 %{buildroot}%{python2_sitelib}/jvppgen
+install -p -m755 %{_mu_build_dir}/../vpp-api/java/jvpp/gen/jvpp_gen.py %{buildroot}/usr/bin
+for i in $(ls %{_mu_build_dir}/../vpp-api/java/jvpp/gen/jvppgen/*.py); do
+   install -p -m666 ${i} %{buildroot}%{python2_sitelib}/jvppgen
+done;
+
+# sample plugin
+mkdir -p -m755 %{buildroot}/usr/share/doc/vpp/examples/sample-plugin/sample
+for file in $(cd %{_mu_build_dir}/%{_vpp_install_dir}/../../plugins/sample-plugin && git ls-files .)
+do
+	install -p -m 644 %{_mu_build_dir}/%{_vpp_install_dir}/../../plugins/sample-plugin/$file \
+	   %{buildroot}/usr/share/doc/vpp/examples/sample-plugin/$file
+done
+
+
+#
+# vpp-plugins
+#
+mkdir -p -m755 %{buildroot}/usr/lib/vpp_plugins
+mkdir -p -m755 %{buildroot}/usr/lib/vpp_api_test_plugins
+for file in $(cd %{_mu_build_dir}/%{_vpp_install_dir}/plugins/lib64/vpp_plugins && find -type f -print)
+do
+	install -p -m 644 %{_mu_build_dir}/%{_vpp_install_dir}/plugins/lib64/vpp_plugins/$file \
+		%{buildroot}/usr/lib/vpp_plugins/$file
+done
+
+for file in $(cd %{_mu_build_dir}/%{_vpp_install_dir}/plugins/lib64/vpp_api_test_plugins && find -type f -print)
+do
+	install -p -m 644 %{_mu_build_dir}/%{_vpp_install_dir}/plugins/lib64/vpp_api_test_plugins/$file \
+		%{buildroot}/usr/lib/vpp_api_test_plugins/$file
+done
+
+for file in $(find %{_mu_build_dir}/%{_vpp_install_dir}/plugins -type f -name '*.api.json' -print )
+do
+	install -p -m 644 $file %{buildroot}/usr/share/vpp/api
+done
+
+#
+# remove RPATH from ELF binaries
+#
+%{_mu_build_dir}/scripts/remove-rpath %{buildroot}
+
+%post
+sysctl --system
+%systemd_post vpp.service
+
+%post python-api
+easy_install -z %{python2_sitelib}/vpp_papi-*.egg
+
+%preun
+%systemd_preun vpp.service
+
+%preun python-api
+easy_install -mxNq vpp_papi
+
+%postun
+%systemd_postun
+
+# Unbind user-mode PCI drivers
+removed=
+pci_dirs=`find /sys/bus/pci/drivers -type d -name igb_uio -o -name uio_pci_generic -o -name vfio-pci`
+for d in $pci_dirs; do
+	for f in ${d}/*; do
+		[ -e "${f}/config" ] || continue
+		echo 1 > ${f}/remove
+		basename `dirname ${f}` | xargs echo -n "Removing driver"; echo " for PCI ID" `basename ${f}`
+		removed=y
+	done
+done
+if [ -n "${removed}" ]; then
+	echo "There are changes in PCI drivers, rescanning"
+	echo 1 > /sys/bus/pci/rescan
+else
+	echo "There were no PCI devices bound"
+fi
+
+%files
+%defattr(-,bin,bin)
+%{_unitdir}/vpp.service
+/usr/bin/vpp*
+/usr/bin/svm*
+/usr/bin/elftool
+%config /etc/sysctl.d/80-vpp.conf
+%config /etc/vpp/startup.conf
+/usr/share/vpp/api/*
+
+%files lib
+%defattr(-,bin,bin)
+%exclude %{_libdir}/vpp_plugins
+%exclude %{_libdir}/vpp_api_test_plugins
+%{_libdir}/*
+/usr/share/vpp/api/*
+
+%files python-api
+%defattr(644,root,root)
+%{python2_sitelib}/vpp_papi-*.egg
+
+%files devel
+%defattr(-,bin,bin)
+/usr/bin/vppapigen
+/usr/bin/jvpp_gen.py
+%{_includedir}/*
+%{python2_sitelib}/jvppgen/*
+/usr/share/doc/vpp/examples/sample-plugin
+
+%files plugins
+%defattr(-,bin,bin)
+/usr/lib/vpp_plugins/*
+/usr/lib/vpp_api_test_plugins/*
+/usr/share/vpp/api/*
diff --git a/vpp/build-root/scripts/checkstyle.sh b/vpp/build-root/scripts/checkstyle.sh
new file mode 100755
index 00000000..60129676
--- /dev/null
+++ b/vpp/build-root/scripts/checkstyle.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+VPP_DIR=`dirname $0`/../../
+EXIT_CODE=0
+FIX="0"
+FULL="0"
+CHECKSTYLED_FILES=""
+UNCHECKSTYLED_FILES=""
+
+# If the user provides --fix, then actually fix things
+# Note: this is meant for use outside of the CI Jobs, by users cleaning things up
+
+while true; do
+    case ${1} in
+        --fix)
+            FIX="1"
+            ;;
+        --full)
+            FULL="1"
+            ;;
+    esac
+    shift || break
+done
+
+if [ "${FULL}" == "1" ]; then
+    FILELIST=$(git ls-tree -r HEAD --name-only)
+else
+    FILELIST=$((git diff HEAD~1.. --name-only; git ls-files -m ) | sort -u)
+fi
+
+# Check to make sure we have indent. Exit if we don't with an error message, but
+# don't *fail*.
+command -v indent > /dev/null
+if [ $? != 0 ]; then
+    echo "Could not find required command \"indent\". Checkstyle aborted"
+    exit ${EXIT_CODE}
+fi
+indent --version
+
+cd ${VPP_DIR}
+git status
+for i in ${FILELIST}; do
+    if [ -f ${i} ] && [ ${i} != "build-root/scripts/checkstyle.sh" ] && [ ${i} != "build-root/emacs-lisp/fix-coding-style.el" ]; then
+        grep -q "fd.io coding-style-patch-verification: ON" ${i}
+        if [ $? == 0 ]; then
+            CHECKSTYLED_FILES="${CHECKSTYLED_FILES} ${i}"
+            if [ ${FIX} == 0 ]; then
+                indent ${i} -o ${i}.out1 > /dev/null 2>&1
+                indent ${i}.out1 -o ${i}.out2 > /dev/null 2>&1
+                # Remove trailing whitespace
+                sed -i -e 's/[[:space:]]*$//' ${i}.out2
+                diff -q ${i} ${i}.out2
+            else
+                indent ${i}
+                indent ${i}
+                # Remove trailing whitespace
+                sed -i -e 's/[[:space:]]*$//' ${i}
+            fi
+            if [ $? != 0 ]; then
+                EXIT_CODE=1
+                echo
+                echo "Checkstyle failed for ${i}."
+                echo "Run indent (twice!) 
as shown to fix the problem:" + echo "indent ${VPP_DIR}${i}" + echo "indent ${VPP_DIR}${i}" + fi + if [ -f ${i}.out1 ]; then + rm ${i}.out1 + fi + if [ -f ${i}.out2 ]; then + rm ${i}.out2 + fi + else + UNCHECKSTYLED_FILES="${UNCHECKSTYLED_FILES} ${i}" + fi + else + UNCHECKSTYLED_FILES="${UNCHECKSTYLED_FILES} ${i}" + fi +done + +if [ ${EXIT_CODE} == 0 ]; then + echo "*******************************************************************" + echo "* VPP CHECKSTYLE SUCCESSFULLY COMPLETED" + echo "*******************************************************************" +else + echo "*******************************************************************" + echo "* VPP CHECKSTYLE FAILED" + echo "* CONSULT FAILURE LOG ABOVE" + echo "* NOTE: Running 'build-root/scripts/checkstyle.sh --fix' *MAY* fix the issue" + echo "*******************************************************************" +fi +exit ${EXIT_CODE} diff --git a/vpp/build-root/scripts/csit-test-branch b/vpp/build-root/scripts/csit-test-branch new file mode 100755 index 00000000..ede63372 --- /dev/null +++ b/vpp/build-root/scripts/csit-test-branch @@ -0,0 +1,2 @@ +#!/bin/sh +echo oper-rls1701-170115 diff --git a/vpp/build-root/scripts/find-api-core-contents b/vpp/build-root/scripts/find-api-core-contents new file mode 100755 index 00000000..f1f96f1f --- /dev/null +++ b/vpp/build-root/scripts/find-api-core-contents @@ -0,0 +1,9 @@ +#!/bin/bash + +for i in $(find ${1}/vpp -name *.api.json -type f -print); do + echo ../${i} /usr/share/vpp/api/ >> ${2} +done +for i in $(find ${1}/vlib-api -name *.api.json -type f -print); do + echo ../${i} /usr/share/vpp/api/ >> ${2} +done + diff --git a/vpp/build-root/scripts/find-api-lib-contents b/vpp/build-root/scripts/find-api-lib-contents new file mode 100755 index 00000000..562db7b8 --- /dev/null +++ b/vpp/build-root/scripts/find-api-lib-contents @@ -0,0 +1,6 @@ +#!/bin/bash + +for i in $(find ${1}/vnet -name *.api.json -type f -print); do + echo ../${i} /usr/share/vpp/api/ >> ${2} +done + diff --git a/vpp/build-root/scripts/find-dev-contents b/vpp/build-root/scripts/find-dev-contents new file mode 100755 index 00000000..2dc6cc4d --- /dev/null +++ b/vpp/build-root/scripts/find-dev-contents @@ -0,0 +1,31 @@ +#!/bin/bash + +# includes +paths=`find $1/*/include -type f -print | grep -v '/dpdk/include/'` +rm -f $2 + +for path in $paths +do + relpath=`echo $path | sed -e 's:.*/include/::'` + dir=`dirname $relpath` + if [ $dir = "." ] ; then + echo ../$path /usr/include >> $2 + else + echo ../$path /usr/include/$dir >> $2 + fi +done + +# sample plugin +paths=`(cd ..; find plugins/sample-plugin -type f -print | grep -v autom4te)` + +for path in $paths +do + relpath=`echo $path | sed -e 's:.*plugins/::'` + dir=`dirname $relpath` + if [ $dir = "sample-plugin" ] ; then + echo ../../$path /usr/share/doc/vpp/examples/plugins/sample-plugin >> $2 + else + echo ../../$path \ + /usr/share/doc/vpp/examples/plugins/$dir >> $2 + fi +done diff --git a/vpp/build-root/scripts/find-dpdk-contents b/vpp/build-root/scripts/find-dpdk-contents new file mode 100755 index 00000000..c7065139 --- /dev/null +++ b/vpp/build-root/scripts/find-dpdk-contents @@ -0,0 +1,29 @@ +#!/bin/bash + +# includes +rm -rf dpdk-includes +mkdir dpdk-includes +(cd $1/dpdk/include; tar cfh - . | (cd ../../../dpdk-includes; tar xf -)) + +# If CDPATH is set, the "Change Directory" builtin (cd) will output the +# destination directory when a relative path is passed as an argument. 
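+# Illustrative example only: with CDPATH=. exported,
+#   $ cd dpdk-includes
+#   /path/to/build-root/dpdk-includes
+# i.e. the absolute path of the new directory is echoed to stdout.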
+# In this case, this has the negative side effect of polluting the "paths" +# variable with the destination directory, breaking the package generation. +# +# Patient: Doctor! Doctor! It hurts when I do this... +# Doctor: Don't do that! +# +unset CDPATH +paths=`cd dpdk-includes; find . -type f -print` +rm -f $2 + +for path in $paths +do + dir=`dirname $path` + if [ $dir = "." ] ; then + echo ../dpdk-includes/$path /usr/include/vpp-dpdk >> $2 + else + echo ../dpdk-includes/$path /usr/include/vpp-dpdk/$dir >> $2 + fi +done + diff --git a/vpp/build-root/scripts/find-plugins-contents b/vpp/build-root/scripts/find-plugins-contents new file mode 100755 index 00000000..a5a52acf --- /dev/null +++ b/vpp/build-root/scripts/find-plugins-contents @@ -0,0 +1,15 @@ +#!/bin/bash + +rm -f $2 + +for i in ${1}/plugins/lib64/vpp_plugins/*.so; do + echo ../${i} /usr/lib/vpp_plugins >> ${2} +done + +for i in ${1}/plugins/lib64/vpp_api_test_plugins/*.so; do + echo ../${i} /usr/lib/vpp_api_test_plugins >> ${2} +done + +for i in $(find ${1}/plugins -name *.api.json -type f -print); do + echo ../${i} /usr/share/vpp/api/ >> ${2} +done diff --git a/vpp/build-root/scripts/find-python-api-contents b/vpp/build-root/scripts/find-python-api-contents new file mode 100755 index 00000000..9b390e75 --- /dev/null +++ b/vpp/build-root/scripts/find-python-api-contents @@ -0,0 +1,8 @@ +#!/bin/bash + +rm -f $2 + +for i in $(find ${1}/vpp-api/lib/python2.7/site-packages/ -type f -print); do + echo ../${i} /usr/lib/python2.7/site-packages/vpp_papi >> ${2} +done + diff --git a/vpp/build-root/scripts/generate-deb-changelog b/vpp/build-root/scripts/generate-deb-changelog new file mode 100755 index 00000000..7bdc6337 --- /dev/null +++ b/vpp/build-root/scripts/generate-deb-changelog @@ -0,0 +1,37 @@ +#!/bin/bash + +CHANGELOG=deb/debian/changelog +DIST=unstable +FIRST=1 + +print_changelog_item() { + DATE=$(git log -1 --format=%cD ${TAG}) + DEBFULLNAME=$(git log -1 --format=%an ${TAG}) + DEBEMAIL=$(git log -1 --format=%ae ${TAG}) + + if [ ${FIRST} = 0 ]; then echo >> ${CHANGELOG}; fi + FIRST=0 + + echo "vpp (${VER}) ${DIST}; urgency=low" >> ${CHANGELOG} + echo >> ${CHANGELOG} + echo "${DESC}" >> ${CHANGELOG} + echo >> ${CHANGELOG} + echo " -- ${DEBFULLNAME} <${DEBEMAIL}> ${DATE}" >> ${CHANGELOG} +} + +VER=$(scripts/version) +TAG=HEAD +ADDS=$(echo ${VER} | sed -e 's/~.*//'| cut -s -d- -f2) + +rm -f ${CHANGELOG} + +if [ -n "${ADDS}" ]; then + DESC=" * includes ${ADDS} commits after $(echo ${VER}| cut -d- -f1) release" + print_changelog_item +fi + +for TAG in $(git tag -l 'v[0-9][0-9].[0-9][0-9]' | sort -r ); do + VER=$(echo ${TAG}| sed -e 's/^v//') + DESC=$(git tag -l -n20 ${TAG} | tail -n+2 | sed -e 's/^ */ /') + print_changelog_item +done diff --git a/vpp/build-root/scripts/lsnet b/vpp/build-root/scripts/lsnet new file mode 100755 index 00000000..ed590e53 --- /dev/null +++ b/vpp/build-root/scripts/lsnet @@ -0,0 +1,20 @@ +#!/bin/bash + +echo "PCI Address MAC address Device Name Driver State Speed Port Type" +echo "============ ================= ============== ========== ======== ========== ====================" + +for f in /sys/class/net/*; do + dev=$(basename ${f}) + if [ -e $f/device ] ; then + dev=$(basename ${f}) + pci_addr=$(basename $(readlink $f/device)) + mac=$(cat $f/address) + driver=$(basename $(readlink $f/device/driver)) + oper=$(cat $f/operstate) + speed=$(sudo ethtool $dev | grep Speed | cut -d" " -f2) + port=$(ethtool $dev 2> /dev/null | sed -ne 's/.*Port: \(.*\)/\1/p') + printf "%-12s %-14s %-14s %-10s %-8s %-10s %-20s\n" 
$pci_addr $mac $dev $driver $oper $speed "$port"
+    # ethtool $dev | grep Port:
+  fi
+done
+
diff --git a/vpp/build-root/scripts/make-plugin-toolkit b/vpp/build-root/scripts/make-plugin-toolkit
new file mode 100755
index 00000000..e1d6fcfb
--- /dev/null
+++ b/vpp/build-root/scripts/make-plugin-toolkit
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+set -eux
+
+build_tarball() {
+    for dir in vppinfra dpdk svm vlib-api vlib vnet vpp vpp-api-test
+    do
+        tar -C install-$1/$dir/include -cf - . | tar -C $tmp_dir/include -xf -
+    done
+    tar -C ../sample-plugin -cf - . \
+        | tar -C $tmp_dir/src/sample-plugin -xf -
+    cp tools/bin/vppapigen $tmp_dir/tools/bin
+    echo Created by `id -u -n` on `hostname` at `date` > \
+        $tmp_dir/toolkit-version-stamp
+    cp scripts/vpp-plugin-toolkit-readme $tmp_dir/README
+    tar -C $tmp_dir -zcf $PWD/vpp-plugin-toolkit-$1.tar.gz .
+}
+
+if [ `basename $PWD` != "build-root" ] ; then
+    echo Please run this script from build-root
+    exit 1
+fi
+
+echo Pull additional toolkit repos
+make PLATFORM=vpp sample-plugin-find-source
+
+make PLATFORM=vpp TAG=vpp wipe-all
+echo Build vpp forwarder production package
+make PLATFORM=vpp TAG=vpp strip_sumbols=yes install-packages
+
+tmp_dir="`mktemp -d /tmp/plugin-XXXXXX`"
+trap "rm -rf $tmp_dir" err
+
+echo Create vpp forwarder production plugin toolkit tarball
+mkdir -p $tmp_dir/tools/bin $tmp_dir/include $tmp_dir/lib64 \
+    $tmp_dir/src/sample-plugin
+build_tarball vpp-native
+rm -rf $tmp_dir
+
+exit 0
diff --git a/vpp/build-root/scripts/pci-nic-bind b/vpp/build-root/scripts/pci-nic-bind
new file mode 100755
index 00000000..f3a0c264
--- /dev/null
+++ b/vpp/build-root/scripts/pci-nic-bind
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+uio_drivers="igb_uio uio_pci_generic vfio-pci"
+tmpfile=$(mktemp)
+
+
+function bind_drv() {
+  addr=$1
+  modalias=$(cat $selection/modalias)
+  native_drv=$(modprobe -R $modalias)
+  array=()
+
+  for drv in $native_drv $uio_drivers; do
+    if [ -e /sys/bus/pci/drivers/$drv ]; then
+      echo driver $drv
+      drv_desc=$(modinfo $drv | grep description: | sed -e 's/.*:[[:space:]]\+//' )
+      array+=("${drv}")
+      array+=("${drv_desc}")
+    fi
+  done
+  dialog --backtitle "PCI NIC Bind Utility" \
+         --clear \
+         --menu "Select kernel driver" 18 100 12 \
+         "${array[@]}" 2> $tmpfile
+  retval=$?
+  selection=$(cat $tmpfile)
+  rm $tmpfile
+  if [ $retval -ne 0 ]; then
+    return
+  fi
+  vd=$(cat /sys/bus/pci/devices/${addr}/vendor /sys/bus/pci/devices/${addr}/device)
+  echo $addr | tee /sys/bus/pci/devices/${addr}/driver/unbind > /dev/null 2> /dev/null
+  echo $vd | tee /sys/bus/pci/drivers/${selection}/new_id > /dev/null 2> /dev/null
+  echo $addr | tee /sys/bus/pci/drivers/${selection}/bind > /dev/null 2> /dev/null
+}
+
+function find_pci_slot() {
+  addr=$1
+  [ ! "$(ls -A /sys/bus/pci/slots )" ] && echo "No PCI slot data" && return
+  for slot in $(find /sys/bus/pci/slots/* -maxdepth 1 -type d -exec basename {} \;); do
+    slot_addr=$(cat /sys/bus/pci/slots/$slot/address)
+    if [[ "${addr}" == *"${slot_addr}"* ]]; then
+      echo "PCI slot: ${slot}"
+      return
+    fi
+  done
+  echo "Unknown PCI slot"
+}
+
+! type -ap dialog > /dev/null && echo "Please install dialog (apt-get install dialog)" && exit
+if [ $USER != "root" ] ; then
+  echo "Restarting script with sudo..."
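+  # Not running as root: re-exec this script under sudo with the original arguments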
+ sudo $0 ${*} + exit +fi + +cd /sys/bus/pci/devices + +while true; do + array=() + for addr in *; do + class=$(cat ${addr}/class) + if [ "$class" = "0x020000" ]; then + name=$(lspci -s $addr | sed -e 's/.*: //') + if [ -e "/sys/bus/pci/devices/$addr/driver" ]; then + drv=$(basename $(readlink -f /sys/bus/pci/devices/$addr/driver)) + else + drv=" " + fi + slot=$(find_pci_slot ${addr}) + array+=("${addr}") + array+=("${drv}|${name}") + array+=("${slot}") + fi + done + + dialog --backtitle "PCI NIC Bind Utility" \ + --item-help \ + --clear \ + --column-separator '|' \ + --menu "Select NIC" 18 100 12 \ + "${array[@]}" 2> $tmpfile + + retval=$? + selection=$(cat $tmpfile) + rm $tmpfile + if [ $retval -ne 0 ]; then + exit + fi + bind_drv $selection +done + + + diff --git a/vpp/build-root/scripts/pci-nic-bind-to-kernel b/vpp/build-root/scripts/pci-nic-bind-to-kernel new file mode 100755 index 00000000..3d8559e3 --- /dev/null +++ b/vpp/build-root/scripts/pci-nic-bind-to-kernel @@ -0,0 +1,19 @@ +#!/bin/bash + +# Bind all unused PCI devices bound to uio drivers +# back to default kernel driver + +if [ $USER != "root" ] ; then + echo "Restarting script with sudo..." + sudo $0 ${*} + exit +fi + +for f in /sys/bus/pci/drivers/{igb_uio,uio_pci_generic,vfio-pci}/*; do + [ -e ${f}/config ] || continue + fuser -s ${f}/config && continue + echo 1 > ${f}/remove + removed=y +done + +[ -n ${removed} ] && echo 1 > /sys/bus/pci/rescan diff --git a/vpp/build-root/scripts/remove-rpath b/vpp/build-root/scripts/remove-rpath new file mode 100755 index 00000000..bda3d60d --- /dev/null +++ b/vpp/build-root/scripts/remove-rpath @@ -0,0 +1,24 @@ +#!/bin/bash + +if [ -z $1 ]; then + echo "Please specify path" + exit 1 +fi + +which chrpath &> /dev/null + +if [ $? -ne 0 ] ; then + echo "Please install chrpath tool" + exit 1 +fi + +libs=$(find $1 -type f -name \*.so) +execs=$(find $1 -type f -path \*/bin/\* ) + +for i in $libs $execs; do + chrpath $i 2> /dev/null | grep -q build-root + if [ $? -eq 0 ] ; then + chrpath $i + fi +done + diff --git a/vpp/build-root/scripts/verdist b/vpp/build-root/scripts/verdist new file mode 100755 index 00000000..9d1f1b5a --- /dev/null +++ b/vpp/build-root/scripts/verdist @@ -0,0 +1,31 @@ +#!/bin/bash + +# +# Add version to dist tarball. +# +BR=$1 +prefix=$2 +verstring=$3 +BASE=`pwd` + +git rev-parse 2> /dev/null +if [ $? == 0 ]; then + git archive --prefix=${prefix}/ HEAD | gzip -9 > ${verstring}.tar.gz +else + cd .. + tar -c ${prefix} | gzip -9 > ${verstring}.tar.gz + cp ${verstring}.tar.gz $BASE + cd $BASE +fi + +mkdir ${BASE}/tmp +cd ${BASE}/tmp +tar -xzf ${BASE}/${verstring}.tar.gz +rm ${BASE}/${verstring}.tar.gz + +cp ${BR}/scripts/.version ${BASE}/tmp/${prefix}/build-root/scripts +tar -c ${prefix} | gzip -9 > ${verstring}.tar.gz +mv ${verstring}.tar.gz ${BASE} + +cd ${BASE} +rm -rf tmp diff --git a/vpp/build-root/scripts/version b/vpp/build-root/scripts/version new file mode 100755 index 00000000..d92eb8b7 --- /dev/null +++ b/vpp/build-root/scripts/version @@ -0,0 +1,54 @@ +#!/bin/bash + +path=$( cd "$(dirname "${BASH_SOURCE}")" ; pwd -P ) + +cd "$path" + +git rev-parse 2> /dev/null +if [ $? == 0 ]; then + vstring=$(git describe) +elif [ -f .version ]; then + vstring=$(cat .version) +else + if [ -f ../rpm/*.gz ]; then + vstring=$(ls ../rpm/*.gz) + else + exit 1 + fi +fi + +TAG=$(echo ${vstring} | cut -d- -f1 | sed -e 's/^v//') +ADD=$(echo ${vstring} | cut -s -d- -f2) + +git rev-parse 2> /dev/null +if [ $? 
== 0 ]; then + CMT=$(git describe --dirty --match 'v*'| cut -s -d- -f3,4) +else + CMT=$(echo ${vstring} | cut -s -d- -f3,4) +fi +CMTR=$(echo $CMT | sed 's/-/_/') + +if [ -n "${BUILD_NUMBER}" ]; then + BLD="~b${BUILD_NUMBER}" +fi + +if [ "$1" = "rpm-version" ]; then + echo ${TAG} + exit +fi + +if [ "$1" = "rpm-release" ]; then + [ -z "${ADD}" ] && echo release && exit + echo ${ADD}${CMTR:+~${CMTR}}${BLD} + exit +fi + + if [ -n "${ADD}" ]; then + if [ "$1" = "rpm-string" ]; then + echo ${TAG}-${ADD}${CMTR:+~${CMTR}}${BLD} + else + echo ${TAG}-${ADD}${CMT:+~${CMT}}${BLD} + fi + else + echo ${TAG}-release +fi diff --git a/vpp/build-root/vagrant/.gitignore b/vpp/build-root/vagrant/.gitignore new file mode 100644 index 00000000..a977916f --- /dev/null +++ b/vpp/build-root/vagrant/.gitignore @@ -0,0 +1 @@ +.vagrant/ diff --git a/vpp/build-root/vagrant/README b/vpp/build-root/vagrant/README new file mode 100644 index 00000000..238c90ce --- /dev/null +++ b/vpp/build-root/vagrant/README @@ -0,0 +1,28 @@ +INTRO: + +This is a vagrant environment for VPP. + +VPP currently works under Linux and has support for: + +- Ubuntu 14.04, Ubuntu 16.04 and Centos7.2 + +The VM builds VPP from source which can be located at /vpp + +VM PARTICULARS: +This vagrant environment creates a VM based on environment variables found in ./env.sh +To use, edit env.sh then + source ./env.sh + vagrant up + +By default, the VM created is/has: +- Ubuntu 14.04 +- 2 vCPUs +- 4G of RAM +- 2 NICs (1 x NAT - host access, 1 x VPP DPDK enabled) + +PROVIDERS: + +Supported vagrant providers are: + +- Virtualbox, VMware Fusion/Workstation, Libvirt + diff --git a/vpp/build-root/vagrant/Vagrantfile b/vpp/build-root/vagrant/Vagrantfile new file mode 100644 index 00000000..b463d646 --- /dev/null +++ b/vpp/build-root/vagrant/Vagrantfile @@ -0,0 +1,113 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure(2) do |config| + + # Pick the right distro and bootstrap, default is ubuntu1604 + distro = ( ENV['VPP_VAGRANT_DISTRO'] || "ubuntu1604") + if distro == 'centos7' + config.vm.box = "puppetlabs/centos-7.2-64-nocm" + config.ssh.insert_key = false + elsif distro == 'ubuntu1404' + config.vm.box = "puppetlabs/ubuntu-14.04-64-nocm" + else + config.vm.box = "puppetlabs/ubuntu-16.04-64-nocm" + end + config.vm.box_check_update = false + + config.vm.provision :shell, :path => File.join(File.dirname(__FILE__),"update.sh") + config.vm.provision :shell, :path => File.join(File.dirname(__FILE__),"build.sh"), :args => "/vpp vagrant" + + post_build = ( ENV['VPP_VAGRANT_POST_BUILD'] ) + if post_build == "test" + config.vm.provision "shell", inline: "echo Testing VPP; cd /vpp; make test" + elsif post_build == "install" + config.vm.provision :shell, :path => File.join(File.dirname(__FILE__),"install.sh"), :args => "/vpp" + config.vm.provision :shell, :path => File.join(File.dirname(__FILE__),"clearinterfaces.sh") + config.vm.provision :shell, :path => File.join(File.dirname(__FILE__),"run.sh") + end + + # Add .gnupg dir in so folks can sign patches + # Note, as gnupg puts socket files in that dir, we have + # to be cautious and make sure we are dealing with a plain file + homedir = File.expand_path("~/") + Dir["#{homedir}/.gnupg/**/*"].each do |fname| + if File.file?(fname) + destname = fname.sub(Regexp.escape("#{homedir}/"),'') + config.vm.provision "file", source: fname, destination: destname + end + end + + # Copy in the .gitconfig if it exists + if File.file?(File.expand_path("~/.gitconfig")) + config.vm.provision "file", source: "~/.gitconfig", 
destination: ".gitconfig"
+  end
+
+  # vagrant-cachier caches apt/yum etc to speed subsequent
+  # vagrant up
+  # to enable, run
+  # vagrant plugin install vagrant-cachier
+  #
+  if Vagrant.has_plugin?("vagrant-cachier")
+    config.cache.scope = :box
+  end
+
+  # Define some physical ports for your VMs to be used by DPDK
+  nics = (ENV['VPP_VAGRANT_NICS'] || "2").to_i(10)
+  for i in 1..nics
+    config.vm.network "private_network", type: "dhcp"
+  end
+
+  # use http proxy if available
+  if ENV['http_proxy'] && Vagrant.has_plugin?("vagrant-proxyconf")
+    config.proxy.http = ENV['http_proxy']
+    config.proxy.https = ENV['https_proxy']
+    config.proxy.no_proxy = "localhost,127.0.0.1"
+  end
+
+  vmcpu=(ENV['VPP_VAGRANT_VMCPU'] || 2)
+  vmram=(ENV['VPP_VAGRANT_VMRAM'] || 4096)
+
+  config.ssh.forward_agent = true
+  config.ssh.forward_x11 = true
+
+  config.vm.provider "virtualbox" do |vb|
+    vb.customize ["modifyvm", :id, "--ioapic", "on"]
+    vb.memory = "#{vmram}"
+    vb.cpus = "#{vmcpu}"
+
+    # rsync the vpp directory if provision hasn't happened yet
+    unless File.exist? (".vagrant/machines/default/virtualbox/action_provision")
+      config.vm.synced_folder "../../", "/vpp", type: "rsync",
+       rsync__auto: false,
+       rsync__exclude: [
+        "build-root/build*/",
+        "build-root/install*/",
+        "build-root/images*/",
+        "build-root/*.deb",
+        "build-root/*.rpm",
+        "build-root/*.changes",
+        "build-root/python",
+        "build-root/deb/debian/*.dkms",
+        "build-root/deb/debian/*.install",
+        "build-root/deb/debian/changes",
+        "build-root/tools"]
+    end
+
+    # Support for the SSE4.x instructions is required in some versions of VB.
+    vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+    vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
+  end
+  config.vm.provider "vmware_fusion" do |fusion,override|
+    fusion.vmx["memsize"] = "#{vmram}"
+    fusion.vmx["numvcpus"] = "#{vmcpu}"
+  end
+  config.vm.provider "libvirt" do |lv|
+    lv.memory = "#{vmram}"
+    lv.cpus = "#{vmcpu}"
+  end
+  config.vm.provider "vmware_workstation" do |vws,override|
+    vws.vmx["memsize"] = "#{vmram}"
+    vws.vmx["numvcpus"] = "#{vmcpu}"
+  end
+end
diff --git a/vpp/build-root/vagrant/WELCOME b/vpp/build-root/vagrant/WELCOME
new file mode 100644
index 00000000..eb6aa2fd
--- /dev/null
+++ b/vpp/build-root/vagrant/WELCOME
@@ -0,0 +1,61 @@
+VPP has now been built, installed, and started.
+
+To give it a spin, we can create a tap interface and try a simple ping
+(with trace). 
+
+Make sure you have run:
+
+$ vagrant ssh
+
+To get to the vagrant VM:
+
+vagrant@localhost:~$
+
+Confirm that vpp is running with
+
+vagrant@localhost:~$ sudo status vpp
+vpp start/running, process 25202
+
+To create the tap:
+
+vagrant@localhost:~$ sudo vppctl tap connect foobar
+Created tap-0 for Linux tap 'foobar'
+vagrant@localhost:~$ sudo vppctl show int
+
+To assign it an ip address (and 'up' the interface):
+
+vagrant@localhost:~$ sudo vppctl set int ip address tap-0 192.168.1.1/24
+vagrant@localhost:~$ sudo vppctl set int state tap-0 up
+
+To turn on packet tracing for the tap interface:
+vagrant@localhost:~$ sudo vppctl trace add tapcli-rx 10
+
+Now, to set up and try the other end:
+vagrant@localhost:~$ sudo ip addr add 192.168.1.2/24 dev foobar
+vagrant@localhost:~$ ping -c 3 192.168.1.1
+
+To look at the trace:
+vagrant@localhost:~$ sudo vppctl show trace
+
+And to stop tracing:
+
+vagrant@localhost:~$ sudo vppctl clear trace
+
+Other fun things to look at:
+
+The vlib packet processing graph:
+vagrant@localhost:~$ sudo vppctl show vlib graph
+
+which will produce output like:
+
+           Name                      Next                    Previous
+ip4-icmp-input                error-punt [0]                 ip4-local
+                              ip4-icmp-echo-request [1]
+                              vpe-icmp4-oam [2]
+
+To read this, the first column (Name) is the name of the node.
+The second column (Next) is the name of the children of that node.
+The third column (Previous) is the name of the parents of this node.
+
+To see this README again:
+cat /vagrant/README
diff --git a/vpp/build-root/vagrant/build.sh b/vpp/build-root/vagrant/build.sh
new file mode 100755
index 00000000..76838e28
--- /dev/null
+++ b/vpp/build-root/vagrant/build.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# Get command line arguments if present
+VPP_DIR=$1
+if [ "x$1" != "x" ]; then
+    VPP_DIR=$1
+else
+    VPP_DIR=`dirname $0`/../../
+fi
+
+if [ "x$2" != "x" ]; then
+    SUDOCMD="sudo -H -u $2"
+fi
+echo 0:$0
+echo 1:$1
+echo 2:$2
+echo VPP_DIR: $VPP_DIR
+echo SUDOCMD: $SUDOCMD
+
+# Figure out what system we are running on
+if [ -f /etc/lsb-release ];then
+    . /etc/lsb-release
+elif [ -f /etc/redhat-release ];then
+    sudo yum install -y redhat-lsb
+    DISTRIB_ID=`lsb_release -si`
+    DISTRIB_RELEASE=`lsb_release -sr`
+    DISTRIB_CODENAME=`lsb_release -sc`
+    DISTRIB_DESCRIPTION=`lsb_release -sd`
+fi
+KERNEL_OS=`uname -o`
+KERNEL_MACHINE=`uname -m`
+KERNEL_RELEASE=`uname -r`
+KERNEL_VERSION=`uname -v`
+
+echo KERNEL_OS: $KERNEL_OS
+echo KERNEL_MACHINE: $KERNEL_MACHINE
+echo KERNEL_RELEASE: $KERNEL_RELEASE
+echo KERNEL_VERSION: $KERNEL_VERSION
+echo DISTRIB_ID: $DISTRIB_ID
+echo DISTRIB_RELEASE: $DISTRIB_RELEASE
+echo DISTRIB_CODENAME: $DISTRIB_CODENAME
+echo DISTRIB_DESCRIPTION: $DISTRIB_DESCRIPTION
+
+# Install dependencies
+cd $VPP_DIR
+make UNATTENDED=yes install-dep
+
+# Really really clean things up so we can be sure
+# that the build works even when switching distros
+$SUDOCMD make wipe
+(cd build-root/;$SUDOCMD make distclean)
+rm -f build-root/.bootstrap.ok
+
+if [ $DISTRIB_ID == "CentOS" ]; then
+    echo rpm -V apr-devel
+    rpm -V apr-devel
+    if [ $? != 0 ]; then sudo yum reinstall -y apr-devel;fi
+    echo rpm -V ganglia-devel
+    rpm -V ganglia-devel
+    if [ $? != 0 ]; then sudo yum reinstall -y ganglia-devel;fi
+    echo rpm -V libconfuse-devel
+    rpm -V libconfuse-devel
+    if [ $? 
!= 0 ]; then sudo yum reinstall -y libconfuse-devel;fi
+fi
+
+# Build and install packaging
+$SUDOCMD make bootstrap
+if [ $DISTRIB_ID == "Ubuntu" ]; then
+    $SUDOCMD make pkg-deb
+elif [ $DISTRIB_ID == "CentOS" ]; then
+    (cd $VPP_DIR/vnet ;$SUDOCMD aclocal;$SUDOCMD automake -a)
+    $SUDOCMD make pkg-rpm
+fi
+
diff --git a/vpp/build-root/vagrant/clearinterfaces.sh b/vpp/build-root/vagrant/clearinterfaces.sh
new file mode 100755
index 00000000..78f6705c
--- /dev/null
+++ b/vpp/build-root/vagrant/clearinterfaces.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Capture all the interface IPs, in case we need them later
+ip -o addr show > ~vagrant/ifconfiga
+chown vagrant:vagrant ~vagrant/ifconfiga
+
+# Disable all ethernet interfaces other than the default route
+# interface so VPP will use those interfaces. The VPP auto-blacklist
+# algorithm prevents the use of any physical interface contained in the
+# routing table (i.e. "route --inet --inet6") preventing the theft of
+# the management ethernet interface by VPP from the kernel.
+for intf in $(ls /sys/class/net) ; do
+    if [ -d /sys/class/net/$intf/device ] &&
+        [ "$(route --inet --inet6 | grep default | grep $intf)" == "" ] ; then
+        ifconfig $intf down
+    fi
+done
diff --git a/vpp/build-root/vagrant/env.sh b/vpp/build-root/vagrant/env.sh
new file mode 100644
index 00000000..f0edfd88
--- /dev/null
+++ b/vpp/build-root/vagrant/env.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+export VPP_VAGRANT_DISTRO="ubuntu1404"
+export VPP_VAGRANT_NICS=2
+export VPP_VAGRANT_VMCPU=4
+export VPP_VAGRANT_VMRAM=4096
diff --git a/vpp/build-root/vagrant/install.sh b/vpp/build-root/vagrant/install.sh
new file mode 100644
index 00000000..a53faa4d
--- /dev/null
+++ b/vpp/build-root/vagrant/install.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Get command line arguments if present
+VPP_DIR=$1
+if [ "x$1" != "x" ]; then
+    VPP_DIR=$1
+else
+    VPP_DIR=`dirname $0`/../../
+fi
+
+# Figure out what system we are running on
+if [ -f /etc/lsb-release ];then
+    . /etc/lsb-release
+elif [ -f /etc/redhat-release ];then
+    sudo yum install -y redhat-lsb
+    DISTRIB_ID=`lsb_release -si`
+    DISTRIB_RELEASE=`lsb_release -sr`
+    DISTRIB_CODENAME=`lsb_release -sc`
+    DISTRIB_DESCRIPTION=`lsb_release -sd`
+fi
+echo DISTRIB_ID: $DISTRIB_ID
+echo DISTRIB_RELEASE: $DISTRIB_RELEASE
+echo DISTRIB_CODENAME: $DISTRIB_CODENAME
+echo DISTRIB_DESCRIPTION: $DISTRIB_DESCRIPTION
+
+if [ $DISTRIB_ID == "Ubuntu" ]; then
+    (cd ${VPP_DIR}/build-root/;sudo dpkg -i *.deb)
+elif [ $DISTRIB_ID == "CentOS" ]; then
+    (cd ${VPP_DIR}/build-root/;sudo rpm -Uvh *.rpm)
+fi
\ No newline at end of file diff --git a/vpp/build-root/vagrant/run.sh b/vpp/build-root/vagrant/run.sh new file mode 100755 index 00000000..1cd33826 --- /dev/null +++ b/vpp/build-root/vagrant/run.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Figure out what system we are running on +if [ -f /etc/lsb-release ];then + . /etc/lsb-release +elif [ -f /etc/redhat-release ];then + yum install -y redhat-lsb + DISTRIB_ID=`lsb_release -si` + DISTRIB_RELEASE=`lsb_release -sr` + DISTRIB_CODENAME=`lsb_release -sc` + DISTRIB_DESCRIPTION=`lsb_release -sd` +fi + +if [ $DISTRIB_ID == "CentOS" ]; then + # Install uio-pci-generic + modprobe uio_pci_generic +fi +echo "Starting VPP..." +if [ $DISTRIB_ID == "Ubuntu" ] && [ $DISTRIB_CODENAME = "trusty" ] ; then + start vpp +else + service vpp start +fi diff --git a/vpp/build-root/vagrant/update.sh b/vpp/build-root/vagrant/update.sh new file mode 100755 index 00000000..f4139d74 --- /dev/null +++ b/vpp/build-root/vagrant/update.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Make sure that we get the hugepages we need on provision boot +# Note: The package install should take care of this at the end +# But sometimes after all the work of provisioning, we can't +# get the requested number of hugepages without rebooting. +# So do it here just in case +sysctl -w vm.nr_hugepages=1024 +HUGEPAGES=`sysctl -n vm.nr_hugepages` +if [ $HUGEPAGES != 1024 ]; then + echo "ERROR: Unable to get 1024 hugepages, only got $HUGEPAGES. Cannot finish." + exit +fi + +# Figure out what system we are running on +if [ -f /etc/lsb-release ];then + . /etc/lsb-release +elif [ -f /etc/redhat-release ];then + yum install -y redhat-lsb + DISTRIB_ID=`lsb_release -si` + DISTRIB_RELEASE=`lsb_release -sr` + DISTRIB_CODENAME=`lsb_release -sc` + DISTRIB_DESCRIPTION=`lsb_release -sd` +fi + +# Do initial setup for the system +if [ $DISTRIB_ID == "Ubuntu" ]; then + + export DEBIAN_PRIORITY=critical + export DEBIAN_FRONTEND=noninteractive + export DEBCONF_NONINTERACTIVE_SEEN=true + APT_OPTS="--assume-yes --no-install-suggests --no-install-recommends -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\"" + + # Standard update + upgrade dance + apt-get update ${APT_OPTS} >/dev/null + apt-get upgrade ${APT_OPTS} >/dev/null + + # Fix the silly notion that /bin/sh should point to dash by pointing it to bash + + update-alternatives --install /bin/sh sh /bin/bash 100 + + # Install useful but non-mandatory tools + apt-get install -y emacs x11-utils git-review gdb gdbserver +elif [ $DISTRIB_ID == "CentOS" ]; then + # Standard update + upgrade dance + yum check-update + yum update -y +fi |
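For reference: the RPM spec earlier in this diff expects %{_version}, %{_release} and the
path macros (%{_mu_build_dir}, %{_vpp_install_dir}, %{_vpp_build_dir}) to be defined by
whatever drives rpmbuild; that wiring is not part of this change. A sketch of such an
invocation, using the rpm-version / rpm-release modes of scripts/version shown above
(the spec file name and macro values here are assumptions, not taken from this diff):

    VER=$(build-root/scripts/version rpm-version)
    REL=$(build-root/scripts/version rpm-release)
    rpmbuild -bb \
        --define "_version ${VER}" \
        --define "_release ${REL}" \
        --define "_mu_build_dir $(pwd)/build-root" \
        vpp.spec   # assumed spec file name and location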