author     Ole Troan <ot@cisco.com>            2016-06-24 20:10:30 +0200
committer  Dave Barach <openvpp@barachs.net>   2016-06-27 15:04:00 +0000
commit     ea3e1fc8754d7ebeca85ecc448b263f6ccb6ae6f (patch)
tree       5aa3322e4bf45f2b621a5b62f58fdc8bd4d7bca1 /plugins/vcgn-plugin
parent     aa6920e0a80d8271be1dda59f613a1d2b0e1d3e6 (diff)
Plugins: Clean up the plugin directory so that each plugin has its own
directory and GNU autotools setup.

Change-Id: I6c59d1297389c9413db0c0b9bdf3b759080bf1b8
Signed-off-by: Ole Troan <ot@cisco.com>
Diffstat (limited to 'plugins/vcgn-plugin')
-rw-r--r--  plugins/vcgn-plugin/Makefile.am  99
-rw-r--r--  plugins/vcgn-plugin/configure.ac  17
-rw-r--r--  plugins/vcgn-plugin/vcgn/README  100
-rw-r--r--  plugins/vcgn-plugin/vcgn/cgn_bitmap.h  133
-rw-r--r--  plugins/vcgn-plugin/vcgn/cgse_defs.h  88
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_bulk_port.c  964
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_bulk_port.h  157
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_bulk_port_defs.h  57
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_cli.h  206
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_cli_handler.c  961
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_common_api.h  22
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_config.c  77
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_config.h  582
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_config_api.h  46
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_db.h  701
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_db_scanner.c  493
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_db_v2.c  3802
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_debug_msg_handler.c  1780
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_global.c  79
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_global.h  87
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp.h  60
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_inside_input.c  476
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_outside_input.c  452
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input.c  404
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input_exception.c  235
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_outside_input.c  381
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input.c  424
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input_exceptions.c  314
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_outside_input.c  382
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_udp.h  41
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input.c  508
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input_exceptions.c  283
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_outside_input.c  605
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_log_api.h  114
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_log_common.h  79
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_logging.c  3518
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_logging.h  1091
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_pcp_server.h  398
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ports.c  1113
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_ports.h  208
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_show.c  810
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_show_api.h  40
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_show_response.h  580
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_syslog.c  1787
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_syslog.h  190
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_util.c  2256
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_v4_ftp_alg.h  133
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_v4_functions.c  364
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_v4_functions.h  342
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_v4_pptp_alg.h  150
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_v4_tcp_in2out_stages.c  679
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_va_db.c  286
-rw-r--r--  plugins/vcgn-plugin/vcgn/cnat_va_db.h  121
-rw-r--r--  plugins/vcgn-plugin/vcgn/dslite_db.h  170
-rw-r--r--  plugins/vcgn-plugin/vcgn/dslite_defs.h  336
-rw-r--r--  plugins/vcgn-plugin/vcgn/index_list.c  336
-rw-r--r--  plugins/vcgn-plugin/vcgn/index_list.h  118
-rw-r--r--  plugins/vcgn-plugin/vcgn/nat64_db.h  480
-rw-r--r--  plugins/vcgn-plugin/vcgn/nat64_defs.h  576
-rw-r--r--  plugins/vcgn-plugin/vcgn/nat64_tcp_sm.h  91
-rw-r--r--  plugins/vcgn-plugin/vcgn/platform_common.h  136
-rw-r--r--  plugins/vcgn-plugin/vcgn/platform_common_override.h  304
-rw-r--r--  plugins/vcgn-plugin/vcgn/spp_ctx.h  76
-rw-r--r--  plugins/vcgn-plugin/vcgn/spp_platform_trace_log.c  989
-rw-r--r--  plugins/vcgn-plugin/vcgn/spp_platform_trace_log.h  358
-rw-r--r--  plugins/vcgn-plugin/vcgn/spp_timers.h  139
-rw-r--r--  plugins/vcgn-plugin/vcgn/tcp_header_definitions.h  1582
-rw-r--r--  plugins/vcgn-plugin/vcgn/vcgn_classify.c  1508
-rw-r--r--  plugins/vcgn-plugin/vcgn/vcgn_db.h  117
69 files changed, 36591 insertions, 0 deletions
diff --git a/plugins/vcgn-plugin/Makefile.am b/plugins/vcgn-plugin/Makefile.am
new file mode 100644
index 00000000000..f9705d05d1c
--- /dev/null
+++ b/plugins/vcgn-plugin/Makefile.am
@@ -0,0 +1,99 @@
+# Copyright (c) 2015 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+AUTOMAKE_OPTIONS = foreign subdir-objects
+
+AM_CFLAGS = -Wall -I@TOOLKIT_INCLUDE@
+
+########################################
+# Virtual Carrier Grade NAT
+########################################
+
+libvcgn_plugin_la_SOURCES = \
+ vcgn/cnat_bulk_port.c \
+ vcgn/cnat_config.c \
+ vcgn/cnat_db_scanner.c \
+ vcgn/cnat_db_v2.c \
+ vcgn/cnat_debug_msg_handler.c \
+ vcgn/cnat_cli_handler.c \
+ vcgn/cnat_global.c \
+ vcgn/cnat_ipv4_udp_inside_input.c \
+ vcgn/cnat_ipv4_udp_inside_input_exceptions.c \
+ vcgn/cnat_ipv4_udp_outside_input.c \
+ vcgn/cnat_ipv4_tcp_inside_input.c \
+ vcgn/cnat_ipv4_tcp_inside_input_exceptions.c \
+ vcgn/cnat_ipv4_tcp_outside_input.c \
+ vcgn/cnat_ipv4_icmp_query_inside_input.c \
+ vcgn/cnat_ipv4_icmp_query_inside_input_exception.c \
+ vcgn/cnat_ipv4_icmp_query_outside_input.c \
+ vcgn/cnat_ipv4_icmp_error_inside_input.c \
+ vcgn/cnat_ipv4_icmp_error_outside_input.c \
+ vcgn/cnat_logging.c \
+ vcgn/cnat_ports.c \
+ vcgn/cnat_util.c \
+ vcgn/cnat_show.c \
+ vcgn/cnat_syslog.c \
+ vcgn/cnat_v4_functions.c \
+ vcgn/index_list.c \
+ vcgn/spp_platform_trace_log.c \
+ vcgn/vcgn_classify.c
+
+nobase_include_HEADERS = \
+ vcgn/cgn_bitmap.h \
+ vcgn/cgse_defs.h \
+ vcgn/cnat_bulk_port_defs.h \
+ vcgn/cnat_bulk_port.h \
+ vcgn/cnat_cli.h \
+ vcgn/cnat_common_api.h \
+ vcgn/cnat_config_api.h \
+ vcgn/cnat_config.h \
+ vcgn/cnat_db.h \
+ vcgn/cnat_global.h \
+ vcgn/cnat_ipv4_icmp.h \
+ vcgn/cnat_ipv4_udp.h \
+ vcgn/cnat_log_api.h \
+ vcgn/cnat_log_common.h \
+ vcgn/cnat_logging.h \
+ vcgn/cnat_pcp_server.h \
+ vcgn/cnat_ports.h \
+ vcgn/cnat_show_api.h \
+ vcgn/cnat_show_response.h \
+ vcgn/cnat_syslog.h \
+ vcgn/cnat_v4_ftp_alg.h \
+ vcgn/cnat_v4_functions.h \
+ vcgn/cnat_v4_pptp_alg.h \
+ vcgn/cnat_va_db.h \
+ vcgn/dslite_db.h \
+ vcgn/dslite_defs.h \
+ vcgn/index_list.h \
+ vcgn/nat64_db.h \
+ vcgn/nat64_defs.h \
+ vcgn/nat64_tcp_sm.h \
+ vcgn/platform_common.h \
+ vcgn/platform_common_override.h \
+ vcgn/spp_ctx.h \
+ vcgn/spp_platform_trace_log.h \
+ vcgn/spp_timers.h \
+ vcgn/tcp_header_definitions.h \
+ vcgn/vcgn_db.h
+
+libvcgn_plugin_la_LDFLAGS = -module
+
+lib_LTLIBRARIES = libvcgn_plugin.la
+
+if WITH_PLUGIN_TOOLKIT
+install-data-hook:
+ mkdir /usr/lib/vpp_plugins || true
+ mkdir /usr/lib/vpp_api_test_plugins || true
+ cp -L $(prefix)/lib/libvcgn_plugin.so /usr/lib/vpp_plugins
+endif
diff --git a/plugins/vcgn-plugin/configure.ac b/plugins/vcgn-plugin/configure.ac
new file mode 100644
index 00000000000..ced22669a34
--- /dev/null
+++ b/plugins/vcgn-plugin/configure.ac
@@ -0,0 +1,17 @@
+AC_INIT(vcgn_plugin, 1.0)
+LT_INIT
+AM_INIT_AUTOMAKE
+
+AM_PROG_AS
+AC_PROG_CC
+AM_PROG_CC_C_O
+
+AC_ARG_WITH(plugin-toolkit,
+ AC_HELP_STRING([--with-plugin-toolkit],
+ [build using the vpp toolkit]),
+ [with_plugin_toolkit=${prefix}/include],
+ [with_plugin_toolkit=.])
+
+AC_SUBST(TOOLKIT_INCLUDE,[${with_plugin_toolkit}])
+AM_CONDITIONAL(WITH_PLUGIN_TOOLKIT, test "$with_plugin_toolkit" != ".")
+AC_OUTPUT([Makefile])
diff --git a/plugins/vcgn-plugin/vcgn/README b/plugins/vcgn-plugin/vcgn/README
new file mode 100644
index 00000000000..9b903967ac4
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/README
@@ -0,0 +1,100 @@
+ =================================
+ CARRIER GRADE NAT - NAT44 README
+ =================================
+
+What is implemented:
+====================
+* NAT44 UDP, TCP, ICMP protocols
+* Show and config commands for the various parameters of the above
+* NetFlow v9 (NFv9) logging is implemented but not tested
+
+What is not implemented:
+=========================
+* TCP MSS
+* TCP refresh direction
+* Static port forwarding
+* Syslog support
+* Destination based logging or session logging
+* None of the ALGs
+* Performance optimization
+* Binary APIs, suitable for configuring the feature from netconf/restconf/yang
+* Support for VLANs
+
+Setup
+=====
+
+ +--------------+
++------------+ GEb/0/0 | |
+| Tester +-------------->+ vCGN/vPE-f |
+| +<--------------+ VM in UCS |
++------------+ GE13/0/0 | |
+ +--------------+
+
+Configure Interfaces and add routes in vPE-f
+=============================================
+set int ip address GigabitEthernetb/0/0 10.4.5.2/24
+set int state GigabitEthernetb/0/0 up
+set int ip address GigabitEthernet13/0/0 20.4.5.2/24
+set int state GigabitEthernet13/0/0 up
+ip route add 4.4.4.0/24 via GigabitEthernet13/0/0
+ip route add 1.2.3.0/24 via GigabitEthernetb/0/0
+show ip fib
+
+Configure vCGN NAT44 for UDP/TCP/ICMP
+=======================================
+set vcgn inside GigabitEthernetb/0/0 outside GigabitEthernet13/0/0
+set vcgn port limit 65535
+set vcgn dynamic port start 5641
+set vcgn map 10.1.1.0 - 10.1.1.31
+set vcgn tcp timeout active 65535 init 65535
+set vcgn udp timeout active 65535 init 65535
+set vcgn icmp timeout 65535
+
+Set ARP entries for CGN to Tester
+==================================
+set ip arp GigabitEthernet13/0/0 4.4.4.4 11:22:33:44:55:00
+set ip arp GigabitEthernetb/0/0 1.2.3.0 11:22:33:44:55:10
+set ip arp GigabitEthernetb/0/0 1.2.3.1 11:22:33:44:55:12
+set ip arp GigabitEthernetb/0/0 1.2.3.2 11:22:33:44:55:13
+set ip arp GigabitEthernetb/0/0 1.2.3.3 11:22:33:44:55:14
+set ip arp GigabitEthernetb/0/0 1.2.3.4 11:22:33:4e:55:11
+set ip arp GigabitEthernetb/0/0 1.2.3.5 11:22:33:44:55:15
+set ip arp GigabitEthernetb/0/0 1.2.3.6 11:22:33:44:55:16
+set ip arp GigabitEthernetb/0/0 1.2.3.7 11:22:33:44:55:17
+set ip arp GigabitEthernetb/0/0 1.2.3.8 11:22:33:44:55:18
+set ip arp GigabitEthernetb/0/0 1.2.3.9 11:22:33:44:55:19
+set ip arp GigabitEthernetb/0/0 1.2.3.10 11:22:33:44:55:1a
+set ip arp GigabitEthernetb/0/0 1.2.3.11 11:22:33:44:55:1b
+set ip arp GigabitEthernetb/0/0 1.2.3.12 11:22:33:44:55:1c
+set ip arp GigabitEthernetb/0/0 1.2.3.13 11:22:33:44:55:1d
+set ip arp GigabitEthernetb/0/0 1.2.3.14 11:22:33:44:55:1e
+set ip arp GigabitEthernetb/0/0 1.2.3.15 11:22:33:44:55:1f
+set ip arp GigabitEthernetb/0/0 1.2.3.16 11:22:33:44:50:01
+set ip arp GigabitEthernetb/0/0 1.2.3.17 11:22:33:44:51:01
+set ip arp GigabitEthernetb/0/0 1.2.3.18 11:22:33:44:52:01
+set ip arp GigabitEthernetb/0/0 1.2.3.19 11:22:33:44:53:01
+set ip arp GigabitEthernetb/0/0 1.2.3.20 11:22:33:44:54:02
+set ip arp GigabitEthernetb/0/0 1.2.3.21 11:22:33:44:55:01
+set ip arp GigabitEthernetb/0/0 1.2.3.22 11:22:33:44:56:02
+set ip arp GigabitEthernetb/0/0 1.2.3.23 11:22:33:44:57:00
+set ip arp GigabitEthernetb/0/0 1.2.3.24 11:22:33:44:58:02
+set ip arp GigabitEthernetb/0/0 1.2.3.25 11:22:33:44:59:03
+set ip arp GigabitEthernetb/0/0 1.2.3.26 11:22:33:44:5a:01
+set ip arp GigabitEthernetb/0/0 1.2.3.27 11:22:33:44:5b:02
+set ip arp GigabitEthernetb/0/0 1.2.3.28 11:22:33:44:5c:03
+set ip arp GigabitEthernetb/0/0 1.2.3.29 11:22:33:44:5d:04
+set ip arp GigabitEthernetb/0/0 1.2.3.30 11:22:33:44:5e:05
+set ip arp GigabitEthernetb/0/0 1.2.3.31 11:22:33:44:5f:06
+
+
+Show commands
+=============
+show vcgn config
+show vcgn statistics
+show node counters
+show interface
+
+Show commands to show translations
+==================================
+show vcgn inside-translation protocol tcp inside-addr 1.2.3.4 start-port 5641 end-port 5645
+show vcgn outside-translation protocol tcp outside-addr 10.1.1.31 start-port 7000 end-port 8000
+show vcgn inside-translation protocol icmp inside-addr 1.2.3.4 start-port 7000 end-port 8000
+show vcgn outside-translation protocol icmp outside-addr 10.1.1.31 start-port 7000 end-port 8000
+
+
diff --git a/plugins/vcgn-plugin/vcgn/cgn_bitmap.h b/plugins/vcgn-plugin/vcgn/cgn_bitmap.h
new file mode 100644
index 00000000000..6c46b75a608
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cgn_bitmap.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Modifications to this file
+ * Copyright (c) 2006-2009 by cisco Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ Copyright (c) 2001, 2002, 2003, 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef __CGN_BITMAP_H__
+#define __CGN_BITMAP_H__
+
+/* Bitmaps built as vectors of machine words. */
+
+#include <string.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/random.h>
+
+#define clib_bitmap_dup(v) vec_dup(v)
+#define clib_bitmap_free(v) vec_free(v)
+#define clib_bitmap_bytes(v) vec_bytes(v)
+#define clib_bitmap_zero(v) vec_zero(v)
+
+/* Allocate bitmap with given number of bits. */
+#define clib_bitmap_alloc(v,n_bits) \
+ v = vec_new (uword, ((n_bits) + BITS (uword) - 1) / BITS (uword))
+
+/* Sets given bit. Returns old value. */
+static inline uword
+cgn_clib_bitmap_set_no_check (uword * a, uword i)
+{
+ uword i0 = i / BITS (a[0]);
+ uword bit = (uword) 1 << (i % BITS (a[0]));
+ uword ai;
+
+/* ASSERT (i0 < vec_len (a)); */
+ ai = a[i0];
+ a[i0] = ai | bit;
+
+ return (ai & bit) != 0;
+}
+
+/* Clears given bit. Returns old value. */
+static inline
+uword cgn_clib_bitmap_clear_no_check (uword * a, uword i)
+{
+ uword i0 = i / BITS (a[0]);
+ uword bit = (uword) 1 << (i % BITS (a[0]));
+ uword ai;
+
+/* ASSERT (i0 < vec_len (a)); */
+ ai = a[i0];
+ a[i0] = ai & ~bit;
+
+ return (ai & bit) != 0;
+}
+
+/* Get num_bits bits from the bitmap ai, starting at bit position start.
+ * Assumes that all of the requested bits fall within the same uword.
+ */
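+/*
+ * Example: with 64-bit uwords, cgn_clib_bitmap_get_bits(ai, 70, 4) reads
+ * ai[1], shifts right by 6 and masks with 0xF, returning bitmap bits
+ * 70..73 in the low 4 bits of the result.
+ */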
+static inline uword cgn_clib_bitmap_get_bits (uword *ai, u16 start,
+ unsigned char num_bits)
+{
+ uword i0 = start / BITS (ai[0]);
+ uword i1 = start % BITS (ai[0]);
+ uword result = ai[i0] >> i1;
+ if(num_bits >= BITS(ai[0])) return result;
+ /* Else, we have to trim the bits */
+ result = result & (((uword)1 << num_bits) - 1);
+ return result;
+}
+
+/* Check if all of the bits from start to start + num_bits are available */
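+/*
+ * Example: with 64-bit uwords, cgn_clib_bitmap_check_if_all(ai, 128, 64)
+ * returns 1 only if ai[2] is all ones, i.e. the whole block is available.
+ */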
+static inline uword cgn_clib_bitmap_check_if_all (uword *ai, u16 start,
+ i16 num_bits)
+{
+ /* Now check if any bits are zero.. if yes, return false */
+ uword bitmask;
+ if(num_bits >= BITS(ai[0])) {
+        /* assume that it is going to be a multiple of BITS(ai[0]) */
+ uword i0 = start / BITS (ai[0]);
+ bitmask = ~0; /* set all bits to 1 */
+ do {
+ if(ai[i0] ^ bitmask) return 0;
+ num_bits = num_bits - BITS (ai[0]);
+ i0++;
+ } while (num_bits > 0);
+ return 1;
+ }
+ else {
+ uword result = cgn_clib_bitmap_get_bits (ai, start, num_bits);
+ bitmask = ((uword)1 << num_bits) -1; /* set only num_bits */
+ return (!(result ^ bitmask));
+ }
+}
+
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/cgse_defs.h b/plugins/vcgn-plugin/vcgn/cgse_defs.h
new file mode 100644
index 00000000000..08255875fec
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cgse_defs.h
@@ -0,0 +1,88 @@
+/*
+ *------------------------------------------------------------------
+ * cgse_defs.h - CGSE specific definitions
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CGSE_DEFS_H__
+#define __CGSE_DEFS_H__
+
+#include "spp_platform_common.h"
+#include <cnat_cli.h>
+
+
+#define CGSE_SVI_TYPE_CNAT 1
+#define CGSE_SVI_TYPE_XLAT 2
+#define CGSE_SVI_TYPE_NAT64_STATEFUL 3
+#define CGSE_SVI_TYPE_V6RD 4
+#define CGSE_SVI_TYPE_INFRA 5
+#define CGSE_SVI_TYPE_DS_LITE 7
+#define CGSE_SVI_TYPE_MAPE 9
+
+#define CGSE_SET_TX_PKT_TYPE(type) PLATFORM_SET_CTX_RU_TX_PKT_TYPE(ctx, type)
+
+#define CGSE_INVALID_UIDX 0xffff /*invalid svi app uidb index */
+#define CGSE_INVALID_VRFID 0xffffffff /*invalid vrf id */
+
+#define CGSE_VRF_MASK 0x3fff
+#define CGSE_MAX_VRFMAP_ENTRIES (CGSE_VRF_MASK + 1)
+
+#define CGSE_VRFMAP_ENTRY_INVALID 0xffff
+
+
+#define CGSE_INVALID_CGSE_ID (0)
+
+#define CGSE_TABLE_ENTRY_DELETED 0
+#define CGSE_TABLE_ENTRY_ACTIVE 1
+#define CGSE_TABLE_ENTRY_DORMANT 2
+#define CGSE_TABLE_ENTRY_INVALID_UIDB 3
+
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF1(level, a) \
+ if (cgse_config_debug_level > level) printf(a);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF2(level, a, b) \
+ if (cgse_config_debug_level > level) printf(a, b);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF3(level, a, b, c) \
+ if (cgse_config_debug_level > level) printf(a, b, c);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF4(level, a, b, c, d) \
+ if (cgse_config_debug_level > level) printf(a, b, c, d);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF5(level, a, b, c, d, e) \
+ if (cgse_config_debug_level > level) printf(a, b, c, d, e);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF6(level, a, b, c, d, e, f) \
+ if (cgse_config_debug_level > level) printf(a, b, c, d, e, f);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF7(level, a, b, c, d, e, f, g) \
+ if (cgse_config_debug_level > level) printf(a, b, c, d, e, f, g);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF8(level, a, b, c, d, e, f, g, h) \
+ if (cgse_config_debug_level > level) printf(a, b, c, d, e, f, g, h);
+
+#define CGSE_CONFIG_HANDLER_DEBUG_PRINTF9(level, a, b, c, d, e, f, g, h, i) \
+ if (cgse_config_debug_level > level) printf(a, b, c, d, e, f, g, h, i);
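+
+/*
+ * Usage example: CGSE_CONFIG_HANDLER_DEBUG_PRINTF2(1, "vrf %d\n", vrf)
+ * prints only when cgse_config_debug_level is greater than 1.
+ */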
+
+extern u16 *cgse_uidb_index_cgse_id_mapping_ptr;
+
+#define CGSE_ADD_UIDB_INDEX_CGSE_ID_MAPPING(uidb_index, cgse_id) \
+ *(cgse_uidb_index_cgse_id_mapping_ptr + uidb_index) = cgse_id;
+
+extern u8 my_instance_number;
+
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/cnat_bulk_port.c b/plugins/vcgn-plugin/vcgn/cnat_bulk_port.c
new file mode 100644
index 00000000000..d8894eb84f5
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_bulk_port.c
@@ -0,0 +1,964 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_bulk_port.c - wrappers for bulk port allocation
+ *
+ * Copyright (c) 2011-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/bitmap.h>
+
+#include "cnat_db.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_logging.h"
+#include "spp_timers.h"
+#include "platform_common.h"
+#include "cgn_bitmap.h"
+#include "spp_platform_trace_log.h"
+#include "cnat_ports.h"
+
+#ifndef NO_BULK_LOGGING
+
+#define PORT_TO_CACHE(y, z) ((y)/(z))
+/* The last bit (MSB) is used to indicate whether the cache entry is full */
+#define CACHE_TO_PORT(x, z) (((x)& 0x7FFF) * (z))
+#define IS_CACHE_ENTRY_FULL(x) ((x) & 0x8000)
+#define MARK_CACHE_ENTRY_AS_FULL(x) ((x) = ((x) | 0x8000))
+#define UNMARK_CACHE_ENTRY_AS_FULL(x) ((x) = ((x) & 0x7FFF))
+#define CACHE_ENTRY_WITHOUT_FULL_STAT(x) ((x) & 0x7FFF)
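+
+/*
+ * Example (bulk_size 64): outside port 7936 is cached as
+ * PORT_TO_CACHE(7936, 64) = 124 (0x7C), and CACHE_TO_PORT(124, 64) recovers
+ * the base port 7936. Marking the entry full stores 0x807C, which still
+ * decodes to 7936 because CACHE_TO_PORT masks with 0x7FFF.
+ */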
+
+
+#define NUM_BULK_CHECK 128 /* max number of previous caches to check.
+     * somewhat arbitrary.. assuming 64 as bulk size, this can handle up
+     * to 128*64 ports allocated by a single subscriber */
+
+/* #define DEBUG_BULK_PORT 1 */
+/* #define DEBUG_BULK_PORT_DETAIL 1 */
+#define HAVE_BULK_PORT_STATS 1
+
+#ifdef HAVE_BULK_PORT_STATS
+static uword bulk_cache_hit_count;
+static uword bulk_port_use_count;
+static uword bulk_port_alloc_count;
+static uword mapped_port_alloc_count;
+#endif /* HAVE_BULK_PORT_STATS */
+
+static u32 bulk_port_rand_across;
+
+void show_bulk_port_allocation(u16 in_vrfid, u32 inside_ip)
+{
+ cnat_db_key_bucket_t u_ki;
+ cnat_user_db_entry_t *udb;
+ int i;
+ u32 head;
+ cnat_main_db_entry_t *db = NULL;
+ i16 printed_so_far = 0; /* entries printed so far */
+ u16 prev_bulks[NUM_BULK_CHECK];
+ cnat_vrfmap_t *my_vrfmap = 0;
+ cnat_vrfmap_t *vrfmap = 0;
+ bulk_alloc_size_t bulk_size;
+
+ u_ki.k.k.vrf = in_vrfid;
+ u_ki.k.k.ipv4 = inside_ip;
+ u_ki.k.k.port = 0;
+
+ PLATFORM_DEBUG_PRINT("Searching for user %x in invrf %d\n",
+ inside_ip, in_vrfid);
+ udb = cnat_user_db_lookup_entry(&u_ki);
+ if(!udb) {
+ PLATFORM_DEBUG_PRINT("No such user\n"); return;
+ }
+
+ pool_foreach (vrfmap, cnat_map_by_vrf, ({
+ if(vrfmap->i_vrf == in_vrfid) {
+ my_vrfmap = vrfmap;
+ break;
+ }}));
+
+ if(!my_vrfmap) {
+ PLATFORM_DEBUG_PRINT("Vrf map not found\n");
+ return;
+ }
+ bulk_size = BULKSIZE_FROM_VRFMAP(my_vrfmap);
+
+ if(bulk_size == BULK_ALLOC_SIZE_NONE) {
+ PLATFORM_DEBUG_PRINT("Bulk allocation not enabled\n");
+ return;
+ }
+
+ PLATFORM_DEBUG_PRINT("\nBulk cache for subscriber 0x%x: ", inside_ip);
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ PLATFORM_DEBUG_PRINT("%d , ",
+ CACHE_TO_PORT(udb->bulk_port_range_cache[i], bulk_size));
+ }
+ PLATFORM_DEBUG_PRINT("\nNon cached bulk allocation for subscriber 0x%x:\n",
+ inside_ip);
+ ASSERT(udb);
+ memset(prev_bulks, 0,sizeof(prev_bulks));
+
+ head = udb->translation_list_head_index;
+ if(PREDICT_FALSE(head == EMPTY)) {
+ return;
+ }
+ db = cnat_main_db + head;
+ while (1) {
+ /* skip static ports - static ports may not belong to bulk pool*/
+ if(db->out2in_key.k.port < cnat_static_port_range) goto next_entry;
+
+ u16 bm_index = PORT_TO_CACHE(db->out2in_key.k.port, bulk_size);
+
+ /*Check if we have already tested this bulk */
+ for(i=0; i < printed_so_far; i++) {
+ if(prev_bulks[i] == bm_index) goto next_entry;
+ }
+
+ /*Check if this base port is already part of cache */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(CACHE_ENTRY_WITHOUT_FULL_STAT(udb->bulk_port_range_cache[i])
+ == bm_index) {
+ goto next_entry;
+ }
+ }
+        /* this is not in cache already */
+ PLATFORM_DEBUG_PRINT("%d ", CACHE_TO_PORT(bm_index, bulk_size));
+ if(printed_so_far < NUM_BULK_CHECK) {
+ prev_bulks[printed_so_far] = bm_index;
+ printed_so_far++;
+ }
+
+next_entry:
+ db = cnat_main_db + db->user_ports.next;
+ /*
+ * its a circular list, so if we have reached the head again
+ * all the entries for that user have been read
+ */
+ if (db == (cnat_main_db + head)) {
+ break;
+ }
+ } /* while loop for db entries */
+
+ PLATFORM_DEBUG_PRINT("\n");
+ return;
+}
+
+void show_bulk_port_stats()
+{
+
+ cnat_vrfmap_t *my_vrfmap = 0;
+ PLATFORM_DEBUG_PRINT("Bulk size settings of each inside vrf ...\n");
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ PLATFORM_DEBUG_PRINT("vrf id %d, bulk size %d\n", my_vrfmap->i_vrf,
+ BULKSIZE_FROM_VRFMAP(my_vrfmap));
+ }));
+
+#ifdef HAVE_BULK_PORT_STATS
+ PLATFORM_DEBUG_PRINT("\nBulk port allocation, use and cache hit statistics\n");
+ PLATFORM_DEBUG_PRINT("Number of times bulk ports allocated %lld\n",
+ bulk_port_alloc_count);
+ PLATFORM_DEBUG_PRINT("Number of times pre-allocated ports used %lld\n",
+ bulk_port_use_count);
+ PLATFORM_DEBUG_PRINT(
+ "Number of times pre-allocated bulk port found from cache %lld\n",
+ bulk_cache_hit_count);
+ PLATFORM_DEBUG_PRINT(
+ "Number of times mapped port (static) allocations made %lld\n",
+ mapped_port_alloc_count);
+#else
+ PLATFORM_DEBUG_PRINT("\nNat44 bulk port statistics not turned on\n");
+#endif /* HAVE_BULK_PORT_STATS */
+}
+
+void clear_bulk_port_stats()
+{
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_port_alloc_count = 0;
+ bulk_port_use_count = 0;
+ bulk_cache_hit_count = 0;
+ mapped_port_alloc_count = 0;
+#endif /* HAVE_BULK_PORT_STATS */
+ return;
+}
+
+void cnat_update_bulk_range_cache(cnat_user_db_entry_t *udb, u16 o_port,
+ bulk_alloc_size_t bulk_size)
+{
+ i16 i;
+ if(!udb) {
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT("%s, null udb!\n", __func__);
+#endif
+ return;
+ }
+ if(BULK_ALLOC_SIZE_NONE == bulk_size) { /* no bulk logging */
+ return;
+ }
+
+ /* Take care of caching */
+ if(o_port & 0x1) {
+ o_port--;
+ }
+ if(PREDICT_FALSE(o_port <= 0)) {
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT("%s invalid port: %d\n", __func__, o_port);
+#endif
+ return;
+ }
+
+    /* First preference is for the cache entries that are not used yet */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(PREDICT_FALSE(
+ udb->bulk_port_range_cache[i] == (i16)BULK_RANGE_INVALID)) {
+ udb->bulk_port_range_cache[i] = PORT_TO_CACHE(o_port, bulk_size);
+ return;
+ }
+ }
+
+ /* Now check if any cache entry is full and if it can be replaced */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(PREDICT_FALSE(IS_CACHE_ENTRY_FULL(udb->bulk_port_range_cache[i]))) {
+ udb->bulk_port_range_cache[i] = PORT_TO_CACHE(o_port, bulk_size);
+ return;
+ }
+ }
+
+ return;
+}
+
+
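+/*
+ * Frees base_port back to the portmap and, when bulk allocation is in use,
+ * keeps the user's bulk range cache consistent. On return *nfv9_log_req is
+ * BULK_ALLOC_NOT_ATTEMPTED when bulk logging is off (or the port is static),
+ * CACHE_ALLOC_NO_LOG_REQUIRED when other ports of the same bulk are still
+ * in use, or the bulk base port when the whole bulk has become free and
+ * logging is now required.
+ */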
+void cnat_port_free_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ int index,
+ port_pair_t ptype,
+ u16 base_port,
+ cnat_user_db_entry_t *udb,
+ u16 static_port_range,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req)
+{
+ cnat_portmap_v2_t *my_pm;
+ i16 bm_index;
+ i16 i;
+ int unmark_full_status = 0;
+
+ *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+
+ /* First free up the port */
+ cnat_port_free_v2(pm, index, ptype, base_port, static_port_range);
+ if(BULK_ALLOC_SIZE_NONE == bulk_size) /* no bulk logging */
+ return;
+ if(PREDICT_FALSE(!udb)) {
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT("%s udb is null\n", __func__);
+#endif
+        return; /* udb is dereferenced below; cannot continue without it */
+    }
+
+ if(PREDICT_FALSE(base_port < static_port_range)) {
+ return;
+ }
+ /* Now check if cache needs to be removed */
+ my_pm = pm + index;
+ base_port = base_port/bulk_size;
+ base_port = base_port * bulk_size; /*Align it to multiples of bulk_size */
+ if(PREDICT_TRUE(!cgn_clib_bitmap_check_if_all(
+ my_pm->bm, base_port, bulk_size))) {
+ *nfv9_log_req = CACHE_ALLOC_NO_LOG_REQUIRED;
+ unmark_full_status = 1;
+ /* One or more ports are still in use */
+ } else {
+ *nfv9_log_req = base_port; /* logging required now. indicate base port*/
+ }
+ bm_index = PORT_TO_CACHE(base_port, bulk_size);
+ /* Now check if this is in the cache */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(PREDICT_FALSE(
+ CACHE_ENTRY_WITHOUT_FULL_STAT(udb->bulk_port_range_cache[i]))
+ == bm_index) {
+ if(unmark_full_status) {
+ /* Unmark full stat.. if it was marked so..*/
+ UNMARK_CACHE_ENTRY_AS_FULL(udb->bulk_port_range_cache[i]);
+ } else {
+ udb->bulk_port_range_cache[i] = (i16)BULK_RANGE_INVALID;
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT(
+ "Clearing cache for client 0x%x, bulk port %d\n",
+ my_pm->ipv4_address, base_port);
+#endif
+ }
+ break;
+ }
+ }
+ return;
+}
+
+
+/* Get suitable port from range */
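+/*
+ * Returns the offset, relative to bulk_start, of a free port that satisfies
+ * pair_type (single, pair, odd or even), or -2 if none is free. The uword
+ * to start scanning from is randomized via bulk_port_rand_across to spread
+ * allocations within the bulk.
+ */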
+static i16 get_suiting_port_pos_from_range(cnat_portmap_v2_t *my_pm,
+ u16 bulk_start, i16 bulk_size, port_pair_t pair_type)
+{
+ i16 num_pos = 0, num_bits, iterations;
+ uword bulk_ports;
+ i16 inc = 0;
+ i16 num_uwords = bulk_size/BITS(my_pm->bm[0]);
+
+ if(PREDICT_FALSE(!num_uwords)) {
+ iterations = 0;
+ num_bits = bulk_size;
+ bulk_size = 0;
+ } else {
+ bulk_port_rand_across = randq1(bulk_port_rand_across);
+ iterations = bulk_port_rand_across % num_uwords;
+ num_bits = BITS(my_pm->bm[0]);
+ }
+
+ do {
+ bulk_ports = cgn_clib_bitmap_get_bits(my_pm->bm,
+ (bulk_start + iterations * BITS(my_pm->bm[0])), num_bits);
+#ifdef DEBUG_BULK_PORT_DETAIL
+ PLATFORM_DEBUG_PRINT("%s %d, bulk start %d, num_bits %d, ports %lld \n",
+ __func__, __LINE__, bulk_start, num_bits, bulk_ports);
+#endif /* DEBUG_BULK_PORT_DETAIL */
+ if(PREDICT_FALSE(!bulk_ports)) goto next_uword;
+ if(PREDICT_TRUE((pair_type == PORT_SINGLE)
+ || (pair_type == PORT_PAIR))) {
+ num_pos =0;
+ inc = 1;
+ } else if(pair_type == PORT_S_ODD) {
+ num_pos = 1;
+ inc = 2;
+ } else if(pair_type == PORT_S_EVEN) {
+ num_pos =0;
+ inc = 2;
+ }
+
+ for(; num_pos < num_bits; num_pos = num_pos + inc) {
+ if(!((bulk_ports >> num_pos) & 1))
+ continue; /* In use */
+ /* Check if the available port meets our
+             * criteria such as odd, even, pair, etc. */
+ else if(PREDICT_FALSE(
+ (pair_type == PORT_PAIR) && ((num_pos & 0x1) ||
+ (!((bulk_ports >> (num_pos + 1)) & 1)))))
+ continue;
+ else break; /* Found one that meets the criteria */
+ }
+ if(num_pos < num_bits)
+ return (num_pos + iterations * BITS(my_pm->bm[0]));
+next_uword:
+ num_bits = BITS(my_pm->bm[0]);
+ bulk_size -= BITS(my_pm->bm[0]);
+ iterations++;
+ if(iterations >= num_uwords) iterations = 0;
+ } while (bulk_size > 0);
+
+ return -2; /* nothing found */
+}
+
+static cnat_errno_t try_bulk_port_from_non_cache(
+ cnat_user_db_entry_t *udb,
+ cnat_portmap_v2_t *my_pm,
+ port_pair_t pair_type,
+ bulk_alloc_size_t bulk_size,
+ u16 *port_available,
+ u16 static_port_range
+ )
+{
+    /****
+     1. The user should have existing translations; otherwise we wouldn't get here.
+     2. For each translation, get the outside port and derive the base port;
+        check if it is already in the cache.
+     3. If not, we stand a chance.
+     4. Check for availability from this non-cached pool.
+     5. If found, replace this with one of the cache entries that is invalid or full.
+     6. If we are replacing the cache, it has to be governed by the user
+        preference on preferring the oldest pool or the newest pool.
+    ********/
+ u32 head;
+ cnat_main_db_entry_t *db = NULL;
+ u16 bulk_start; /* start point in 64 bitmap array to search for port */
+ i16 port_pos; /* indicates the position of available port in bulk */
+ i16 i; /* just a counter */
+    i16 attempts_so_far = 0; /* (futile-;) attempts so far.. */
+ u16 prev_bulks[NUM_BULK_CHECK];
+ ASSERT(udb);
+ memset(prev_bulks, 0,sizeof(prev_bulks));
+
+ head = udb->translation_list_head_index;
+ if(PREDICT_FALSE(head == EMPTY)) return CNAT_NO_PRE_ALLOCATED_BULK_PORTS;
+
+ db = cnat_main_db + head;
+ while (1) { //what should be the limit??
+
+ /* skip static ports - static ports may not belong to bulk pool*/
+ if(db->out2in_key.k.port < static_port_range) goto next_entry;
+
+ u16 bm_index = PORT_TO_CACHE(db->out2in_key.k.port, bulk_size);
+
+ /*Check if we have already tested this bulk */
+ for(i=0; i < attempts_so_far; i++) {
+ if(prev_bulks[i] == bm_index) {
+ goto next_entry;
+ }
+ }
+
+ /*Check if this base port is already part of cache */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(CACHE_ENTRY_WITHOUT_FULL_STAT(udb->bulk_port_range_cache[i])
+ == bm_index)
+ goto next_entry;
+ }
+
+        /* this is not in cache already */
+ bulk_start = CACHE_TO_PORT(bm_index, bulk_size);
+ port_pos = get_suiting_port_pos_from_range(my_pm,
+ bulk_start, bulk_size, pair_type);
+
+ if(port_pos < 0) { /* no port available in this range */
+ /* Mark this bulk so that we don't have to try this again */
+ if(attempts_so_far < NUM_BULK_CHECK) {
+ prev_bulks[attempts_so_far] = bm_index;
+ attempts_so_far++;
+ }
+ goto next_entry;
+ }
+
+ /* Got one...Get the port number */
+ *port_available = bulk_start + port_pos;
+
+        /* Check to see if we should replace one of the cache entries */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(PREDICT_FALSE((udb->bulk_port_range_cache[i]
+ == (i16)BULK_RANGE_INVALID) || (
+ IS_CACHE_ENTRY_FULL(udb->bulk_port_range_cache[i])))) {
+ udb->bulk_port_range_cache[i] = bm_index;
+ return CNAT_SUCCESS;
+ }
+ }
+ /* Check to replace an existing (in use) entry */
+ /* TODO: enforce policy */
+ /* order of looping should depend on policy */
+
+ return CNAT_SUCCESS;
+
+next_entry:
+ db = cnat_main_db + db->user_ports.next;
+ /*
+ * its a circular list, so if we have reached the head again
+ * all the entries for that user have been read
+ */
+ if (db == (cnat_main_db + head)) {
+ break;
+ }
+ } /* while loop for db entries */
+ /* no ports available from pre allocated bulk pool */
+ return CNAT_NO_PORT_FROM_BULK;
+}
+
+cnat_errno_t
+cnat_dynamic_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u16 ip_n_to_1,
+ u32 *rseed_ip
+ )
+{
+
+ cnat_errno_t rv;
+ u16 port_available = 0;
+ i16 i;
+ cnat_portmap_v2_t *my_pm;
+
+ if((BULK_ALLOC_SIZE_NONE != bulk_size) /* bulk logging enabled */
+ && (udb)) { /* This user does have translations already */
+ u16 bulk_start;
+ i16 port_pos;
+
+ my_pm = pm + *index;
+ /* We have a case to check if bulk allocated ports can be used */
+ /* TODO: order of looping to be based on policy
+ * like prefer older or prefer newer ??
+ * For now, start with most recent cache entry
+ * so that we stand a better chance of
+ * finding a port
+ */
+ for(i= 0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(PREDICT_TRUE((udb->bulk_port_range_cache[i] ==
+ (i16)BULK_RANGE_INVALID) ||
+ IS_CACHE_ENTRY_FULL(udb->bulk_port_range_cache[i]))) {
+ continue; /* This range is not initialized yet or it is full */
+ }
+ bulk_start = CACHE_TO_PORT(udb->bulk_port_range_cache[i],
+ bulk_size);
+ port_pos = get_suiting_port_pos_from_range(my_pm,
+ bulk_start, bulk_size, pair_type);
+ if(PREDICT_FALSE(port_pos < 0)) {
+ /* Mark this cache entry as full so that we do not
+ * waste time on this entry again */
+ MARK_CACHE_ENTRY_AS_FULL(udb->bulk_port_range_cache[i]);
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT("Marked bulk cache entry %d as full for %x \n",
+ i, my_pm->ipv4_address);
+#endif /* #ifdef DEBUG_BULK_PORT */
+ continue;
+ }
+ /* Get the port number */
+ port_available = bulk_start+ port_pos;
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT(
+ "Found port from cache : IP 0x%x, port %d %d iterations\n",
+            my_pm->ipv4_address, port_available, i);
+#endif
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_cache_hit_count++;
+#endif /* HAVE_BULK_PORT_STATS */
+ break;
+ } /* end of for loop for cache check */
+ /* If we have not found a port yet, check if we can have
+ * pre allocated bulk port from non-cache */
+ if(PREDICT_FALSE(i == BULK_RANGE_CACHE_SIZE)) {
+ if( try_bulk_port_from_non_cache(udb, my_pm, pair_type,
+ bulk_size, &port_available,
+ static_port_range) != CNAT_SUCCESS ) {
+ goto ALLCOATE_NEW_BULK;
+ }
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT("Found port from non-cache : IP 0x%x, port %d\n",
+ my_pm->ipv4_address, port_available);
+#endif
+ }
+ /* Assign the port, mark it as in use */
+ cgn_clib_bitmap_clear_no_check(my_pm->bm, port_available);
+ (my_pm->inuse)++;
+ if(PREDICT_FALSE(pair_type == PORT_PAIR)) {/* Mark the next one too */
+ cgn_clib_bitmap_clear_no_check(my_pm->bm, port_available + 1);
+ (my_pm->inuse)++;
+ }
+ *o_ipv4_address = my_pm->ipv4_address;
+ *o_port = port_available;
+ *nfv9_log_req = CACHE_ALLOC_NO_LOG_REQUIRED;
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_port_use_count++;
+#endif /* HAVE_BULK_PORT_STATS */
+ return (CNAT_SUCCESS);
+ }
+ALLCOATE_NEW_BULK:
+#ifdef DEBUG_BULK_PORT
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ PLATFORM_DEBUG_PRINT(
+ "No port available from bulk cache, bulk size %d\n", bulk_size);
+ }
+#endif
+ /* For whatever reason, we have not got a port yet */
+ rv = cnat_dynamic_port_alloc_v2(pm, atype, pair_type, index,
+ o_ipv4_address, o_port, static_port_range, bulk_size, nfv9_log_req,
+ ip_n_to_1, rseed_ip);
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ return rv;
+ }
+ /* Take care of caching */
+ if(PREDICT_FALSE(udb != NULL)) {
+ /* Predict false because, we usually allocate for new users */
+ cnat_update_bulk_range_cache(udb, *o_port, bulk_size);
+ }
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_port_alloc_count++;
+#endif /* HAVE_BULK_PORT_STATS */
+ return (CNAT_SUCCESS);
+}
+
+
+cnat_errno_t
+cnat_static_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 i_ipv4_address,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u16 ip_n_to_1
+ )
+{
+
+ /***
+ * Requirements -
+ * 1. If the port allocated is below dyn start, it should be individual
+ * port (not bulk)
+ * 2. If NOT, it should be bulk allocated
+ * 3. Try and keep the inside port same as outside port in both the
+ * cases (best effort)
+
+ * Algorithm
+ * 1. Check if it is below stat port start or user is new or bulk is
+ * disabled. If yes, call existing function
+ * 2. If not, see if we can pick from bulk and yet try to keep the port
+ * same - difficult thing - check if the port is free - then check if the
+ * entire bulk is free - if not check if bulk is owned by the user already.
+ * If all of these fail, call existing function to allocate a new bulk
+ * 3. Update cache, etc return log requirements
+ *****/
+
+ cnat_errno_t rv;
+ i16 i;
+ u32 head;
+ cnat_portmap_v2_t *my_pm;
+ uword bit_test_result, start_bit;
+ cnat_main_db_entry_t *db = NULL;
+
+ if((BULK_ALLOC_SIZE_NONE != bulk_size) /* bulk logging enabled */
+ && (udb) && /* This user does have translations already */
+ i_port >= static_port_range ) { /* It is outside stat port range*/
+
+ my_pm = pm + *index;
+ /* We have a case to check if bulk allocated ports can be used */
+
+ /* First check if the required port is available. */
+ if(PREDICT_FALSE(clib_bitmap_get_no_check(my_pm->bm, i_port) == 0)) {
+ goto ALLOCATE_NEW_BULK_STATIC;
+ }
+
+ /* Port is free.. check if the bulk is also free */
+ start_bit= ((i_port/bulk_size) * bulk_size);
+ bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
+ start_bit, bulk_size);
+ if(PREDICT_TRUE(bit_test_result)) { /* bulk is available, grab it */
+ goto ALLOCATE_NEW_BULK_STATIC;
+ }
+
+ /* else, bulk is taken by someone. check if it is me */
+ /* Check if we own the bulk by any chance */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(udb->bulk_port_range_cache[i] == start_bit) break;
+ }
+ if(i == BULK_RANGE_CACHE_SIZE) { /* no luck with cache */
+ head = udb->translation_list_head_index;
+ if(PREDICT_FALSE(head == EMPTY))
+ goto ALLOCATE_NEW_BULK_STATIC;
+ db = cnat_main_db + head;
+ i = 0;
+ while(1) {
+ if((db->out2in_key.k.port/bulk_size) * bulk_size == start_bit) {
+ i = 1; /* Just to indicate it is found */
+ break;
+ }
+ db = cnat_main_db + db->user_ports.next;
+ /*
+ * its a circular list, so if we have reached the head again
+ * all the entries for that user have been read
+ */
+ if (db == (cnat_main_db + head)) break;
+ } /* while loop for db entries */
+ if(!i) {
+ goto ALLOCATE_NEW_BULK_STATIC;
+ }
+ }
+ /* Assign the port, mark it as in use */
+ cgn_clib_bitmap_clear_no_check(my_pm->bm, i_port);
+ (my_pm->inuse)++;
+ *o_ipv4_address = my_pm->ipv4_address;
+ *o_port = i_port;
+ *nfv9_log_req = CACHE_ALLOC_NO_LOG_REQUIRED;
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_port_use_count++;
+#endif /* HAVE_BULK_PORT_STATS */
+
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT("%s, %d, found stat port from bulk: %x, %d\n",
+ __func__,
+ __LINE__, *o_ipv4_address, *o_port);
+#endif /* DEBUG_BULK_PORT */
+ return (CNAT_SUCCESS);
+ }
+
+ALLOCATE_NEW_BULK_STATIC:
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT("%s No port available from bulk cache, bulk size %d\n",
+ __func__,bulk_size);
+#endif
+ /* For whatever reason, we have not got a port yet */
+ rv = cnat_static_port_alloc_v2(pm, atype, pair_type, i_ipv4_address,
+ i_port, index, o_ipv4_address, o_port, static_port_range,
+ bulk_size, nfv9_log_req,ip_n_to_1);
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ return rv;
+ }
+ /* Take care of caching only if it was a bulk alloc */
+ if(PREDICT_FALSE(udb && (BULK_ALLOC_NOT_ATTEMPTED != *nfv9_log_req))) {
+ cnat_update_bulk_range_cache(udb, *o_port, bulk_size);
+ }
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_port_alloc_count++;
+#endif /* HAVE_BULK_PORT_STATS */
+ return (CNAT_SUCCESS);
+
+}
+
+cnat_errno_t
+cnat_mapped_static_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ u32 *index,
+ u32 ipv4_address,
+ u16 port,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u16 ip_n_to_1
+ )
+{
+ /* Requirements :
+ * 1. Check if bulk allocation is required.
+ * 2. Call cnat_mapped_static_port_alloc_v2 to allocate
+ * 3. Decide if alloc has to be cached
+ * 4. Update nfv9_log_req
+ */
+ cnat_errno_t rv;
+ rv = cnat_mapped_static_port_alloc_v2 (pm,
+ atype, index, ipv4_address, port, nfv9_log_req, bulk_size, ip_n_to_1);
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ return rv;
+ }
+ /* Take care of caching only if it was a bulk alloc */
+ if(PREDICT_FALSE(udb && (BULK_ALLOC_NOT_ATTEMPTED != *nfv9_log_req))) {
+ int i;
+        port = port/bulk_size;
+        port = port*bulk_size; /* align it to bulk size boundary */
+ for(i=0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(CACHE_ENTRY_WITHOUT_FULL_STAT(udb->bulk_port_range_cache[i])
+ == PORT_TO_CACHE(port, bulk_size))
+ break;
+ }
+        if( i == BULK_RANGE_CACHE_SIZE) { /* else, it is already in cache */
+ cnat_update_bulk_range_cache(udb, port, bulk_size);
+ }
+ }
+#ifdef HAVE_BULK_PORT_STATS
+ mapped_port_alloc_count++;
+#endif /* HAVE_BULK_PORT_STATS */
+ return (CNAT_SUCCESS);
+}
+
+
+cnat_errno_t
+cnat_dynamic_port_alloc_rtsp_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u32 *rseed_ip)
+{
+
+ /***
+ * Algorithm
+ * 1. Compute the range of ports required based on the number of digits
+ * in the port request made by the client.
+ * 2. Check if bulk logging is enabled. If not, use the existing method.
+ * 3. Check if there are 2 adjacent ports available that meet the above
+ * criteria in any of the bulk allocations made already.
+ * 4. If yes, mark them in use and return.
+ * 5. If not allocate a new bulk and pick 2 ports in it
+ ***/
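+    /*
+     * For example, with static_port_range below MIN_STATIC_PORT_RANGE_FOR_RTSP
+     * and a 4 digit client port, range 1 is [static_port_range, 9999] and
+     * range 2 is [10000, PORTS_PER_ADDR - 1]; the loop below tries range 1
+     * first and falls back to range 2.
+     */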
+
+ i16 i;
+ cnat_portmap_v2_t *my_pm = 0;
+ u32 start_port1, end_port1, start_port2, end_port2;
+ int range_loop;
+ u16 bulk_start;
+ i16 port_pos;
+ u16 port_available = 0;
+
+ ASSERT(index);
+ ASSERT(o_ipv4_address);
+ ASSERT(o_port);
+
+    /*
+     * Check if the port is 4 digit or 5 digit. We assume we do not get
+     * 3 (or 2 or 1) digit ports, for which we could not allocate
+     * same-sized outside ports anyway - outside ports start from 1024.
+     *
+     * Static ports have their own reserved range. Ensure that the range
+     * is such that at least a few 4 digit ports are available for RTSP.
+     * If not, it does not make sense to do special allocation for RTSP.
+     */
+ if (PREDICT_TRUE(static_port_range < MIN_STATIC_PORT_RANGE_FOR_RTSP)) {
+ /*
+ * 4 digit port or less
+ */
+ if (i_port <= 9999) {
+ start_port1 = static_port_range;
+ end_port1 = 9999;
+
+ start_port2 = 10000;
+ end_port2 = PORTS_PER_ADDR - 1;
+ } else { /* 5 digit port */
+ start_port1 = 10000;
+ end_port1 = PORTS_PER_ADDR - 1;
+
+ start_port2 = static_port_range;
+ end_port2 = 9999;
+ }
+ } else { /* Static port range is too big */
+ start_port1 = static_port_range;
+ end_port1 = PORTS_PER_ADDR - 1;
+
+ /*
+ * PORTS_PER_ADDR is just a placeholder for
+ * INVALID_PORT, valid ports are b/w 1 and PORTS_PER_ADDR
+ */
+ start_port2 = PORTS_PER_ADDR;
+ end_port2 = PORTS_PER_ADDR;
+ }
+
+
+ if(PREDICT_TRUE(udb != NULL)) {
+ my_pm = pm + *index;
+ }
+
+ /* Now check if this user already owns a bulk range that is
+ * within start range 1
+ */
+
+ u32 start_range = start_port1;
+ u32 end_range = end_port1;
+ for(range_loop = 0; range_loop < 2; range_loop++) {
+ if((BULK_ALLOC_SIZE_NONE == bulk_size) || (!udb)) {
+ goto ALLOCATE_NEW_RTSP_PORTS;
+ }
+ for(i= 0; i < BULK_RANGE_CACHE_SIZE; i++) {
+ if(PREDICT_TRUE((udb->bulk_port_range_cache[i] ==
+ (i16)BULK_RANGE_INVALID) ||
+ IS_CACHE_ENTRY_FULL(udb->bulk_port_range_cache[i]))) {
+ continue; /* This range is not initialized yet or it is full */
+ }
+
+ bulk_start = CACHE_TO_PORT(udb->bulk_port_range_cache[i],
+ bulk_size);
+ if(bulk_start < start_port1 || bulk_start >= end_port1) {
+ continue; /* Not in the range */
+ }
+
+ port_pos = get_suiting_port_pos_from_range(my_pm,
+ bulk_start, bulk_size, pair_type);
+ if(PREDICT_FALSE(port_pos < 0)) {
+ /* Not Marking this cache entry as full as it failed
+ * for pair type. It might have individual entries
+ */
+ continue;
+ }
+ /* Get the port number */
+ port_available = bulk_start+ port_pos;
+#ifdef DEBUG_BULK_PORT
+ PLATFORM_DEBUG_PRINT(
+ "Found port from cache : IP 0x%x, port %d %d iterations\n",
+            my_pm->ipv4_address, port_available, i);
+#endif
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_cache_hit_count += 2;
+#endif /* HAVE_BULK_PORT_STATS */
+ break;
+ } /* end of for loop for cache check */
+
+ if(PREDICT_FALSE(i == BULK_RANGE_CACHE_SIZE)) {
+            /* We have not found a port yet, but we do not want to try
+             * non-cache bulks, because the probability is very low and
+             * we do not want to tweak that code for this special case.
+             * The impact of not checking the non-cache is that we give
+             * this user a few extra ports, which is OK.
+             */
+ goto ALLOCATE_NEW_RTSP_PORTS;
+ }
+#ifdef DEBUG_BULK_PORT
+        PLATFORM_DEBUG_PRINT("RTSP: Found port from cache : IP 0x%x, port %d\n",
+ my_pm->ipv4_address, port_available);
+#endif
+
+ /* Assign the port, mark it as in use */
+ cgn_clib_bitmap_clear_no_check(my_pm->bm, port_available);
+ (my_pm->inuse)++;
+ cgn_clib_bitmap_clear_no_check(my_pm->bm, port_available + 1);
+ (my_pm->inuse)++;
+
+ *o_ipv4_address = my_pm->ipv4_address;
+ *o_port = port_available;
+ *nfv9_log_req = CACHE_ALLOC_NO_LOG_REQUIRED;
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_port_use_count += 2;
+#endif /* HAVE_BULK_PORT_STATS */
+ return (CNAT_SUCCESS);
+
+ALLOCATE_NEW_RTSP_PORTS:
+ /* No luck. Let's try allocating new bulk.. */
+ if(PREDICT_TRUE(CNAT_SUCCESS == cnat_dynamic_port_alloc_rtsp
+ (pm, atype, pair_type,
+ start_range, end_range,index, o_ipv4_address,
+ o_port, bulk_size, nfv9_log_req,rseed_ip))) {
+ if(PREDICT_FALSE(udb &&
+ (BULK_ALLOC_NOT_ATTEMPTED != *nfv9_log_req))) {
+ cnat_update_bulk_range_cache(udb, *o_port, bulk_size);
+ }
+#ifdef HAVE_BULK_PORT_STATS
+ bulk_port_alloc_count++;
+#endif /* HAVE_BULK_PORT_STATS */
+ return CNAT_SUCCESS;
+ }
+
+ /* Could not allocate in range 1.. so move to range 2. */
+ start_range = start_port2;
+ end_range = end_port2;
+
+ }
+
+ return (CNAT_NOT_FOUND_DIRECT); /* if we are here, we could not get any ports */
+
+}
+
+#else /* Dummy definitions */
+void show_bulk_port_stats()
+{
+ PLATFORM_DEBUG_PRINT("\nBulk logging feature not included\n");
+}
+
+ void clear_bulk_port_stats()
+{
+ PLATFORM_DEBUG_PRINT("\nBulk logging feature not included\n");
+}
+#endif /* NO_BULK_LOGGING */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_bulk_port.h b/plugins/vcgn-plugin/vcgn/cnat_bulk_port.h
new file mode 100644
index 00000000000..3e48b9a7794
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_bulk_port.h
@@ -0,0 +1,157 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_bulk_port.h - bulk port alloc definitions
+ *
+ * Copyright (c) 2011-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_BULK_PORT_H__
+#define __CNAT_BULK_PORT_H__
+
+#ifndef NO_BULK_LOGGING
+#include "cnat_bulk_port_defs.h"
+
+cnat_errno_t
+cnat_dynamic_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u16 ip_n_to_1,
+ u32 *rseed_ip);
+
+void cnat_update_bulk_range_cache(cnat_user_db_entry_t *udb, u16 o_port,
+ bulk_alloc_size_t bulk_size);
+
+void cnat_port_free_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ int index,
+ port_pair_t ptype,
+ u16 base_port,
+ cnat_user_db_entry_t *udb,
+ u16 static_port_range,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req);
+
+cnat_errno_t cnat_static_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 i_ipv4_address,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u16 ip_n_to_1
+ );
+
+cnat_errno_t cnat_dynamic_port_alloc_rtsp_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u32 *rseed_ip);
+
+cnat_errno_t
+cnat_mapped_static_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ u32 *index,
+ u32 ipv4_address,
+ u16 port,
+ cnat_user_db_entry_t *udb,
+ bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req,
+ u16 ip_n_to_1
+ );
+
+#else /* NO_BULK_LOGGING */
+/* use older code */
+inline cnat_errno_t
+cnat_dynamic_port_alloc_v2_bulk (
+ cnat_vrfmap_t *vrf_map,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range,
+ u16 ip_n_to_1,
+ u32 *rseed_ip
+ )
+{
+ return cnat_dynamic_port_alloc_v2(vrf_map->portmap_list, atype,
+ pair_type, index, o_ipv4_address, o_port, static_port_range,
+ ip_n_to_1, rseed_ip);
+}
+
+inline void cnat_port_free_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ int index,
+ port_pair_t ptype,
+ u16 base_port,
+ cnat_user_db_entry_t *udb,
+    u16 static_port_range)
+{
+    cnat_port_free_v2(pm, index, ptype, base_port,
+ static_port_range);
+}
+
+inline cnat_errno_t cnat_static_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 i_ipv4_address,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range)
+{
+ return cnat_static_port_alloc_v2 (pm, atype, pair_type,
+ i_ipv4_address, i_port, index, o_ipv4_address, o_port);
+}
+
+inline cnat_errno_t
+cnat_mapped_static_port_alloc_v2_bulk (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ u32 *index,
+ u32 ipv4_address,
+ u16 port)
+{
+    return cnat_mapped_static_port_alloc_v2(pm, atype, index,
+ ipv4_address, port);
+}
+
+#endif /* NO_BULK_LOGGING */
+#endif /* __CNAT_BULK_PORT_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_bulk_port_defs.h b/plugins/vcgn-plugin/vcgn/cnat_bulk_port_defs.h
new file mode 100644
index 00000000000..edb47b0a8e1
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_bulk_port_defs.h
@@ -0,0 +1,57 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_bulk_port_defs.h bulk port alloc definitions
+ *
+ * Copyright (c) 2011 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_BULK_PORT_DEFS_H__
+#define __CNAT_BULK_PORT_DEFS_H__
+
+
+#ifndef NO_BULK_LOGGING
+
+typedef enum {
+ BULK_ALLOC_SIZE_NONE = 1,
+ BULK_ALLOC_SIZE_16 = 16,
+ BULK_ALLOC_SIZE_32 = 32,
+ BULK_ALLOC_SIZE_64 = 64,
+ BULK_ALLOC_SIZE_128 = 128,
+ BULK_ALLOC_SIZE_256 = 256,
+ BULK_ALLOC_SIZE_512 = 512,
+ BULK_ALLOC_SIZE_1024 = 1024,
+ BULK_ALLOC_SIZE_2048 = 2048,
+ BULK_ALLOC_SIZE_4096 = 4096
+} bulk_alloc_size_t;
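+
+/*
+ * Note: BULK_ALLOC_SIZE_NONE is 1 rather than 0, presumably so that
+ * divisions and modulo operations by bulk_size stay well defined even
+ * when bulk allocation is disabled.
+ */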
+
+/* #define DEBUG_BULK_PORT 1 TODO: remove this later */
+
+#define CACHE_ALLOC_NO_LOG_REQUIRED -1
+#define BULK_ALLOC_NOT_ATTEMPTED -2
+
+#define BULK_RANGE_INVALID 0xFFFF
+#define BULK_RANGE_CACHE_SIZE 4
+
+#define BULKSIZE_FROM_VRFMAP(vrfmap) ((vrfmap)->bulk_size)
+
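+/*
+ * INIT_BULK_CACHE(udb) resets all BULK_RANGE_CACHE_SIZE cache slots of a
+ * user db entry to BULK_RANGE_INVALID (i.e. no bulk ranges cached yet).
+ */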
+#define INIT_BULK_CACHE(udb) \
+ { \
+ int i; \
+ for(i =0; i < BULK_RANGE_CACHE_SIZE; i++) \
+ (udb)->bulk_port_range_cache[i] = (i16)BULK_RANGE_INVALID; \
+ }
+
+#endif /* NO_BULK_LOGGING */
+#endif /* __CNAT_BULK_PORT_DEFS_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_cli.h b/plugins/vcgn-plugin/vcgn/cnat_cli.h
new file mode 100644
index 00000000000..e9d190a577a
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_cli.h
@@ -0,0 +1,206 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_cli.h - CLI definitions
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_CLI_H__
+#define __CNAT_CLI_H__
+
+#include "cnat_config_api.h"
+#include "cnat_show_api.h"
+
+/* from iox cli error */
+typedef enum {
+ CNAT_SUCCESS = 0,
+ CNAT_NO_CONFIG,
+ CNAT_NO_VRF_RUN,
+ CNAT_NO_POOL_ANY,
+ CNAT_NO_PORT_ANY,
+#ifndef NO_BULK_LOGGING
+ CNAT_NO_PORT_FROM_BULK,
+ CNAT_NO_PRE_ALLOCATED_BULK_PORTS,
+#endif /* NO_BULK_LOGGING */
+ CNAT_BAD_INUSE_ANY,
+ CNAT_NOT_FOUND_ANY,
+ CNAT_INV_PORT_DIRECT,
+ CNAT_DEL_PORT_DIRECT,
+ CNAT_BAD_INUSE_DIRECT,
+ CNAT_NOT_FOUND_DIRECT,
+ CNAT_OUT_LIMIT,
+ CNAT_MAIN_DB_LIMIT,
+ CNAT_USER_DB_LIMIT,
+ CNAT_NOT_STATIC_PORT,
+ CNAT_BAD_STATIC_PORT_REQ,
+ CNAT_NOT_THIS_CORE,
+ CNAT_ERR_PARSER,
+ CNAT_ERR_INVALID_MSG_ID,
+ CNAT_ERR_INVALID_MSG_SIZE,
+ CNAT_ERR_INVALID_PAYLOAD_SIZE,
+ CNAT_ERR_BAD_TCP_UDP_PORT,
+ CNAT_ERR_BULK_SINGLE_FAILURE,
+ CNAT_ERR_XLAT_ID_INVALID,
+ CNAT_ERR_XLAT_V6_PREFIX_INVALID,
+ CNAT_ERR_XLAT_V4_PREFIX_INVALID,
+ CNAT_ERR_XLAT_TCP_MSS_INVALID,
+ CNAT_ERR_6RD_ID_INVALID,
+ CNAT_ERR_6RD_V4_TUNNEL_SRC_INVALID,
+ CNAT_ERR_6RD_V6_PREFIX_INVALID,
+ CNAT_ERR_6RD_V6_BR_UNICAST_INVALID,
+ CNAT_ERR_6RD_V4_PREFIX_MASK_LEN_INVALID,
+ CNAT_ERR_6RD_V4_SUFFIX_MASK_LEN_INVALID,
+ CNAT_ERR_6RD_V4_COMBO_MASK_LEN_INVALID,
+ CNAT_ERR_6RD_TUNNEL_MTU_INVALID,
+ CNAT_ERR_6RD_TUNNEL_TTL_INVALID,
+ CNAT_ERR_6RD_TUNNEL_TOS_INVALID,
+ CNAT_ERR_NAT64_NO_VRF_RUN,
+ CNAT_ERR_NAT64_ID_INVALID,
+ CNAT_ERR_NAT64_V6_PREFIX_INVALID,
+ CNAT_ERR_NAT64_V4_PREFIX_INVALID,
+ CNAT_ERR_NAT64_TCP_MSS_INVALID,
+#ifdef CGSE_DS_LITE
+ CNAT_ERR_DS_LITE_ID_INVALID,
+#endif /* CGSE_DS_LITE */
+ CNAT_ERR_NO_SESSION_DB,
+ CNAT_ERR_MAPE_ID_INVALID,
+ CNAT_ERR_MAX
+} cnat_errno_t;
+
+#define CNAT_TRUE 1
+#define CNAT_FALSE 0
+
+
+#define CNAT_DEBUG_NONE (0)
+#define CNAT_DEBUG_GLOBAL_ERR (1 << 0)
+#define CNAT_DEBUG_DROP_TCP (1 << 0)
+#define CNAT_DEBUG_DROP_UDP (1 << 1)
+#define CNAT_DEBUG_DROP_ICMP (1 << 2)
+#define CNAT_DEBUG_ERR_TCP (1 << 3)
+#define CNAT_DEBUG_ERR_UDP (1 << 4)
+#define CNAT_DEBUG_ERR_ICMP (1 << 5)
+#define CNAT_DEBUG_ERR_ALG (1 << 6)
+#define CNAT_DEBUG_GLOBAL_ALL (1 << 7)
+#define CNAT_DEBUG_FTP_ALG (1 << 8)
+
+
+
+#define CNAT_DEBUG_ALL 0x1FF /* all of the above (bits 0..8) */
+#define CNAT_DEBUG_ERR_ALL 0x38 /* ERR_TCP | ERR_UDP | ERR_ICMP */
+
+#define CNAT_DB_CLEAR_SPECIFIC (0)
+#define CNAT_DB_CLEAR_ALL (1 << 0)
+#define CNAT_DB_CLEAR_VRF (1 << 1)
+#define CNAT_DB_CLEAR_ADDR (1 << 2)
+#define CNAT_DB_CLEAR_PROTO (1 << 3)
+#define CNAT_DB_CLEAR_PORT (1 << 4)
+
+
+#define MAX_UIDX 0x3fff /*the max svi app uidb index */
+/* address mask per core */
+#define ADDR_MASK_PER_CORE PLATFORM_ADDR_MASK_PER_CORE
+#define ADDR_MASK_PER_CORE_PER_PARTITION \
+ PLATFORM_ADDR_MASK_PER_CORE_PER_PARTITION
+
+#define MAX_CORES PLATFORM_MAX_CORES
+#define MAX_CORES_PER_PARTITION PLATFORM_MAX_CORES_PER_PARTITION
+
+/*
+ * Maximum pool size that is supported by platform
+ */
+#define CNAT_MAX_ADDR_POOL_SIZE PLATFORM_CNAT_MAX_ADDR_POOL_SIZE
+#define CNAT_MAX_ADDR_POOL_SIZE_PER_CORE \
+ (CNAT_MAX_ADDR_POOL_SIZE / MAX_CORES_PER_PARTITION)
+
+#define BOUNDARY_VALUE 256
+
+#define BOUNDARY_VALUE_MASK 0xff
+
+#define NUM_ADDR_IN_RANGE(range, value, instance) \
+ ((range / value) + ((instance % MAX_CORES_PER_PARTITION) < (range%value) ? 1 : 0))
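+/*
+ * Worked example (added for illustration, not from the original header):
+ * with range = 10 addresses, value = 4 cores and instances 0..3 (assuming
+ * MAX_CORES_PER_PARTITION >= 4), range/value = 2 and range%value = 2, so
+ * instances 0 and 1 are assigned 3 addresses each while instances 2 and 3
+ * get 2 each, covering all 10 addresses.
+ */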
+
+typedef enum {
+ CNAT_DEBUG_FLAGS_DUMP = 0,
+ CNAT_DEBUG_FLAG_UDP_INSIDE_CHECKSUM_DISABLE,
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_CHECKSUM_DISABLE,
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_PKT_DUMP_ENABLE,
+ CNAT_DEBUG_FLAG_UDP_INSIDE_PKT_DUMP_ENABLE,
+ CNAT_DEBUG_FLAG_ICMP_PKT_DUMP_ENABLE,
+ CNAT_DEBUG_FLAG_FRAG_PKT_DUMP_ENABLE,
+ CNAT_DEBUG_FLAG_CONFIG_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_GLOBAL_DEBUG_ALL_ENABLE,
+ CNAT_DEBUG_FLAG_SUMMARY_STATS_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_SHOW_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_XLAT_CONFIG_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_XLAT_DATA_PATH_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_TCP_LOGGING_ENABLE,
+ CNAT_DEBUG_FLAG_NFV9_LOGGING_DUMP_ENABLE,
+ CNAT_DEBUG_FLAG_SYSLOG_LOGGING_DUMP_ENABLE,
+ CNAT_DEBUG_SET_STATIC_PORT_RANGE,
+ CNAT_DEBUG_FLAG_V6RD_DATA_PATH_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_V6RD_CONFIG_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_V6RD_DEFRAG_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_NAT64_CONFIG_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_NAT64_DATA_PATH_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_DSLITE_DP_ENABLE,
+ CNAT_DEBUG_FLAG_DSLITE_CONFIG_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_CONFIG_PPTP_ENABLE = 24,
+ CNAT_DEBUG_FLAG_CONFIG_PCP_ENABLE = 25,
+ CNAT_DEBUG_FLAG_MAPE_CONFIG_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_MAPE_DATA_PATH_DEBUG_ENABLE,
+ CNAT_DEBUG_FLAG_MAX,
+} cnat_debug_variable_value;
+
+/*
+ * Don't use too small values for PATH MTU
+ */
+#define MIN_NFV9_PATH_MTU 100
+
+extern u32 global_debug_flag;
+extern u16 debug_i_vrf;
+extern u32 debug_i_flag;
+extern u32 debug_i_addr_start;
+extern u32 debug_i_addr_end;
+extern u16 debug_o_vrf;
+extern u32 debug_o_flag;
+extern u32 debug_o_addr_start;
+extern u32 debug_o_addr_end;
+extern u32 tcp_logging_enable_flag;
+extern u32 nfv9_logging_debug_flag;
+
+extern u32 udp_inside_checksum_disable;
+extern u32 udp_outside_checksum_disable;
+extern u32 udp_inside_packet_dump_enable;
+extern u32 udp_outside_packet_dump_enable;
+
+extern u32 icmp_debug_flag;
+extern u32 frag_debug_flag;
+
+extern u32 summary_stats_debug_flag;
+
+extern u32 config_debug_level;
+extern u32 show_debug_level;
+
+
+/* CLI API prototypes called from vcgn_classify.c */
+extern void cnat_nat44_add_vrf_map_t_handler(spp_api_cnat_v4_add_vrf_map_t *mp,
+ vlib_main_t *vm);
+extern void cnat_nat44_handle_show_stats(vlib_main_t *vm);
+extern void cnat_nat44_handle_show_config(vlib_main_t *vm);
+extern void cnat_nat44_set_protocol_timeout_value(u16 active,
+ u16 init, u8 *proto, u8 reset, vlib_main_t *vm);
+extern void cnat_v4_show_inside_entry_req_t_handler
+(spp_api_cnat_v4_show_inside_entry_req_t *mp, vlib_main_t *vm);
+
+#endif /* __CNAT_CLI_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_cli_handler.c b/plugins/vcgn-plugin/vcgn/cnat_cli_handler.c
new file mode 100644
index 00000000000..a4010349b40
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_cli_handler.c
@@ -0,0 +1,961 @@
+/* *------------------------------------------------------------------
+ * cnat_cli_handler.c - CLI handler definitions
+ *
+ * Copyright (c) 2007-2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/error.h>
+
+#include "cnat_db.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_cli.h"
+#include "cnat_logging.h"
+#include "cnat_syslog.h"
+#include "cnat_config_api.h"
+#include "cnat_show_api.h"
+#include "cnat_show_response.h"
+
+#include <arpa/inet.h>
+
+#if DPDK
+#include <vnet/devices/dpdk/dpdk.h>
+#endif
+
+u32 show_debug_level = 0;
+
+u32
+cnat_get_vrfmap_nfv9_logging_index (u32 i_vrf_id)
+{
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info = 0;
+ u32 logging_index = EMPTY;
+
+ /*
+ * Start with global logging index if available
+ */
+ if (cnat_nfv9_global_info.cnat_nfv9_init_done) {
+ logging_index = cnat_nfv9_global_info.cnat_nfv9_global_collector_index;
+
+ pool_foreach (my_nfv9_logging_info, cnat_nfv9_logging_info_pool, ({
+ if (my_nfv9_logging_info->i_vrf_id == i_vrf_id) {
+ logging_index = my_nfv9_logging_info -
+ cnat_nfv9_logging_info_pool;
+ break;
+ }
+ }));
+ }
+ return (logging_index);
+}
+
+u32
+cnat_get_vrfmap_syslog_logging_index (u32 i_vrf_id)
+{
+ cnat_syslog_logging_info_t *my_syslog_info = NULL;
+ u32 logging_index = EMPTY;
+
+ /*
+ * Start with global logging index if available
+ */
+ if(PREDICT_TRUE(cnat_syslog_global_info.cnat_syslog_init_done)) {
+
+ pool_foreach (my_syslog_info, cnat_syslog_logging_info_pool, ({
+ if (my_syslog_info->i_vrf_id == i_vrf_id) {
+ logging_index = my_syslog_info -
+ cnat_syslog_logging_info_pool;
+ break;
+ }
+ }));
+ }
+ return (logging_index);
+}
+
+void
+cnat_set_vrf_params_with_default(cnat_vrfmap_t *my_vrfmap, u32 i_vrf, u32 i_vrf_id)
+{
+
+ my_vrfmap->status = S_WAO;
+
+ my_vrfmap->i_vrf = i_vrf;
+ my_vrfmap->i_vrf_id = i_vrf_id;
+
+ my_vrfmap->o_vrf = INVALID_UIDX;
+ my_vrfmap->o_vrf_id = INVALID_VRFID;
+
+#ifndef NO_BULK_LOGGING
+ BULKSIZE_FROM_VRFMAP(my_vrfmap) = BULK_ALLOC_SIZE_NONE;
+#endif /* #ifndef NO_BULK_LOGGING */
+ my_vrfmap->tcp_mss = V4_TCP_MSS_NOT_CONFIGURED_VALUE;
+ my_vrfmap->frag_tout = CNAT_IPV4_FRAG_TIMEOUT_DEF;
+ my_vrfmap->port_limit = V4_DEF_VRF_MAX_PORTS;
+ my_vrfmap->nfv9_logging_index =
+ cnat_get_vrfmap_nfv9_logging_index(i_vrf_id);
+ my_vrfmap->syslog_logging_index =
+ cnat_get_vrfmap_syslog_logging_index(i_vrf_id);
+
+ /* Copy logging policy from nfv9 info. */
+ if(my_vrfmap->nfv9_logging_index != EMPTY) {
+ cnat_nfv9_logging_info_t *nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + my_vrfmap->nfv9_logging_index;
+ my_vrfmap->nf_logging_policy = nfv9_logging_info->logging_policy;
+ }
+ if(my_vrfmap->syslog_logging_index != EMPTY) {
+ cnat_syslog_logging_info_t *syslog_logging_info =
+ cnat_syslog_logging_info_pool + my_vrfmap->syslog_logging_index;
+ my_vrfmap->syslog_logging_policy = syslog_logging_info->logging_policy;
+ }
+ #if 0
+ printf("Initializing params in cnat_set_vrf_params_with_default\n"
+ "my_vrfmap->status = %u\n"
+ "my_vrfmap->tcp_mss = %u\n"
+ "my_vrfmap->i_vrf = %u\n"
+ "my_vrfmap->i_vrf_id = %u\n"
+ "my_vrfmap->o_vrf = %u\n"
+ "my_vrfmap->o_vrf_id = %u\n"
+ "my_vrfmap->bulk_size = %u\n"
+ "my_vrfmap->nfv9_logging_index = %u\n"
+ "my_vrfmap->syslog_logging_index = %u\n"
+ "my_vrfmap->frag_tout = %u\n"
+ "my_vrfmap->port_limit = %u\n"
+ "my_vrfmap->nf_logging_policy = %u\n"
+ "my_vrfmap->syslog_logging_policy = %u\n",
+ my_vrfmap->status,
+ my_vrfmap->tcp_mss,
+ my_vrfmap->i_vrf,
+ my_vrfmap->i_vrf_id,
+ my_vrfmap->o_vrf,
+ my_vrfmap->o_vrf_id,
+ my_vrfmap->bulk_size,
+ my_vrfmap->nfv9_logging_index,
+ my_vrfmap->syslog_logging_index,
+ my_vrfmap->frag_tout,
+ my_vrfmap->port_limit,
+ my_vrfmap->nf_logging_policy,
+ my_vrfmap->syslog_logging_policy);
+ #endif /* if 0 */
+}
+
+/* config command handlers */
+void cnat_nat44_add_vrf_map_t_handler(spp_api_cnat_v4_add_vrf_map_t *mp,
+ vlib_main_t *vm)
+{
+ void cnat_table_entry_fill_map(u32 start_addr, u32 end_addr,
+ cnat_portmap_v2_t **port_map_holder);
+ u32 start_addr, end_addr;
+ u32 pm_len __attribute__((unused));
+ cnat_vrfmap_t *my_vrfmap = 0;
+ cnat_portmap_v2_t *pm = 0;
+ u16 i_vrf, o_vrf;
+ u32 ivrf_id, ovrf_id;
+ u16 my_vrfmap_index;
+ u8 i = 0;
+
+ start_addr = mp->start_addr[0];
+ end_addr = mp->end_addr[0];
+ i_vrf = mp->i_vrf;
+ o_vrf = mp->o_vrf;
+ ovrf_id = mp->o_vrf_id;
+ ivrf_id = mp->i_vrf_id;
+
+#if DEBUG_NOT_COMMENTED
+ vlib_cli_output(vm, "%s: saddr[0x%x], eaddr[0x%x], i_vrf[0x%x], o_vrf[0x%x], "
+ "ovrf_id[0x%x], ivrf_id[0x%x]\n", __func__, start_addr, end_addr,
+ i_vrf, o_vrf, ovrf_id, ivrf_id);
+#endif
+ if (start_addr > end_addr) {
+ vlib_cli_output(vm, "Add VRF Map failed start addr 0x%x > end addr 0x%x\n",
+ start_addr, end_addr);
+ return;
+ }
+ if ((end_addr - start_addr) > CNAT_MAX_ADDR_POOL_SIZE) {
+ vlib_cli_output(vm, "Add VRF Map failed start addr 0x%x - end addr "
+ "0x%x range > 65536\n", start_addr, end_addr);
+ return;
+ }
+ my_vrfmap_index = vrf_map_array[i_vrf];
+
+ if (my_vrfmap_index != VRF_MAP_ENTRY_EMPTY) {
+
+ my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;
+
+ my_vrfmap->o_vrf = o_vrf;
+ my_vrfmap->i_vrf_id = ivrf_id;
+ my_vrfmap->o_vrf_id = ovrf_id;
+ } else {
+ /*
+ * first time add
+ */
+ pool_get(cnat_map_by_vrf, my_vrfmap);
+ memset(my_vrfmap, 0, sizeof(*my_vrfmap));
+ /* waiting for outside vrf */
+ cnat_set_vrf_params_with_default(my_vrfmap, i_vrf, ivrf_id);
+ my_vrfmap->i_vrf = i_vrf;
+ my_vrfmap->o_vrf = o_vrf;
+ my_vrfmap->i_vrf_id = ivrf_id;
+ my_vrfmap->o_vrf_id = ovrf_id;
+#ifndef NO_BULK_LOGGING
+ BULKSIZE_FROM_VRFMAP(my_vrfmap) = BULK_ALLOC_SIZE_NONE;
+#endif /* #ifndef NO_BULK_LOGGING */
+
+ my_vrfmap->tcp_mss = V4_TCP_MSS_NOT_CONFIGURED_VALUE;
+ my_vrfmap->status = S_WA;
+ my_vrfmap->frag_tout = 0; /* currently setting it to 0 */
+ my_vrfmap->port_limit = V4_DEF_VRF_MAX_PORTS;
+ vrf_map_array[i_vrf] = (my_vrfmap - cnat_map_by_vrf);
+ }
+ pm = my_vrfmap->portmap_list;
+ pm_len = vec_len(pm);
+ for(i=0; i < 1 ; i++) {
+ start_addr = mp->start_addr[i];
+ end_addr = mp->end_addr[i];
+ if((start_addr == 0) || (end_addr == 0))
+ break;
+
+ cnat_table_entry_fill_map(start_addr, end_addr,
+ &(my_vrfmap->portmap_list));
+ }
+ my_vrfmap->status = S_RUN;
+ vlib_cli_output(vm, "Address Pool Config Successful !!\n");
+ return;
+}
+
+void cnat_nat44_set_protocol_timeout_value(u16 active,
+ u16 init, u8 *proto, u8 reset, vlib_main_t *vm)
+{
+ if (!strncmp((char *) proto, "tcp", 3)) {
+ tcp_initial_setup_timeout = (reset) ? V4_DEF_TCP_IS_TO : init;
+ tcp_active_timeout = (reset) ? V4_DEF_TCP_AS_TO : active;
+
+ } else if (!strncmp((char *) proto, "udp", 3)) {
+ udp_init_session_timeout = (reset) ? V4_DEF_UDP_IS_TO : init;
+ udp_act_session_timeout = (reset) ? V4_DEF_UDP_AS_TO : active;
+
+ } else if (!strncmp((char *) proto, "icmp", 4)) {
+ icmp_session_timeout = (reset) ? V4_DEF_ICMP_S_TO : active;
+
+ } else {
+ vlib_cli_output(vm, "Error !! Unsupported protocol %s\n", proto);
+ }
+ return;
+}
+
+
+
+
+/* Show command handlers */
+void cnat_nat44_handle_show_stats(vlib_main_t *vm)
+{
+ pool_header_t *h;
+ u32 used, free;
+ cnat_vrfmap_t *my_vrfmap =0;
+ cnat_portmap_v2_t *pm =0, *my_pm = 0;
+ u32 i, pm_len;
+ struct in_addr ip;
+ void cnat_nfv9_show_collector
+ (vlib_main_t *vm, cnat_nfv9_logging_info_t *my_nfv9_logging_info);
+
+ /* active translations */
+ h = pool_header(cnat_main_db);
+ free = vec_len(h->free_indices);
+ used = vec_len(cnat_main_db) - free;
+
+ vlib_cli_output(vm, "vCGN NAT44 Statistics :\n");
+ vlib_cli_output(vm, "\tActive Translations : %u\n",
+ NAT44_COMMON_STATS.active_translations);
+ vlib_cli_output(vm, "\tTotal free translation entries : %u\n", free);
+ vlib_cli_output(vm, "\tTotal used translation entries : %u\n", used);
+ vlib_cli_output(vm, "\ti2o drops due to port limit exceeded : %lu\n",
+ in2out_drops_port_limit_exceeded);
+ vlib_cli_output(vm, "\ti2o drops due to system limit reached : %lu\n",
+ in2out_drops_system_limit_reached);
+ vlib_cli_output(vm, "\ti2o drops due to resource depletion : %lu\n",
+ in2out_drops_resource_depletion);
+ vlib_cli_output(vm, "\to2i drops due to no translations : %lu\n",
+ NAT44_COMMON_STATS.no_translation_entry_drops);
+
+ vlib_cli_output(vm, "\tPool address usage:\n");
+ vlib_cli_output(vm, "\t-------------------------------------------------\n");
+ vlib_cli_output(vm, "\tExternal Address \tPorts Used\n");
+ vlib_cli_output(vm, "\t-------------------------------------------------\n");
+
+ used = 0;
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ pm = my_vrfmap->portmap_list;
+ pm_len = vec_len(pm);
+ for (i = 0; i < pm_len; i++) {
+ my_pm = pm + i;
+ if (my_pm->inuse) {
+ used++;
+ /* maximum of 200 addresses to be returned */
+ if (used <= 200) {
+ ip.s_addr = ntohl(my_pm->ipv4_address);
+ vlib_cli_output(vm, "\t%s \t\t%u\n", inet_ntoa(ip), my_pm->inuse);
+ }
+ }
+ }
+ }));
+ return;
+}
+
+void cnat_nat44_handle_show_config(vlib_main_t *vm)
+{
+ cnat_vrfmap_t * my_vrfmap;
+ cnat_portmap_v2_t *pm = 0;
+ cnat_portmap_v2_t *my_pm = 0;
+ u32 pm_len;
+ struct in_addr ip_addr;
+ u8 status_str[20];
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info,
+ *global_nfv9_logging_info = 0;
+
+ vnet_hw_interface_t * hw;
+ vnet_main_t * vnm = vnet_get_main();
+
+ void cnat_nfv9_show_collector
+ (vlib_main_t *vm, cnat_nfv9_logging_info_t *my_nfv9_logging_info);
+
+ vlib_cli_output(vm, "vCGN NAT44 Config:\n");
+ vlib_cli_output(vm, "\tPort Limit : %u\n", cnat_main_db_max_ports_per_user);
+ vlib_cli_output(vm, "\ttotal address pool : %u\n", total_address_pool_allocated);
+ vlib_cli_output(vm, "\tdynamic port start range : %u\n", cnat_static_port_range);
+
+ pool_foreach(my_vrfmap, cnat_map_by_vrf, ({
+ hw = vnet_get_hw_interface (vnm, my_vrfmap->i_vrf);
+ vlib_cli_output(vm, "\tInside Interface : %v\n", hw->name);
+ hw = vnet_get_hw_interface (vnm, my_vrfmap->o_vrf);
+ vlib_cli_output(vm, "\tOutside Interface : %v\n", hw->name);
+
+ memset(status_str, 0x00, sizeof(status_str));
+ switch(my_vrfmap->status) {
+ case S_WAO: clib_memcpy(status_str, "S_WAO", 5); break;
+ case S_WA: clib_memcpy(status_str, "S_WA", 4); break;
+ case S_WO: clib_memcpy(status_str, "S_WO", 4); break;
+ case S_RUN: clib_memcpy(status_str, "ONLINE", 6); break;
+ case S_DEL: clib_memcpy(status_str, "S_DEL", 5); break;
+ default: clib_memcpy(status_str, "Invalid state", 13);
+
+ }
+ vlib_cli_output(vm,
+ "\tAddress pool map table status : %s\n", status_str);
+
+ pm = my_vrfmap->portmap_list;
+ pm_len = vec_len(pm);
+ my_pm = pm;
+ ip_addr.s_addr = clib_net_to_host_u32(my_pm->ipv4_address);
+ vlib_cli_output(vm,
+ "\tStart Address : %s\n", inet_ntoa(ip_addr));
+ my_pm = pm + (pm_len - 1);
+ ip_addr.s_addr = clib_net_to_host_u32(my_pm->ipv4_address);
+ vlib_cli_output(vm,
+ "\tEnd Address : %s\n", inet_ntoa(ip_addr));
+
+ }));
+ vlib_cli_output(vm,
+ "\ttcp init timeout : %u sec\n", tcp_initial_setup_timeout);
+ vlib_cli_output(vm,
+ "\ttcp active timeout : %u sec\n", tcp_active_timeout);
+ vlib_cli_output(vm,
+ "\tudp init timeout : %u sec\n", udp_init_session_timeout);
+ vlib_cli_output(vm,
+ "\tudp active timeout : %u sec\n", udp_act_session_timeout);
+ vlib_cli_output(vm,
+ "\ticmp session timeout: %u sec\n", icmp_session_timeout);
+
+#if 0
+ if (cnat_nfv9_global_info.cnat_nfv9_global_collector_index != EMPTY) {
+ vlib_cli_output(vm,"\nGloabal NFV9 Collector :");
+ global_nfv9_logging_info = cnat_nfv9_logging_info_pool +
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index;
+ cnat_nfv9_show_collector(vm, global_nfv9_logging_info);
+ }
+#endif
+
+ vlib_cli_output(vm, "\nNFV9 Collector :");
+ if (cnat_nfv9_logging_info_pool !=NULL) {
+ pool_foreach (my_nfv9_logging_info, cnat_nfv9_logging_info_pool, ({
+ if (my_nfv9_logging_info != global_nfv9_logging_info) {
+ cnat_nfv9_show_collector(vm, my_nfv9_logging_info);
+ vlib_cli_output(vm, "\n");
+ }
+ }));
+ } else {
+ vlib_cli_output(vm, "\n");
+ }
+
+ return;
+}
+
+/*
+ * Check if the request flag matches the entry flags and
+ * if so return "1"
+ *
+ * entry_flag_ptr is an output parameter - it returns the flags
+ * corresponding to the translation entry
+ */
+static u8 cnat_v4_show_verify_display_entry (
+ u16 request_flag,
+ cnat_main_db_entry_t *db,
+ u16 *entry_flag_ptr)
+{
+ u8 display_entry = 0;
+
+ /*
+ * This should never happen
+ */
+ if (!entry_flag_ptr) {
+ return (display_entry);
+ }
+
+ *entry_flag_ptr = 0;
+
+ if ((db->flags & CNAT_DB_FLAG_STATIC_PORT)
+ &&(db->flags & CNAT_DB_FLAG_ALG_ENTRY)) {
+ *entry_flag_ptr |= CNAT_TRANSLATION_ENTRY_STATIC;
+ *entry_flag_ptr |= CNAT_TRANSLATION_ENTRY_ALG;
+ } else if (db->flags & CNAT_DB_FLAG_STATIC_PORT) {
+ *entry_flag_ptr |= CNAT_TRANSLATION_ENTRY_STATIC;
+ } else if ((db->flags & CNAT_DB_FLAG_ALG_ENTRY) ||
+ (db->flags & CNAT_DB_FLAG_PPTP_GRE_ENTRY)) {
+ *entry_flag_ptr |= CNAT_TRANSLATION_ENTRY_ALG;
+ } else if (db->flags & CNAT_DB_FLAG_PCPI) {
+ *entry_flag_ptr |= CNAT_TRANSLATION_ENTRY_PCPI_DYNAMIC;
+ } else if (db->flags & CNAT_DB_FLAG_PCPE) {
+ *entry_flag_ptr |= CNAT_TRANSLATION_ENTRY_PCPE_DYNAMIC;
+ } else {
+ *entry_flag_ptr |= CNAT_TRANSLATION_ENTRY_DYNAMIC;
+ }
+
+ if (request_flag == CNAT_TRANSLATION_ENTRY_ALL) {
+ display_entry = 1;
+ } else {
+ /*
+ * Check if the request_flag is STATIC or ALG
+ * and the entry is STATIC or ALG as well
+ */
+ if ((request_flag & CNAT_TRANSLATION_ENTRY_STATIC) &&
+ (*entry_flag_ptr & CNAT_TRANSLATION_ENTRY_STATIC)) {
+ display_entry = 1;
+ }
+
+ if ((request_flag & CNAT_TRANSLATION_ENTRY_ALG) &&
+ (*entry_flag_ptr & CNAT_TRANSLATION_ENTRY_ALG)) {
+ display_entry = 1;
+ }
+
+ if ((request_flag & CNAT_TRANSLATION_ENTRY_PCPI_DYNAMIC) &&
+ (*entry_flag_ptr & CNAT_TRANSLATION_ENTRY_PCPI_DYNAMIC)) {
+ display_entry = 1;
+ }
+
+ if ((request_flag & CNAT_TRANSLATION_ENTRY_PCPE_DYNAMIC) &&
+ (*entry_flag_ptr & CNAT_TRANSLATION_ENTRY_PCPE_DYNAMIC)) {
+ display_entry = 1;
+ }
+
+ /*
+ * For dynamic entry case, check if flags field is 0
+ */
+ if ((request_flag & CNAT_TRANSLATION_ENTRY_DYNAMIC) &&
+ (*entry_flag_ptr & CNAT_TRANSLATION_ENTRY_DYNAMIC)) {
+ display_entry = 1;
+ }
+ }
+
+ if (PREDICT_FALSE(show_debug_level > 2)) {
+ PLATFORM_DEBUG_PRINT("Entry (0x%x, %d) -> (0x%x, %d) request_flag 0x%x, entry_flag 0x%x, display_entry %d\n", db->in2out_key.k.ipv4, db->in2out_key.k.port, db->out2in_key.k.ipv4, db->out2in_key.k.port, request_flag, *entry_flag_ptr, display_entry);
+ }
+
+ return (display_entry);
+}
+void cnat_v4_show_inside_entry_req_t_handler
+(spp_api_cnat_v4_show_inside_entry_req_t *mp, vlib_main_t * vm)
+{
+ cnat_user_db_entry_t *udb = NULL;
+ cnat_main_db_entry_t *db = NULL;
+ cnat_db_key_bucket_t u_ki, ki;
+ u64 a, b, c;
+ u32 index;
+ u16 start_port, end_port, port;
+ u16 request_flag = 0;
+ u16 entry_flag = 0;
+ u8 num_entries = 0;
+ u8 proto, all;
+ u8 done = 0;
+ cnat_v4_show_translation_entry *entry_list;
+ cnat_v4_show_translation_entry entry[PLATFORM_MAX_TRANSLATION_ENTRIES];
+ u8 display_entry;
+ u8 flag_str[11];
+ vnet_hw_interface_t * hw;
+ vnet_main_t * vnm = vnet_get_main();
+
+ ki.k.k.ipv4 = mp->ipv4_addr;
+ ki.k.k.vrf = mp->vrf_id;
+ start_port = mp->start_port;
+ end_port = mp->end_port;
+#if DEBUG
+ vlib_cli_output(vm, "## proto %d, inside-addr 0x%x, start_port %u, "
+ "end_port %u, vrf 0x%x, flag 0x%x\n",
+ mp->protocol,
+ mp->ipv4_addr,
+ mp->start_port,
+ mp->end_port,
+ mp->vrf_id,
+ mp->flags);
+#endif
+
+ proto = mp->protocol;
+ ki.k.k.vrf |= ((u16)proto << CNAT_PRO_SHIFT);
+
+ all = mp->all_entries; /* for no port range case */
+ request_flag = mp->flags; /* for all, alg, static entries case */
+ entry_list = entry;
+
+ /*
+ * check if the address belongs to this core
+ */
+
+
+ /*
+ * first we check if the user exists in the udb; if not, there is
+ * no point in checking the main db for translations
+ */
+ u_ki.k.k.vrf = ki.k.k.vrf & CNAT_VRF_MASK;
+ u_ki.k.k.ipv4 = ki.k.k.ipv4;
+ u_ki.k.k.port = 0;
+
+ if (PREDICT_FALSE(show_debug_level > 0)) {
+ vlib_cli_output(vm, "\nI_TRANS_CORE %d: IPv4 0x%x, VRF 0x%x, "
+ "start_port %d, end_port %d",
+ my_instance_number, ki.k.k.ipv4,
+ ki.k.k.vrf, start_port, end_port);
+ }
+
+ udb = cnat_user_db_lookup_entry(&u_ki);
+ if (!udb) {
+ if (PREDICT_FALSE(show_debug_level > 0)) {
+ vlib_cli_output(vm, "\nReturning %d entries",
+ num_entries);
+ }
+ return;
+ }
+
+ if (all) {
+ #if 0
+ if (PREDICT_FALSE(show_debug_level > 0)) {
+ PLATFORM_DEBUG_PRINT("\nI_TRANS: Printing ALL\n");
+ }
+
+ /*
+ * get the head of list of translation entries for that user
+ * from the user db
+ */
+ head = udb->translation_list_head_index;
+ db = cnat_main_db + head;
+
+ while (num_entries < PLATFORM_MAX_TRANSLATION_ENTRIES) {
+
+ if (((db->in2out_key.k.vrf & CNAT_PRO_MASK) >> CNAT_PRO_SHIFT)
+ != proto) {
+ goto next_entry;
+ }
+
+ display_entry =
+ spp_api_cnat_v4_show_verify_display_entry(request_flag, db,
+ &entry_flag);
+
+ if (display_entry) {
+ entry_list->ipv4_addr =
+ spp_host_to_net_byte_order_32(db->out2in_key.k.ipv4);
+ entry_list->cnat_port =
+ spp_host_to_net_byte_order_16(db->out2in_key.k.port);
+ entry_list->src_port =
+ spp_host_to_net_byte_order_16(db->in2out_key.k.port);
+
+ entry_list->protocol = proto;
+
+ /* in case of GRE, in2out is not accounted */
+ if(proto != CNAT_PPTP) {
+
+ entry_list->in2out_packets =
+ spp_host_to_net_byte_order_32(db->in2out_pkts);
+ } else {
+ entry_list->in2out_packets = 0;
+ }
+ entry_list->out2in_packets =
+ spp_host_to_net_byte_order_32(db->out2in_pkts);
+
+ entry_list->flags =
+ spp_host_to_net_byte_order_16(entry_flag);
+
+ num_entries++;
+ entry_list = entry_list + 1;
+ }
+next_entry:
+ db = cnat_main_db + db->user_ports.next;
+ /*
+ * it's a circular list, so if we have reached the head again,
+ * all the entries for that user have been read
+ */
+ if (db == (cnat_main_db + head)) {
+ break;
+ }
+ }
+ resp->num_entries = num_entries;
+ #endif /* if 0 */
+ } else {
+ if (PREDICT_FALSE(show_debug_level > 0)) {
+ vlib_cli_output(vm, "\nI_TRANS: Printing range %d .. %d\n",
+ start_port, end_port);
+ }
+ /*
+ * a port range is specified, so for each port compute the hash and
+ * check if the entry is present in the main db
+ */
+ port = start_port;
+ done = 0;
+ while ((!done) && (num_entries < PLATFORM_MAX_TRANSLATION_ENTRIES)) {
+
+ ki.k.k.port = port;
+ if (port >= end_port) {
+ done = 1;
+ } else {
+ port++;
+ }
+ CNAT_V4_GET_HASH(ki.k.key64,
+ ki.bucket,
+ CNAT_MAIN_HASH_MASK);
+ index = cnat_in2out_hash[ki.bucket].next;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ continue;
+ }
+
+ do {
+ db = cnat_main_db + index;
+ if (db->in2out_key.key64 == ki.k.key64) {
+ break;
+ }
+ index = db->in2out_hash.next;
+ } while (index != EMPTY);
+
+ if (index == EMPTY) {
+ continue;
+ } else {
+
+ display_entry =
+ cnat_v4_show_verify_display_entry(request_flag, db,
+ &entry_flag);
+ if (display_entry) {
+
+ entry_list->ipv4_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+ entry_list->cnat_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+ entry_list->src_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+
+ entry_list->protocol = proto;
+ entry_list->nsessions = db->nsessions;
+ entry_list->flags = ((db->flags & CNAT_DB_FLAG_TCP_ACTIVE) ||
+ (db->flags & CNAT_DB_FLAG_UDP_ACTIVE)) ? 1:0;
+ /* in case of GRE, in2out is not accounted */
+ if(proto != CNAT_PPTP) {
+ entry_list->in2out_packets =
+ clib_host_to_net_u32(db->in2out_pkts);
+ } else {
+ entry_list->in2out_packets = 0;
+ }
+
+ entry_list->out2in_packets =
+ clib_host_to_net_u32(db->out2in_pkts);
+
+ if (PREDICT_FALSE(show_debug_level > 3)) {
+ vlib_cli_output(vm, "\n1. Entry: Addr 0x%x, port %d, num_entries %d",
+ clib_net_to_host_u32(entry_list->ipv4_addr),
+ clib_net_to_host_u16(entry_list->cnat_port),
+ num_entries);
+ }
+
+ entry_list = entry_list + 1;
+ num_entries++;
+ }
+ } /* if (index == EMPTY) */
+ } /* while() */
+ }
+
+ if (PREDICT_FALSE(show_debug_level > 0)) {
+ if (num_entries) {
+ vlib_cli_output(vm, "\nReturning %d entries\n",
+ num_entries);
+ }
+ }
+
+ entry_list = entry;
+ u8 i = 0;
+ struct in_addr ip;
+ u8 proto_str[10];
+ u8 transl_str[10];
+ memset(proto_str, 0x00, 10);
+ memset(transl_str, 0x00, 10);
+
+ if (proto == 1) strncpy((char *)proto_str, "udp", 3);
+ else if (proto == 2) strncpy((char *)proto_str, "tcp", 3);
+ else if (proto == 3) strncpy((char *)proto_str, "icmp", 4);
+ else strncpy((char *)proto_str, "unknown", 7);
+
+ if (request_flag == 0x04) strncpy((char *)transl_str, "Dynamic", 7);
+ else strncpy((char *)transl_str, "Unknown", 7); /* currently we are not supporting static/alg entries */
+
+ ip.s_addr = clib_net_to_host_u32(u_ki.k.k.ipv4);
+ hw = vnet_get_hw_interface (vnm, u_ki.k.k.vrf);
+
+ vlib_cli_output (vm, "Inside-translation details\n");
+ vlib_cli_output (vm, "--------------------------\n");
+
+ vlib_cli_output (vm, "Inside interface : %s\n", hw->name);
+ vlib_cli_output (vm, "Inside address : %s\n", inet_ntoa(ip));
+ vlib_cli_output (vm, "Start port : %u\n", start_port);
+ vlib_cli_output (vm, "End port : %u\n", end_port);
+
+ vlib_cli_output (vm, "--------------------------------------------------------------------------------------"
+ "-----------------------\n");
+ vlib_cli_output (vm, "Outside Protocol Inside Outside Translation"
+ " I2O O2I Flag Num\n");
+ vlib_cli_output (vm, "Address Src Port Src Port Type "
+ " Pkts Pkts Sessions\n");
+ vlib_cli_output (vm, "--------------------------------------------------------------------------------------"
+ "-----------------------\n");
+
+ while ((num_entries) && (entry_list) && (i < 50)) {
+
+ ip.s_addr = entry_list->ipv4_addr;
+ memset(flag_str,0x00,11);
+ if((proto == 1) || (proto == 2)) {
+ if(entry_list->flags == 1) {
+ strncpy((char *)flag_str,"Active",6);
+ }
+ else {
+ strncpy((char *) flag_str,"Non Active",10);
+ }
+ } else {
+ strncpy((char *) flag_str, "NA", 2);
+ }
+ vlib_cli_output(vm, "%s %10s %11u %12u %13s %10u %10u %14s %6u\n",
+ inet_ntoa(ip), proto_str,
+ clib_net_to_host_u16(entry_list->src_port),
+ clib_net_to_host_u16(entry_list->cnat_port),
+ transl_str,
+ clib_net_to_host_u32(entry_list->in2out_packets),
+ clib_net_to_host_u32(entry_list->out2in_packets),
+ flag_str,
+ entry_list->nsessions);
+ entry_list++;
+ num_entries--; i++;
+ }
+
+ return;
+}
+
+void cnat_v4_show_outside_entry_req_t_handler
+(spp_api_cnat_v4_show_outside_entry_req_t *mp, vlib_main_t *vm)
+{
+ cnat_main_db_entry_t *db = NULL;
+ cnat_db_key_bucket_t ko;
+ u64 a, b, c;
+ u32 index;
+ u16 start_port, end_port, port;
+ u16 request_flag = 0;
+ u16 entry_flag = 0;
+ u8 num_entries = 0;
+ u8 proto;
+ cnat_v4_show_translation_entry *entry_list;
+ cnat_v4_show_translation_entry entry[PLATFORM_MAX_TRANSLATION_ENTRIES];
+ u8 done = 0;
+ u8 display_entry;
+ u8 flag_str[11];
+ vnet_hw_interface_t * hw;
+ vnet_main_t * vnm = vnet_get_main();
+
+ ko.k.k.ipv4 = mp->ipv4_addr;
+ ko.k.k.vrf = mp->vrf_id;
+ start_port = mp->start_port;
+ end_port = mp->end_port;
+
+ proto = mp->protocol;
+ request_flag = mp->flags;
+
+ ko.k.k.vrf |= ((u16)proto << CNAT_PRO_SHIFT);
+
+ entry_list = entry;
+
+ if (PREDICT_FALSE(show_debug_level > 0)) {
+ vlib_cli_output(vm, "\nO_TRANS_CORE %d: IPv4 0x%x, VRF 0x%x, "
+ "start_port %d, end_port %d", my_instance_number,
+ ko.k.k.ipv4, ko.k.k.vrf, start_port, end_port);
+ }
+
+ /*
+ * for each ip and port combination we need to scan the main db
+ * and check if the entry is present
+ */
+ port = start_port;
+ done = 0;
+ while ((!done) && (num_entries < PLATFORM_MAX_TRANSLATION_ENTRIES)) {
+ ko.k.k.port = port;
+
+ /*
+ * If we have reached the end_port, we are DONE
+ */
+ if (port >= end_port) {
+ done = 1;
+ } else {
+ port++;
+ }
+
+ CNAT_V4_GET_HASH(ko.k.key64,
+ ko.bucket,
+ CNAT_MAIN_HASH_MASK);
+
+ index = cnat_out2in_hash[ko.bucket].next;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ continue;
+ }
+
+ do {
+ db = cnat_main_db + index;
+ if (db->out2in_key.key64 == ko.k.key64) {
+ break;
+ }
+ index = db->out2in_hash.next;
+ } while (index != EMPTY);
+
+ if (index == EMPTY) {
+ continue;
+ } else {
+ display_entry =
+ cnat_v4_show_verify_display_entry(request_flag, db,
+ &entry_flag);
+
+ if (display_entry) {
+ entry_list->ipv4_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ entry_list->cnat_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+ entry_list->src_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+ entry_list->protocol = proto;
+ entry_list->nsessions = db->nsessions;
+ entry_list->flags = ((db->flags & CNAT_DB_FLAG_TCP_ACTIVE) ||
+ (db->flags & CNAT_DB_FLAG_UDP_ACTIVE)) ? 1:0;
+ /* in case of GRE, in2out is not accounted */
+ if(proto != CNAT_PPTP) {
+ entry_list->in2out_packets =
+ clib_host_to_net_u32(db->in2out_pkts);
+ } else {
+ entry_list->in2out_packets = 0 ;
+ }
+ entry_list->out2in_packets =
+ clib_host_to_net_u32(db->out2in_pkts);
+ #if 0
+ entry_list->flags =
+ clib_host_to_net_u16(entry_flag);
+ #endif
+ entry_list = entry_list + 1;
+ num_entries++;
+ }
+ }
+ }
+
+ if (num_entries == 0) {
+ /* No point proceeding further */
+ return;
+ }
+
+ if (PREDICT_FALSE(show_debug_level > 0)) {
+ if (num_entries) {
+ vlib_cli_output(vm, "\nO_TRANS: Core %d returning %d entries",
+ num_entries);
+ }
+ }
+
+ entry_list = entry;
+ u8 i = 0;
+ struct in_addr ip;
+ u8 proto_str[10];
+ u8 transl_str[10];
+ memset(proto_str, 0x00, 10);
+ memset(transl_str, 0x00, 10);
+
+ if (proto == 1) strncpy((char *) proto_str, "udp", 3);
+ else if (proto == 2) strncpy((char *) proto_str, "tcp", 3);
+ else if (proto == 3) strncpy((char *) proto_str, "icmp", 4);
+ else strncpy((char *) proto_str, "unknown", 7);
+
+ if (request_flag == 0x04) strncpy((char *) transl_str, "Dynamic", 7);
+ else strncpy((char *)transl_str, "Unknown", 7); /* currently we are not supporting static/alg entries */
+
+ ip.s_addr = clib_net_to_host_u32(ko.k.k.ipv4);
+ hw = vnet_get_hw_interface (vnm, (ko.k.k.vrf & CNAT_VRF_MASK));
+
+ vlib_cli_output (vm, "Outside-translation details\n");
+ vlib_cli_output (vm, "--------------------------\n");
+
+ vlib_cli_output (vm, "Outside interface : %s\n", hw->name);
+ vlib_cli_output (vm, "Outside address : %s\n", inet_ntoa(ip));
+ vlib_cli_output (vm, "Start port : %u\n", start_port);
+ vlib_cli_output (vm, "End port : %u\n", end_port);
+
+ vlib_cli_output (vm, "--------------------------------------------------------------------------------------"
+ "-----------------------\n");
+ vlib_cli_output (vm, "Inside Protocol Outside Inside Translation"
+ " I2O O2I Flag Num\n");
+ vlib_cli_output (vm, "Address Dst Port Dst Port Type "
+ " Pkts Pkts Sessions\n");
+ vlib_cli_output (vm, "--------------------------------------------------------------------------------------"
+ "-----------------------\n");
+
+ while ((num_entries) && (entry_list) && (i < 50)) {
+ ip.s_addr = entry_list->ipv4_addr;
+ memset(flag_str,0x00,11);
+ if((proto == 1) || (proto == 2)) {
+ if(entry_list->flags == 1) {
+ strncpy((char *) flag_str,"Active",6);
+ }
+ else {
+ strncpy((char *) flag_str,"Non Active",10);
+ }
+ } else {
+ strncpy((char *) flag_str, "NA", 2);
+ }
+ vlib_cli_output(vm, "%s %10s %11u %12u %13s %10u %10u %14s %6u\n",
+ inet_ntoa(ip), proto_str,
+ clib_net_to_host_u16(entry_list->cnat_port),
+ clib_net_to_host_u16(entry_list->src_port),
+ transl_str,
+ clib_net_to_host_u32(entry_list->in2out_packets),
+ clib_net_to_host_u32(entry_list->out2in_packets),
+ flag_str,
+ entry_list->nsessions);
+ entry_list++;
+ num_entries--; i++;
+
+ }
+ return;
+}
diff --git a/plugins/vcgn-plugin/vcgn/cnat_common_api.h b/plugins/vcgn-plugin/vcgn/cnat_common_api.h
new file mode 100644
index 00000000000..a4eb74432f2
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_common_api.h
@@ -0,0 +1,22 @@
+/*---------------------------------------------------------------------------
+ * Copyright (c) 2009-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+#ifndef __CNAT_COMMON_API_H__
+#define __CNAT_COMMON_API_H__
+
+/* All common API prototypes */
+void cnat_scanner_db_process_turn_on(vlib_main_t *vm);
+
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/cnat_config.c b/plugins/vcgn-plugin/vcgn/cnat_config.c
new file mode 100644
index 00000000000..87183dfa961
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_config.c
@@ -0,0 +1,77 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_config.c - configuration definitions
+ *
+ * Copyright (c) 2007-2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include "cnat_config.h"
+#include "cnat_cli.h"
+#include "cnat_v4_pptp_alg.h"
+#include "platform_common.h"
+
+/* session timeout */
+
+u16 tcp_initial_setup_timeout = V4_DEF_TCP_IS_TO; /* sec */
+u16 tcp_active_timeout = V4_DEF_TCP_AS_TO; /* sec */
+u16 udp_init_session_timeout = V4_DEF_UDP_IS_TO; /* 30 sec */
+u16 udp_act_session_timeout = V4_DEF_UDP_AS_TO; /* 2 min */
+u16 icmp_session_timeout = V4_DEF_ICMP_S_TO; /* 60 sec */
+
+cnat_pptp_config_t pptp_cfg =
+ {
+ .enable = PPTP_DISABLED,
+ .timeout = PPTP_GRE_TIMEOUT
+ } ;
+
+/* This flag is used as an indication of timeout-related config
+ * changes, and hence the db needs to be updated
+ */
+u8 timeout_dirty_flag = 0;
+
+/* mapping refresh direction,
+ * 0 outbound only refresh,
+ * 1 inbound and outbound refresh
+ */
+u8 mapping_refresh_both_direction = V4_DEF_ENABLE;
+
+u16 cnat_main_db_max_ports_per_user = V4_DEF_MAX_PORTS;
+
+u32 cnat_main_db_icmp_rate_limit = DEF_RATE_LIMIT;
+u32 cnat_main_db_icmp_rate_limit_core = DEF_RATE_LIMIT_CORE;
+u32 crc_zero_udp_rate_limit_core = RATE_LIMIT_UDP_CORE;
+u16 cnat_static_port_range = CNAT_DEF_STATIC_PORT_RANGE;
+
+
+/*
+ * ftp alg enable
+ */
+u8 ftp_alg_enabled = V4_DEF_DISABLE;
+u16 rtsp_alg_port_num = 0;
+
+/*
+ * load balancing debug mode
+ */
+u8 lb_debug_enable = V4_DEF_DISABLE;
+
+
+/* good or evil mode
+ * 0 endpoint-independent filter, good mode
+ * 1 address-dependent filter, evil mode
+ */
+u8 address_dependent_filtering = V4_DEF_DISABLE;
+
+u16 per_user_icmp_msg_limit = ICMP_MSG_RATE_LIMIT;
+
+u16 config_delete_timeout = V4_CONFIG_DELETE_TO;
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_config.h b/plugins/vcgn-plugin/vcgn/cnat_config.h
new file mode 100644
index 00000000000..f104273716f
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_config.h
@@ -0,0 +1,582 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_config.h - configuration database definitions
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_CONFIG_H__
+#define __CNAT_CONFIG_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "cnat_bulk_port_defs.h"
+
+/* default policy value */
+#define V4_DEF_ICMP_S_TO 60 /*icmp session timeout */
+#define V4_DEF_UDP_IS_TO 30 /*udp init session timeout */
+#define V4_DEF_UDP_AS_TO 120 /*udp active session timeout */
+#define V4_DEF_TCP_IS_TO 120 /*tcp init session timeout */
+#define V4_DEF_TCP_AS_TO 1800 /*tcp active session timeout, 30 min */
+#define V4_DEF_TCP_MSS 1460 /*tcp mss */
+#define V4_DEF_MAX_PORTS 100 /*max port limit per user */
+#define DEF_RATE_LIMIT PLATFORM_MAX_CORES /* No of packets/sec icmp generated */
+#define DEF_RATE_LIMIT_CORE 1 /* No of packets/sec icmp generated (per core) */
+#define RATE_LIMIT_UDP_CORE 1000 /* Max allowed udp crc zero packets/sec/core */
+
+#define NAT44_RESERVED_INST_ID 1
+#define DSLITE_START_ID (NAT44_RESERVED_INST_ID + 1)
+#define V4_DEF_VRF_MAX_PORTS 0 /*max port limit per vrf user;
+ 0 means use the global port limit for user*/
+/* Hardcoded. TBD - can be made configurable */
+
+#define V4_DEF_ENABLE 1 /* feature enable */
+#define V4_DEF_DISABLE 0 /* feature disable */
+
+#define CNAT_DEF_STATIC_PORT_RANGE 1024 /* Default range for static ports */
+/*
+ * If TCP MSS is not configured, store the maximum possible value
+ */
+#define V4_TCP_MSS_NOT_CONFIGURED_VALUE 0xffff
+
+/* default timeout for fragments in seconds; set to 2
+ * in case it is not configured
+ */
+#define CNAT_IPV4_FRAG_TIMEOUT_DEF 2
+/* other */
+/* max db entries to be scanned */
+#define MAX_DB_ENTRY_PER_SCAN PLATFORM_MAX_DB_ENTRY_PER_SCAN
+/* max db entries selected per scan */
+#define MAX_DB_ENTRY_SELECTED_PER_SCAN PLATFORM_MAX_DB_ENTRY_SELECTED_PER_SCAN
+
+#define ICMP_MSG_RATE_LIMIT 3 /* rate limit for icmp message */
+#define V4_CONFIG_DELETE_TO 600 /* timeout for entry to be deleted */
+
+/* session timeout */
+
+extern u16 tcp_initial_setup_timeout;
+extern u16 tcp_active_timeout;
+extern u16 udp_init_session_timeout;
+extern u16 udp_act_session_timeout;
+extern u16 icmp_session_timeout;
+
+extern u8 timeout_dirty_flag;
+
+/* mapping refresh direction,
+ * 0 outbound only refresh,
+ * 1 inbound and outbound refresh
+ */
+extern u8 mapping_refresh_both_direction;
+
+
+extern u16 cnat_main_db_max_ports_per_user;
+extern u32 cnat_main_db_icmp_rate_limit;
+extern u32 cnat_main_db_icmp_rate_limit_core;
+extern u32 crc_zero_udp_rate_limit_core;
+
+extern u16 cnat_static_port_range;
+
+typedef enum {
+ LOG_FORMAT_UNDEFINED =0,
+ LOG_FORMAT_COMPACT,
+ LOG_FORMAT_NF9,
+ LOG_FORMAT_MAX, /* keep this as last */
+} log_format_t;
+
+typedef enum {
+ CNAT_CONFIG_DEL_OP = 0,
+ CNAT_CONFIG_ADD_OP,
+} cnat_op_flag_t;
+
+extern u8 ftp_alg_enabled;
+extern u16 rtsp_alg_port_num;
+
+/*
+ * load balancing debug mode
+ */
+extern u8 lb_debug_enable;
+
+/* good or evil mode
+ * 0 endpoint-independent filter, good mode
+ * 1 address-dependent filter, evil mode
+ */
+extern u8 address_dependent_filtering;
+
+extern u16 per_user_icmp_msg_limit;
+
+/* vrfmap or portmap holding time
+ * after delete
+ */
+extern u16 config_delete_timeout;
+
+/*
+ * Bit map for various configuration in the POLICY KNOB case
+ */
+#define BIDIR_REFRESH_ENABLE 0x01
+#define BIDIR_REFRESH_DISABLE 0x02
+#define FTP_ALG_ENABLE 0x04
+#define FTP_ALG_DISABLE 0x08
+#define DEFAULT_NFV9_LOGGING_SERVER_ENABLE 0x10
+#define DEFAULT_NFV9_LOGGING_SERVER_DISABLE 0x20
+
+
+/*
+ * This structure contains a single VRF map configuration
+ * from a bulk message. This structure is in conformanace
+ * with the following structures defined in cnat_config_api.h
+ * - spp_api_cnat_v4_bulk_vrf_map_t
+ *
+ * Any change in the above structures should be propagated here
+ */
+typedef struct _spp_api_cnat_v4_single_vrf_map_req {
+ u32 i_vrf_id;
+ u32 o_vrf_id;
+
+ u16 i_vrf;
+ u16 o_vrf;
+
+ u32 start_addr;
+ u32 end_addr;
+
+ u16 vrf_policy_enable;
+#define TCP_MSS_ENABLE 0x0001
+#define TCP_MSS_DISABLE 0x0002
+#define NFV9_LOGGING_ENABLE 0x0004
+#define NFV9_LOGGING_DISABLE 0x0008
+#define VRF_MAP_DELETE 0x0010
+#define VRF_MAP_ADD 0x0020
+#define BULK_ALLOC_CHANGE 0x0040
+
+ u16 tcp_mss_value;
+ u32 vrf_nfv9_logging_ipv4_address;
+ u16 vrf_nfv9_logging_udp_port;
+ u16 vrf_nfv9_refresh_rate;
+ u16 vrf_nfv9_timeout_rate;
+ u16 vrf_nfv9_path_mtu;
+#ifndef NO_BULK_LOGGING
+ bulk_alloc_size_t bulk_size;
+#endif /* NO_BULK_LOGGING */
+} spp_api_cnat_v4_single_vrf_map_req;
+
+typedef struct _spp_api_cnat_v4_single_vrf_map_rc {
+ u8 vrf_map_rc;
+ u8 tcp_mss_rc;
+ u8 nfv9_logging_rc;
+ u8 pad;
+} spp_api_cnat_v4_single_vrf_map_rc;
+
+/*
+ * Bulk Response for the VRF map request
+ */
+typedef struct _spp_api_cnat_v4_bulk_vrf_map_resp {
+ u16 _spp_msg_id;
+ u8 bulk_rc;
+ u8 pad;
+
+ u32 num_vrfmap_entries;
+
+ spp_api_cnat_v4_single_vrf_map_rc vrf_map_rc;
+
+} spp_api_cnat_v4_bulk_vrf_map_resp;
+
+/*
+ * Bulk Response for the Policy Knob request
+ */
+typedef struct _spp_api_cnat_v4_bulk_policy_knob_resp {
+ u16 _spp_msg_id;
+ u8 bulk_rc; /* Global rc code */
+ u8 pad;
+
+ u8 port_limit_rc;
+ u8 icmp_timeout_rc;
+ u8 udp_init_timeout_rc;
+ u8 udp_act_timeout_rc;
+
+ u8 tcp_init_timeout_rc;
+ u8 tcp_act_timeout_rc;
+ u8 nfv9_logging_rc;
+ u8 pad2;
+} spp_api_cnat_v4_bulk_policy_knob_resp;
+
+
+/* PPTP ALG defs and structures */
+
+/* don't change the order;
+ fields are maintained at offsets mapped to msg ids */
+
+typedef struct pptp_ctrl_msg_ctrs_t {
+ u64 dummy;
+ u64 sccr;
+ u64 sccrp;
+ u64 stccrq;
+ u64 stccrp;
+ u64 erq;
+ u64 erp;
+ u64 ocrq;
+ u64 ocrp;
+ u64 icrq;
+ u64 icrp;
+ u64 iccn;
+ u64 cclr;
+ u64 cdn;
+ u64 wen;
+ u64 sli;
+}pptp_ctrl_msg_ctrs_t;
+
+#define PPTP_INCR(ctr) pptp_cfg.counters.pptp_##ctr++
+#define PPTP_DECR(ctr) pptp_cfg.counters.pptp_##ctr--
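+/*
+ * Illustrative expansion (added comment, not in the original source):
+ * PPTP_INCR(active_tunnels) expands to
+ * pptp_cfg.counters.pptp_active_tunnels++, matching the pptp_ prefixed
+ * members of pptp_counters_t below.
+ */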
+
+typedef struct pptp_counters_t {
+
+ u64 pptp_ctrl_msg_drops;
+ u64 pptp_active_tunnels;
+ u64 pptp_active_channels;
+ u64 pptp_in2out_gre_drops;
+ u64 pptp_out2in_gre_drops;
+ u64 pptp_in2out_gre_fwds;
+ u64 pptp_out2in_gre_fwds;
+ pptp_ctrl_msg_ctrs_t ctrl_ctrs;
+
+} pptp_counters_t;
+
+#define CNAT_PPTP_ENABLE 1
+#define CNAT_PPTP_DEF_TIMEOUT 60 /* secs */
+
+typedef struct cnat_pptp_config_t {
+ u8 enable;
+ u16 timeout;
+ pptp_counters_t counters;
+
+} cnat_pptp_config_t;
+
+
+#define CNAT_PPTP_ENABLE_FLAG 0x01
+#define CNAT_PPTP_TIMEOUT_FLAG 0x02
+
+/* pptp config msg resp */
+typedef struct _spp_api_cnat_v4_config_pptp_alg_resp {
+ u16 _spp_msg_id;
+ u8 bulk_rc;
+ u8 pad;
+
+} spp_api_cnat_v4_config_pptp_alg_resp_t;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad[5];
+
+ /* better to have a grouped structure rather than individual
+ variables; any change in the counters will automatically
+ be reflected here */
+ pptp_counters_t counters;
+} pptp_show_counters_resp_t ;
+
+
+extern cnat_pptp_config_t pptp_cfg;
+
+
+/* ========= 6RD declarations =============================== */
+
+#define V6RD_ENTRY_DELETE 0x00
+#define IPV4_TUNNEL_SRC_CONFIG 0x04
+#define TUNNEL_MTU_CONFIG 0x08
+#define IPV4_PREFIXMASK_LEN_CONFIG 0x10
+#define IPV4_SUFFIXMASK_LEN_CONFIG 0x20
+#define TTL_CONFIG 0x40
+#define TOS_CONFIG 0x80
+#define V6RD_IPV6_PREFIX_CONFIG 0x100
+#define V6RD_RESET_DF_BIT_CONFIG 0x200
+#define V6RD_UNICAST_ADDR_CONFIG 0x400
+#define V6RD_REASSEMB_CONFIG 0x800
+
+#define TTL_ENABLE 0x1
+#define TOS_ENABLE 0x2
+#define RESET_DF_BIT 0x4
+#define REASSEMBLY_ENABLE 0x8
+
+/* ========= 6RD declarations =============================== */
+
+/*
+ * Single Request for XLAT config
+ */
+typedef struct _spp_api_cnat_v4_single_xlat_config_req {
+
+ /*
+ * Indicates the xlat instance id. How big will this value be?
+ * Can we restrict it to 0..255? That way the APP code
+ * can use an array to store the xlat instances.
+ */
+ u32 xlat_id;
+
+#define XLAT_ENTRY_DELETE 0x0000
+#define IPV6_SVI_IF_NUM_CONFIG 0x0001
+#define IPV4_SVI_IF_NUM_CONFIG 0x0002
+#define IPV4_TO_IPV6_TCP_MSS_CONFIG 0x0004
+#define IPV6_TO_IPV4_TCP_MSS_CONFIG 0x0008
+#define IPV6_PREFIX_CONFIG 0x0010
+#define IPV6_UBIT_ON_CONFIG 0x0020
+#define IPV6_NON_TRANSLATABLE_PREFIX_MAP_CONFIG 0x0040
+#define IPV4_TOS_SETTING_CONFIG 0x0080
+#define IPV6_TOS_SETTING_CONFIG 0x0100
+#define IPV4_DFBIT_CLEAR_CONFIG 0x0200
+#define ICMPV6_PTB_MTU_SET_CONFIG 0x0400
+#define IPV6_NON_TRANSLATABLE_PREFIX_MAP_ALG_CONFIG 0x0800
+#define CPE_V4_PREFIX_CONFIG 0x1000 /* for map-t */
+#define CPE_V6_PREFIX_CONFIG 0x2000 /* for map-t */
+#define EXTERNAL_V6_PREFIX_CONFIG 0x4000 /* for map-t */
+#define PORT_SHARING_RATIO_CONFIG 0x8000 /* for map-t */
+#define CONSECUTIVE_PORTS_CONFIG 0x10000 /* for map-t */
+
+ u32 xlat_config_fields_enable;
+
+ /*
+ * Interface (IF) numbers of the IPv6 and IPv4 SVI interfaces
+ */
+ u32 ipv6_svi_if_num;
+ u32 ipv4_svi_if_num;
+
+ /*
+ * TCP MSS values for the 2 XLAT directions
+ */
+ u16 v4_to_v6_tcp_mss;
+ u16 v6_to_v4_tcp_mss;
+
+ /*
+ * XLAT IPv6 prefix
+ */
+ u32 v6_prefix[4];
+
+ /*
+ * XLAT IPv6 prefix mask
+ */
+ u8 v6_prefix_mask_len;
+
+ /*
+ * Set to non-zero if UBITs are reserved
+ */
+#define UBITS_ON 0x01
+#define IPV4_DF_BIT_CLEAR 0x02
+#define ICMPV6_MTU_SET 0x04
+#define IPV4_TOS_SET_ENABLED 0x08
+#define IPV6_TC_SET_ENABLED 0x10
+
+ u8 feature_enable_bits;
+
+ u8 v4_prefix_mask_len;
+
+#define IPV6_NON_TRANSLATABLE_PREFIX_MAP_ALG_HASH 0x1
+#define IPV6_NON_TRANSLATABLE_PREFIX_MAP_ALG_TTL 0x2
+#define IPV6_NON_TRANSLATABLE_PREFIX_MAP_ALG_RANDOM 0x3
+ u8 non_translatable_v6_prefix_v4_map_prefix_alg;
+
+ u8 ipv6_tos_value;
+
+ u8 ipv4_tos_value;
+
+ u8 pad2;
+
+ u8 pad3;
+
+ u32 v4_prefix;
+
+ /*
+ * MAP-T/MAP-E specific parameters
+ */
+ u8 xlat_type;
+
+ u32 cpe_domain_v6_prefix[4];
+ u8 cpe_domain_v6_prefix_len;
+
+ u32 cpe_domain_v4_prefix;
+ u8 cpe_domain_v4_prefix_len;
+
+ u32 external_domain_v6_prefix[4];
+ u8 external_domain_v6_prefix_len;
+
+ u8 port_sharing_ratio_bits;
+ u8 consecutive_ports_bits;
+
+} spp_api_cnat_v4_single_xlat_config_req;
+
+/*
+ * Single Response for the xlat config request
+ */
+typedef struct _spp_api_cnat_v4_single_xlat_config_resp {
+ u8 v4_if_num_rc;
+ u8 v6_if_num_rc;
+ u8 v4_to_v6_tcp_mss_rc;
+ u8 v6_to_v4_tcp_mss_rc;
+
+ u8 v6_prefix_rc;
+ u8 ubit_on_rc;
+ u8 v4_prefix_rc;
+ u8 xlat_id_rc;
+
+ u8 non_translatable_v6_prefix_v4_map_alg_rc;
+ u8 ipv4_dfbit_clear_rc;
+ u8 icmpv6_ptb_mtu_set_rc;
+ u8 ipv4_tos_set_rc;
+
+ u8 ipv6_tos_set_rc;
+ u8 pad1;
+ u8 pad2;
+ u8 pad3;
+} spp_api_cnat_v4_single_xlat_config_resp;
+
+/*
+ * Bulk Response for the xlat config request
+ */
+typedef struct _spp_api_cnat_v4_bulk_xlat_config_resp {
+ u16 _spp_msg_id;
+ u16 pad;
+
+ u32 bulk_rc;
+
+ u32 num_xlat_entries;
+
+ spp_api_cnat_v4_single_xlat_config_resp xlat_config_resp;
+
+} spp_api_cnat_v4_bulk_xlat_config_resp;
+
+typedef struct _spp_api_v6rd_v4_single_v6rd_config_resp {
+ u8 v6rd_id_rc;
+ u8 v4_if_num_rc;
+ u8 v6_if_num_rc;
+ u8 tunnel_source_rc;
+ u8 tunnel_mtu_rc;
+ u8 ipv4masklen_prefix_rc;
+ u8 ipv4masklen_suffix_rc;
+ u8 ttl_rc;
+ u8 tos_rc;
+ u8 anycast_rc;
+ u8 v6_prefix_rc;
+ u8 v6_br_unicast_rc;
+ u8 reassembly_rc;
+ u8 pad1;
+ u8 pad2;
+ u8 pad3;
+} spp_api_v6rd_v4_single_v6rd_config_resp_t;
+
+typedef struct _spp_api_v6rd_v4_bulk_v6rd_config_resp {
+ u16 _spp_msg_id;
+ u16 pad;
+ u32 bulk_rc;
+ u32 num_v6rd_entries;
+ spp_api_v6rd_v4_single_v6rd_config_resp_t v6rd_config_resp[0];
+} spp_api_v6rd_v4_bulk_v6rd_config_resp_t;
+
+/*
+ * Single Request for MAPE config
+ */
+typedef struct _spp_api_mape_single_config_req {
+
+ /*
+ * Indicates the mape instance id. How big will this value be?
+ * Can we restrict it to 0..255? That way the APP code
+ * can use an array to store the mape instances.
+ */
+ u32 mape_id;
+
+#define MAPE_ENTRY_DELETE 0x0000
+#define MAPE_IPV4_SVI_IF_NUM_CONFIG 0x0001
+#define MAPE_IPV6_SVI_IF_NUM_CONFIG 0x0002
+#define MAPE_IPV4_TO_IPV6_TCP_MSS_CONFIG 0x0004
+#define MAPE_IPV6_TO_IPV4_TCP_MSS_CONFIG 0x0008
+#define MAPE_CPE_V4_PREFIX_CONFIG 0x0010
+#define MAPE_CPE_V6_PREFIX_CONFIG 0x0020
+#define MAPE_PORT_SHARING_RATIO_CONFIG 0x0040
+#define MAPE_CONSECUTIVE_PORTS_CONFIG 0x0080
+#define MAPE_PATH_MTU 0x0100
+#define MAPE_TUNNEL_ENDPOINT_V6_CONFIG 0x0200
+
+ u32 mape_config_fields_enable;
+
+ /*
+ * Interface (IF) numbers of the IPv6 and IPv4 SVI interfaces
+ */
+ u32 ipv6_svi_if_num;
+ u32 ipv4_svi_if_num;
+
+ /*
+ * TCP MSS values for the 2 XLAT directions
+ */
+ u16 v4_to_v6_tcp_mss;
+ u16 v6_to_v4_tcp_mss;
+
+ /*
+ * Path v6 MTU.
+ */
+ u32 path_mtu;
+
+ /*
+ * CPE IPv6 prefix and mask len.
+ */
+ u32 cpe_domain_v6_prefix[4];
+ u8 cpe_domain_v6_prefix_len;
+
+ /*
+ * CPE IPv4 prefix and mask len.
+ */
+ u32 cpe_domain_v4_prefix;
+ u8 cpe_domain_v4_prefix_len;
+
+ /*
+ * BR IPv6 tunnel end point V6 prefix and mask len.
+ */
+ u32 aftr_tunnel_endpoint_address_v6[4];
+ u8 aftr_tunnel_endpoint_address_v6_len;
+
+ /*
+ * Port sharing ratio and consecutive port bits.
+ */
+ u8 port_sharing_ratio_bits;
+ u8 consecutive_ports_bits;
+
+} spp_api_mape_single_config_req;
+
+
+/*
+ * Single Response for the mape config request
+ */
+typedef struct _spp_api_mape_single_config_resp {
+ u8 v4_if_num_rc;
+ u8 v6_if_num_rc;
+ u8 v4_to_v6_tcp_mss_rc;
+ u8 v6_to_v4_tcp_mss_rc;
+ u8 mape_id_rc;
+ u8 path_mtu_rc;
+ u8 cpe_v6_prefix_rc;
+ u8 cpe_v4_prefix_rc;
+ u8 tunnel_endpoint_prefix_rc;
+ u8 port_sharing_ratio_rc;
+ u8 port_contiguous_rc;
+ u8 pad1;
+} spp_api_mape_single_config_resp;
+
+/*
+ * Bulk Response for the mape config request
+ */
+typedef struct _spp_api_mape_bulk_config_resp {
+ u16 _spp_msg_id;
+ u16 pad;
+ u32 bulk_rc;
+ u32 num_mape_entries;
+ spp_api_mape_single_config_resp mape_config_resp;
+} spp_api_mape_bulk_config_resp;
+
+
+#endif /* __CNAT_CONFIG_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_config_api.h b/plugins/vcgn-plugin/vcgn/cnat_config_api.h
new file mode 100644
index 00000000000..0789d6a92af
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_config_api.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __CNAT_CONFIG_API_H__
+#define __CNAT_CONFIG_API_H__
+
+typedef struct _spp_api_cnat_v4_add_vrf_map {
+ u16 _spp_msg_id;
+ u8 rc;
+ u8 pad;
+ u32 i_vrf_id;
+ u32 o_vrf_id;
+ u16 i_vrf;
+ u16 o_vrf;
+ u32 start_addr[8];
+ u32 end_addr[8];
+} spp_api_cnat_v4_add_vrf_map_t;
+
+typedef struct _spp_api_cnat_v4_config_nfv9_logging {
+ u16 _spp_msg_id;
+ u8 rc;
+ u8 enable;
+ u32 ipv4_address;
+ u32 i_vrf_id;
+ u16 i_vrf;
+ u16 port;
+ u16 refresh_rate;
+ u16 timeout_rate;
+ u16 path_mtu;
+ u8 nfv9_global_collector;
+ u8 session_logging;
+} spp_api_cnat_v4_config_nfv9_logging_t;
+
+
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/cnat_db.h b/plugins/vcgn-plugin/vcgn/cnat_db.h
new file mode 100644
index 00000000000..3596e2384e6
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_db.h
@@ -0,0 +1,701 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_db.h - translation database definitions
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_DB_H__
+#define __CNAT_DB_H__
+
+#include "cnat_cli.h"
+#include "cnat_ports.h"
+#include "index_list.h"
+
+#define VRF_NAME_LEN_STORED 12
+#define MAX_VRFID 400
+typedef struct _cnat_svi_params_entry {
+ u16 svi_type;
+ u16 pad;
+
+ u32 vrf_id;
+ u16 if_num;
+
+ u32 ipv6_addr[4];
+ u32 ipv4_addr;
+
+ u8 direction;
+ u32 tbl_id; /* vrf */
+ u32 vrf_override_id; /* tbl_id for override vrf */
+ u8 vrf_override_flag;
+ u8 partition_id;
+} cnat_svi_params_entry;
+
+typedef struct _cnat_ingress_vrfid_name_entry {
+ u32 vrf_id;
+ u16 ref_count; /* no. of serviceApps under a single vrf */
+ u8 vrf_name[VRF_NAME_LEN_STORED];
+ u16 pad1;
+} cnat_ingress_vrfid_name_entry;
+#define HASH_ENHANCE 4
+
+#define CNAT_DB_SIZE (PLATFORM_MAX_NAT_ENTRIES / PLATFORM_CNAT_INSTS)
+#define CNAT_MAIN_HASH_SIZE (HASH_ENHANCE * PLATFORM_CNAT_MAIN_PRELIM_HASH_SIZE)
+#define CNAT_MAIN_HASH_MASK (CNAT_MAIN_HASH_SIZE-1)
+
+#define CNAT_USER_DB_SIZE (PLATFORM_MAX_USER_ENTRIES / PLATFORM_CNAT_INSTS)
+#define CNAT_USER_HASH_SIZE (HASH_ENHANCE * PLATFORM_CNAT_USER_PRELIM_HASH_SIZE)
+#define CNAT_USER_HASH_MASK (CNAT_USER_HASH_SIZE-1)
+
+#define CNAT_SESSION_DB_SIZE (PLATFORM_MAX_NAT_ENTRIES / PLATFORM_CNAT_INSTS)
+#define CNAT_SESSION_HASH_SIZE (HASH_ENHANCE * PLATFORM_CNAT_MAIN_PRELIM_HASH_SIZE)
+#define CNAT_SESSION_HASH_MASK (CNAT_SESSION_HASH_SIZE-1)
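+
+/*
+ * Note (added for clarity, an assumption about the platform constants):
+ * the MASK macros above are (SIZE - 1), which only works as a cheap
+ * modulo for bucket selection (hash & MASK, as done in the CLI handlers)
+ * if the corresponding SIZE values are powers of two.
+ */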
+
+
+#define CNAT_MAX_SESSIONS_PER_BIB 0xFFFF
+
+#define NUM_BITS_IN_UWORD (8*sizeof(uword))
+
+/* The number of per-ip/port timeout configs is limited to 1024 */
+#define CNAT_TIMEOUT_HASH_SIZE 1024
+#define CNAT_TIMEOUT_HASH_MASK (CNAT_TIMEOUT_HASH_SIZE - 1)
+#define CNAT_TIMEOUT_FULL_MASK 0xFFFFFFFFFFFFFFFF
+#define CNAT_TIMEOUT_IPPROT_MASK PLATFORM_CNAT_TIMEOUT_IPPROT_MASK
+#define CNAT_TIMEOUT_PORTPROT_MASK PLATFORM_CNAT_TIMEOUT_PORTPROT_MASK
+
+#define TRUE 1
+#define FALSE 0
+
+/*
+ * The key structure. All fields are in NETWORK byte order!
+ */
+typedef struct {
+ u32 ipv4;
+ u16 port;
+ u16 vrf; //bit0-12:vrf, bit13:unused, bit14-15:protocol
+} cnat_db_key_t;
+
+/* bit14-15:protocol in cnat_db_key_t */
+#define CNAT_INVALID_PROTO 0x0000
+#define CNAT_PPTP 0x0000
+#define CNAT_UDP 0x4000
+#define CNAT_TCP 0x8000
+#define CNAT_ICMP 0xc000
+#define CNAT_VRF_MASK 0x3fff
+#define CNAT_PRO_MASK 0xc000
+#define CNAT_PRO_SHIFT 14
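+
+/*
+ * Example (illustrative sketch): composing and decomposing the combined
+ * vrf/protocol field of cnat_db_key_t with the masks above:
+ *
+ *   u16 vrf_field = CNAT_TCP | (vrf_id & CNAT_VRF_MASK);
+ *   u16 proto     = vrf_field & CNAT_PRO_MASK;   -> CNAT_TCP
+ *   u16 vrf_part  = vrf_field & CNAT_VRF_MASK;
+ */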
+
+/*
+ * Maximum number of VRF entries supported
+ */
+#define CNAT_MAX_VRFMAP_ENTRIES (CNAT_VRF_MASK + 1)
+/*
+ * for hashing purposes, fetch the key in one instr.
+ */
+typedef union {
+ cnat_db_key_t k;
+ u64 key64;
+} cnat_key_t;
+
+typedef struct {
+ cnat_key_t k;
+ u32 bucket;
+} cnat_db_key_bucket_t;
+
+typedef struct {
+ u32 ipv6[4];
+ cnat_key_t ipv4_key;
+} dslite_key_t;
+
+typedef struct {
+/*
+ cnat_db_key_bucket_t ck;
+ u32 ipv6[4];
+*/
+ dslite_key_t dk;
+ u32 bucket;
+} dslite_db_key_bucket_t;
+
+
+/* Per port/ip timeout related structures */
+extern index_slist_t *cnat_timeout_hash;
+
+typedef struct {
+ cnat_key_t timeout_key;
+ u16 timeout_value;
+} cnat_timeout_t;
+
+typedef struct {
+ cnat_timeout_t t_key;
+ index_slist_t t_hash;
+} cnat_timeout_db_entry_t;
+
+extern cnat_timeout_db_entry_t *cnat_timeout_db;
+
+/*
+ * Main translation database entries. Currently 0x5A = 90 bytes in length.
+ * Given 20,000,000 entries, packing the entries (and paying the extra
+ * prefetch) saves nearly 1 GB of SDRAM, so that is what we do.
+ */
+
+typedef struct {
+ /* 0x00 */
+ index_slist_t out2in_hash; /* hash-and-chain, x2 */
+ index_slist_t in2out_hash;
+
+ /* 0x08 */
+ u16 flags; /* Always need flags... */
+#define CNAT_DB_FLAG_PORT_PAIR (1<<0)
+#define CNAT_DB_FLAG_TCP_ACTIVE (1<<1)
+#define CNAT_DB_FLAG_ENTRY_FREE (1<<2)
+#define CNAT_DB_FLAG_UDP_ACTIVE (1<<3)
+#define CNAT_DB_FLAG_STATIC_PORT (1<<4)
+/* This alg entry is set for FTP data connection */
+#define CNAT_DB_FLAG_ALG_ENTRY (1<<5)
+
+/* Set for TCP connections with destination port 1723 (PPTP).
+ * Note: CNAT_DB_FLAG_TCP_ACTIVE is also set in this case */
+#define CNAT_DB_FLAG_PPTP_TUNNEL_INIT (1<<6)
+#define CNAT_DB_FLAG_PPTP_TUNNEL_ACTIVE (1<<7)
+
+/* for PPTP GRE packets */
+#define CNAT_DB_FLAG_PPTP_GRE_ENTRY (1<<8)
+
+/* for PCP support */
+#define CNAT_DB_FLAG_PCPI (1<<9)
+#define CNAT_DB_FLAG_PCPE (1<<10)
+#define CNAT_PCP_FLAG (CNAT_DB_FLAG_PCPI | CNAT_DB_FLAG_PCPE)
+
+#define CNAT_TAC_SEQ_MISMATCH (1<<11)
+/* This alg entry is set for ftp control connection */
+#define CNAT_DB_FLAG_ALG_CTRL_FLOW (1<<12)
+
+/* This is for marking the state where connection is closing */
+#define CNAT_DB_FLAG_TCP_CLOSING (1<<13)
+
+#define CNAT_DB_DSLITE_FLAG (1<<14)
+#define CNAT_DB_NAT64_FLAG (1<<15)
+
+ /* 0x0A */
+ u16 vrfmap_index; /* index of vrfmap */
+
+ /* 0x0C */
+ u32 user_index; /* index of user that owns this entry */
+
+ /* 0x10 */
+ cnat_key_t out2in_key; /* network-to-user, outside-to-inside key */
+
+ /* 0x18 */
+ cnat_key_t in2out_key; /* user-to-network, inside-to-outside key */
+
+ /* 0x20 */
+ index_dlist_t user_ports; /* per-user translation list */
+
+ /* 0x28 */
+ u32 out2in_pkts; /* pkt counters */
+
+ /* 0x2C */
+ u32 in2out_pkts;
+
+ /* 0x30 */
+ u32 entry_expires; /* timestamp used to expire translations */
+
+ /* 0x34 */
+ union { /* used by FTP ALG, pkt len delta due to FTP PORT cmd */
+ u16 delta;
+ i8 alg_dlt[2]; /* two delta values, 0 for previous, 1 for current */
+ u16 il; /* Used to indicate if interleaved mode is used
+ in case of RTSP ALG */
+ } alg;
+
+ /* 0x36 */
+ u16 timeout;
+
+ /* 0x38 */
+ union {
+ struct seq_pcp_t {
+ u32 tcp_seq_num; /* last tcp (FTP) seq # that has pkt len change due to PORT */
+ u32 pcp_lifetime; /* peer and map life time value sent in reply*/
+ } seq_pcp;
+
+ /* This is for TCP seq check */
+ struct tcp_seq_chk_t {
+ u32 seq_no;
+ u32 ack_no;
+ } tcp_seq_chk;
+
+ /* used for pptp alg entries
+ 1. only tunnel : prev and next = 0xFFFFFFFF
+ 2. first gre entry : prev = tunnel db, next = next gre db
+ 3. last gre entry : prev = previous gre/tunnel db, next = 0xFFFFFFFF
+
+ * While adding a gre entry, it is inserted at the head of the list.
+ * While deleting a gre entry, a hash lookup is done and prev and next are
+ * adjusted; there is no need to traverse the whole list, as is done in
+ * index_dlist_remelem.
+
+ */
+ index_dlist_t pptp_list;
+
+ } proto_data;
+
+ /* 0x40 */
+ u32 dst_ipv4; /* pointer to ipv4 dst list, used in evil mode */
+
+ /* 0x44 */
+ u16 dst_port;
+
+ /* 0x46 */
+ u16 dslite_nat44_inst_id;
+
+ /* 0x48 */
+ u32 session_head_index;
+
+ /* 0x4C */
+ u16 nsessions;
+
+ /* 0x4E */
+ u8 unused;
+
+ /* 0x4F */
+ u8 scale;
+
+ /* 0x50 */
+ u32 diff_window;
+
+ /* Sizeof cnat_main_db_entry_t = 0x54 */
+} cnat_main_db_entry_t;
+
+/* Caution:
+ * 1. The size of this structure should be the same as that of
+ * nat64_bib_user_entry_t
+ * 2. Do not alter the position of the first four fields
+ */
+typedef struct {
+ /* 0x00 */
+ index_slist_t user_hash; /* hash 'n chain bucket chain */
+
+ /* 0x04 */
+ u16 ntranslations; /* translations hold by this user */
+
+ /* 0x06 */
+ u8 icmp_msg_count; /* used to rate limit ICMP messages sent to this user */
+
+ /* 0x07 */
+ u8 flags; /* To identify whether it is NAT64 or NAT44 etc */
+#define CNAT_USER_DB_NAT44_FLAG 0
+#define CNAT_USER_DB_NAT64_FLAG 1
+#define CNAT_USER_DB_DSLITE_FLAG 2
+#define CNAT_USER_DB_PORT_LIMIT_EXCEEDED 0X80
+
+ /* 0x08 */
+ u32 translation_list_head_index;
+
+ /* 0x0C */
+ u32 portmap_index; /* index of bound port-map */
+
+ /* 0x10 */
+ cnat_key_t key; /* For dslite this should store IPv6 address */
+ u32 ipv6[4]; // B4 ipv6 address
+ /* 0x18 */
+#if 0
+ u32 temp1;
+ u32 temp2;
+ u32 temp3;
+#endif
+ /* 0x28 same as nat64_user_db */
+#ifndef NO_BULK_LOGGING
+ /* Now adding 8 more bytes for bulk allocation. This makes it
+ * 0x30 (48). The same was added to nat64_bib_user_entry_t to make
+ * the sizes equal. For NAT64 stateful, we may support bulk allocation
+ * later.
+ */
+ /* Indicates the currently used bulk port range */
+ i16 bulk_port_range_cache[BULK_RANGE_CACHE_SIZE];
+#endif /* #ifndef NO_BULK_LOGGING */
+} cnat_user_db_entry_t;
+
+/*
+ * cnat_session_entry_t
+ * This structure represents the cnat session table. It maintains
+ * information about the destination of a given translation (main db).
+ * There is an entry here only if packets are sent to more than one
+ * destination from the same source.
+ */
+typedef struct {
+
+ /* 0x00 */
+ index_slist_t cnat_session_hash;
+
+ /* 0x04 */
+ u32 main_db_index; /* would point to v4 src transport address */
+
+ /* 0x08 */
+ cnat_key_t v4_dest_key;
+
+ /* 0x10 */
+ u16 flags; /* Same as cnat_main_db_t */
+
+ /* 0x12 */
+ u16 timeout;
+
+ /* 0x14 */
+ u32 entry_expires;
+ /* 0x18 */
+ index_dlist_t main_list;
+ /* 0x20 = 32 B */
+
+ union { /* alg same as cnat_main_db_t */
+ u16 delta;
+ i8 alg_dlt[2];
+ u16 il;
+ } alg;
+
+ /* 0x22 */
+ u16 tcp_flags;
+
+ /* 0x24 */
+ u32 tcp_seq_num;
+
+ /* 0x28 */
+ u32 ack_no;
+
+ /* 0x2C */
+ u32 window;
+
+ /* 0x30 */
+ u8 scale;
+
+ /* 0x31 */
+ u8 pad;
+
+ /* 0x32 */
+} cnat_session_entry_t;
+
+
+
+/*
+ * out2in and in2out hash bucket arrays are simply arrays of index_slist_t's
+ */
+
+typedef enum {
+ CNAT_DB_CREATE_DEFAULT=0, /* honor cnat_main_db_max_ports_per_user */
+ CNAT_DB_CREATE_OVERRIDE, /* just do it. */
+} cnat_db_create_policy_t;
+
+typedef struct {
+ cnat_key_t in2out_key;
+ cnat_key_t out2in_key;
+ u32 dst_ipv4; /* for evil mode only */
+ u16 cnat_instance;
+ cnat_portmap_t *portmap;
+ u16 *portmap_inuse;
+ cnat_main_db_entry_t *db;
+ cnat_db_create_policy_t policy;
+ port_pair_t pair_of_ports;
+} cnat_db_create_args_t;
+
+extern cnat_main_db_entry_t *cnat_main_db;
+extern cnat_user_db_entry_t *cnat_user_db;
+extern cnat_session_entry_t *cnat_session_db;
+
+#define S_WAO 0
+#define S_WA 1 /* waiting for address pool */
+#define S_WO 2 /* waiting for outside vrf */
+#define S_RUN 3 /* got everything */
+#define S_DEL 4 /* just delete */
+
+#define INVALID_UIDX 0xffff /*invalid svi app uidb index */
+#define INVALID_VRFID 0xffffffff /*invalid vrf id */
+
+typedef struct {
+ u16 status;
+ u16 tcp_mss; /* tcp max segment size for this inside vrf */
+ u32 delete_time;
+ u16 i_vrf; //inside SVI uidx
+ u16 o_vrf; //outside SVI uidx
+ u32 i_vrf_id; //inside vrf id
+ u32 o_vrf_id; //outside vrf id
+ cnat_portmap_v2_t *portmap_list;
+ u32 nfv9_logging_index;
+ u32 syslog_logging_index;
+ u16 ip_n_to_1;
+#ifndef NO_BULK_LOGGING
+ bulk_alloc_size_t bulk_size;
+#endif /* #ifndef NO_BULK_LOGGING */
+ u32 pcp_server_addr;
+ u32 pcp_server_port;
+
+ u8 nf_logging_policy;
+ u8 syslog_logging_policy;
+ u8 frag_tout;
+ u32 rseed_ip;
+ u16 port_limit;
+ u8 tcp_seq_check_enable;
+ u8 pad;
+ u32 tcp_seq_user_window;
+ u8 filter_policy;
+ u8 ignore_port;
+} cnat_vrfmap_t;
+
+/*
+ * When creating cnat_vrfmap entry, ensure that any already
+ * configured logging info is taken into account
+ */
+#define CNAT_SET_VRFMAP_NFV9_LOGGING_INDEX(logging_index, i_vrf) \
+do { \
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info = 0; \
+ pool_foreach (my_nfv9_logging_info, cnat_nfv9_logging_info, ({ \
+ if (my_nfv9_logging_info->i_vrf == i_vrf) { \
+ logging_index = my_nfv9_logging_info - cnat_nfv9_logging_info; \
+ break; \
+ } \
+ })); \
+} while (0)
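+
+/*
+ * Usage sketch (illustrative, assuming a vrfmap entry being set up):
+ *
+ *   CNAT_SET_VRFMAP_NFV9_LOGGING_INDEX(my_vrfmap->nfv9_logging_index,
+ *                                      my_vrfmap->i_vrf);
+ *
+ * nfv9_logging_index is left untouched when no configured logging info
+ * matches the inside vrf.
+ */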
+
+
+typedef struct {
+ /*
+ * spp_ctx_alloc() call failed
+ */
+ u64 nfv9_logging_context_creation_fail_count;
+
+ /*
+ * Cannot send the existing logging pkt, so cannot create
+ * any additional packets for logging purposes
+ */
+ u64 nfv9_logging_context_creation_deferred_count;
+
+ /*
+ * Cannot send the existing logging pkt due to cnat_rewrite_output
+ * superframe being full.
+ */
+ u64 nfv9_downstream_constipation_count;
+
+ /*
+ * buffer for spp_ctx_alloc() call failed
+ */
+ u64 nfv9_logging_context_buffer_allocation_fail_count;
+
+} cnat_global_counters_t;
+
+
+extern cnat_global_counters_t cnat_global_counters;
+
+extern u16 *cnat_portmap_indices_by_vrf;
+extern cnat_vrfmap_t *cnat_portmap_by_vrf;
+extern cnat_portmap_t **cnat_portmaps;
+extern u16 **cnat_portmaps_inuse;
+
+extern cnat_vrfmap_t *cnat_map_by_vrf;
+
+/*
+ * Special define to indicate that the VRF map index entry is empty
+ */
+#define VRF_MAP_ENTRY_EMPTY 0xffff
+extern u16 vrf_map_array[CNAT_MAX_VRFMAP_ENTRIES];
+
+extern cnat_svi_params_entry svi_params_array[CNAT_MAX_VRFMAP_ENTRIES];
+extern cnat_ingress_vrfid_name_entry vrfid_name_map[MAX_VRFID];
+
+extern index_slist_t *cnat_out2in_hash;
+extern index_slist_t *cnat_in2out_hash;
+extern index_slist_t *cnat_user_hash;
+extern index_slist_t *cnat_session_hash;
+
+typedef enum {
+ CNAT_DB_IN2OUT = 0,
+ CNAT_DB_OUT2IN,
+} cnat_db_which_t;
+
+typedef enum {
+ CNAT_NO_ICMP_MSG =0,
+ CNAT_ICMP_MSG,
+} cnat_icmp_msg_t;
+
+typedef struct {
+ cnat_errno_t error;
+ cnat_icmp_msg_t gen_icmp_msg;
+ u32 svi_addr;
+} cnat_gen_icmp_info;
+
+typedef cnat_vrfmap_t nat64_vrfmap_t;
+typedef cnat_portmap_v2_t nat64_portmap_v2_t;
+
+#define CNAT_V4_GET_HASH(key64, hash, mask) \
+ a = key64; \
+ b = c = 0x9e3779b97f4a7c13LL; \
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix64(a, b, c); \
+ hash = c & mask;
+
+#define CNAT_V4_GET_SESSION_HASH(main_index, in_addr, port, vrf, hash, mask) \
+ a = main_index ^ in_addr ^ port ^ vrf; \
+ b = c = 0x9e3779b9; \
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask;
+
+#define CNAT_V4_GET_FRAG_HASH(key64, key32, hash, mask) \
+ a = key64; \
+ b = key32; \
+ c = 0x9e3779b97f4a7c13LL; \
+ hash_mix64(a, b, c); \
+ hash = c % mask;
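+
+/*
+ * Usage sketch (illustrative): the hash macros above expand in place and
+ * expect the caller to declare the scratch variables a, b and c, as the
+ * lookup routines in cnat_db_v2.c do, e.g.
+ *
+ *   u64 a, b, c;
+ *   CNAT_V4_GET_HASH(ki->k.key64, ki->bucket, CNAT_MAIN_HASH_MASK);
+ *   index = cnat_in2out_hash[ki->bucket].next;
+ */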
+
+#define CNAT_DB_UPDATE_IN2OUT_TIMER \
+ db->entry_expires = cnat_current_time; \
+ db->in2out_pkts++;
+
+#define CNAT_DB_TIMEOUT_RST(db) \
+ if(PREDICT_TRUE(db->entry_expires != 0 )) \
+ db->entry_expires = cnat_current_time;
+
+#define DEBUG_I2O_DROP(debug_flag) \
+if (debug_i_flag & debug_flag) { \
+ cnat_db_debug_i2o_drop(&ki); \
+}
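+/*
+ * Note: DEBUG_I2O_DROP expands in place and expects the calling function
+ * to have a local cnat_db_key_bucket_t named ki in scope.
+ */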
+
+
+cnat_main_db_entry_t *cnat_main_db_create (cnat_db_create_args_t *a);
+void cnat_main_db_entry_delete(cnat_main_db_entry_t *ep);
+
+void cnat_delete_main_db_entry(cnat_main_db_entry_t *ep);
+void cnat_delete_main_db_entry_v2(cnat_main_db_entry_t *ep);
+
+
+cnat_main_db_entry_t*
+cnat_get_main_db_entry(cnat_db_key_bucket_t *ki,
+ port_pair_t port_type,
+ cnat_errno_t *error,
+ cnat_user_db_entry_t ** user_db_entry);
+
+cnat_main_db_entry_t*
+cnat_get_main_db_entry_v2(cnat_db_key_bucket_t *ki,
+ port_pair_t port_pair_type,
+ port_type_t port_type,
+ cnat_gen_icmp_info *info,
+ cnat_key_t *dest_info);
+
+cnat_main_db_entry_t*
+cnat_create_static_main_db_entry_v2(cnat_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_vrfmap_t *my_vrfmap,
+ cnat_gen_icmp_info *info);
+
+cnat_main_db_entry_t*
+cnat_create_main_db_entry_and_hash(cnat_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_user_db_entry_t *udb);
+
+cnat_user_db_entry_t*
+cnat_user_db_create_entry(cnat_db_key_bucket_t *uki,
+ u32 portmap_index);
+
+cnat_user_db_entry_t*
+cnat_user_db_lookup_entry(cnat_db_key_bucket_t *uki);
+
+cnat_main_db_entry_t*
+cnat_main_db_lookup_entry(cnat_db_key_bucket_t *ki);
+
+cnat_main_db_entry_t*
+cnat_main_db_lookup_entry_out2in (cnat_db_key_bucket_t *ko);
+
+void cnat_main_db_entry_dump (cnat_main_db_entry_t *db);
+void cnat_db_in2out_hash_delete (cnat_main_db_entry_t *ep, cnat_user_db_entry_t *up);
+void cnat_db_out2in_hash_delete (cnat_main_db_entry_t *ep);
+void cnat_user_db_delete (cnat_user_db_entry_t *up);
+void cnat_db_debug_i2o_drop(cnat_db_key_bucket_t *ki);
+
+/*
+ * Function to dump the Hash Table that maps if_num to uidb_index
+ */
+extern void cnat_if_num_hash_table_dump(void);
+
+#define MAIN_DB_TYPE 0
+#define SESSION_DB_TYPE 1
+u16 query_and_update_db_timeout(void *db, u8 db_type);
+
+u16 cnat_timeout_db_create (cnat_timeout_t t_entry);
+void cnat_timeout_db_delete(cnat_key_t t_key);
+
+cnat_session_entry_t *
+cnat_create_session_db_entry(cnat_key_t *ko,
+ cnat_main_db_entry_t *bdb, u8 log);
+
+void cnat_dest_update_main2session(cnat_main_db_entry_t *mdb,
+ cnat_session_entry_t *sdb);
+
+cnat_session_entry_t *cnat_handle_1to2_session(
+ cnat_main_db_entry_t *mdb,
+ cnat_key_t *dest_info);
+
+void cnat_add_dest_n_log(
+ cnat_main_db_entry_t *mdb,
+ cnat_key_t *dest_info);
+
+cnat_session_entry_t *
+ cnat_session_db_lookup_entry(cnat_key_t *ko,u32 main_db_index);
+
+cnat_session_entry_t *
+ cnat_session_db_edm_lookup_entry(cnat_key_t *ko,
+ u32 session_head_index,
+ u32 main_db_index);
+
+
+typedef struct{
+ u32 sessions;
+ u32 active_translations;
+ u32 num_dynamic_translations;
+ u32 num_static_translations;
+ u64 in2out_drops_port_limit_exceeded;
+ u64 in2out_drops_system_limit_reached;
+ u64 in2out_drops_resource_depletion;
+ u64 no_translation_entry_drops;
+ u32 num_subscribers;
+ u32 dummy;
+ u64 drops_sessiondb_limit_exceeded;
+} nat44_dslite_common_stats_t;
+
+typedef struct {
+ u32 translation_delete_count;
+ u32 translation_create_count;
+ u32 out2in_forwarding_count;
+} nat44_dslite_global_stats_t;
+
+typedef struct {
+ u64 v4_to_v6_tcp_seq_mismatch_drop_count;
+ u64 v4_to_v6_tcp_seq_mismatch_count;
+ u64 v4_to_v6_out2in_session_create_count;
+ u64 v4_to_v6_end_point_filter_drop_count;
+} nat44_counters_stats_t;
+
+#define NAT44_STATS 0
+#define DSLITE_STATS 1
+extern nat44_dslite_common_stats_t nat44_dslite_common_stats[255]; /* 0 is for nat44 */
+extern nat44_dslite_global_stats_t nat44_dslite_global_stats[2]; /* 0 for nat44 and 1 for dslite */
+extern nat44_counters_stats_t nat44_counters_stats[CNAT_MAX_VRFMAP_ENTRIES];/*For displaying show cgn <cgn-name> inside-vrf <vrf-name> counters */
+
+#define NAT44_COMMON_STATS nat44_dslite_common_stats[NAT44_RESERVED_INST_ID]
+#define NAT44_GLOBAL_STATS nat44_dslite_global_stats[NAT44_STATS]
+#define DSLITE_GLOBAL_STATS nat44_dslite_global_stats[DSLITE_STATS]
+#define SESSION_LOG_ENABLE 1
+#define ALG_ENABLED_DB(db) \
+ ((db->flags & CNAT_PCP_FLAG) || \
+ (db->flags & CNAT_DB_FLAG_ALG_CTRL_FLOW) || \
+ (db->flags & (CNAT_DB_FLAG_PPTP_TUNNEL_INIT | \
+ CNAT_DB_FLAG_PPTP_TUNNEL_ACTIVE)))
+
+
+#endif /* __CNAT_DB_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_db_scanner.c b/plugins/vcgn-plugin/vcgn/cnat_db_scanner.c
new file mode 100644
index 00000000000..6e536d84c79
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_db_scanner.c
@@ -0,0 +1,493 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_db_scanner.c - cnat_db_scanner dispatch function and initialization
+ *
+ * Copyright (c) 2009-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+#include <vppinfra/string.h>
+#include <vppinfra/random.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/format.h>
+
+
+#include "cnat_db.h"
+#include "cnat_logging.h"
+#include "cnat_global.h"
+#include "cnat_ipv4_udp.h"
+#include "cnat_common_api.h"
+
+u32 translation_create_count, translation_delete_count;
+u32 translation_create_rate, translation_delete_rate;
+
+u32 in2out_forwarding_count, out2in_forwarding_count;
+u32 in2out_forwarding_rate, out2in_forwarding_rate;
+
+u32 nat44_active_translations;
+u32 num_entries;
+uword check_these_pool_indices[2*MAX_DB_ENTRY_SELECTED_PER_SCAN];
+
+#define CNAT_DB_SCANNER_TURN_ON 5 /* just an arbitrary number for easier debugging */
+
+//extern u32 pcp_throttle_count;
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_db_scanner_main_t;
+
+cnat_db_scanner_main_t cnat_db_scanner_main;
+
+
+static inline void check_session_for_expiry(
+ cnat_session_entry_t * sdb, u8 timeout_dirty
+ /*,dslite_table_entry_t *dslite_entry_ptr*/)
+{
+ void cnat_delete_session_db_entry (cnat_session_entry_t *ep, u8 log);
+ /* Tasks -
+ * 1. Check for expiry for this entry
+ * 2. Delete if expired
+ */
+ u32 timeout = 0;
+
+ switch(sdb->v4_dest_key.k.vrf & CNAT_PRO_MASK) {
+ case CNAT_TCP:
+ if (sdb->flags & CNAT_DB_FLAG_TCP_ACTIVE) {
+ timeout = sdb->timeout;
+ if(PREDICT_FALSE(timeout_dirty)) {
+ timeout = query_and_update_db_timeout(
+ (void *)sdb, SESSION_DB_TYPE);
+ }
+ if(PREDICT_TRUE(timeout == 0)) {
+ timeout = tcp_active_timeout;
+ //dslite_entry_ptr->timeout_info.tcp_active_timeout;
+ }
+ } else {
+ timeout = tcp_initial_setup_timeout;
+ //dslite_entry_ptr->timeout_info.tcp_initial_setup_timeout;
+ }
+ break;
+ case CNAT_UDP:
+ if (sdb->flags & CNAT_DB_FLAG_UDP_ACTIVE) {
+ timeout = sdb->timeout;
+ if(PREDICT_FALSE(timeout_dirty)) {
+ timeout = query_and_update_db_timeout(
+ (void *)sdb, SESSION_DB_TYPE);
+ }
+
+ if(PREDICT_TRUE(timeout == 0)) {
+ timeout = udp_act_session_timeout;
+ //dslite_entry_ptr->timeout_info.udp_act_session_timeout;
+ }
+ } else {
+ timeout = udp_init_session_timeout;
+ //dslite_entry_ptr->timeout_info.udp_init_session_timeout;
+ }
+ break;
+ case CNAT_ICMP:
+ timeout = icmp_session_timeout;
+ //dslite_entry_ptr->timeout_info.icmp_session_timeout;
+ break;
+ case CNAT_PPTP:
+ timeout = pptp_cfg.timeout;
+ break;
+ default:
+ return;
+ }
+ /* Changes required for clearing sessions */
+ if (PREDICT_FALSE((sdb->entry_expires == 0) ||
+ (sdb->entry_expires + timeout < cnat_current_time))) {
+ cnat_delete_session_db_entry(sdb, TRUE);
+ }
+}
+
+static u8 handle_db_scan_for_sessions(
+ cnat_main_db_entry_t *db, int *dirty_index, uword db_index
+ /* ,dslite_table_entry_t *dslite_entry_ptr */)
+{
+ /* Tasks -
+ * 1. Traverse the sessions and check for timeouts
+ * 2. Delete sessions that have expired
+ * 3. Check if the db has only one session remaining; if so,
+ * the details of the session have to be moved to the main db
+ * and the session db entry needs to be freed
+ * 4. If the db does not have any sessions left, the db itself
+ * needs to be deleted.
+ */
+ u32 nsessions, session_index_head, session_index;
+ cnat_session_entry_t *sdb;
+ u8 timeout_dirty = FALSE;
+
+ if(PREDICT_FALSE(*dirty_index == db_index)) {
+ *dirty_index = -1;
+ }
+ if(PREDICT_FALSE(timeout_dirty_flag == 1)) {
+ timeout_dirty_flag = 0;
+ *dirty_index = db_index;
+ timeout_dirty = TRUE;
+ }
+
+ session_index_head = session_index = db->session_head_index;
+ nsessions = db->nsessions;
+
+ do {
+ sdb = cnat_session_db + session_index;
+ if(PREDICT_FALSE(!sdb)) {
+ //TO DO: Debug msg?
+ return FALSE;
+ }
+ session_index = sdb->main_list.next;
+ check_session_for_expiry(sdb, timeout_dirty /*,dslite_entry_ptr*/);
+ nsessions--; /* To ensure that we do not get into an infinite loop */
+ } while(session_index != session_index_head
+ && db->session_head_index != EMPTY &&
+ nsessions);
+
+ /* Note: the code below assumes that deleting the sessions does
+ * not itself delete the main db entry when it no longer has
+ * any sessions
+ */
+ if(PREDICT_FALSE((!db->nsessions) &&
+ (!(db->flags & CNAT_DB_FLAG_STATIC_PORT)))) {
+ cnat_delete_main_db_entry_v2(db);
+ return TRUE; /* to indicate that main db was deleted */
+ }
+ return FALSE;
+}
+
+static void cnat_db_scanner(void)
+{
+ cnat_main_db_entry_t * db;
+ u32 timeout;
+ cnat_vrfmap_t *my_vrfmap __attribute__((unused)) = 0;
+ static int dirty_index = -1;
+ u16 instance __attribute__((unused));
+ //dslite_table_entry_t *dslite_entry_ptr;
+ u32 i;
+ uword db_index;
+ //pcp_throttle_count = 0;
+
+ for (i = 0; i < num_entries; i++) {
+ db_index = check_these_pool_indices[i];
+ db = cnat_main_db + db_index;
+ timeout=0;
+ my_vrfmap = 0;
+
+#if 0
+ if(PREDICT_FALSE(db->flags & CNAT_PCP_FLAG)) {
+
+ if(db->proto_data.seq_pcp.pcp_lifetime < cnat_current_time) {
+ /* mark as implicit */
+ db->flags &= ~CNAT_PCP_FLAG;
+ }
+ continue;
+ }
+
+#endif
+ if(PREDICT_FALSE(db->nsessions > 1)) {
+ if(PREDICT_FALSE(
+ handle_db_scan_for_sessions(db, &dirty_index, db_index /*,dslite_entry_ptr */))) {
+ continue;
+ } else if(PREDICT_TRUE(db->nsessions > 1)) {
+ continue;
+ }
+ /* if there is exactly one dest left.. let it fall through
+ * and check if that needs to be deleted as well
+ */
+ }
+
+#if 0
+ if (PREDICT_FALSE(db->flags & CNAT_DB_FLAG_STATIC_PORT)) {
+ if (PREDICT_FALSE(db->flags & CNAT_DB_DSLITE_FLAG)) {
+ if(PREDICT_FALSE(
+ ((dslite_entry_ptr->nf_logging_policy != SESSION_LOG_ENABLE) &&
+ (dslite_entry_ptr->syslog_logging_policy != SESSION_LOG_ENABLE))
+ || (db->nsessions !=1))) {
+ continue;
+ }
+ } else {
+ my_vrfmap = cnat_map_by_vrf + db->vrfmap_index;
+ if(PREDICT_FALSE(
+ ((my_vrfmap->nf_logging_policy != SESSION_LOG_ENABLE) &&
+ (my_vrfmap->syslog_logging_policy != SESSION_LOG_ENABLE)) ||
+ (db->nsessions !=1))) {
+ continue;
+ }
+ }
+ }
+#endif
+
+ switch(db->in2out_key.k.vrf & CNAT_PRO_MASK) {
+ case CNAT_TCP:
+ if (db->flags & CNAT_DB_FLAG_TCP_ACTIVE) {
+ timeout = db->timeout;
+ if(PREDICT_FALSE(dirty_index == db_index)) {
+ dirty_index = -1;
+ }
+ if(PREDICT_FALSE(timeout_dirty_flag == 1)) {
+ timeout_dirty_flag = 0;
+ dirty_index = db_index;
+ }
+ if(PREDICT_FALSE(dirty_index != -1)) {
+ timeout = query_and_update_db_timeout(
+ (void *)db, MAIN_DB_TYPE);
+ }
+ if(PREDICT_TRUE(timeout == 0)) {
+ timeout = tcp_active_timeout;
+ }
+ } else {
+ timeout = tcp_initial_setup_timeout;
+ }
+ break;
+ case CNAT_UDP:
+ if (db->flags & CNAT_DB_FLAG_UDP_ACTIVE) {
+ timeout = db->timeout;
+ if(PREDICT_FALSE(dirty_index == db_index)) {
+ dirty_index = -1;
+ }
+ if(PREDICT_FALSE(timeout_dirty_flag == 1)) {
+ timeout_dirty_flag = 0;
+ dirty_index = db_index;
+ }
+ if(PREDICT_FALSE(dirty_index != -1)) {
+ timeout = query_and_update_db_timeout(
+ (void *)db, MAIN_DB_TYPE);
+ }
+ if(PREDICT_TRUE(timeout == 0)) {
+ timeout = udp_act_session_timeout;
+ }
+ } else {
+ timeout = udp_init_session_timeout;
+ }
+ break;
+ case CNAT_ICMP:
+ timeout = icmp_session_timeout;
+ break;
+ case CNAT_PPTP:
+ timeout = pptp_cfg.timeout;
+ break;
+ default:
+ continue;
+ }
+
+
+ /* Ref: CSCtu97536 */
+ if (PREDICT_FALSE((db->entry_expires == 0) ||
+ (db->entry_expires + timeout < cnat_current_time))) {
+#if 0
+ if (PREDICT_FALSE(db->flags & CNAT_DB_FLAG_STATIC_PORT)) {
+ if (PREDICT_FALSE(db->flags & CNAT_DB_DSLITE_FLAG)) {
+ instance = db->dslite_nat44_inst_id;
+ } else {
+ instance = NAT44_RESERVED_INST_ID;
+ cnat_session_log_nat44_mapping_delete(db, 0, my_vrfmap);
+ }
+
+ /* Reset the session details */
+ db->nsessions = 0;
+ db->dst_ipv4 = 0;
+ db->dst_port = 0;
+ db->flags &= ~(CNAT_DB_FLAG_TCP_ACTIVE | CNAT_DB_FLAG_UDP_ACTIVE
+ | CNAT_DB_FLAG_ALG_ENTRY);
+ db->timeout = 0;
+ db->entry_expires = 0;
+ db->alg.delta = 0;
+ db->proto_data.seq_pcp.tcp_seq_num = 0;
+ continue;
+ }
+#endif
+ //printf("DELETING DB ENTRY FOR 0x%x\n", db->in2out_key.k.ipv4);
+ cnat_delete_main_db_entry_v2(db);
+ }
+ //free(check_these_pool_indices[i]);
+ }
+}
+
+static void walk_the_db (void)
+{
+ pool_header_t *h = pool_header(cnat_main_db);
+ u32 db_uword_len;
+ static u32 base_index = 0, free_bitmap_index = 0;
+ int bits_scanned = 0, i;
+ uword inuse_bitmap;
+
+ num_entries=0;
+
+ /* Across all db entries... */
+ db_uword_len = vec_len(cnat_main_db) / NUM_BITS_IN_UWORD;
+ if (PREDICT_FALSE(vec_len(cnat_main_db) % NUM_BITS_IN_UWORD)) {
+ /*
+ * It should not come here, since in cnat_db_init_v2()
+ * the length is made a multiple of NUM_BITS_IN_UWORD
+ */
+ ASSERT(0);
+ return ;
+ }
+
+ if (PREDICT_FALSE(! db_uword_len))
+ return ;
+
+ while (bits_scanned < MAX_DB_ENTRY_PER_SCAN) {
+
+ if (PREDICT_FALSE(free_bitmap_index < vec_len(h->free_bitmap))) {
+
+ /* free_bitmap exists and it is not all 0 */
+
+ inuse_bitmap = ~(h->free_bitmap[free_bitmap_index]);
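+ /*
+ * In the pool's free_bitmap a set bit means "index is free", so the
+ * complement marks in-use entries. For example, a word of ~0x3
+ * (everything free except bits 0 and 1) gives an inuse_bitmap of 0x3,
+ * and only base_index + 0 and base_index + 1 are queued below.
+ */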
+ i = 0;
+ while (inuse_bitmap) {
+
+ /* Check to see if the index is in use */
+ if (PREDICT_FALSE((inuse_bitmap >> i) & 1)) {
+ check_these_pool_indices[num_entries] = base_index + i;
+ inuse_bitmap &= ~((uword) 1 << i);
+ num_entries++;
+ }
+ i++;
+ } // while (inuse_bitmap)
+ } else {
+
+ /*
+ * The free_bitmap has no word for this range (implicitly 0), which
+ * means all 64 entries are allocated. So, simply add all 64 entries
+ * here. No need to form inuse_bitmap, or to check and reset bits.
+ */
+ for (i=0; i<NUM_BITS_IN_UWORD; i++) {
+
+ check_these_pool_indices[num_entries] = base_index + i;
+ num_entries++;
+ }
+ } // if (free_bitmap_index < vec_len(h->free_bitmap))
+
+ /* Update free_bitmap_index and base_index for next run */
+ if (PREDICT_FALSE(free_bitmap_index == db_uword_len - 1)) {
+ /* wrap-around for next run */
+ free_bitmap_index = 0;
+ base_index = 0;
+ } else {
+ free_bitmap_index ++;
+ base_index += NUM_BITS_IN_UWORD;
+ }
+
+ /* increment # of bits scanned */
+ bits_scanned += NUM_BITS_IN_UWORD;
+
+ /* Found enough entries to check ? */
+ if (PREDICT_FALSE(num_entries >= MAX_DB_ENTRY_SELECTED_PER_SCAN))
+ {
+ /* This check is introduced to keep the MAX scan entry count fixed. */
+ /* It is very much required when we do scanning for NAT64; */
+ /* please check the comments in cnat_db_scanner() &
+ * handler_nat64_db_scanner() */
+ if (num_entries >= MAX_COMBINED_DB_ENTRIES_PER_SCAN) {
+ num_entries = MAX_COMBINED_DB_ENTRIES_PER_SCAN;
+ }
+ break;
+ }
+
+ } // while (bits_scanned < MAX_DB_ENTRY_PER_SCAN)
+
+ if (PREDICT_FALSE(num_entries > 0)) {
+ //printf("%s: num_entries [%d]\n", __func__, num_entries);
+ cnat_db_scanner();
+ }
+ return ;
+}
+
+static uword cnat_db_scanner_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ f64 timeout = 0.01; /* timeout value in sec (10 ms) */
+ static u8 timeout_count = 0;
+
+ uword event_type;
+ uword * event_data = 0;
+ /* Wait until vCGN is configured */
+ while (1) {
+ /* Assigning a huge timeout value, vCGN may or
+ * may not get configured within this timeout */
+ vlib_process_wait_for_event_or_clock (vm, 1e9);
+ event_type = vlib_process_get_events (vm, &event_data);
+
+ /* check whether the process was woken up by the right event;
+ * otherwise continue waiting for the vCGN config */
+ if (event_type == CNAT_DB_SCANNER_TURN_ON) {
+ break;
+ }
+ }
+
+ while(1) {
+ vlib_process_suspend(vm, timeout);
+
+ /* Above suspend API should serve the purpose, no need to invoke wait API */
+ /* vlib_process_wait_for_event_or_clock (vm, timeout); */
+
+ /* Let's make use of this timeout to send pending netflow packets */
+ if (timeout_count < 100) { /* 100*10 ms = 1 sec */
+ timeout_count++;
+ } else {
+ if (nfv9_configured) {
+ handle_pending_nfv9_pkts();
+ }
+ timeout_count = 0;
+ }
+ /* Do we need this ? */
+ //event_type = vlib_process_get_events (vm, &event_data);
+ cnat_current_time = (u32)vlib_time_now (vm);
+ if (cnat_db_init_done) {
+ walk_the_db();
+ }
+ }
+
+ return 0;
+}
+
+
+VLIB_REGISTER_NODE (cnat_db_scanner_node) = {
+ .function = cnat_db_scanner_fn,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "cnat-db-scanner",
+ .process_log2_n_stack_bytes = 18,
+};
+
+clib_error_t *cnat_db_scanner_init (vlib_main_t *vm)
+{
+ cnat_db_scanner_main_t *mp = &cnat_db_scanner_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+void cnat_scanner_db_process_turn_on(vlib_main_t *vm)
+{
+ vlib_process_signal_event (vm, cnat_db_scanner_node.index,
+ CNAT_DB_SCANNER_TURN_ON, 0);
+ return;
+}
+
+VLIB_INIT_FUNCTION (cnat_db_scanner_init);
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_db_v2.c b/plugins/vcgn-plugin/vcgn/cnat_db_v2.c
new file mode 100644
index 00000000000..2b43849dca3
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_db_v2.c
@@ -0,0 +1,3802 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_db_v2.c - translation database definitions
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/error.h>
+
+#include "cnat_db.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_v4_functions.h"
+#include "cnat_log_api.h"
+#include "cnat_cli.h"
+#include "spp_platform_trace_log.h"
+#include "cnat_bulk_port.h"
+#include "nat64_db.h"
+#include "dslite_db.h"
+#include "cnat_config_api.h"
+
+#define HASH_TABLE_SIZE 8192 // hash table size
+#define THROTTLE_TIME 180 // throttle time value for out of port msg/user
+
+u8 cnat_db_init_done = 0;
+
+typedef struct {
+ /* Locks for multi thread support */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+ pthread_spinlock_t *main_db_lockp;
+ pthread_spinlock_t *user_db_lockp;
+ pthread_spinlock_t *session_db_lockp;
+
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_db_v2_main_t;
+
+cnat_db_v2_main_t cnat_db_v2_main;
+
+#if 1
+/* TOBE_PORTED : Remove the following once fixed */
+#undef PREDICT_TRUE
+#undef PREDICT_FALSE
+#define PREDICT_TRUE(x) (x)
+#define PREDICT_FALSE(x) (x)
+#endif
+
+#define foreach_cnat_db_v2_error \
+_(DROP, "error-drop packets")
+
+typedef enum {
+#define _(sym,str) CNAT_DB_V2_##sym,
+ foreach_cnat_db_v2_error
+#undef _
+ CNAT_DB_V2_N_ERROR,
+} cnat_db_v2_error_t;
+
+static char * cnat_db_v2_error_strings[] __attribute__((unused)) = {
+#define _(sym,string) string,
+ foreach_cnat_db_v2_error
+#undef _
+};
+
+
+void cnat_table_entry_fill_map(u32 start_addr, u32 end_addr,
+ cnat_portmap_v2_t **port_map_holder)
+{
+ u32 this_start_addr, this_end_addr, this_addr, new;
+ u32 loop_count;
+ u32 pm_len, i;
+ cnat_portmap_v2_t *my_pm =0;
+ cnat_portmap_v2_t *pm = 0;
+
+ my_instance_number = 0;
+
+ this_start_addr = start_addr;
+ this_end_addr = end_addr;
+
+ /*
+ * How many new addresses are getting added ??
+ */
+ /* commenting this. Right now end - start will be for this vCGN instance */
+ //new = ((this_end_addr - this_start_addr) / MAX_CORES_PER_PARTITION) + 1;
+ new = (this_end_addr - this_start_addr) + 1;
+
+ pm = *port_map_holder;
+ pm_len = vec_len(pm);
+#if DEBUG_NOT_COMMENTED
+ printf("this_start_addr = 0x%08X, this_end_addr = 0x%08X, Num Addr = %d\n",
+ this_start_addr, this_end_addr, new);
+ printf("pm_len = %d\n", pm_len);
+#endif
+ /* Check whether the address pool add requested already exists */
+ my_pm = pm;
+ for(i = 0; i< pm_len; i++) {
+ if(my_pm->ipv4_address == this_start_addr) {
+ printf("address pool with addr 0x%08X exists\n", this_start_addr);
+ return;
+ }
+ my_pm++;
+ }
+
+ /*
+ * For now give a warning message only....
+ */
+#if 0
+ if ((total_address_pool_allocated + new) >
+ CNAT_MAX_ADDR_POOL_SIZE_PER_CORE) {
+ printf("address pool size (%d) would cross permissible limit (%u) \n",
+ (total_address_pool_allocated + new),
+ CNAT_MAX_ADDR_POOL_SIZE_PER_CORE);
+ }
+#endif
+
+ total_address_pool_allocated += new;
+ vec_add2(pm, my_pm, new);
+
+#if DEBUG_NOT_COMMENTED
+ printf("total_address_pool_allocated changed from %d to %d (added %d)",
+ (total_address_pool_allocated - new),
+ total_address_pool_allocated, new);
+ printf("vec add is ok\n");
+#endif
+
+ memset(my_pm, 0, new*sizeof(*my_pm));
+ this_addr = this_start_addr;
+ loop_count = 0; /* Sanity counter */
+
+ while (this_addr <= this_end_addr) {
+#if DEBUG_NOT_COMMENTED
+ printf("loop %d: this addr = 0x%08X\n", loop_count+1, this_addr);
+#endif
+ my_pm->ipv4_address = this_addr;
+ /*
+ * Set all bits to "1" indicating all ports are free
+ */
+ memset(my_pm->bm, 0xff,
+ (((BITS_PER_INST + BITS(uword)-1)/BITS(uword))*(sizeof(uword))));
+ //this_addr += MAX_CORES_PER_PARTITION;
+ this_addr += 1;
+ my_pm++;
+ loop_count++;
+ }
+ /*
+ * loop_count should now be the same as the new value
+ */
+ if (loop_count != new) {
+ printf("Mismatch in loop_count (%d) != new (%d)\n",
+ loop_count, new);
+ }
+
+ *port_map_holder = pm;
+
+#if DEBUG_NOT_COMMENTED
+ printf("revised pm len %d\n", vec_len(*port_map_holder));
+#endif
+
+ return;
+}
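+
+/*
+ * Usage sketch (illustrative, assuming a host-order address range of
+ * 10.0.0.1 .. 10.0.0.10 being added to an inside vrf's pool):
+ *
+ *   cnat_table_entry_fill_map(0x0A000001, 0x0A00000A,
+ *                             &(my_vrfmap->portmap_list));
+ */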
+
+
+void cnat_delete_session_db_entry (cnat_session_entry_t *ep, u8 log);
+void handle_cnat_port_exceeded_logging(
+ cnat_user_db_entry_t *udb,
+ cnat_key_t * key,
+ cnat_vrfmap_t *vrfmap);
+
+cnat_global_counters_t cnat_global_counters;
+u32 last_log_timestamp = 0;
+u32 last_user_dyn_port_exc_timestamp = 0;
+u32 last_user_stat_port_exc_timestamp = 0;
+
+index_slist_t *cnat_out2in_hash;
+index_slist_t *cnat_in2out_hash;
+index_slist_t *cnat_user_hash;
+index_slist_t *cnat_timeout_hash;
+index_slist_t *cnat_session_hash;
+
+cnat_main_db_entry_t *cnat_main_db;
+cnat_user_db_entry_t *cnat_user_db;
+cnat_session_entry_t *cnat_session_db;
+cnat_timeout_db_entry_t *cnat_timeout_db;
+
+cgse_nat_db_entry_t *cgse_nat_db;
+cgse_nat_user_db_entry_t *cgse_user_db;
+cgse_nat_session_db_entry_t *cgse_session_db;
+
+nat44_dslite_common_stats_t nat44_dslite_common_stats[255]; /* 0 is for nat44 */
+nat44_dslite_global_stats_t nat44_dslite_global_stats[2]; /* 0 for nat44 and 1 for dslite */
+nat44_counters_stats_t nat44_counters_stats[CNAT_MAX_VRFMAP_ENTRIES];
+/*For displaying show cgn <cgn-name> inside-vrf <vrf-name> counters */
+
+/*
+ * This is the pool of vrf map structures used by latest main-db functions
+ */
+cnat_vrfmap_t *cnat_map_by_vrf;
+
+/*
+ * Have a mapping table of vrf_id-->vrf_map_index
+ * This helps in easily getting the vrf_map structure during
+ * main-db create paths
+ */
+u16 vrf_map_array[CNAT_MAX_VRFMAP_ENTRIES];
+cnat_svi_params_entry svi_params_array[CNAT_MAX_VRFMAP_ENTRIES];
+cnat_ingress_vrfid_name_entry vrfid_name_map[MAX_VRFID] = {{0}};
+u64 in2out_drops_port_limit_exceeded;
+u64 in2out_drops_system_limit_reached;
+u64 in2out_drops_resource_depletion;
+u64 no_translation_entry_drops;
+u32 no_sessions;
+
+#define CNAT_SET_ICMP_MSG_INFO \
+if (PREDICT_TRUE((my_vrfmap->i_vrf < CNAT_MAX_VRFMAP_ENTRIES) && \
+ (svi_params_array[my_vrfmap->i_vrf].ipv4_addr))) { \
+ info->gen_icmp_msg = icmp_msg_gen_allowed(); \
+ info->svi_addr = svi_params_array[my_vrfmap->i_vrf].ipv4_addr; \
+}
+
+#define CNAT_DEBUG_INSIDE_ERR(err) \
+if (((protocol == CNAT_UDP) && \
+ (debug_i_flag & CNAT_DEBUG_ERR_UDP)) || \
+ ((protocol == CNAT_TCP) && \
+ (debug_i_flag & CNAT_DEBUG_ERR_TCP)) || \
+ ((protocol == CNAT_ICMP) && \
+ (debug_i_flag & CNAT_DEBUG_ERR_ICMP))) { \
+ cnat_db_debug_error(&u_ki, err); \
+}
+
+#define DSLITE_DEBUG_INSIDE_ERR(err) \
+if (((protocol == CNAT_UDP) && \
+ (debug_i_flag & CNAT_DEBUG_ERR_UDP)) || \
+ ((protocol == CNAT_TCP) && \
+ (debug_i_flag & CNAT_DEBUG_ERR_TCP)) || \
+ ((protocol == CNAT_ICMP) && \
+ (debug_i_flag & CNAT_DEBUG_ERR_ICMP))) { \
+ dslite_db_debug_error(&u_ki, err); \
+}
+
+#define PORT_LIMIT_LOW_THRESHOLD_FOR_SYSLOG 7
+/* If the max_limit is less than 10, no meaningful throttling can be
+ * done, so log only once per user and never clear the flag
+ * once the user exceeds the limit
+ */
+#define CHECK_CLEAR_PORT_LIMIT_EXCEED_FLAG(udb, max_limit) \
+ if(PREDICT_FALSE(udb->flags & CNAT_USER_DB_PORT_LIMIT_EXCEEDED)) { \
+ if(udb->ntranslations < \
+ ((max_limit/10)*PORT_LIMIT_LOW_THRESHOLD_FOR_SYSLOG) && \
+ max_limit >= 10) { \
+ udb->flags = udb->flags & (~CNAT_USER_DB_PORT_LIMIT_EXCEEDED); \
+ } \
+ }
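+/*
+ * Worked example (illustrative): with max_limit = 100, the flag is
+ * cleared only once udb->ntranslations drops below
+ * (100/10) * PORT_LIMIT_LOW_THRESHOLD_FOR_SYSLOG = 70 translations.
+ */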
+
+#ifdef TOBE_PORTED
+/* Commented to remove unused variable warning */
+static char *debug_db_error[] = {
+ "no error", /* CNAT_SUCCESS */
+ "no config", /*CNAT_NO_CONFIG*/
+ "not in run state", /*CNAT_NO_VRF_RUN*/
+ "no pool for any", /*CNAT_NO_POOL_ANY*/
+ "no port for any", /*CNAT_NO_PORT_ANY*/
+ "bad in use for any", /*CNAT_BAD_INUSE_ANY*/
+ "not found for any", /*CNAT_NOT_FOUND_ANY*/
+ "invalid index for direct", /*CNAT_INV_PORT_DIRECT*/
+ "deleted addr for direct", /*CNAT_DEL_PORT_DIRECT*/
+ "bad in use for direct",/*CNAT_BAD_INUSE_DIRECT*/
+ "not found for direct",/*CNAT_NOT_FOUND_DIRECT*/
+ "out of port limit", /*CNAT_OUT_LIMIT*/
+ "main db limit", /*CNAT_MAIN_DB_LIMIT*/
+ "user db limit", /*CNAT_USER_DB_LIMIT*/
+ "not static port", /*CNAT_NOT_STATIC_PORT*/
+ "bad static port request", /*CNAT_BAD_STATIC_PORT_REQ*/
+ "not this core", /*CNAT_NOT_THIS_CORE*/
+ "parser error", /*CNAT_ERR_PARSER*/
+ "invalid msg id", /*CNAT_ERR_INVALID_MSG_ID*/
+ "invalid msg size", /*CNAT_ERR_INVALID_MSG_SIZE*/
+ "invalid payload size", /*CNAT_ERR_INVALID_PAYLOAD_SIZE*/
+ "bad tcp udp port", /*CNAT_ERR_BAD_TCP_UDP_PORT*/
+ "bulk single failure", /*CNAT_ERR_BULK_SINGLE_FAILURE*/
+ "xlat id invalid", /*CNAT_ERR_XLAT_ID_INVALID*/
+ "xlat v6 prefix invalid", /*CNAT_ERR_XLAT_V6_PREFIX_INVALID*/
+ "xlat v4 prefix invalid", /*CNAT_ERR_XLAT_V4_PREFIX_INVALID*/
+ "xlat tcp mss invalid", /*CNAT_ERR_XLAT_TCP_MSS_INVALID*/
+ "6rd id invalid", /*CNAT_ERR_6RD_ID_INVALID*/
+ "6rd v4 tunnel src invalid", /*CNAT_ERR_6RD_V4_TUNNEL_SRC_INVALID*/
+ "6rd v6 prefix invalid", /*CNAT_ERR_6RD_V6_PREFIX_INVALID*/
+ "6rd v6 BR unicast invalid", /*CNAT_ERR_6RD_V6_BR_UNICAST_INVALID*/
+ "6rd v4 prefix masklen invalid", /*CNAT_ERR_6RD_V4_PREFIX_MASK_LEN_INVALID*/
+ "6rd v4 suffix masklen invalid", /*CNAT_ERR_6RD_V4_SUFFIX_MASK_LEN_INVALID*/
+ "6rd v4 combo masklen invalid", /*CNAT_ERR_6RD_V4_COMBO_MASK_LEN_INVALID*/
+ "6rd tunnel mtu invalid", /*CNAT_ERR_6RD_TUNNEL_MTU_INVALID*/
+ "6rd tunnel ttl invalid", /*CNAT_ERR_6RD_TUNNEL_TTL_INVALID*/
+ "6rd tunnel tos invalid", /*CNAT_ERR_6RD_TUNNEL_TOS_INVALID*/
+};
+#endif
+
+f64 port_log_timestamps[HASH_TABLE_SIZE]; /* 32 KB array per core */
+
+void port_exceeded_msg_log (u32 src_addr, u16 i_vrf)
+{
+ u32 hash_value;
+ f64 current_timestamp;
+ vlib_main_t *vlib_main;
+
+ vlib_main = vlib_get_main();
+ current_timestamp = vlib_time_now((vlib_main_t *) vlib_main);
+
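+ /*
+ * Fold the vrf and the two halves of the source address into one of
+ * HASH_TABLE_SIZE (1024*8) throttle buckets; each bucket logs at most
+ * once per THROTTLE_TIME seconds.
+ */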
+ hash_value = ((src_addr >> 16) ^ ((src_addr & 0xffff) ^ i_vrf)) % (1024*8);
+
+ if (PREDICT_FALSE((current_timestamp - port_log_timestamps[hash_value]) > THROTTLE_TIME)) {
+ u32 arg[2] = {i_vrf, src_addr};
+ /* update timestamp */
+ port_log_timestamps[hash_value] = current_timestamp;
+ spp_printf(CNAT_USER_OUT_OF_PORTS, 2, arg);
+ }
+
+ return ;
+}
+
+static void log_port_alloc_error(cnat_errno_t error, cnat_key_t *k)
+{
+ u32 error_code;
+ u32 arr[] = {k->k.vrf, k->k.ipv4, k->k.port};
+ switch (error)
+ {
+ case CNAT_NO_POOL_ANY:
+ error_code = CNAT_NO_POOL_FOR_ANY_ERROR;
+ break;
+ case CNAT_NO_PORT_ANY:
+ error_code = CNAT_NO_PORT_FOR_ANY_ERROR;
+ break;
+ case CNAT_ERR_PARSER:
+ error_code = CNAT_WRONG_PORT_ALLOC_TYPE;
+ break;
+ case CNAT_BAD_INUSE_ANY:
+ error_code = CNAT_BAD_INUSE_ANY_ERROR;
+ break;
+ case CNAT_BAD_INUSE_DIRECT:
+ error_code = CNAT_BAD_INUSE_DIRECT_ERROR;
+ break;
+ case CNAT_NOT_FOUND_ANY:
+ error_code = CNAT_NOT_FOUND_ANY_ERROR;
+ break;
+ case CNAT_NOT_FOUND_DIRECT:
+ error_code = CNAT_NOT_FOUND_DIRECT_ERROR;
+ break;
+ case CNAT_INV_PORT_DIRECT:
+ error_code = CNAT_INV_PORT_FOR_DIRECT_ERROR;
+ break;
+ default:
+ error_code = CNAT_NEW_PORT_ALLOC_ERROR; /* If this code is seen in the log,
+ it means, new error codes are to be added here */
+ break;
+ }
+ spp_printf(error_code, 3, arr);
+}
+
+void cnat_db_debug_error(cnat_db_key_bucket_t *u_ki,
+ cnat_errno_t error)
+{
+ if (PREDICT_FALSE((u_ki->k.k.vrf == debug_i_vrf) &&
+ ((u_ki->k.k.ipv4 >= debug_i_addr_start) &&
+ (u_ki->k.k.ipv4 <= debug_i_addr_end)))) {
+#ifdef DEBUG_PRINTF_ENABLED
+ PLATFORM_DEBUG_PRINT("failed to allocate port due to %s "
+ "for i-vrf 0x%x addr 0x%x port 0x%x\n",
+ debug_db_error[error], u_ki->k.k.vrf,
+ u_ki->k.k.ipv4, u_ki->k.k.port);
+#endif
+ {
+ u32 arg[] = {u_ki->k.k.vrf, u_ki->k.k.ipv4, u_ki->k.k.port};
+ spp_printf(error, 3, arg);
+ }
+ }
+}
+
+void dslite_db_debug_error(dslite_db_key_bucket_t *u_ki,
+ cnat_errno_t error)
+{
+ if (PREDICT_FALSE((u_ki->dk.ipv4_key.k.vrf == debug_i_vrf) &&
+ ((u_ki->dk.ipv4_key.k.ipv4 >= debug_i_addr_start) &&
+ (u_ki->dk.ipv4_key.k.ipv4 <= debug_i_addr_end)))) {
+#ifdef DEBUG_PRINTF_ENABLED
+ PLATFORM_DEBUG_PRINT("failed to allocate port due to %s "
+ "for i-vrf 0x%x addr 0x%x port 0x%x\n",
+ debug_db_error[error], u_ki->dk.ipv4_key.k.vrf,
+ u_ki->dk.ipv4_key.k.ipv4, u_ki->dk.ipv4_key.k.port);
+#endif
+ {
+ u32 arg[] = {u_ki->dk.ipv4_key.k.vrf, u_ki->dk.ipv4_key.k.ipv4, u_ki->dk.ipv4_key.k.port};
+ spp_printf(error, 3, arg);
+ }
+ }
+}
+
+void cnat_db_debug_i2o_drop(cnat_db_key_bucket_t *ki)
+{
+ if (PREDICT_FALSE(((ki->k.k.vrf & CNAT_VRF_MASK) == debug_i_vrf) &&
+ ((ki->k.k.ipv4 >= debug_i_addr_start) &&
+ (ki->k.k.ipv4 <= debug_i_addr_end)))) {
+#ifdef DEBUG_PRINTF_ENABLED
+ PLATFORM_DEBUG_PRINT("pakcet[i-vrf 0x%x addr 0x%x port 0x%x] dropped\n",
+ ki->k.k.vrf, ki->k.k.ipv4, ki->k.k.port);
+#endif
+ {
+ u32 arg[] = {ki->k.k.vrf, ki->k.k.ipv4, ki->k.k.port};
+ spp_printf(CNAT_PACKET_DROP_ERROR, 3, arg);
+ }
+ }
+}
+
+void cnat_db_in2out_hash_delete (cnat_main_db_entry_t *ep, cnat_user_db_entry_t *up)
+{
+ u64 a, b, c;
+ u32 index, bucket;
+ cnat_main_db_entry_t *this, *prev;
+
+#ifdef DSLITE_DEF
+ if (PREDICT_FALSE(ep->flags & CNAT_DB_DSLITE_FLAG)) {
+ dslite_key_t dk = {
+ {up->ipv6[0], up->ipv6[1], up->ipv6[2], up->ipv6[3]} ,
+ {ep->in2out_key.k.ipv4, ep->in2out_key.k.port, ep->in2out_key.k.vrf}
+ };
+ DSLITE_V6_GET_HASH((&dk),
+ bucket,
+ CNAT_MAIN_HASH_MASK);
+ DSLITE_PRINTF(1, "Delete1 DSL main hash bucket ..%u\n", bucket);
+ } else {
+ CNAT_V4_GET_HASH(ep->in2out_key.key64,
+ bucket, CNAT_MAIN_HASH_MASK)
+ DSLITE_PRINTF(1, "Delete1 NAT44 main hash bucket ..%u\n", bucket);
+ }
+#else
+ CNAT_V4_GET_HASH(ep->in2out_key.key64,
+ bucket, CNAT_MAIN_HASH_MASK)
+#endif
+
+ index = cnat_in2out_hash[bucket].next;
+
+ ASSERT(index != EMPTY);
+
+ prev = 0;
+ do {
+ this = cnat_main_db + index;
+ if (PREDICT_TRUE(this == ep)) {
+ if (prev == 0) {
+ cnat_in2out_hash[bucket].next = ep->in2out_hash.next;
+ return;
+ } else {
+ prev->in2out_hash.next = ep->in2out_hash.next;
+ return;
+ }
+ }
+ prev = this;
+ index = this->in2out_hash.next;
+ } while (index != EMPTY);
+
+ ASSERT(0);
+}
+
+void cnat_db_out2in_hash_delete (cnat_main_db_entry_t *ep)
+{
+ u64 a, b, c;
+ u32 index, bucket;
+ cnat_main_db_entry_t *this, *prev;
+
+ CNAT_V4_GET_HASH(ep->out2in_key.key64,
+ bucket, CNAT_MAIN_HASH_MASK)
+
+ index = cnat_out2in_hash[bucket].next;
+
+ ASSERT(index != EMPTY);
+
+ prev = 0;
+ do {
+ this = cnat_main_db + index;
+ if (PREDICT_TRUE(this == ep)) {
+ if (prev == 0) {
+ cnat_out2in_hash[bucket].next = ep->out2in_hash.next;
+ return;
+ } else {
+ prev->out2in_hash.next = ep->out2in_hash.next;
+ return;
+ }
+ }
+ prev = this;
+ index = this->out2in_hash.next;
+ } while (index != EMPTY);
+
+ ASSERT(0);
+}
+
+cnat_main_db_entry_t*
+cnat_main_db_lookup_entry(cnat_db_key_bucket_t *ki)
+{
+ u64 a, b, c;
+ u32 index;
+ cnat_main_db_entry_t *db;
+
+ CNAT_V4_GET_HASH(ki->k.key64,
+ ki->bucket,
+ CNAT_MAIN_HASH_MASK);
+
+ index = cnat_in2out_hash[ki->bucket].next;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ return (NULL);
+ }
+
+ do {
+ db = cnat_main_db + index;
+ if (PREDICT_TRUE(db->in2out_key.key64 == ki->k.key64)) {
+ return db;
+ }
+ index = db->in2out_hash.next;
+ } while (index != EMPTY);
+
+ return (NULL);
+}
+
+void cnat_user_db_delete (cnat_user_db_entry_t *up)
+{
+ u64 a, b, c;
+ u32 index, bucket;
+ cnat_user_db_entry_t *this, *prev;
+
+ if (PREDICT_FALSE(up->flags & CNAT_USER_DB_NAT64_FLAG) != 0) {
+ /* Preventive check - Not a NAT44 entry */
+ return;
+ }
+
+ pthread_spin_lock(cnat_db_v2_main.user_db_lockp);
+#if 1
+ if(PREDICT_FALSE(up->flags & CNAT_USER_DB_DSLITE_FLAG)) {
+ dslite_key_t dk = {
+ {up->ipv6[0], up->ipv6[1], up->ipv6[2], up->ipv6[3]} ,
+ {{up->key.k.ipv4, up->key.k.port, up->key.k.vrf}}
+ };
+
+ DSLITE_V6_GET_HASH((&dk),
+ bucket,
+ CNAT_USER_HASH_MASK);
+ DSLITE_PRINTF(1, "Delete1 DSL user hash bucket ..%u\n", bucket);
+ } else {
+ CNAT_V4_GET_HASH(up->key.key64,
+ bucket, CNAT_USER_HASH_MASK)
+ DSLITE_PRINTF(1, "Delete1 NAT44 user hash bucket ..%u\n", bucket);
+ }
+#else
+ CNAT_V4_GET_HASH(up->key.key64,
+ bucket, CNAT_USER_HASH_MASK)
+ DSLITE_PRINTF(1, "Delete2 NAT44 user hash bucket ..%u\n", bucket);
+#endif
+
+ index = cnat_user_hash[bucket].next;
+
+ ASSERT(index != EMPTY);
+
+ prev = 0;
+ do {
+ this = cnat_user_db + index;
+ if (PREDICT_TRUE(this == up)) {
+ if (prev == 0) {
+ cnat_user_hash[bucket].next = up->user_hash.next;
+ goto found;
+ } else {
+ prev->user_hash.next = up->user_hash.next;
+ goto found;
+ }
+ }
+ prev = this;
+ index = this->user_hash.next;
+ } while (index != EMPTY);
+
+ ASSERT(0);
+
+ found:
+ pool_put(cnat_user_db, up);
+ pthread_spin_unlock(cnat_db_v2_main.user_db_lockp);
+}
+
+cnat_user_db_entry_t*
+cnat_user_db_lookup_entry(cnat_db_key_bucket_t *uki)
+{
+ u64 a, b, c;
+ u32 index;
+ cnat_user_db_entry_t *udb=NULL;
+
+ CNAT_V4_GET_HASH(uki->k.key64,
+ uki->bucket,
+ CNAT_USER_HASH_MASK)
+
+ /* now: index in user vector */
+ index = cnat_user_hash[uki->bucket].next;
+ if (PREDICT_TRUE(index != EMPTY)) {
+ do {
+ udb = cnat_user_db + index;
+ if (PREDICT_FALSE(udb->key.key64 == uki->k.key64)) {
+ return udb;
+ }
+ index = udb->user_hash.next;
+ } while (index != EMPTY);
+ }
+ return (NULL);
+}
+
+cnat_user_db_entry_t*
+cnat_user_db_create_entry(cnat_db_key_bucket_t *uki,
+ u32 portmap_index)
+{
+ cnat_user_db_entry_t *udb = NULL;
+
+ pthread_spin_lock(cnat_db_v2_main.user_db_lockp);
+ pool_get(cnat_user_db, udb);
+ memset(udb, 0, sizeof(*udb));
+
+ udb->ntranslations = 1;
+ udb->portmap_index = portmap_index;
+ udb->key.key64 = uki->k.key64;
+ /* Add this user to the head of the bucket chain */
+ udb->user_hash.next =
+ cnat_user_hash[uki->bucket].next;
+ cnat_user_hash[uki->bucket].next = udb - cnat_user_db;
+
+#ifndef NO_BULK_LOGGING
+ INIT_BULK_CACHE(udb)
+#endif /* NO_BULK_LOGGING */
+ pthread_spin_unlock(cnat_db_v2_main.user_db_lockp);
+ return udb;
+}
+
+cnat_main_db_entry_t*
+cnat_create_main_db_entry_and_hash(cnat_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_user_db_entry_t *udb)
+{
+ u64 a, b, c;
+ u32 db_index;
+ cnat_main_db_entry_t *db = NULL;
+
+ pool_get(cnat_main_db, db);
+ memset(db, 0, sizeof(*db));
+
+ db_index = db - cnat_main_db;
+ db->in2out_key.k.ipv4 = ki->k.k.ipv4;
+ db->in2out_key.k.port = ki->k.k.port;
+ db->in2out_key.k.vrf = ki->k.k.vrf;
+ db->out2in_key.k.ipv4 = ko->k.k.ipv4;
+ db->out2in_key.k.port = ko->k.k.port;
+ db->out2in_key.k.vrf = ko->k.k.vrf;
+
+ db->user_ports.next = db_index;
+ db->user_ports.prev = db_index;
+ db->user_index = udb - cnat_user_db;
+ //db->portmap_index = udb->portmap_index;
+ db->flags &= ~(CNAT_DB_DSLITE_FLAG); // Mark that it is not dslite
+ if (PREDICT_FALSE(udb->ntranslations == 1)) {
+ /*
+ * first port for this src vrf/src ip addr
+ */
+ udb->translation_list_head_index = db_index;
+ } else {
+ index_dlist_addtail(udb->translation_list_head_index,
+ (u8 *)cnat_main_db, sizeof(cnat_main_db[0]),
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports),
+ db_index);
+ }
+
+ /*
+ * setup o2i hash key
+ */
+ CNAT_V4_GET_HASH(ko->k.key64,
+ ko->bucket,
+ CNAT_MAIN_HASH_MASK)
+ db->out2in_hash.next = cnat_out2in_hash[ko->bucket].next;
+ cnat_out2in_hash[ko->bucket].next = db_index;
+ /*
+ * setup i2o hash key; bucket is already calculated
+ */
+ db->in2out_hash.next = cnat_in2out_hash[ki->bucket].next;
+ cnat_in2out_hash[ki->bucket].next = db_index;
+
+#if DEBUG > 1
+ printf("\nMy_Instance_Number %d: Bucket %d, Db_Index %d",
+ my_instance_number, ki->bucket, db_index);
+ printf("\nInside (VRF 0x%x, IP 0x%x, PORT 0x%x)",
+ db->in2out_key.k.vrf, db->in2out_key.k.ipv4, db->in2out_key.k.port);
+ printf("\nOutside (VRF 0x%x, IP 0x%x, PORT 0x%x)",
+ db->out2in_key.k.vrf, db->out2in_key.k.ipv4, db->out2in_key.k.port);
+ printf("\nUser Index %d, IP 0x%x",
+ db->user_index, udb->key.k.ipv4);
+#endif
+
+ NAT44_COMMON_STATS.active_translations++;
+
+ return db;
+}
+
+static inline void pptp_clear_all_channels(
+ cnat_main_db_entry_t *db)
+{
+ u32 db_index, current_db_index;
+ cnat_main_db_entry_t *temp_db;
+
+ /* clear all channels */
+
+ db_index = db->proto_data.pptp_list.next;
+ current_db_index = db - cnat_main_db;
+
+ while( db_index != EMPTY) {
+ temp_db = cnat_main_db + db_index;
+ db_index = temp_db->proto_data.pptp_list.next;
+ temp_db->entry_expires = 0;
+ if(PREDICT_FALSE(temp_db->proto_data.pptp_list.prev
+ == current_db_index)) { // Decouple child GREs from parent
+ temp_db->proto_data.pptp_list.prev = EMPTY;
+ }
+ }
+
+ db->proto_data.pptp_list.next = EMPTY;
+}
+
+void pptp_remove_channel_from_tunnel(cnat_main_db_entry_t *db) {
+
+ cnat_main_db_entry_t *prev_db, *next_db;
+
+ prev_db = cnat_main_db + db->proto_data.pptp_list.prev;
+ next_db = cnat_main_db + db->proto_data.pptp_list.next;
+
+ /* remove entry from the tunnel list */
+ if(PREDICT_TRUE(db->proto_data.pptp_list.prev != EMPTY)) {
+ prev_db->proto_data.pptp_list.next =
+ db->proto_data.pptp_list.next ;
+ }
+
+ if(db->proto_data.pptp_list.next != EMPTY) {
+ next_db->proto_data.pptp_list.prev
+ = db->proto_data.pptp_list.prev;
+ }
+
+}
+
+void cnat_delete_main_db_entry_v2 (cnat_main_db_entry_t *ep)
+{
+ u32 main_db_index;
+ u32 vrfmap_len, udb_len;
+ cnat_user_db_entry_t *up =0;
+ cnat_portmap_v2_t *pm =0;
+ cnat_portmap_v2_t *my_pm =0;
+ cnat_vrfmap_t *my_vrfmap =0;
+ u16 static_port_range;
+#ifndef NO_BULK_LOGGING
+ bulk_alloc_size_t bulk_size;
+ int nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+#endif
+ pool_header_t *h = pool_header(cnat_user_db);
+ u16 instance = 0;
+ u32 my_index;
+
+
+ if (PREDICT_FALSE(ep->flags & CNAT_DB_NAT64_FLAG) != 0) {
+ /* Preventive check - Not a NAT44 entry */
+ return;
+ }
+
+ pthread_spin_lock(cnat_db_v2_main.main_db_lockp);
+ if(PREDICT_FALSE(ep->flags &
+ CNAT_DB_FLAG_PPTP_TUNNEL_ACTIVE)) {
+ pptp_clear_all_channels(ep);
+ PPTP_DECR(active_tunnels);
+ }
+
+ if(PREDICT_FALSE(ep->flags &
+ CNAT_DB_FLAG_PPTP_GRE_ENTRY)) {
+ pptp_remove_channel_from_tunnel(ep);
+ PPTP_DECR(active_channels);
+ }
+
+ /* This function gets called from various locations,
+ * many times from the config handler, so we have
+ * to ensure that multiple sessions, if any, are
+ * released
+ */
+
+ if(PREDICT_FALSE(ep->nsessions > 1)) {
+ cnat_session_entry_t *sdb;
+ while(ep->nsessions > 1 &&
+ ep->session_head_index != EMPTY) {
+ sdb = cnat_session_db + ep->session_head_index;
+ cnat_delete_session_db_entry(sdb, TRUE);
+ }
+ }
+
+ /* Find the set of portmaps for the outside vrf */
+ vrfmap_len = vec_len(cnat_map_by_vrf);
+ udb_len = vec_len(cnat_user_db);
+
+ /* In case of an invalid user just return; deleting only the main db
+ * is not a good idea, since some valid user db entry might be pointing
+ * to that main db and that would leave the dbs in an inconsistent state
+ */
+ if (PREDICT_FALSE((ep->user_index >= udb_len) ||
+ (clib_bitmap_get(h->free_bitmap, ep->user_index)))) {
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("invalid/unused user index in db %d\n", ep->user_index);
+#endif
+ spp_printf(CNAT_INV_UNUSED_USR_INDEX, 1, (u32 *) &(ep->user_index));
+ cnat_main_db_entry_dump(ep);
+ goto unlock;
+ }
+
+ up = cnat_user_db + ep->user_index;
+
+/* Point to the right portmap list */
+if (PREDICT_FALSE(ep->flags & CNAT_DB_DSLITE_FLAG)) {
+ instance = ep->dslite_nat44_inst_id;
+ pm = dslite_table_db_ptr[instance].portmap_list;
+ if(PREDICT_FALSE((pm == NULL))) {
+ DSLITE_PRINTF(3, "NULL portmap list for dslite_id %u, state %u\n",
+ instance, dslite_table_db_ptr[instance].state);
+ cnat_main_db_entry_dump(ep);
+ goto delete_entry;
+ }
+ static_port_range =
+ STAT_PORT_RANGE_FROM_INST_PTR(&(dslite_table_db_ptr[instance]));
+ /*
+ * Netflow logging API for delete event
+ */
+ bulk_size =
+ BULKSIZE_FROM_VRFMAP(&(dslite_table_db_ptr[instance]));
+} else {
+ if (PREDICT_FALSE(ep->vrfmap_index >= vrfmap_len)) {
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("invalid vrfmap index in db\n");
+#endif
+ spp_printf(CNAT_INVALID_VRFMAP_INDEX, 0, NULL);
+ cnat_main_db_entry_dump(ep);
+ goto delete_entry;
+ }
+ instance = NAT44_RESERVED_INST_ID;
+ my_vrfmap = cnat_map_by_vrf + ep->vrfmap_index;
+ pm = my_vrfmap->portmap_list;
+ static_port_range = cnat_static_port_range;
+ bulk_size = BULKSIZE_FROM_VRFMAP(my_vrfmap);
+}
+
+ if (PREDICT_FALSE(ep->flags & CNAT_DB_FLAG_PORT_PAIR)) {
+ /* Give back the port(s) */
+ cnat_port_free_v2_bulk(pm, up->portmap_index,
+ PORT_PAIR, ep->out2in_key.k.port, up, static_port_range
+#ifndef NO_BULK_LOGGING
+ , bulk_size, &nfv9_log_req
+#endif
+ );
+ } else {
+ /* Give back the port(s) */
+ cnat_port_free_v2_bulk (pm, up->portmap_index,
+ PORT_SINGLE, ep->out2in_key.k.port, up, static_port_range
+#ifndef NO_BULK_LOGGING
+ , bulk_size, &nfv9_log_req
+#endif
+ );
+ }
+
+ if (PREDICT_TRUE(!(ep->flags & CNAT_DB_DSLITE_FLAG))) {
+ if(PREDICT_FALSE(nfv9_log_req != CACHE_ALLOC_NO_LOG_REQUIRED)) {
+ if(PREDICT_FALSE(my_vrfmap->nf_logging_policy == SESSION_LOG_ENABLE)) {
+ if(ep->nsessions != 0) {
+ cnat_nfv9_nat44_log_session_delete(ep, NULL, my_vrfmap);
+ }
+ } else {
+ cnat_nfv9_log_mapping_delete(ep, my_vrfmap
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ if(PREDICT_TRUE((my_vrfmap->syslog_logging_policy != SESSION_LOG_ENABLE) ||
+ (ep->nsessions != 0))) {
+ cnat_syslog_nat44_mapping_delete(ep, my_vrfmap, NULL
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ }
+ } else {
+ if(PREDICT_FALSE(nfv9_log_req != CACHE_ALLOC_NO_LOG_REQUIRED)) {
+ if(PREDICT_FALSE( dslite_table_db_ptr[instance].nf_logging_policy ==
+ SESSION_LOG_ENABLE)) {
+ cnat_nfv9_ds_lite_log_session_delete(ep,
+ (dslite_table_db_ptr + instance),NULL);
+ } else {
+ cnat_nfv9_ds_lite_mapping_delete(ep,
+ (dslite_table_db_ptr + instance)
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+#ifdef TOBE_PORTED
+ cnat_syslog_ds_lite_mapping_delete(ep,
+ (dslite_table_db_ptr + instance), NULL
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+#endif /* TOBE_PORTED */
+ }
+ }
+
+delete_entry:
+
+ main_db_index = ep - cnat_main_db;
+
+ pthread_spin_lock(cnat_db_v2_main.user_db_lockp);
+ up->ntranslations--;
+ pthread_spin_unlock(cnat_db_v2_main.user_db_lockp);
+
+ /*
+ * when a user reaches the max allowed port limit
+ * we generate an icmp msg and increment the counter;
+ * when the counter reaches the icmp msg rate limit
+ * we stop icmp msg generation.
+ * when a user port is freed
+ * we need to clear the msg gen counter
+ * so that the next time the
+ * max port limit is reached, we can generate a new icmp msg again
+ */
+ up->icmp_msg_count = 0;
+
+ up->translation_list_head_index = index_dlist_remelem (
+ up->translation_list_head_index, (u8 *)cnat_main_db,
+ sizeof (cnat_main_db[0]),
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports),
+ main_db_index);
+
+ cnat_db_in2out_hash_delete(ep, up);
+
+ if (PREDICT_FALSE(up->ntranslations == 0)) {
+ ASSERT(up->translation_list_head_index == EMPTY);
+ nat44_dslite_common_stats[instance].num_subscribers--;
+ my_index = up->portmap_index;
+ my_pm = pm + my_index;
+ if(PREDICT_TRUE(my_pm->private_ip_users_count)) {
+ my_pm->private_ip_users_count--;
+#ifdef DEBUG_PRINTF_IP_N_TO_1_ENABLED
+ PLATFORM_DEBUG_PRINT("\n cnat_delete_main_db_entry_v2 "
+ "private_ip_users_count = %d",
+ my_pm->private_ip_users_count);
+#endif
+
+ }
+ cnat_user_db_delete(up);
+
+ }
+
+ /* Remove from main DB hashes */
+ //cnat_db_in2out_hash_delete(ep);
+ cnat_db_out2in_hash_delete(ep);
+
+ if(PREDICT_FALSE(ep->flags & CNAT_DB_FLAG_STATIC_PORT)) {
+ nat44_dslite_common_stats[instance].num_static_translations--;
+ } else {
+ nat44_dslite_common_stats[instance].num_dynamic_translations--;
+ }
+
+ /* Return the entry to the pool only after its flags have been examined */
+ pool_put(cnat_main_db, ep);
+ nat44_dslite_common_stats[instance].active_translations--;
+ nat44_dslite_global_stats[!!(instance - 1)].translation_delete_count ++;
+unlock:
+ pthread_spin_unlock(cnat_db_v2_main.main_db_lockp);
+}
+
+cnat_main_db_entry_t*
+cnat_main_db_lookup_entry_out2in (cnat_db_key_bucket_t *ko)
+{
+ u64 a, b, c;
+ u32 index;
+ cnat_main_db_entry_t *db;
+
+ CNAT_V4_GET_HASH(ko->k.key64,
+ ko->bucket,
+ CNAT_MAIN_HASH_MASK);
+
+ index = cnat_out2in_hash[ko->bucket].next;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ return (NULL);
+ }
+
+ do {
+ db = cnat_main_db + index;
+ if (PREDICT_TRUE(db->out2in_key.key64 == ko->k.key64)) {
+ return db;
+ }
+ index = db->out2in_hash.next;
+ } while (index != EMPTY);
+
+ return (NULL);
+}
+
+/* Creates two sessions:
+ * moves the default dest info from mdb into the first session,
+ * fills dest_info into the second session and
+ * returns a pointer to the second session.
+ */
+cnat_session_entry_t *cnat_handle_1to2_session(
+ cnat_main_db_entry_t *mdb,
+ cnat_key_t *dest_info)
+{
+ cnat_key_t old_dest_info;
+ pool_header_t *h;
+ u32 free_session = 0;
+ u16 instance;
+ cnat_session_entry_t *session_db1 = NULL, *session_db2 = NULL;
+
+ h = pool_header(cnat_session_db);
+ free_session = vec_len(h->free_indices) - 1;
+
+ if (PREDICT_FALSE(free_session < 2)) {
+ if (mdb->flags & CNAT_DB_DSLITE_FLAG) {
+ instance = mdb->dslite_nat44_inst_id;
+ } else {
+ instance = NAT44_RESERVED_INST_ID;
+ }
+
+ /* we need 2 sessions here, return NULL */
+ nat44_dslite_common_stats[instance].drops_sessiondb_limit_exceeded++;
+ return NULL;
+ }
+
+ old_dest_info.k.ipv4 = mdb->dst_ipv4;
+ old_dest_info.k.port = mdb->dst_port;
+ old_dest_info.k.vrf = mdb->in2out_key.k.vrf;
+
+ /* create 2 new sessions */
+ session_db1 = cnat_create_session_db_entry(&old_dest_info,
+ mdb, FALSE);
+
+ if(PREDICT_FALSE(session_db1 == NULL)) {
+ return NULL;
+ }
+
+ /* update pkt info to session 2 */
+ session_db2 = cnat_create_session_db_entry(dest_info,
+ mdb, TRUE);
+
+ if(PREDICT_FALSE(session_db2 == NULL)) {
+ cnat_delete_session_db_entry(session_db1, FALSE);
+ return NULL;
+ }
+ /* update main db info to session 1 */
+ cnat_dest_update_main2session(mdb, session_db1);
+
+ return session_db2;
+}
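+
+/* Illustrative call pattern (sketch only; `mdb' and `new_dest' are
+ * hypothetical caller-side names):
+ *
+ *   cnat_session_entry_t *s2 = cnat_handle_1to2_session(mdb, &new_dest);
+ *   if (PREDICT_FALSE(s2 == NULL)) {
+ *       // fewer than two free session db entries, or creation failed
+ *   }
+ *
+ * On success the previous default destination of mdb now lives in the
+ * first session entry and new_dest lives in the returned second entry.
+ */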
+
+/* The function below should be called only
+ * when a NAT44 STATIC entry receives traffic
+ * for the first time, to ensure that
+ * the destination is noted and logged.
+ */
+void cnat_add_dest_n_log(
+ cnat_main_db_entry_t *mdb,
+ cnat_key_t *dest_info)
+{
+
+ if(PREDICT_FALSE(mdb->nsessions != 0)) {
+ return; /* Should not have been called */
+ }
+
+ mdb->dst_ipv4 = dest_info->k.ipv4;
+ mdb->dst_port = dest_info->k.port;
+ mdb->nsessions = 1;
+ mdb->entry_expires = cnat_current_time;
+ u16 instance;
+
+ if (mdb->flags & CNAT_DB_DSLITE_FLAG) {
+ instance = mdb->dslite_nat44_inst_id;
+ cnat_session_log_ds_lite_mapping_create(mdb,
+ (dslite_table_db_ptr + instance),NULL);
+ } else {
+ instance = NAT44_RESERVED_INST_ID;
+ cnat_vrfmap_t *my_vrfmap = cnat_map_by_vrf + mdb->vrfmap_index;
+ cnat_session_log_nat44_mapping_create(mdb, 0, my_vrfmap);
+ }
+}
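+
+/* The helper above is invoked from the lookup path below when a static
+ * entry with nsessions == 0 sees traffic to its first destination (see
+ * the db->nsessions == 0 branch in _cnat_get_main_db_entry_v2()).
+ */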
+
+/*
+ * This function is called by the exception node
+ * when the lookup fails in the i2o node.
+ *
+ * If the per-user port limit is reached,
+ * set the user_db_entry pointer and error == CNAT_OUT_LIMIT.
+ */
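+
+/*
+ * Overall flow (summary of the steps implemented below):
+ *   - re-do the main db lookup; if an entry exists return it, first
+ *     creating a session db entry when the source now talks to a new
+ *     destination
+ *   - step 1: validate the vrfmap for the inside vrf and pick up its
+ *     portmap list
+ *   - step 2: look up the user db entry and allocate an outside port,
+ *     PORT_ALLOC_DIRECTED for an existing user or PORT_ALLOC_ANY for a
+ *     new one, via the static, dynamic or RTSP allocator as appropriate
+ *   - step 3: create the main db entry, set up the in2out/out2in hashes
+ *     and emit NetFlow/syslog records; for PORT_PAIR requests a second
+ *     (ALG) entry is created for the adjacent port
+ */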
+static cnat_main_db_entry_t*
+_cnat_get_main_db_entry_v2(cnat_db_key_bucket_t *ki,
+ port_pair_t port_pair_type,
+ port_type_t port_type,
+ cnat_gen_icmp_info *info,
+ cnat_key_t *dest_info)
+{
+ u16 protocol;
+ cnat_errno_t rv;
+ cnat_db_key_bucket_t u_ki, ko;
+ u32 my_index, free_main, free_user;
+ u32 current_timestamp;
+ u16 my_vrfmap_index;
+ u16 my_vrfmap_entry_found = 0;
+ cnat_vrfmap_t *my_vrfmap =0;
+ cnat_portmap_v2_t *pm =0;
+ cnat_user_db_entry_t *udb = 0;
+ cnat_main_db_entry_t *db = 0;
+ pool_header_t *h;
+ u16 port_limit;
+ cnat_portmap_v2_t *my_pm = 0;
+
+#ifndef NO_BULK_LOGGING
+ int nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+#endif
+
+
+ /*
+ * Try the lookup again: at high line rate a second packet
+ * may arrive here before the entry created for the
+ * first packet is in place.
+ */
+ info->gen_icmp_msg = CNAT_NO_ICMP_MSG;
+ info->error = CNAT_SUCCESS;
+ db = cnat_main_db_lookup_entry(ki);
+ if (PREDICT_TRUE(db)) {
+ /* what if the source is talking to a
+ * new dest now? We will have to handle this case and
+ * take care of - creating session db and logging
+ */
+ if(PREDICT_FALSE((!dest_info->k.ipv4) && (!dest_info->k.port))) {
+ return db; /* if dest_info is null don't create session */
+ }
+ if(PREDICT_TRUE((db->dst_ipv4 == dest_info->k.ipv4) &&
+ (db->dst_port == dest_info->k.port))) {
+ return db;
+ }
+ dest_info->k.vrf = db->in2out_key.k.vrf;
+ /* Src is indeed talking to a different dest */
+ cnat_session_entry_t *session_db2 = NULL;
+ if(PREDICT_TRUE(db->nsessions == 1)) {
+ session_db2 = cnat_handle_1to2_session(db, dest_info);
+ if(PREDICT_TRUE(session_db2 != NULL)) {
+ CNAT_DB_TIMEOUT_RST(session_db2);
+ return db;
+ } else {
+ info->error = CNAT_ERR_NO_SESSION_DB;
+ return NULL;
+ }
+ } else if(PREDICT_FALSE(db->nsessions == 0)) {
+ /* Should be static entry.. should never happen
+ */
+ if(PREDICT_TRUE(dest_info->k.ipv4 != 0)) {
+ cnat_add_dest_n_log(db, dest_info);
+ }
+ return db;
+ } else {
+ /* The src has already created multiple sessions.. very rare
+ */
+ session_db2 = cnat_create_session_db_entry(dest_info,
+ db, TRUE);
+ if(PREDICT_TRUE(session_db2 != NULL)) {
+ CNAT_DB_TIMEOUT_RST(session_db2);
+ return db;
+ } else {
+ info->error = CNAT_ERR_NO_SESSION_DB;
+ return NULL;
+ }
+ }
+
+ }
+
+ /*
+ * step 1. check whether the outside vrf is configured
+ * and find the set of portmaps for the outside vrf;
+ * the inside vrf is mapped one-to-one to the outside vrf.
+ * The key is vrf and ip only;
+ * ki.k.k.vrf carries protocol bits, so mask them out.
+ */
+ protocol = ki->k.k.vrf & CNAT_PRO_MASK;
+ u_ki.k.k.vrf = ki->k.k.vrf & CNAT_VRF_MASK;
+ u_ki.k.k.ipv4 = ki->k.k.ipv4;
+ u_ki.k.k.port = 0;
+
+ my_vrfmap_index = vrf_map_array[u_ki.k.k.vrf];
+ my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;
+
+ my_vrfmap_entry_found = ((my_vrfmap_index != VRF_MAP_ENTRY_EMPTY) &&
+ (my_vrfmap->status == S_RUN) &&
+ (my_vrfmap->i_vrf == u_ki.k.k.vrf));
+
+ if (PREDICT_FALSE(!my_vrfmap_entry_found)) {
+ u32 arr[] = {ki->k.k.vrf, ki->k.k.ipv4, ki->k.k.port};
+ if ((my_vrfmap_index == VRF_MAP_ENTRY_EMPTY) ||
+ (my_vrfmap->i_vrf == u_ki.k.k.vrf)) {
+ info->error = CNAT_NO_CONFIG;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_NO_CONFIG)
+ spp_printf(CNAT_NO_CONFIG_ERROR, 3, arr);
+ } else {
+ info->error = CNAT_NO_VRF_RUN;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_NO_VRF_RUN)
+ spp_printf(CNAT_NO_VRF_RUN_ERROR, 3, arr);
+ }
+
+ return (NULL);
+ }
+
+ pm = my_vrfmap->portmap_list;
+
+ port_limit = my_vrfmap->port_limit;
+ if(PREDICT_FALSE(!port_limit)) {
+ port_limit = cnat_main_db_max_ports_per_user;
+ }
+ /*
+ * set o2i key with protocol bits
+ */
+ ko.k.k.vrf = my_vrfmap->o_vrf | protocol;
+
+ /*
+ * step 2. check if src vrf, src ip addr is already
+ * in the user db:
+ * if yes, use PORT_ALLOC_DIRECTED;
+ * if no, use PORT_ALLOC_ANY since it is the first time
+ */
+ udb = cnat_user_db_lookup_entry(&u_ki);
+ if (PREDICT_TRUE(udb)) {
+ /*
+ * not the first port allocated for this user;
+ * check the limit
+ */
+ if (PREDICT_FALSE(udb->ntranslations >=
+ port_limit)) {
+ /* Check for the port type here. If we are getting
+ * a STATIC PORT, allow the config.
+ */
+ if (PREDICT_TRUE(port_type != PORT_TYPE_STATIC)) {
+ info->error = CNAT_OUT_LIMIT;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(CNAT_OUT_LIMIT)
+ port_exceeded_msg_log(u_ki.k.k.ipv4, u_ki.k.k.vrf);
+ in2out_drops_port_limit_exceeded ++;
+ u_ki.k.k.port = ki->k.k.port;
+ u_ki.k.k.vrf = ki->k.k.vrf;
+ handle_cnat_port_exceeded_logging(udb, &u_ki.k, my_vrfmap);
+ return (NULL);
+ }
+ }
+ CHECK_CLEAR_PORT_LIMIT_EXCEED_FLAG(udb,
+ port_limit)
+
+ /*
+ * check if main db has space to accommodate a new entry
+ */
+ h = pool_header(cnat_main_db);
+
+ free_main = vec_len(h->free_indices) - 1;
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ CNAT_SET_ICMP_MSG_INFO
+ in2out_drops_system_limit_reached ++;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+
+ current_timestamp = spp_trace_log_get_unix_time_in_seconds();
+ if (PREDICT_FALSE((current_timestamp - last_log_timestamp) >
+ 1800)) {
+ spp_printf(CNAT_SESSION_THRESH_EXCEEDED, 0, NULL);
+ last_log_timestamp = current_timestamp;
+ }
+
+#ifdef UT_TEST_CODE
+ printf("Limit reached : OLD USER");
+#endif
+ return NULL;
+ }
+
+ /*
+ * allocate port, from existing mapping
+ */
+ my_index = udb->portmap_index;
+
+ if (PREDICT_FALSE(port_type == PORT_TYPE_STATIC)) {
+ rv = cnat_static_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_DIRECTED,
+ port_pair_type,
+ ki->k.k.ipv4,
+ ki->k.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ ,
+ udb, BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , my_vrfmap->ip_n_to_1
+ );
+
+ } else if (PREDICT_TRUE(port_type != PORT_TYPE_RTSP) ) {
+
+ rv = cnat_dynamic_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_DIRECTED,
+ port_pair_type,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ ,
+ udb, BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , my_vrfmap->ip_n_to_1,
+ &(my_vrfmap->rseed_ip)
+ );
+
+ } else {
+ /*
+ * For RTSP, two translation entries are created,
+ * check if main db has space to accommodate two new entries
+ */
+ free_main = free_main - 1;
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ CNAT_SET_ICMP_MSG_INFO
+ in2out_drops_system_limit_reached ++;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+
+ return NULL;
+ } else {
+ rv = cnat_dynamic_port_alloc_rtsp_bulk(pm,
+ PORT_ALLOC_DIRECTED,
+ port_pair_type,
+ ki->k.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ ,
+ udb, BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , &(my_vrfmap->rseed_ip)
+ );
+ }
+ }
+
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(rv)
+ in2out_drops_resource_depletion++;
+ log_port_alloc_error(rv, &(ki->k));
+ return (NULL);
+ }
+ /*
+ * increment port in use for this user
+ */
+ pthread_spin_lock(cnat_db_v2_main.user_db_lockp);
+ udb->ntranslations += 1;
+ pthread_spin_unlock(cnat_db_v2_main.user_db_lockp);
+
+ } else {
+ /*
+ * first time allocating a port for this user
+ */
+
+ /*
+ * Do not create entry if port limit is invalid
+ */
+
+ if (PREDICT_FALSE(!port_limit)) {
+ if (PREDICT_TRUE(port_type != PORT_TYPE_STATIC)) {
+ info->error = CNAT_OUT_LIMIT;
+ in2out_drops_port_limit_exceeded ++;
+ port_exceeded_msg_log(u_ki.k.k.ipv4, u_ki.k.k.vrf);
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(CNAT_OUT_LIMIT)
+ return (NULL);
+ }
+ }
+
+ /*
+ * Check if main db has space for new entry
+ * Allowing a user db entry to be created if main db is not free
+ * will cause a port to be allocated to that user, which results in
+ * wastage of that port, hence the check is done here.
+ */
+ h = pool_header(cnat_main_db);
+ free_main = vec_len(h->free_indices) - 1;
+ h = pool_header(cnat_user_db);
+ free_user = vec_len(h->free_indices) - 1;
+
+ /*
+ * If either main_db or user_db does not have entries
+ * bail out, with appropriate error
+ */
+ if (PREDICT_FALSE(!(free_main && free_user))) {
+ u32 log_error;
+ if(free_main) {
+ info->error = CNAT_USER_DB_LIMIT;
+ log_error = CNAT_USER_DB_LIMIT_ERROR;
+ } else {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ log_error = CNAT_MAIN_DB_LIMIT_ERROR;
+ }
+ in2out_drops_system_limit_reached ++;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(info->error)
+ spp_printf(log_error, 0, 0);
+ return NULL;
+ }
+
+ if (PREDICT_FALSE(port_type == PORT_TYPE_STATIC)) {
+ rv = cnat_static_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_ANY,
+ port_pair_type,
+ ki->k.k.ipv4,
+ ki->k.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ ,
+ udb, BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , my_vrfmap->ip_n_to_1
+ );
+
+ } else if (PREDICT_TRUE(port_type != PORT_TYPE_RTSP)) {
+ rv = cnat_dynamic_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_ANY,
+ port_pair_type,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ , NULL, BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , my_vrfmap->ip_n_to_1,
+ &(my_vrfmap->rseed_ip)
+ );
+ } else {
+ /*
+ * For RTSP, two translation entries are created,
+ * check if main db has space to accommodate two new entries
+ */
+ free_main = free_main - 1;
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ CNAT_SET_ICMP_MSG_INFO
+ in2out_drops_system_limit_reached ++;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+
+ return NULL;
+ } else {
+
+ rv = cnat_dynamic_port_alloc_rtsp_bulk(pm,
+ PORT_ALLOC_ANY,
+ port_pair_type,
+ ki->k.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ , NULL, BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , &(my_vrfmap->rseed_ip)
+ );
+ /* TODO: Add the port pair flag here */
+ }
+ }
+
+
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ in2out_drops_resource_depletion ++;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(rv)
+ log_port_alloc_error(rv, &(ki->k));
+ return (NULL);
+ }
+ /*
+ * create entry in user db
+ */
+ udb = cnat_user_db_create_entry(&u_ki, my_index);
+ NAT44_COMMON_STATS.num_subscribers++;
+ my_pm = pm + my_index;
+ if(PREDICT_TRUE(my_pm->private_ip_users_count < PORTS_PER_ADDR)) {
+ my_pm->private_ip_users_count++;
+#ifdef DEBUG_PRINTF_IP_N_TO_1_ENABLED
+ PLATFORM_DEBUG_PRINT("\n cnat_get_main_db_entry_v2 "
+ "dynamic alloc private_ip_users_count = %d",
+ my_pm->private_ip_users_count);
+#endif
+ } else {
+ PLATFORM_DEBUG_PRINT("\n ERROR: private_ip_users_count has "
+ "reached MAX PORTS_PER_ADDR");
+ }
+#ifndef NO_BULK_LOGGING
+ if(PREDICT_TRUE(udb && (BULK_ALLOC_NOT_ATTEMPTED != nfv9_log_req))) {
+ cnat_update_bulk_range_cache(udb, ko.k.k.port,
+ BULKSIZE_FROM_VRFMAP(my_vrfmap));
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+
+ }
+
+ /*
+ * step 3:
+ * outside port is allocated for this src vrf/src ip addr
+ * 1)create a new entry in main db
+ * 2)setup cnat_out2in_hash key
+ * 3)setup cnat_in2out_hash key
+ */
+ db = cnat_create_main_db_entry_and_hash(ki, &ko, udb);
+
+ translation_create_count ++;
+#ifdef DSLITE_DEF
+ db->dslite_nat44_inst_id = NAT44_RESERVED_INST_ID;
+#endif
+ db->vrfmap_index = my_vrfmap - cnat_map_by_vrf;
+
+ /*
+ * don't forget logging:
+ * the logging API is called unconditionally;
+ * the logging configuration check is done inside the inline function
+ */
+
+ db->dst_ipv4 = dest_info->k.ipv4;
+ db->dst_port = dest_info->k.port;
+ if(PREDICT_TRUE(db->dst_ipv4 || db->dst_port)) {
+ db->nsessions++;
+ }
+
+ if(PREDICT_FALSE(nfv9_log_req != CACHE_ALLOC_NO_LOG_REQUIRED)) {
+ if(PREDICT_FALSE(my_vrfmap->nf_logging_policy == SESSION_LOG_ENABLE)) {
+ /* do not log for static entries.. we will log when traffic flows */
+ if(PREDICT_TRUE(db->dst_ipv4 || db->dst_port)) {
+ cnat_nfv9_nat44_log_session_create(db, 0, my_vrfmap);
+ }
+ } else {
+ cnat_nfv9_log_mapping_create(db, my_vrfmap
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ if(PREDICT_TRUE((my_vrfmap->syslog_logging_policy != SESSION_LOG_ENABLE) ||
+ (db->dst_ipv4 || db->dst_port))) {
+ cnat_syslog_nat44_mapping_create(db, my_vrfmap, 0
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ }
+ if (PREDICT_FALSE(port_pair_type == PORT_PAIR)) {
+ cnat_main_db_entry_t *db2 = 0;
+ cnat_db_key_bucket_t new_ki = *ki;
+ u64 a, b, c;
+
+ new_ki.k.k.port += 1;
+ ko.k.k.port += 1;
+
+ CNAT_V4_GET_HASH(new_ki.k.key64, new_ki.bucket,
+ CNAT_MAIN_HASH_MASK);
+
+ db2 = cnat_create_main_db_entry_and_hash(&new_ki, &ko, udb);
+
+ translation_create_count ++;
+#ifdef DSLITE_DEF
+ db2->dslite_nat44_inst_id = NAT44_RESERVED_INST_ID;
+#endif
+ db2->vrfmap_index = my_vrfmap - cnat_map_by_vrf;
+ db2->entry_expires = cnat_current_time;
+ db2->flags |= CNAT_DB_FLAG_ALG_ENTRY;
+ pthread_spin_lock(cnat_db_v2_main.user_db_lockp);
+ udb->ntranslations += 1;
+ pthread_spin_unlock(cnat_db_v2_main.user_db_lockp);
+ db2->dst_ipv4 = dest_info->k.ipv4;
+ db2->dst_port = dest_info->k.port;
+ db2->nsessions = 0; /* For ALG db, set sessions to 0 - CSCuf78420 */
+
+ if(PREDICT_FALSE(nfv9_log_req != CACHE_ALLOC_NO_LOG_REQUIRED)) {
+ if(PREDICT_FALSE(my_vrfmap->nf_logging_policy == SESSION_LOG_ENABLE)) {
+ /* do not log for static entries.. we will log when traffic flows */
+ if(PREDICT_TRUE(db2->dst_ipv4 || db2->dst_port)) {
+ cnat_nfv9_nat44_log_session_create(db2, 0, my_vrfmap);
+ }
+ } else {
+ cnat_nfv9_log_mapping_create(db2, my_vrfmap
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ if(PREDICT_TRUE((my_vrfmap->syslog_logging_policy != SESSION_LOG_ENABLE) ||
+ (db2->dst_ipv4 || db2->dst_port))) {
+ cnat_syslog_nat44_mapping_create(db2, my_vrfmap, 0
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ }
+ }
+
+ return db;
+}
+
+cnat_main_db_entry_t*
+cnat_get_main_db_entry_v2(cnat_db_key_bucket_t *ki,
+ port_pair_t port_pair_type,
+ port_type_t port_type,
+ cnat_gen_icmp_info *info,
+ cnat_key_t *dest_info)
+{
+
+ cnat_main_db_entry_t *db;
+ pthread_spin_lock(cnat_db_v2_main.main_db_lockp);
+ db = _cnat_get_main_db_entry_v2(ki, port_pair_type,
+ port_type, info, dest_info);
+ pthread_spin_unlock(cnat_db_v2_main.main_db_lockp);
+ return db;
+}
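+
+/* Illustrative usage (sketch only; `ki', `port_pair_type', `port_type',
+ * `icmp_info' and `dest' are hypothetical caller-side variables).
+ * Callers take the locked wrapper above rather than the static worker:
+ *
+ *   cnat_main_db_entry_t *db =
+ *       cnat_get_main_db_entry_v2(&ki, port_pair_type, port_type,
+ *                                 &icmp_info, &dest);
+ *   if (db == NULL) {
+ *       // icmp_info.error carries the reason, e.g. CNAT_OUT_LIMIT
+ *   }
+ */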
+
+/*
+ * this function is called from config handler only
+ * to allocate a static port based db entry
+ *
+ * the actual mapped address and port are already specified
+ */
+cnat_main_db_entry_t*
+cnat_create_static_main_db_entry_v2 (cnat_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_vrfmap_t *my_vrfmap,
+ cnat_gen_icmp_info *info)
+{
+ u16 protocol;
+ u32 head;
+ cnat_errno_t rv;
+ cnat_db_key_bucket_t u_ki;
+ u32 my_index, free_main, free_user;
+ cnat_portmap_v2_t *pm =0;
+ cnat_portmap_v2_t *my_pm =0;
+ cnat_user_db_entry_t *udb = 0;
+ cnat_main_db_entry_t *db = 0;
+ pool_header_t *h;
+#ifndef NO_BULK_LOGGING
+ int nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+#endif
+
+ /* UNUSED. Therefore not ported to be multi-thread friendly */
+ ASSERT(0);
+
+ /*
+ * Try the lookup again: at high line rate a second packet
+ * may arrive here before the entry created for the
+ * first packet is in place.
+ */
+ info->gen_icmp_msg = CNAT_NO_ICMP_MSG;
+ info->error = CNAT_SUCCESS;
+ db = cnat_main_db_lookup_entry(ki);
+
+ /*
+ * If we already have an entry with this inside address and port:
+ * for an identical static mapping just return it, for a different
+ * static mapping flag a config error, and for a dynamic entry
+ * delete it and proceed further.
+ */
+ if (PREDICT_FALSE(db)) {
+
+ if (db->flags & CNAT_DB_FLAG_STATIC_PORT) {
+
+ if ((db->out2in_key.k.ipv4 == ko->k.k.ipv4) &&
+ (db->out2in_key.k.port == ko->k.k.port) &&
+ (db->out2in_key.k.vrf == ko->k.k.vrf)) {
+
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("Same Static Port Exists ki 0x%16llx ko 0x%16llx",
+ ki->k, ko->k);
+#endif
+ /*
+ * We have already programmed this, return
+ */
+ return (db);
+ }
+
+ /*
+ * We already have a static port with different mapping
+ * Return an error for this case.
+ */
+ info->error = CNAT_ERR_PARSER;
+
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("Static Port Existing and Diff ki 0x%16llx ko 0x%16llx",
+ ki, db->out2in_key);
+#endif
+ {
+ u32 arr[] = {STAT_PORT_CONFIG_IN_USE, (ki->k.k.vrf & CNAT_VRF_MASK),
+ ki->k.k.ipv4, ki->k.k.port, (ki->k.k.vrf & CNAT_PRO_MASK) };
+ spp_printf(CNAT_CONFIG_ERROR, 5, arr);
+ }
+ return (db);
+ }
+
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("Deleting Dynamic entry ki 0x%16llx ko 0x%16llx",
+ ki, db->out2in_key);
+#endif
+
+ /*
+ * If for some reason we have dynamic entries, just delete them
+ * and proceed.
+ */
+ cnat_delete_main_db_entry_v2(db);
+
+ db = NULL;
+ }
+
+ protocol = ki->k.k.vrf & CNAT_PRO_MASK;
+ u_ki.k.k.vrf = ki->k.k.vrf & CNAT_VRF_MASK;
+ u_ki.k.k.ipv4 = ki->k.k.ipv4;
+ u_ki.k.k.port = 0;
+
+ pm = my_vrfmap->portmap_list;
+
+ /*
+ * check if src vrf, src ip addr is already
+ * in the user db
+ * if yes, use PORT_ALLOC_DIRECTED
+ * if no, use PORT_ALLOC_ANY since it is first time
+ */
+ udb = cnat_user_db_lookup_entry(&u_ki);
+ if (PREDICT_TRUE(udb)) {
+ /*
+ * check if main db has space to accommodate a new entry
+ */
+ h = pool_header(cnat_main_db);
+
+ free_main = vec_len(h->free_indices) - 1;
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ CNAT_SET_ICMP_MSG_INFO
+ in2out_drops_system_limit_reached ++;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+#ifdef UT_TEST_CODE
+ printf("Limit reached : OLD USER");
+#endif
+ spp_printf(CNAT_MAIN_DB_LIMIT_ERROR, 0, 0);
+ return NULL;
+ }
+
+ /*
+ * allocate port, from existing mapping
+ */
+ my_index = udb->portmap_index;
+ my_pm = pm + my_index;
+ /* It is quite possible that we hit the scenario of CSCtj17774.
+ * Delete all the main db entries and add the ipv4 address sent by
+ * CGN-MA as Static port alloc any
+ */
+
+ if (PREDICT_FALSE(my_pm->ipv4_address != ko->k.k.ipv4)) {
+ if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) {
+ printf("Delete Main db entry and check for"
+ " ipv4 address sanity pm add = 0x%x ip add = 0x%x\n",
+ my_pm->ipv4_address, ko->k.k.ipv4);
+ }
+ do {
+ /* udb is guaranteed non-NULL when we enter this loop */
+ head = udb->translation_list_head_index;
+ db = cnat_main_db + head;
+ cnat_delete_main_db_entry_v2(db);
+ } while (!pool_is_free(cnat_user_db, udb));
+
+ rv = cnat_mapped_static_port_alloc_v2_bulk (pm,
+ PORT_ALLOC_ANY, &my_index, ko->k.k.ipv4, ko->k.k.port,
+ udb, BULKSIZE_FROM_VRFMAP(my_vrfmap), &nfv9_log_req,
+ my_vrfmap->ip_n_to_1);
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ in2out_drops_resource_depletion ++;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(rv)
+ return (NULL);
+ }
+ /*
+ * create entry in user db
+ */
+ udb = cnat_user_db_create_entry(&u_ki, my_index);
+ my_pm = pm + my_index;
+ if(PREDICT_TRUE(my_pm->private_ip_users_count < PORTS_PER_ADDR)) {
+ my_pm->private_ip_users_count++;
+#ifdef DEBUG_PRINTF_IP_N_TO_1_ENABLED
+ PLATFORM_DEBUG_PRINT("\n cnat_create_static_main_db_entry_v2 "
+ "static del n alloc private_ip_users_count = "
+ "%d",my_pm->private_ip_users_count);
+#endif
+ } else {
+ PLATFORM_DEBUG_PRINT("\n ERROR: private_ip_users_count has "
+ "reached MAX PORTS_PER_ADDR");
+ }
+ NAT44_COMMON_STATS.num_subscribers++;
+#ifndef NO_BULK_LOGGING
+ cnat_update_bulk_range_cache(udb, ko->k.k.port,
+ BULKSIZE_FROM_VRFMAP(my_vrfmap));
+#endif /* #ifndef NO_BULK_LOGGING */
+ } else {
+
+ rv = cnat_mapped_static_port_alloc_v2_bulk (pm,
+ PORT_ALLOC_DIRECTED, &my_index, ko->k.k.ipv4, ko->k.k.port,
+ udb, BULKSIZE_FROM_VRFMAP(my_vrfmap), &nfv9_log_req,
+ my_vrfmap->ip_n_to_1);
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(rv)
+ log_port_alloc_error(rv, &(ki->k));
+ return (NULL);
+ }
+
+ /*
+ * increment port in use for this user
+ */
+ udb->ntranslations += 1;
+ }
+ } else {
+ if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) {
+ printf ("Static port alloc any\n");
+ }
+ /*
+ * first time allocating a port for this user
+ */
+
+ /*
+ * Check if main db has space for new entry
+ * Allowing a user db entry to be created if main db is not free
+ * will cause a port to be allocated to that user, which results in
+ * wastage of that port, hence the check is done here.
+ */
+ h = pool_header(cnat_main_db);
+ free_main = vec_len(h->free_indices) - 1;
+ h = pool_header(cnat_user_db);
+ free_user = vec_len(h->free_indices) - 1;
+
+ /*
+ * If either main_db or user_db does not have entries
+ * bail out, with appropriate error
+ */
+ if (PREDICT_FALSE(!(free_main && free_user))) {
+ u32 log_error;
+ if(free_main) {
+ info->error = CNAT_USER_DB_LIMIT;
+ log_error = CNAT_USER_DB_LIMIT_ERROR;
+ } else {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ log_error = CNAT_MAIN_DB_LIMIT_ERROR;
+ }
+ in2out_drops_system_limit_reached ++;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(info->error)
+ spp_printf(log_error, 0, 0);
+ return NULL;
+ }
+
+ rv = cnat_mapped_static_port_alloc_v2_bulk (pm,
+ PORT_ALLOC_ANY, &my_index, ko->k.k.ipv4, ko->k.k.port,
+ udb, BULKSIZE_FROM_VRFMAP(my_vrfmap), &nfv9_log_req,
+ my_vrfmap->ip_n_to_1);
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ in2out_drops_resource_depletion ++;
+ CNAT_SET_ICMP_MSG_INFO
+ CNAT_DEBUG_INSIDE_ERR(rv)
+ log_port_alloc_error(rv, &(ki->k));
+ return (NULL);
+ }
+ /*
+ * create entry in user db
+ */
+ udb = cnat_user_db_create_entry(&u_ki, my_index);
+ my_pm = pm + my_index;
+ if(PREDICT_TRUE(my_pm->private_ip_users_count < PORTS_PER_ADDR)) {
+ my_pm->private_ip_users_count++;
+#ifdef DEBUG_PRINTF_IP_N_TO_1_ENABLED
+ PLATFORM_DEBUG_PRINT("\n cnat_create_static_main_db_entry_v2 "
+ "static alloc private_ip_users_count = %d",
+ my_pm->private_ip_users_count);
+#endif
+ } else {
+ PLATFORM_DEBUG_PRINT("\n ERROR: private_ip_users_count has "
+ "reached MAX PORTS_PER_ADDR");
+ }
+ NAT44_COMMON_STATS.num_subscribers++;
+#ifndef NO_BULK_LOGGING
+ cnat_update_bulk_range_cache(udb, ko->k.k.port,
+ BULKSIZE_FROM_VRFMAP(my_vrfmap));
+#endif /* #ifndef NO_BULK_LOGGING */
+ }
+
+ /*
+ * step 3:
+ * outside port is allocated for this src vrf/src ip addr
+ * 1)create a new entry in main db
+ * 2)setup cnat_out2in_hash key
+ * 3)setup cnat_in2out_hash key
+ */
+ db = cnat_create_main_db_entry_and_hash(ki, ko, udb);
+
+ translation_create_count ++;
+ db->vrfmap_index = my_vrfmap - cnat_map_by_vrf;
+
+ /*
+ * don't forget logging:
+ * the logging API is called unconditionally;
+ * the logging configuration check is done inside the inline function
+ */
+
+ if(PREDICT_FALSE(nfv9_log_req != CACHE_ALLOC_NO_LOG_REQUIRED)) {
+ /* if session logging is enabled .. do not log as there is no
+ * traffic yet
+ */
+ if(PREDICT_FALSE(my_vrfmap->nf_logging_policy != SESSION_LOG_ENABLE)) {
+ cnat_nfv9_log_mapping_create(db, my_vrfmap
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ if(PREDICT_FALSE(my_vrfmap->syslog_logging_policy != SESSION_LOG_ENABLE)) {
+ cnat_syslog_nat44_mapping_create(db, my_vrfmap, 0
+#ifndef NO_BULK_LOGGING
+ , nfv9_log_req
+#endif
+ );
+ }
+ }
+
+ return db;
+}
+
+
+cnat_main_db_entry_t*
+dslite_main_db_lookup_entry(dslite_db_key_bucket_t *ki);
+
+cnat_user_db_entry_t*
+dslite_user_db_lookup_entry(dslite_db_key_bucket_t *uki);
+
+cnat_user_db_entry_t*
+dslite_user_db_create_entry(dslite_db_key_bucket_t *uki, u32 portmap_index);
+
+cnat_main_db_entry_t*
+dslite_create_main_db_entry_and_hash(dslite_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_user_db_entry_t *udb);
+
+#ifdef TOBE_PORTED
+/*
+ * this function is called from config handler only
+ * to allocate a static port based db entry
+ *
+ * the actual mapped address and port are already specified
+ */
+cnat_main_db_entry_t*
+dslite_create_static_main_db_entry_v2 (dslite_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ dslite_table_entry_t *dslite_entry_ptr,
+ cnat_gen_icmp_info *info)
+{
+ u16 protocol;
+ u32 head;
+ cnat_errno_t rv;
+ dslite_db_key_bucket_t u_ki;
+ u32 my_index, free_main, free_user;
+ cnat_portmap_v2_t *pm =0;
+ cnat_portmap_v2_t *my_pm =0;
+ cnat_user_db_entry_t *udb = 0;
+ cnat_main_db_entry_t *db = 0;
+ pool_header_t *h;
+ u16 dslite_id = dslite_entry_ptr->dslite_id;
+#ifndef NO_BULK_LOGGING
+ int nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+#endif
+ cnat_vrfmap_t *my_vrfmap =0;
+ u16 my_vrfmap_index;
+
+ /* UNUSED. Therefore not ported to be multi-thread friendly */
+ ASSERT(0);
+ /*
+ * Try the lookup again: at high line rate a second packet
+ * may arrive here before the entry created for the
+ * first packet is in place.
+ */
+ info->gen_icmp_msg = CNAT_NO_ICMP_MSG;
+ info->error = CNAT_SUCCESS;
+ db = dslite_main_db_lookup_entry(ki);
+
+ /*
+ * If we already have an entry with this inside address and port:
+ * for an identical static mapping just return it, for a different
+ * static mapping flag a config error, and for a dynamic entry
+ * delete it and proceed further.
+ */
+ if (PREDICT_FALSE(db)) {
+
+ if (db->flags & CNAT_DB_FLAG_STATIC_PORT) {
+
+ if ((db->out2in_key.k.ipv4 == ko->k.k.ipv4) &&
+ (db->out2in_key.k.port == ko->k.k.port) &&
+ (db->out2in_key.k.vrf == ko->k.k.vrf)) {
+
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("Same Static Port Exists ki 0x%16llx ko 0x%16llx",
+ ki->k, ko->k);
+#endif
+ /*
+ * We have already programmed this, return
+ */
+ return (db);
+ }
+
+ /*
+ * We already have a static port with different mapping
+ * Return an error for this case.
+ */
+ info->error = CNAT_ERR_PARSER;
+
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("Static Port Existing and Diff ki 0x%16llx ko 0x%16llx",
+ ki, db->out2in_key);
+#endif
+ {
+ u32 arr[] = {STAT_PORT_CONFIG_IN_USE, (ki->dk.ipv4_key.k.vrf & CNAT_VRF_MASK),
+ ki->dk.ipv4_key.k.ipv4, ki->dk.ipv4_key.k.port, (ki->dk.ipv4_key.k.vrf & CNAT_PRO_MASK) };
+ spp_printf(CNAT_CONFIG_ERROR, 5, arr);
+ }
+ return (db);
+ }
+
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("Deleting Dynamic entry ki 0x%16llx ko 0x%16llx",
+ ki, db->out2in_key);
+#endif
+
+ /*
+ * If for some reason we have dynamic entries, just delete them
+ * and proceed.
+ */
+ cnat_delete_main_db_entry_v2(db);
+
+ db = NULL;
+ }
+
+
+ protocol = ki->dk.ipv4_key.k.vrf & CNAT_PRO_MASK;
+ u_ki.dk.ipv4_key.k.vrf = ki->dk.ipv4_key.k.vrf & CNAT_VRF_MASK;
+ u_ki.dk.ipv4_key.k.ipv4 = ki->dk.ipv4_key.k.ipv4;
+ u_ki.dk.ipv4_key.k.port = 0;
+ u_ki.dk.ipv6[0] = ki->dk.ipv6[0];
+ u_ki.dk.ipv6[1] = ki->dk.ipv6[1];
+ u_ki.dk.ipv6[2] = ki->dk.ipv6[2];
+ u_ki.dk.ipv6[3] = ki->dk.ipv6[3];
+
+ my_vrfmap_index = vrf_map_array[u_ki.dk.ipv4_key.k.vrf];
+ my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;
+
+ pm = dslite_entry_ptr->portmap_list;
+
+ /*
+ * check if src vrf, src ip addr is already
+ * in the user db
+ * if yes, use PORT_ALLOC_DIRECTED
+ * if no, use PORT_ALLOC_ANY since it is first time
+ */
+ udb = dslite_user_db_lookup_entry(&u_ki);
+ if (PREDICT_TRUE(udb)) {
+ /*
+ * check if main db has space to accommodate a new entry
+ */
+ h = pool_header(cnat_main_db);
+
+ free_main = vec_len(h->free_indices) - 1;
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_port_limit_exceeded ++;
+ DSLITE_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+#ifdef UT_TEST_CODE
+ printf("Limit reached : OLD USER");
+#endif
+ spp_printf(CNAT_MAIN_DB_LIMIT_ERROR, 0, 0);
+ return NULL;
+ }
+
+ /*
+ * allocate port, from existing mapping
+ */
+ my_index = udb->portmap_index;
+ my_pm = pm + my_index;
+ /* It is quite possible that we hit the scenario of CSCtj17774.
+ * Delete all the main db entries and add the ipv4 address sent by
+ * CGN-MA as Static port alloc any
+ */
+
+ if (PREDICT_FALSE(my_pm->ipv4_address != ko->k.k.ipv4)) {
+ if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) {
+ printf("Delete Main db entry and check for"
+ " ipv4 address sanity pm add = 0x%x ip add = 0x%x\n",
+ my_pm->ipv4_address, ko->k.k.ipv4);
+ }
+ do {
+ /* udb is guaranteed non-NULL when we enter this loop */
+ head = udb->translation_list_head_index;
+ db = cnat_main_db + head;
+ cnat_delete_main_db_entry_v2(db);
+ } while (!pool_is_free(cnat_user_db, udb));
+
+ rv = cnat_mapped_static_port_alloc_v2_bulk (pm,
+ PORT_ALLOC_ANY, &my_index, ko->k.k.ipv4, ko->k.k.port,
+ udb, BULKSIZE_FROM_VRFMAP(dslite_entry_ptr), &nfv9_log_req,
+ my_vrfmap->ip_n_to_1);
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_port_limit_exceeded ++;
+ DSLITE_DEBUG_INSIDE_ERR(rv)
+ return (NULL);
+ }
+ /*
+ * create entry in user db
+ */
+ udb = dslite_user_db_create_entry(&u_ki, my_index);
+ nat44_dslite_common_stats[dslite_id].num_subscribers++;
+#ifndef NO_BULK_LOGGING
+ if(PREDICT_FALSE(udb && (BULK_ALLOC_NOT_ATTEMPTED != nfv9_log_req))) {
+ cnat_update_bulk_range_cache(udb, ko->k.k.port,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr));
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+ } else {
+
+ rv = cnat_mapped_static_port_alloc_v2_bulk (pm,
+ PORT_ALLOC_DIRECTED, &my_index, ko->k.k.ipv4, ko->k.k.port,
+ udb, BULKSIZE_FROM_VRFMAP(dslite_entry_ptr), &nfv9_log_req,
+ my_vrfmap->ip_n_to_1);
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ DSLITE_DEBUG_INSIDE_ERR(rv)
+ log_port_alloc_error(rv, &(ki->dk.ipv4_key));
+ return (NULL);
+ }
+
+ /*
+ * increment port in use for this user
+ */
+ udb->ntranslations += 1;
+ }
+ } else {
+ if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) {
+ printf ("Static port alloc any\n");
+ }
+ /*
+ * first time allocating a port for this user
+ */
+
+ /*
+ * Check if main db has space for new entry
+ * Allowing a user db entry to be created if main db is not free
+ * will cause a port to be allocated to that user, which results in
+ * wastage of that port, hence the check is done here.
+ */
+ h = pool_header(cnat_main_db);
+ free_main = vec_len(h->free_indices) - 1;
+ h = pool_header(cnat_user_db);
+ free_user = vec_len(h->free_indices) - 1;
+
+ /*
+ * If either main_db or user_db does not have entries
+ * bail out, with appropriate error
+ */
+ if (PREDICT_FALSE(!(free_main && free_user))) {
+ u32 log_error;
+ if(free_main) {
+ info->error = CNAT_USER_DB_LIMIT;
+ log_error = CNAT_USER_DB_LIMIT_ERROR;
+ } else {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ log_error = CNAT_MAIN_DB_LIMIT_ERROR;
+ }
+ nat44_dslite_common_stats[dslite_id].in2out_drops_port_limit_exceeded ++;
+ DSLITE_DEBUG_INSIDE_ERR(info->error)
+ spp_printf(log_error, 0, 0);
+ return NULL;
+ }
+
+ rv = cnat_mapped_static_port_alloc_v2_bulk (pm,
+ PORT_ALLOC_ANY, &my_index, ko->k.k.ipv4, ko->k.k.port,
+ udb, BULKSIZE_FROM_VRFMAP(dslite_entry_ptr), &nfv9_log_req,
+ my_vrfmap->ip_n_to_1);
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ info->error = rv;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_port_limit_exceeded ++;
+ DSLITE_DEBUG_INSIDE_ERR(rv)
+ log_port_alloc_error(rv, &(ki->dk.ipv4_key));
+ return (NULL);
+ }
+ /*
+ * create entry in user db
+ */
+ udb = dslite_user_db_create_entry(&u_ki, my_index);
+ nat44_dslite_common_stats[dslite_id].num_subscribers++;
+#ifndef NO_BULK_LOGGING
+ if(PREDICT_FALSE(udb && (BULK_ALLOC_NOT_ATTEMPTED != nfv9_log_req))) {
+ cnat_update_bulk_range_cache(udb, ko->k.k.port,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr));
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+ }
+
+ /*
+ * step 3:
+ * outside port is allocated for this src vrf/src ip addr
+ * 1)create a new entry in main db
+ * 2)setup cnat_out2in_hash key
+ * 3)setup cnat_in2out_hash key
+ */
+ db = dslite_create_main_db_entry_and_hash(ki, ko, udb);
+ db->dslite_nat44_inst_id = dslite_id;
+ nat44_dslite_common_stats[dslite_id].active_translations++;
+ dslite_translation_create_count++;
+
+ /*
+ * don't forget logging:
+ * the logging API is called unconditionally;
+ * the logging configuration check is done inside the inline function
+ */
+#if 0 /* TBD - NEED TO DECIDE ON LOGGING */
+ if(PREDICT_FALSE(nfv9_log_req != CACHE_ALLOC_NO_LOG_REQUIRED)) {
+ /* if session logging is enabled .. do not log as there is no
+ * traffic yet
+ */
+#endif /* #if 0 - this has to be removed later */
+
+ return db;
+}
+#endif /* TOBE_PORTED */
+
+
+/* Per port/ip timeout related routines */
+static
+u32 cnat_timeout_db_hash_lookup (cnat_key_t t_key)
+{
+ cnat_key_t key;
+ u64 a, b, c;
+ u32 index;
+ cnat_timeout_db_entry_t *db;
+
+ key.k.ipv4 = t_key.k.ipv4;
+ key.k.port = t_key.k.port;
+ key.k.vrf = t_key.k.vrf;
+
+ CNAT_V4_GET_HASH(key.key64,
+ index, CNAT_TIMEOUT_HASH_MASK)
+
+
+ index = cnat_timeout_hash[index].next;
+
+ if (PREDICT_FALSE(index == EMPTY))
+ return EMPTY;
+
+ do {
+ db = cnat_timeout_db + index;
+ if (PREDICT_TRUE((db->t_key.timeout_key.key64 & CNAT_TIMEOUT_FULL_MASK)
+ == (key.key64 & CNAT_TIMEOUT_FULL_MASK)))
+ break;
+ index = db->t_hash.next;
+ } while (index != EMPTY);
+
+ return index;
+}
+
+/* Pass db_type as MAIN_DB_TYPE if you are passing
+ * cnat_main_db_entry_t * casted as void * for db
+ * else pass db_type as SESSION_DB_TYPE
+ */
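+/* Illustrative usage (sketch only; `mdb' is a hypothetical pointer to an
+ * existing main db entry):
+ *
+ *   u16 t = query_and_update_db_timeout((void *)mdb, MAIN_DB_TYPE);
+ *   // t == 0 when no timeout db entry matched (timeout cleared);
+ *   // otherwise mdb->timeout now holds the configured value.
+ *
+ * The lookup below tries, in order: an exact ip+port match, a port-only
+ * match (ipv4 == 0) and finally an ip-only match (port == 0).
+ */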
+u16
+query_and_update_db_timeout(void *db, u8 db_type)
+{
+ cnat_key_t t_search_key;
+ u32 index;
+ cnat_timeout_db_entry_t *timeout_db_entry;
+ pool_header_t *h;
+ u32 free;
+
+ cnat_main_db_entry_t *mdb = NULL;
+ cnat_session_entry_t *sdb = NULL;
+
+ if(PREDICT_TRUE(db_type == MAIN_DB_TYPE)) {
+ mdb = (cnat_main_db_entry_t *)db;
+ } else if(db_type == SESSION_DB_TYPE) {
+ sdb = (cnat_session_entry_t *)db;
+ } else {
+ return 0;
+ }
+
+ h = pool_header(cnat_timeout_db);
+ free = vec_len(h->free_indices) - 1;
+
+ if(free == CNAT_TIMEOUT_HASH_SIZE) {
+ /* No timeout db configured */
+ return 0;
+ }
+
+ /* First search for ip/port pair */
+ if(PREDICT_TRUE(db_type == MAIN_DB_TYPE)) {
+ t_search_key.k.ipv4 = mdb->dst_ipv4;
+ t_search_key.k.port = mdb->dst_port;
+ t_search_key.k.vrf = mdb->in2out_key.k.vrf;
+ } else {
+ t_search_key.k.ipv4 = sdb->v4_dest_key.k.ipv4;
+ t_search_key.k.port = sdb->v4_dest_key.k.port;
+ t_search_key.k.vrf = sdb->v4_dest_key.k.vrf;
+ }
+
+ index = cnat_timeout_db_hash_lookup(t_search_key);
+
+ if(index == EMPTY) {
+ /* Search for port map */
+ t_search_key.k.ipv4 = 0;
+
+ index = cnat_timeout_db_hash_lookup(t_search_key);
+
+ if(index == EMPTY) {
+ /* Search for ip only map */
+ if(PREDICT_TRUE(db_type == MAIN_DB_TYPE)) {
+ t_search_key.k.ipv4 = mdb->dst_ipv4;
+ } else {
+ t_search_key.k.ipv4 = sdb->v4_dest_key.k.ipv4;
+ }
+ t_search_key.k.port = 0;
+
+ index = cnat_timeout_db_hash_lookup(t_search_key);
+ if(index != EMPTY) {
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("%s: ip only map sucess\n","query_and_update_db_timeout");
+#endif
+ }
+ } else {
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("%s: port only map sucess\n", "query_and_update_db_timeout");
+#endif
+ }
+
+ } else {
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("%s: ip port map sucess\n","query_and_update_db_timeout");
+#endif
+
+ }
+
+ if(index == EMPTY) {
+ /* No match found, clear timeout */
+ if(PREDICT_TRUE(db_type == MAIN_DB_TYPE)) {
+ mdb->timeout = 0;
+ } else {
+ sdb->timeout = 0;
+ }
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("%s: No match\n","query_and_update_db_timeout");
+#endif
+ } else {
+ /* Match found, update timeout */
+ timeout_db_entry = cnat_timeout_db + index;
+ if(PREDICT_TRUE(db_type == MAIN_DB_TYPE)) {
+ mdb->timeout = timeout_db_entry->t_key.timeout_value;
+ } else {
+ sdb->timeout = timeout_db_entry->t_key.timeout_value;
+ }
+ return timeout_db_entry->t_key.timeout_value;
+ }
+ return 0;
+}
+
+
+
+static
+void cnat_timeout_db_hash_add (cnat_timeout_db_entry_t *t_entry)
+{
+ cnat_key_t key;
+ u64 a, b, c;
+ u32 index, bucket;
+ cnat_key_t t_key = t_entry->t_key.timeout_key;
+
+ key.k.ipv4 = t_key.k.ipv4;
+ key.k.port = t_key.k.port;
+ key.k.vrf = t_key.k.vrf;
+
+ CNAT_V4_GET_HASH(key.key64,
+ bucket, CNAT_TIMEOUT_HASH_MASK)
+
+
+ index = cnat_timeout_hash[bucket].next;
+
+ /* Add this db entry to the head of the bucket chain */
+ t_entry->t_hash.next = index;
+ cnat_timeout_hash[bucket].next = t_entry - cnat_timeout_db;
+}
+
+
+
+u16
+cnat_timeout_db_create (cnat_timeout_t t_entry)
+{
+ cnat_timeout_db_entry_t *db;
+ cnat_key_t t_key = t_entry.timeout_key;
+ u32 db_index;
+
+ pool_header_t *h;
+ u32 free;
+
+ /* UNUSED. Therefore not ported to be multi-thread friendly */
+ ASSERT(0);
+
+ db_index = cnat_timeout_db_hash_lookup(t_key);
+
+ if(db_index != EMPTY) {
+ /* Entry already exists; treat it as an update (or replay) and refresh the value */
+ db = cnat_timeout_db + db_index;
+ db->t_key.timeout_value = t_entry.timeout_value;
+ return CNAT_SUCCESS;
+ }
+
+ h = pool_header(cnat_timeout_db);
+ free = vec_len(h->free_indices) - 1;
+
+ if(free == 0) {
+ return CNAT_OUT_LIMIT;
+ }
+
+
+ pool_get(cnat_timeout_db, db);
+ ASSERT(db);
+
+ memset(db, 0, sizeof(*db));
+
+ db_index = db - cnat_timeout_db;
+
+ db->t_key.timeout_key.k.ipv4 = t_key.k.ipv4;
+ db->t_key.timeout_key.k.port = t_key.k.port;
+ db->t_key.timeout_key.k.vrf = t_key.k.vrf;
+ db->t_key.timeout_value = t_entry.timeout_value;
+
+
+ cnat_timeout_db_hash_add(db);
+ return CNAT_SUCCESS;
+}
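+
+/* Illustrative usage of the create API (sketch only; the field values are
+ * hypothetical, and note the routine is currently marked UNUSED via
+ * ASSERT(0)):
+ *
+ *   cnat_timeout_t t;
+ *   t.timeout_key.k.ipv4 = 0x0a000001;
+ *   t.timeout_key.k.port = 80;
+ *   t.timeout_key.k.vrf  = 0;
+ *   t.timeout_value      = 120;
+ *   if (cnat_timeout_db_create(t) != CNAT_SUCCESS) {
+ *       // CNAT_OUT_LIMIT: timeout db pool exhausted
+ *   }
+ */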
+
+void cnat_timeout_db_delete(cnat_key_t t_key)
+{
+ cnat_key_t key;
+ u64 a, b, c;
+ u32 index, bucket;
+ cnat_timeout_db_entry_t *this, *prev;
+
+ /* UNUSED. Therefore not ported to be multi-thread friendly */
+ ASSERT(0);
+
+ key.k.ipv4 = t_key.k.ipv4;
+ key.k.port = t_key.k.port;
+ key.k.vrf = t_key.k.vrf;
+
+
+ CNAT_V4_GET_HASH(key.key64,
+ bucket, CNAT_TIMEOUT_HASH_MASK)
+
+
+ index = cnat_timeout_hash[bucket].next;
+
+ if(index == EMPTY) return;
+
+ prev = 0;
+ do {
+ this = cnat_timeout_db + index;
+ if (PREDICT_TRUE(
+ (this->t_key.timeout_key.key64 & CNAT_TIMEOUT_FULL_MASK) ==
+ (key.key64 & CNAT_TIMEOUT_FULL_MASK))) {
+ if (prev == 0) {
+ cnat_timeout_hash[bucket].next = this->t_hash.next;
+ goto found;
+ } else {
+ prev->t_hash.next = this->t_hash.next;
+ goto found;
+ }
+ }
+
+ prev = this;
+ index = this->t_hash.next;
+ } while (index != EMPTY);
+
+ if(index == EMPTY) return;
+
+ found:
+ pool_put(cnat_timeout_db, this);
+
+}
+
+void cnat_session_db_hash_delete (cnat_session_entry_t *ep)
+{
+ u32 a, b, c;
+ u32 index, bucket;
+ cnat_session_entry_t *this, *prev;
+
+ CNAT_V4_GET_SESSION_HASH(ep->main_db_index, ep->v4_dest_key.k.ipv4,
+ ep->v4_dest_key.k.port, ep->v4_dest_key.k.vrf, bucket,
+ CNAT_SESSION_HASH_MASK)
+
+
+ index = cnat_session_hash[bucket].next;
+
+ ASSERT(index != EMPTY);
+
+ prev = 0;
+ do {
+ this = cnat_session_db + index;
+ if (PREDICT_TRUE(this == ep)) {
+ if (prev == 0) {
+ cnat_session_hash[bucket].next =
+ ep->cnat_session_hash.next;
+ return;
+ } else {
+ prev->cnat_session_hash.next =
+ ep->cnat_session_hash.next;
+ return;
+ }
+ }
+ prev = this;
+ index = this->cnat_session_hash.next;
+ } while (index != EMPTY);
+
+ ASSERT(0);
+
+}
+
+cnat_session_entry_t *
+cnat_session_db_edm_lookup_entry(cnat_key_t *ko,u32 session_head_index,
+ u32 main_db_index)
+{
+ u32 index;
+ cnat_session_entry_t *db;
+
+
+ index = session_head_index;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ return (NULL);
+ }
+
+ do {
+ db = cnat_session_db + index;
+ if(PREDICT_TRUE((db->main_db_index == main_db_index) &&
+ (db->v4_dest_key.k.vrf == ko->k.vrf) &&
+ (db->v4_dest_key.k.ipv4 == ko->k.ipv4))) {
+
+ return db;
+ }
+ index = db->cnat_session_hash.next;
+ } while (index != EMPTY);
+
+ return (NULL);
+}
+
+
+
+cnat_session_entry_t *
+cnat_session_db_lookup_entry(cnat_key_t *ko,u32 main_db_index)
+{
+ u32 a, b, c;
+ u32 index, bucket;
+ cnat_session_entry_t *db;
+
+ CNAT_V4_GET_SESSION_HASH(main_db_index, ko->k.ipv4, ko->k.port,
+ ko->k.vrf, bucket, CNAT_SESSION_HASH_MASK)
+
+
+ index = cnat_session_hash[bucket].next;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ return (NULL);
+ }
+
+ do {
+ db = cnat_session_db + index;
+ if(PREDICT_TRUE((db->main_db_index == main_db_index) &&
+ (db->v4_dest_key.k.vrf == ko->k.vrf) &&
+ (db->v4_dest_key.k.port == ko->k.port) &&
+ (db->v4_dest_key.k.ipv4 == ko->k.ipv4))) {
+
+ return db;
+ }
+ index = db->cnat_session_hash.next;
+ } while (index != EMPTY);
+
+ return (NULL);
+}
+
+cnat_session_entry_t *
+cnat_create_session_db_entry(cnat_key_t *ko,
+ cnat_main_db_entry_t *bdb, u8 log)
+{
+ u32 a, b, c;
+ u32 db_index, bucket_out;
+ cnat_session_entry_t *db = NULL;
+ pool_header_t *h;
+ u32 free_session;
+ u16 instance;
+
+ db = cnat_session_db_lookup_entry(ko, bdb - cnat_main_db);
+ if (PREDICT_FALSE(db != NULL)) {
+ /*printf("Create Session - Entry already Exists\n");*/
+ return db;
+ }
+
+ h = pool_header(cnat_session_db);
+ free_session = vec_len(h->free_indices) - 1;
+
+ if (bdb->flags & CNAT_DB_DSLITE_FLAG) {
+ instance = bdb->dslite_nat44_inst_id;
+ } else {
+ instance = NAT44_RESERVED_INST_ID;
+ }
+
+ if (PREDICT_FALSE(!free_session)) {
+ nat44_dslite_common_stats[instance].drops_sessiondb_limit_exceeded++;
+ return NULL;
+ }
+
+ if( PREDICT_FALSE(bdb->nsessions == CNAT_MAX_SESSIONS_PER_BIB)) {
+ /* printf("Create Session - Max sessions per BIB reached\n"); */
+ return NULL;
+ }
+
+ pthread_spin_lock(cnat_db_v2_main.session_db_lockp);
+ pool_get(cnat_session_db, db);
+ memset(db, 0, sizeof(*db));
+
+ db_index = db - cnat_session_db;
+ db->v4_dest_key.k.port = ko->k.port;
+ db->v4_dest_key.k.ipv4 = ko->k.ipv4;
+ db->v4_dest_key.k.vrf = ko->k.vrf;
+
+ db->main_list.next = db_index;
+ db->main_list.prev = db_index;
+ db->main_db_index = bdb - cnat_main_db;
+
+ db->tcp_seq_num = 0;
+ db->ack_no = 0;
+ db->window = 0;
+
+ if(PREDICT_FALSE(log)) {
+ bdb->nsessions++;
+ query_and_update_db_timeout(db, SESSION_DB_TYPE);
+ }
+
+ if (PREDICT_FALSE(bdb->nsessions == 1)) {
+ /*
+ * first port for this src vrf/src ip addr
+ */
+ bdb->session_head_index = db_index;
+ } else {
+ index_dlist_addtail(bdb->session_head_index,
+ (u8 *)cnat_session_db, sizeof(cnat_session_db[0]),
+ STRUCT_OFFSET_OF(cnat_session_entry_t, main_list),
+ db_index);
+ }
+
+ /*
+ * setup o2i hash key
+ */
+ CNAT_V4_GET_SESSION_HASH(db->main_db_index, ko->k.ipv4, ko->k.port,
+ ko->k.vrf, bucket_out, CNAT_SESSION_HASH_MASK)
+
+
+ db->cnat_session_hash.next =
+ cnat_session_hash[bucket_out].next;
+ cnat_session_hash[bucket_out].next = db_index;
+
+
+ if(PREDICT_FALSE(log)) {
+ if (bdb->flags & CNAT_DB_DSLITE_FLAG) {
+ cnat_session_log_ds_lite_mapping_create(bdb,
+ (dslite_table_db_ptr + instance),db);
+ } else {
+ cnat_vrfmap_t *my_vrfmap = cnat_map_by_vrf + bdb->vrfmap_index;
+ cnat_session_log_nat44_mapping_create(bdb, db, my_vrfmap);
+ }
+ }
+
+ /* Need to set entry_expires here, as we need to override 0 check for
+ newly established sessions */
+ db->entry_expires = cnat_current_time;
+ nat44_dslite_common_stats[instance].sessions++;
+ pthread_spin_unlock(cnat_db_v2_main.session_db_lockp);
+ return db;
+}
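+
+/* Note on the `log' flag above: when TRUE, the parent main db entry's
+ * nsessions count is incremented, the timeout db is consulted for the new
+ * destination and a session-create record is logged; it is FALSE when
+ * cnat_handle_1to2_session() merely migrates the existing default
+ * destination out of the main db entry.
+ */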
+
+void
+cnat_dest_update_main2session(cnat_main_db_entry_t *mdb,
+ cnat_session_entry_t *sdb)
+{
+
+ sdb->flags = mdb->flags;
+ sdb->timeout = mdb->timeout;
+ sdb->entry_expires = mdb->entry_expires;
+ sdb->alg.delta = mdb->alg.delta;
+ sdb->tcp_seq_num = mdb->proto_data.seq_pcp.tcp_seq_num;
+
+ /* Reset Main db values to 0 */
+ /* Reset only session specific flags */
+ mdb->flags &= ~(CNAT_DB_FLAG_TCP_ACTIVE | CNAT_DB_FLAG_UDP_ACTIVE
+ | CNAT_DB_FLAG_ALG_ENTRY | CNAT_DB_FLAG_ALG_CTRL_FLOW);
+ mdb->timeout = 0;
+ mdb->entry_expires = 0;
+ mdb->alg.delta = 0;
+ if(PREDICT_FALSE(!((mdb->flags & CNAT_DB_FLAG_PPTP_TUNNEL_ACTIVE) ||
+ (mdb->flags & CNAT_DB_FLAG_PPTP_TUNNEL_INIT)))) {
+ mdb->proto_data.seq_pcp.tcp_seq_num = 0;
+ }
+
+ mdb->dst_ipv4 = 0;
+ mdb->dst_port = 0;
+}
+
+
+void
+cnat_dest_update_session2main(cnat_main_db_entry_t *mdb,
+ cnat_session_entry_t *sdb)
+{
+
+ u16 flags = sdb->flags & (CNAT_DB_FLAG_TCP_ACTIVE |
+ CNAT_DB_FLAG_UDP_ACTIVE | CNAT_DB_FLAG_ALG_ENTRY |
+ CNAT_DB_FLAG_ALG_CTRL_FLOW);
+ mdb->flags |= flags;
+ mdb->timeout = sdb->timeout;
+ mdb->entry_expires = sdb->entry_expires;
+ mdb->alg.delta = sdb->alg.delta;
+ if(PREDICT_FALSE(!((mdb->flags & CNAT_DB_FLAG_PPTP_TUNNEL_ACTIVE) ||
+ (mdb->flags & CNAT_DB_FLAG_PPTP_TUNNEL_INIT)))) {
+ mdb->proto_data.seq_pcp.tcp_seq_num = sdb->tcp_seq_num;
+ }
+ mdb->dst_ipv4 = sdb->v4_dest_key.k.ipv4;
+ mdb->dst_port = sdb->v4_dest_key.k.port;
+}
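+
+/* These two helpers are used as a pair: cnat_dest_update_main2session()
+ * moves the destination-specific state out of the main db entry when a
+ * second destination appears (see cnat_handle_1to2_session()), and
+ * cnat_dest_update_session2main() copies it back once the session count
+ * drops to one again (see _cnat_delete_session_db_entry() below).
+ */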
+
+static void
+_cnat_delete_session_db_entry (cnat_session_entry_t *ep, u8 log)
+{
+ u32 session_db_index;
+ u32 bdb_len;
+ cnat_main_db_entry_t *be =0;
+ cnat_session_entry_t *sdb_last = NULL;
+ u16 instance;
+
+ if (PREDICT_FALSE((ep->flags & CNAT_DB_NAT64_FLAG) != 0)) {
+ /* Preventive check - Not a NAT44 entry */
+ return;
+ }
+
+ pool_header_t *h = pool_header(cnat_main_db);
+
+ /* Validate .. just in case we are trying to delete a non existing one */
+ bdb_len = vec_len(cnat_main_db);
+
+ /* In case of an invalid index just return; deleting only the main db
+ * entry is not a good idea, since some valid user db entry might still
+ * point to that main db entry and leave the dbs in an inconsistent state
+ */
+ if (PREDICT_FALSE((ep->main_db_index >= bdb_len) ||
+ (clib_bitmap_get(h->free_bitmap, ep->main_db_index)))) {
+#ifdef DEBUG_PRINTF_ENABLED
+ printf("invalid/unused user index in db %d\n", ep->main_db_index);
+#endif
+ spp_printf(CNAT_INV_UNUSED_USR_INDEX, 1, (u32 *) &(ep->main_db_index));
+ return;
+ }
+
+ be = cnat_main_db + ep->main_db_index;
+
+ session_db_index = ep - cnat_session_db;
+
+ be->session_head_index = index_dlist_remelem (
+ be->session_head_index, (u8 *)cnat_session_db,
+ sizeof (cnat_session_db[0]),
+ STRUCT_OFFSET_OF(cnat_session_entry_t, main_list),
+ session_db_index);
+
+ if (be->flags & CNAT_DB_DSLITE_FLAG) {
+ instance = be->dslite_nat44_inst_id;
+ } else {
+ instance = NAT44_RESERVED_INST_ID;
+ }
+
+ if(PREDICT_TRUE(log)) {
+ if (be->flags & CNAT_DB_DSLITE_FLAG) {
+ cnat_session_log_ds_lite_mapping_delete(be,
+ (dslite_table_db_ptr + instance),ep);
+ } else {
+ cnat_vrfmap_t *my_vrfmap = cnat_map_by_vrf + be->vrfmap_index;
+ cnat_session_log_nat44_mapping_delete(be, ep, my_vrfmap);
+ }
+ be->nsessions--;
+ }
+
+ if (PREDICT_FALSE(be->nsessions == 1 && log)) {
+ /* There is only 1 session left
+ * Copy the info back to main db and release the last
+ * existing session
+ */
+
+ sdb_last = cnat_session_db + be->session_head_index;
+ ASSERT(sdb_last != NULL);
+
+ cnat_dest_update_session2main(be, sdb_last);
+ _cnat_delete_session_db_entry(sdb_last, FALSE);
+ }
+
+ /* Remove from session DB hashes */
+ cnat_session_db_hash_delete(ep);
+ nat44_dslite_common_stats[instance].sessions--;
+
+ pool_put(cnat_session_db, ep);
+}
+
+void cnat_delete_session_db_entry (cnat_session_entry_t *ep, u8 log)
+{
+ pthread_spin_lock(cnat_db_v2_main.session_db_lockp);
+ _cnat_delete_session_db_entry (ep, log);
+ pthread_spin_unlock(cnat_db_v2_main.session_db_lockp);
+}
+
+cnat_main_db_entry_t*
+dslite_main_db_lookup_entry(dslite_db_key_bucket_t *ki)
+{
+ u64 a, b, c;
+ u32 index;
+ cnat_main_db_entry_t *db;
+ cnat_user_db_entry_t *userdb;
+
+ DSLITE_V6_GET_HASH((&(ki->dk)),
+ ki->bucket,
+ CNAT_MAIN_HASH_MASK);
+
+ DSLITE_PRINTF(1,"MDBLU hash..%u\n", ki->bucket);
+
+ index = cnat_in2out_hash[ki->bucket].next;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ DSLITE_PRINTF(1,"MDBLU index MT..\n");
+ return (NULL);
+ }
+
+ do {
+/* We can add a flag here to indicate if the db entry is for nat44 or
+ * dslite. If the db entry is for nat44 then we can simply move on to the
+ * next one.
+ */
+ db = cnat_main_db + index;
+ userdb = cnat_user_db + db->user_index;
+ if (PREDICT_TRUE(db->in2out_key.key64 == ki->dk.ipv4_key.key64)
+ && userdb->ipv6[0] == ki->dk.ipv6[0]
+ && userdb->ipv6[1] == ki->dk.ipv6[1]
+ && userdb->ipv6[2] == ki->dk.ipv6[2]
+ && userdb->ipv6[3] == ki->dk.ipv6[3]) {
+ DSLITE_PRINTF(1,"MDBLU success..%u\n", index);
+ return db;
+ }
+ index = db->in2out_hash.next;
+ } while (index != EMPTY);
+
+ DSLITE_PRINTF(1,"MDBLU Entry does not exist..\n");
+ return (NULL);
+}
+
+cnat_user_db_entry_t*
+dslite_user_db_lookup_entry(dslite_db_key_bucket_t *uki)
+{
+ u64 a, b, c;
+ u32 index;
+ cnat_user_db_entry_t *udb=NULL;
+
+ DSLITE_V6_GET_HASH((&(uki->dk)),
+ uki->bucket,
+ CNAT_USER_HASH_MASK)
+
+ DSLITE_PRINTF(1,"UDBLU hash..%u\n", uki->bucket);
+
+ /* now: index in user vector */
+ index = cnat_user_hash[uki->bucket].next;
+ if (PREDICT_TRUE(index != EMPTY)) {
+ DSLITE_PRINTF(1,"UDBLU hash table entry not MT..\n");
+ do {
+ udb = cnat_user_db + index;
+ if (PREDICT_FALSE(udb->key.key64 == uki->dk.ipv4_key.key64)
+ && udb->ipv6[0] == uki->dk.ipv6[0]
+ && udb->ipv6[1] == uki->dk.ipv6[1]
+ && udb->ipv6[2] == uki->dk.ipv6[2]
+ && udb->ipv6[3] == uki->dk.ipv6[3]) {
+ DSLITE_PRINTF(1,"UDBLU success..%u\n", index);
+ return udb;
+ }
+ index = udb->user_hash.next;
+ } while (index != EMPTY);
+ }
+ DSLITE_PRINTF(1,"UDBLU Entry doesn't exist..\n");
+ return (NULL);
+}
+
+cnat_user_db_entry_t*
+dslite_user_db_create_entry(dslite_db_key_bucket_t *uki,
+ u32 portmap_index)
+{
+ cnat_user_db_entry_t *udb = NULL;
+
+ /* UNUSED. Therefore not ported to be multi-thread friendly */
+ ASSERT(0);
+
+ pool_get(cnat_user_db, udb);
+ memset(udb, 0, sizeof(*udb));
+
+ udb->ntranslations = 1;
+ udb->portmap_index = portmap_index;
+// udb->key.key64 = uki->k.key64;
+
+ udb->key.key64 = uki->dk.ipv4_key.key64;
+ udb->ipv6[0] = uki->dk.ipv6[0];
+ udb->ipv6[1] = uki->dk.ipv6[1];
+ udb->ipv6[2] = uki->dk.ipv6[2];
+ udb->ipv6[3] = uki->dk.ipv6[3];
+
+ udb->flags |= CNAT_USER_DB_DSLITE_FLAG;
+ /* Add this user to the head of the bucket chain */
+ udb->user_hash.next =
+ cnat_user_hash[uki->bucket].next;
+ cnat_user_hash[uki->bucket].next = udb - cnat_user_db;
+
+#ifndef NO_BULK_LOGGING
+ INIT_BULK_CACHE(udb)
+#endif /* NO_BULK_LOGGING */
+
+ return udb;
+}
+
+#ifndef TOBE_PORTED
+cnat_main_db_entry_t*
+dslite_create_main_db_entry_and_hash(dslite_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_user_db_entry_t *udb)
+{
+ return 0;
+}
+#else
+cnat_main_db_entry_t*
+dslite_create_main_db_entry_and_hash(dslite_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_user_db_entry_t *udb)
+{
+ u64 a, b, c;
+ u32 db_index;
+ cnat_main_db_entry_t *db = NULL;
+
+ /* UNUSED. Therefore not ported to be multi-thread friendly */
+ ASSERT(0);
+
+ pool_get(cnat_main_db, db);
+ memset(db, 0, sizeof(*db));
+
+ db_index = db - cnat_main_db;
+ db->in2out_key.k.ipv4 = ki->dk.ipv4_key.k.ipv4;
+ db->in2out_key.k.port = ki->dk.ipv4_key.k.port;
+ db->in2out_key.k.vrf = ki->dk.ipv4_key.k.vrf;
+ db->out2in_key.k.ipv4 = ko->k.k.ipv4;
+ db->out2in_key.k.port = ko->k.k.port;
+ db->out2in_key.k.vrf = ko->k.k.vrf;
+
+ db->user_ports.next = db_index;
+ db->user_ports.prev = db_index;
+ db->user_index = udb - cnat_user_db;
+ //db->portmap_index = udb->portmap_index;
+ db->flags |= CNAT_DB_DSLITE_FLAG;
+
+ if (PREDICT_FALSE(udb->ntranslations == 1)) {
+ /*
+ * first port for this src vrf/src ip addr
+ */
+ udb->translation_list_head_index = db_index;
+ DSLITE_PRINTF(1,"First translation of this user..\n");
+ } else {
+ index_dlist_addtail(udb->translation_list_head_index,
+ (u8 *)cnat_main_db, sizeof(cnat_main_db[0]),
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports),
+ db_index);
+ }
+
+ /*
+ * setup o2i hash key
+ */
+ CNAT_V4_GET_HASH(ko->k.key64,
+ ko->bucket,
+ CNAT_MAIN_HASH_MASK)
+ db->out2in_hash.next = cnat_out2in_hash[ko->bucket].next;
+ cnat_out2in_hash[ko->bucket].next = db_index;
+ /*
+ * setup i2o hash key, bucket is already calculated
+ */
+ db->in2out_hash.next = cnat_in2out_hash[ki->bucket].next;
+ cnat_in2out_hash[ki->bucket].next = db_index;
+
+ DSLITE_PRINTF(1,"Create main db and hash..%u %u %u %u %x\n",
+ ki->bucket, ko->bucket,
+ db_index, db->user_index, ko->k.key64);
+
+#if DEBUG > 1
+ printf("\nMy_Instance_Number %d: Bucket %d, Db_Index %d",
+ my_instance_number, ki->bucket, db_index);
+ printf("\nInside (VRF 0x%x, IP 0x%x, PORT 0x%x)",
+ db->in2out_key.k.vrf, db->in2out_key.k.ipv4, db->in2out_key.k.port);
+ printf("\nOutside (VRF 0x%x, IP 0x%x, PORT 0x%x)",
+ db->out2in_key.k.vrf, db->out2in_key.k.ipv4, db->out2in_key.k.port);
+ printf("\nUser Index %d, IP 0x%x",
+ db->user_index, udb->key.k.ipv4);
+#endif
+
+ //nat44_dslite_common_stats[DSLITE_COMMON_STATS].active_translations++;
+
+ return db;
+}
+
+static inline void handle_dslite_port_exceeded_logging(
+ cnat_user_db_entry_t *udb,
+ dslite_key_t * key,
+ dslite_table_entry_t *dslite_entry_ptr)
+{
+
+ if(PREDICT_TRUE(udb->flags & CNAT_USER_DB_PORT_LIMIT_EXCEEDED)) {
+ /* Already logged ..*/
+ return;
+ }
+
+ /* else, set the flag and call the log API */
+ udb->flags = udb->flags | CNAT_USER_DB_PORT_LIMIT_EXCEEDED;
+ cnat_log_ds_lite_port_limit_exceeded(key, dslite_entry_ptr);
+ return;
+}
+#endif
+
+void handle_cnat_port_exceeded_logging(
+ cnat_user_db_entry_t *udb,
+ cnat_key_t * key,
+ cnat_vrfmap_t *vrfmap)
+{
+
+ if(PREDICT_TRUE(udb->flags & CNAT_USER_DB_PORT_LIMIT_EXCEEDED)) {
+ /* Already logged ..*/
+ return;
+ }
+
+ /* else, set the flag and call the log API */
+ udb->flags = udb->flags | CNAT_USER_DB_PORT_LIMIT_EXCEEDED;
+ cnat_log_nat44_port_limit_exceeded(key,vrfmap);
+ return;
+}
+
+#ifndef TOBE_PORTED
+cnat_main_db_entry_t*
+dslite_get_main_db_entry_v2(dslite_db_key_bucket_t *ki,
+ port_pair_t port_pair_type,
+ port_type_t port_type,
+ cnat_gen_icmp_info *info,
+ dslite_table_entry_t *dslite_entry_ptr,
+ cnat_key_t *dest_info)
+{
+ return 0;
+}
+#else
+/*
+ * this function is called by the exception node
+ * when the lookup has failed in the i2o node
+ *
+ * if the per-user port limit is reached,
+ * set the user_db_entry pointer, and error == CNAT_OUT_LIMIT
+ */
+cnat_main_db_entry_t*
+dslite_get_main_db_entry_v2(dslite_db_key_bucket_t *ki,
+ port_pair_t port_pair_type,
+ port_type_t port_type,
+ cnat_gen_icmp_info *info,
+ dslite_table_entry_t *dslite_entry_ptr,
+ cnat_key_t *dest_info)
+{
+ u16 protocol;
+ cnat_errno_t rv;
+ dslite_db_key_bucket_t u_ki;
+ cnat_db_key_bucket_t ko;
+ u32 my_index, free_main, free_user;
+ u32 current_timestamp;
+ cnat_vrfmap_t *my_vrfmap =0;
+ u16 my_vrfmap_index;
+ cnat_portmap_v2_t *pm =0;
+ cnat_user_db_entry_t *udb = 0;
+ cnat_main_db_entry_t *db = 0;
+ pool_header_t *h;
+ u16 dslite_id = dslite_entry_ptr->dslite_id;
+
+#ifndef NO_BULK_LOGGING
+ int nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+#endif
+
+ /* UNUSED. Therefore not ported to be multi-thread friendly */
+ ASSERT(0);
+
+ /*
+ * need to try the lookup again because, at high line rates,
+ * a second packet may reach this point before the entry
+ * created for the first packet is in place.
+ */
+ info->gen_icmp_msg = CNAT_NO_ICMP_MSG;
+ info->error = CNAT_SUCCESS;
+ db = dslite_main_db_lookup_entry(ki);
+ if (PREDICT_TRUE(db)) {
+ /* The source may now be talking to a new destination;
+ * handle that case by creating a session db entry
+ * and logging it.
+ */
+ if(PREDICT_FALSE((!dest_info->k.ipv4) && (!dest_info->k.port))) {
+ return db; /* if dest_info is null don't create session */
+ }
+
+ if(PREDICT_TRUE((db->dst_ipv4 == dest_info->k.ipv4) &&
+ (db->dst_port == dest_info->k.port))) {
+ return db;
+ }
+ dest_info->k.vrf = db->in2out_key.k.vrf;
+ /* Src is indeed talking to a different dest */
+ cnat_session_entry_t *session_db2 = NULL;
+ if(PREDICT_TRUE(db->nsessions == 1)) {
+ session_db2 = cnat_handle_1to2_session(db, dest_info);
+ if(PREDICT_TRUE(session_db2 != NULL)) {
+ CNAT_DB_TIMEOUT_RST(session_db2);
+ return db;
+ } else {
+ info->error = CNAT_ERR_NO_SESSION_DB;
+ return NULL;
+ }
+ } else if(PREDICT_FALSE(db->nsessions == 0)) {
+ /* Should be static entry.. should never happen
+ */
+ if(PREDICT_TRUE(dest_info->k.ipv4 != 0)) {
+ cnat_add_dest_n_log(db, dest_info);
+ }
+ return db;
+ } else {
+ /* The src has already created multiple sessions.. very rare
+ */
+ session_db2 = cnat_create_session_db_entry(dest_info,
+ db, TRUE);
+ if(PREDICT_TRUE(session_db2 != NULL)) {
+ CNAT_DB_TIMEOUT_RST(session_db2);
+ return db;
+ } else {
+ info->error = CNAT_ERR_NO_SESSION_DB;
+ return NULL;
+ }
+ }
+
+ }
+
+ /*
+ * step 1. check whether the outside vrf is configured
+ * and find the set of portmaps for the outside vrf.
+ * The inside vrf is mapped one-to-one to the outside vrf;
+ * the user key is vrf and ip only.
+ * ki.k.k.vrf carries protocol bits, so mask them out.
+ */
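+ /*
+ * Example: ki->dk.ipv4_key.k.vrf packs the protocol (inserted via
+ * CNAT_PRO_SHIFT when the key was built) together with the VRF id;
+ * CNAT_PRO_MASK extracts the protocol bits and CNAT_VRF_MASK the
+ * VRF id, so the user key computed below is protocol independent.
+ */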
+ protocol = ki->dk.ipv4_key.k.vrf & CNAT_PRO_MASK;
+ u_ki.dk.ipv4_key.k.vrf = ki->dk.ipv4_key.k.vrf & CNAT_VRF_MASK;
+#ifdef DSLITE_USER_IPV4
+ u_ki.dk.ipv4_key.k.ipv4 = ki->dk.ipv4_key.k.ipv4;
+#else
+ /*
+ * The inside IPv4 address is masked out when the port limit
+ * is to be enforced at the B4 element level.
+ */
+ u_ki.dk.ipv4_key.k.ipv4 = 0;
+#endif
+ u_ki.dk.ipv4_key.k.port = 0;
+
+ u_ki.dk.ipv6[0] = ki->dk.ipv6[0];
+ u_ki.dk.ipv6[1] = ki->dk.ipv6[1];
+ u_ki.dk.ipv6[2] = ki->dk.ipv6[2];
+ u_ki.dk.ipv6[3] = ki->dk.ipv6[3];
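+ /*
+ * With DSLITE_USER_IPV4 undefined the user key is effectively just
+ * {vrf, B4 IPv6 address}: all flows from one B4 element share a
+ * single user db entry, so the port limit is enforced per B4
+ * element rather than per inside IPv4 host.
+ */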
+
+ my_vrfmap_index = vrf_map_array[u_ki.dk.ipv4_key.k.vrf];
+ my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;
+/* Checking if the inst entry is active or not is done much earlier
+ */
+#if 0
+ my_vrfmap_index = vrf_map_array[u_ki.k.k.vrf];
+ my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;
+ my_vrfmap_entry_found = ((my_vrfmap_index != VRF_MAP_ENTRY_EMPTY) &&
+ (my_vrfmap->status == S_RUN) &&
+ (my_vrfmap->i_vrf == u_ki.k.k.vrf));
+
+ if (PREDICT_FALSE(!my_vrfmap_entry_found)) {
+ u32 arr[] = {ki->k.k.vrf, ki->k.k.ipv4, ki->k.k.port};
+ if ((my_vrfmap_index == VRF_MAP_ENTRY_EMPTY) ||
+ (my_vrfmap->i_vrf == u_ki.k.k.vrf)) {
+ info->error = CNAT_NO_CONFIG;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_NO_CONFIG)
+ spp_printf(CNAT_NO_CONFIG_ERROR, 3, arr);
+ } else {
+ info->error = CNAT_NO_VRF_RUN;
+ CNAT_DEBUG_INSIDE_ERR(CNAT_NO_VRF_RUN)
+ spp_printf(CNAT_NO_VRF_RUN_ERROR, 3, arr);
+ }
+
+ return (NULL);
+ }
+#endif
+/*
+ dslite_inst_ptr = dslite_nat44_config_table[dslite_inst_id];
+*/
+ pm = dslite_entry_ptr->portmap_list;
+ //pm = my_vrfmap->portmap_list;
+
+ /*
+ * set o2i key with protocol bits
+ */
+ ko.k.k.vrf = dslite_entry_ptr->o_vrf | protocol;
+ //ko.k.k.vrf = my_vrfmap->o_vrf | protocol;
+
+ /*
+ * step 2. check whether the src vrf / src ip addr is already
+ * in the user db:
+ * if yes, use PORT_ALLOC_DIRECTED
+ * if no, use PORT_ALLOC_ANY since it is the first time
+ */
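+ /*
+ * PORT_ALLOC_DIRECTED steers the allocation to the portmap entry
+ * already bound to this user (udb->portmap_index), keeping the
+ * subscriber's translations on the outside address already in use;
+ * PORT_ALLOC_ANY lets the allocator pick a portmap entry for a
+ * brand new user.
+ */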
+ udb = dslite_user_db_lookup_entry(&u_ki);
+ if (PREDICT_TRUE(udb)) {
+ /*
+ * not the first port allocated for this user;
+ * check the limit
+ */
+ if (PREDICT_FALSE(udb->ntranslations >=
+ dslite_entry_ptr->cnat_main_db_max_ports_per_user)) {
+ //cnat_main_db_max_ports_per_user))
+
+ /* Check for the port type here. If we are getting
+ * a STATIC PORT, allow the config.
+ */
+ if (PREDICT_TRUE(port_type != PORT_TYPE_STATIC)) {
+ info->error = CNAT_OUT_LIMIT;
+ DSLITE_DEBUG_INSIDE_ERR(CNAT_OUT_LIMIT)
+ port_exceeded_msg_log(u_ki.dk.ipv4_key.k.ipv4, u_ki.dk.ipv4_key.k.vrf);
+ nat44_dslite_common_stats[dslite_id].in2out_drops_port_limit_exceeded ++;
+ u_ki.dk.ipv4_key.k.vrf = ki->dk.ipv4_key.k.vrf;
+ u_ki.dk.ipv4_key.k.port = ki->dk.ipv4_key.k.port;
+ handle_dslite_port_exceeded_logging(udb, &u_ki.dk, dslite_entry_ptr);
+ return (NULL);
+ }
+ }
+
+ CHECK_CLEAR_PORT_LIMIT_EXCEED_FLAG(udb,
+ dslite_entry_ptr->cnat_main_db_max_ports_per_user)
+
+ /*
+ * check if the main db has space to accommodate a new entry
+ */
+ h = pool_header(cnat_main_db);
+
+ free_main = vec_len(h->free_indices) - 1;
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_system_limit_reached ++;
+ DSLITE_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+
+ current_timestamp = spp_trace_log_get_unix_time_in_seconds();
+ if (PREDICT_FALSE((current_timestamp - last_log_timestamp) >
+ 1800)) {
+ spp_printf(CNAT_SESSION_THRESH_EXCEEDED, 0, NULL);
+ last_log_timestamp = current_timestamp;
+ }
+
+#ifdef UT_TEST_CODE
+ printf("Limit reached : OLD USER");
+#endif
+ return NULL;
+ }
+
+ /*
+ * allocate a port from the existing mapping
+ */
+ my_index = udb->portmap_index;
+
+ if (PREDICT_FALSE(port_type == PORT_TYPE_STATIC)) {
+ rv = cnat_static_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_DIRECTED,
+ port_pair_type,
+ ki->dk.ipv4_key.k.ipv4,
+ ki->dk.ipv4_key.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ STAT_PORT_RANGE_FROM_INST_PTR(dslite_entry_ptr)
+#ifndef NO_BULK_LOGGING
+ , udb,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr),
+ &nfv9_log_req
+#endif
+ , my_vrfmap->ip_n_to_1
+ );
+ } else if (PREDICT_TRUE(port_type != PORT_TYPE_RTSP) ) {
+
+ rv = cnat_dynamic_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_DIRECTED,
+ port_pair_type,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ STAT_PORT_RANGE_FROM_INST_PTR(dslite_entry_ptr)
+#ifndef NO_BULK_LOGGING
+ , udb,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr),
+ &nfv9_log_req
+#endif
+ , 0,
+ &(dslite_entry_ptr->rseed_ip)
+ );
+ DSLITE_PRINTF(1,"D_PORT_ALLOC %x %u\n", ko.k.k.ipv4, ko.k.k.port);
+ } else {
+ /*
+ * For RTSP, two translation entries are created,
+ * check if the main db has space to accommodate two new entries
+ */
+ free_main = free_main - 1;
+
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_system_limit_reached ++;
+ DSLITE_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+
+ return NULL;
+ } else {
+
+ rv = cnat_dynamic_port_alloc_rtsp_bulk(pm,
+ PORT_ALLOC_DIRECTED,
+ port_pair_type,
+ ki->dk.ipv4_key.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ STAT_PORT_RANGE_FROM_INST_PTR(dslite_entry_ptr)
+#ifndef NO_BULK_LOGGING
+ , udb,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr),
+ &nfv9_log_req
+#endif
+ , &(dslite_entry_ptr->rseed_ip)
+ );
+ }
+ }
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ DSLITE_PRINTF(1,"D_PORT_ALLOC port alloc error\n");
+ info->error = rv;
+ DSLITE_DEBUG_INSIDE_ERR(rv)
+ nat44_dslite_common_stats[dslite_id].in2out_drops_resource_depletion ++;
+ log_port_alloc_error(rv, &(ki->dk.ipv4_key));
+ return (NULL);
+ }
+ /*
+ * increment port in use for this user
+ */
+ udb->ntranslations += 1;
+ } else {
+ /*
+ * first-time port allocation for this user
+ */
+
+ /*
+ * Do not create entry if port limit is invalid
+ */
+ if (PREDICT_FALSE(!(dslite_entry_ptr->cnat_main_db_max_ports_per_user))) {
+ if (PREDICT_TRUE(port_type != PORT_TYPE_STATIC)) {
+ info->error = CNAT_OUT_LIMIT;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_port_limit_exceeded ++;
+ port_exceeded_msg_log(u_ki.dk.ipv4_key.k.ipv4, u_ki.dk.ipv4_key.k.vrf);
+ DSLITE_DEBUG_INSIDE_ERR(CNAT_OUT_LIMIT)
+ return (NULL);
+ }
+ }
+
+ /*
+ * Check whether the main db has space for a new entry.
+ * Allowing a user db entry to be created while the main db is full
+ * would allocate a port to that user only to waste it,
+ * hence the check is done here.
+ */
+ h = pool_header(cnat_main_db);
+ free_main = vec_len(h->free_indices) - 1;
+
+ h = pool_header(cnat_user_db);
+ free_user = vec_len(h->free_indices) - 1;
+
+ /*
+ * If either main_db or user_db does not have entries
+ * bail out, with appropriate error
+ */
+ if (PREDICT_FALSE(!(free_main && free_user))) {
+ u32 log_error;
+ if(free_main) {
+ info->error = CNAT_USER_DB_LIMIT;
+ log_error = CNAT_USER_DB_LIMIT_ERROR;
+ } else {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ log_error = CNAT_MAIN_DB_LIMIT_ERROR;
+ }
+ nat44_dslite_common_stats[dslite_id].in2out_drops_system_limit_reached ++;
+ DSLITE_DEBUG_INSIDE_ERR(info->error)
+ spp_printf(log_error, 0, 0);
+ return NULL;
+ }
+
+ if (PREDICT_FALSE(port_type == PORT_TYPE_STATIC)) {
+ rv = cnat_static_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_ANY,
+ port_pair_type,
+ ki->dk.ipv4_key.k.ipv4,
+ ki->dk.ipv4_key.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ STAT_PORT_RANGE_FROM_INST_PTR(dslite_entry_ptr)
+#ifndef NO_BULK_LOGGING
+ , NULL,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr),
+ &nfv9_log_req
+#endif
+ , my_vrfmap->ip_n_to_1
+
+ );
+ } else if (PREDICT_TRUE(port_type != PORT_TYPE_RTSP)) {
+ rv = cnat_dynamic_port_alloc_v2_bulk(pm,
+ PORT_ALLOC_ANY,
+ port_pair_type,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ STAT_PORT_RANGE_FROM_INST_PTR(dslite_entry_ptr)
+#ifndef NO_BULK_LOGGING
+ , NULL,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr),
+ &nfv9_log_req
+#endif
+ , 0,
+ &(dslite_entry_ptr->rseed_ip)
+ );
+ DSLITE_PRINTF(1,"NU:D PORT ALLOC..%x %u\n", ko.k.k.ipv4,
+ ko.k.k.port);
+
+ } else {
+ /*
+ * For RTSP, two translation entries are created,
+ * check if the main db has space to accommodate two new entries
+ */
+ free_main = free_main - 1;
+
+ if (PREDICT_FALSE(!free_main)) {
+ info->error = CNAT_MAIN_DB_LIMIT;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_system_limit_reached ++;
+ DSLITE_DEBUG_INSIDE_ERR(CNAT_MAIN_DB_LIMIT)
+
+ return NULL;
+ } else {
+
+ rv = cnat_dynamic_port_alloc_rtsp_bulk(pm,
+ PORT_ALLOC_DIRECTED,
+ port_pair_type,
+ ki->dk.ipv4_key.k.port,
+ &my_index,
+ &(ko.k.k.ipv4),
+ &(ko.k.k.port),
+ STAT_PORT_RANGE_FROM_INST_PTR(dslite_entry_ptr)
+#ifndef NO_BULK_LOGGING
+ , NULL,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr),
+ &nfv9_log_req
+#endif
+ , &(dslite_entry_ptr->rseed_ip)
+ );
+ /* TODO: Add the port pair flag here */
+ }
+ }
+
+
+
+ if (PREDICT_FALSE(rv != CNAT_SUCCESS)) {
+ DSLITE_PRINTF(1,"NU:D_PORT_ALLOC port alloc error\n");
+ info->error = rv;
+ nat44_dslite_common_stats[dslite_id].in2out_drops_resource_depletion ++;
+ DSLITE_DEBUG_INSIDE_ERR(rv)
+ log_port_alloc_error(rv, &(ki->dk.ipv4_key));
+ return (NULL);
+ }
+ /*
+ * create entry in user db
+ */
+ udb = dslite_user_db_create_entry(&u_ki, my_index);
+ nat44_dslite_common_stats[dslite_id].num_subscribers++;
+ DSLITE_PRINTF(1,"UDB create entry done..\n");
+#ifndef NO_BULK_LOGGING
+ if(PREDICT_TRUE(udb && (BULK_ALLOC_NOT_ATTEMPTED != nfv9_log_req))) {
+ cnat_update_bulk_range_cache(udb, ko.k.k.port,
+ BULKSIZE_FROM_VRFMAP(dslite_entry_ptr));
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+ }
+
+ /*
+ * step 3:
+ * outside port is allocated for this src vrf/src ip addr
+ * 1) create a new entry in the main db
+ * 2) set up the cnat_out2in_hash key
+ * 3) set up the cnat_in2out_hash key
+ */
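+ /*
+ * dslite_create_main_db_entry_and_hash() (defined earlier in this
+ * file) links the new entry onto the user's translation list and
+ * pushes its index onto the in2out and out2in hash chains using the
+ * buckets carried in ki and ko.
+ */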
+ db = dslite_create_main_db_entry_and_hash(ki, &ko, udb);
+ DSLITE_PRINTF(1,"dslite_create_main_db_entry_and_hash done..\n");
+ //db->vrfmap_index = my_vrfmap - cnat_map_by_vrf;
+ db->dslite_nat44_inst_id = dslite_id;
+ nat44_dslite_common_stats[dslite_id].active_translations++;
+ if (PREDICT_FALSE(port_type == PORT_TYPE_STATIC)) {
+ nat44_dslite_common_stats[dslite_id].num_static_translations++;
+ } else {
+ nat44_dslite_common_stats[dslite_id].num_dynamic_translations++;
+ }
+
+ dslite_translation_create_count++;
+
+ db->dst_ipv4 = dest_info->k.ipv4;
+ db->dst_port = dest_info->k.port;
+ if(PREDICT_TRUE(db->dst_ipv4 || db->dst_port)) {
+ /* for static fwding, let the nsessions remain zero */
+ db->nsessions++;
+ }
+
+ /*
+ * Logging: the logging API is called unconditionally;
+ * the logging configuration check is done inside the
+ * inline function.
+ */
+ if(PREDICT_FALSE(nfv9_log_req != CACHE_ALLOC_NO_LOG_REQUIRED)) {
+ if(PREDICT_FALSE( dslite_entry_ptr->nf_logging_policy ==
+ SESSION_LOG_ENABLE)) {
+ if(PREDICT_TRUE(db->dst_ipv4 || db->dst_port)) {
+ cnat_nfv9_ds_lite_log_session_create(db,
+ dslite_entry_ptr,NULL);
+ }
+ } else {
+ cnat_nfv9_ds_lite_mapping_create(db,dslite_entry_ptr
+#ifndef NO_BULK_LOGGING
+ ,nfv9_log_req
+#endif
+ );
+ }
+ if(PREDICT_TRUE((dslite_entry_ptr->syslog_logging_policy != SESSION_LOG_ENABLE) ||
+ (db->dst_ipv4 || db->dst_port))) {
+ cnat_syslog_ds_lite_mapping_create(db,dslite_entry_ptr,NULL
+#ifndef NO_BULK_LOGGING
+ ,nfv9_log_req
+#endif
+ );
+ }
+ }
+
+#if 0
+ if (PREDICT_FALSE(port_pair_type == PORT_PAIR)) {
+ cnat_main_db_entry_t *db2 = 0;
+ dslite_db_key_bucket_t new_ki = *ki;
+ u64 a, b, c;
+
+ new_ki.k.k.port += 1;
+ ko.k.k.port += 1;
+
+ CNAT_V4_GET_HASH(new_ki.k.key64, new_ki.bucket,
+ CNAT_MAIN_HASH_MASK);
+
+ db2 = cnat_create_main_db_entry_and_hash(&new_ki, &ko, udb);
+
+ translation_create_count ++;
+ db2->dslite_nat44_inst_id = dslite_id;
+ db2->entry_expires = cnat_current_time;
+ db2->flags |= CNAT_DB_FLAG_ALG_ENTRY;
+ udb->ntranslations += 1;
+#ifndef NO_BULK_LOGGING
+ if(PREDICT_FALSE(nfv9_log_req == BULK_ALLOC_NOT_ATTEMPTED))
+ cnat_nfv9_log_mapping_create(db2, my_vrfmap, nfv9_log_req);
+#else
+ cnat_nfv9_log_mapping_create(db2, my_vrfmap);
+#endif
+ }
+#endif
+ return db;
+}
+#endif /* TOBE_PORTED */
+
+#if 0
+/* TOBE_PORTED */
+uword
+cnat_db_v2_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return 0;
+}
+VLIB_REGISTER_NODE (cnat_db_v2_node) = {
+ .function = cnat_db_v2_node_fn,
+ .name = "vcgn-db-v2",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_db_v2_error_strings),
+ .error_strings = cnat_db_v2_error_strings,
+
+ .n_next_nodes = CNAT_DB_V2_DROP,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_DB_V2_DROP] = "error-drop",
+ },
+};
+#endif
+void cnat_db_v2_init (void)
+{
+
+ u32 i, n;
+ cnat_timeout_db_entry_t * tdb __attribute__((unused));
+
+ cgse_nat_db_entry_t *comb_db __attribute__((unused));
+ cgse_nat_user_db_entry_t *comb_user __attribute__((unused));
+ cgse_nat_session_db_entry_t *comb_session __attribute__((unused));
+
+ n = CNAT_DB_SIZE*1.15; /* add 15% LB margin */
+
+ /*
+ * We also make it a multiple of NUM_BITS_IN_UWORD for a more
+ * efficient DB scanning algorithm
+ */
+ if (n % NUM_BITS_IN_UWORD)
+ n += (NUM_BITS_IN_UWORD - (n % NUM_BITS_IN_UWORD));
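+ /*
+ * Worked example (values are illustrative only): with a db size of
+ * 20000, n = 20000 * 1.15 = 23000; if NUM_BITS_IN_UWORD is 64,
+ * 23000 % 64 = 24, so n is bumped by 40 to 23040 = 360 * 64.
+ */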
+
+ pool_alloc(cgse_nat_db,n);
+ for(i=0; i< n; i++) {
+ pool_get(cgse_nat_db, comb_db);
+ }
+
+ for(i=0; i< n; i++) {
+ pool_put(cgse_nat_db, cgse_nat_db + i);
+ }
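+ /*
+ * The pool_get()/pool_put() cycle above touches every element once
+ * and leaves all n indices on the pool free list, so the pool is
+ * fully grown here and later pool_get() calls can be served from
+ * the free list without growing the underlying vector.
+ */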
+
+ cnat_main_db = &cgse_nat_db->nat44_main_db;
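+ /*
+ * From here on, the NAT44 code indexes the combined cgse pool
+ * through the cnat_main_db alias set up above.
+ */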
+
+ /* For Sessions */
+ if(PLATFORM_DBL_SUPPORT) {
+ /* create session table for NAT44 and NAT64 itself */
+ printf("DBL Support exists %d\n", PLATFORM_DBL_SUPPORT);
+ n = CNAT_SESSION_DB_SIZE * 1.15; /* add 15% LB margin */
+ } else {
+ /* Create session table for NAT64 only */
+ printf("DBL Support does not exist\n");
+ n = NAT64_MAIN_DB_SIZE * 1.15; /* add 15% LB margin */
+ }
+
+ /*
+ * We also make it a multiple of NUM_BITS_IN_UWORD for a more
+ * efficient DB scanning algorithm
+ */
+ if (n % NUM_BITS_IN_UWORD)
+ n += (NUM_BITS_IN_UWORD - (n % NUM_BITS_IN_UWORD));
+
+ pool_alloc(cgse_session_db,n);
+ for(i=0; i< n; i++) {
+ pool_get(cgse_session_db, comb_session);
+ }
+
+ for(i=0; i< n; i++) {
+ pool_put(cgse_session_db, cgse_session_db + i);
+ }
+
+ cnat_session_db = &cgse_session_db->nat44_session_db;
+
+ vec_validate(cnat_out2in_hash, CNAT_MAIN_HASH_MASK);
+ memset(cnat_out2in_hash, 0xff, CNAT_MAIN_HASH_SIZE*sizeof(index_slist_t));
+
+ vec_validate(cnat_in2out_hash, CNAT_MAIN_HASH_MASK);
+ memset(cnat_in2out_hash, 0xff, CNAT_MAIN_HASH_SIZE*sizeof(index_slist_t));
+
+ vec_validate(cnat_session_hash, CNAT_SESSION_HASH_MASK);
+ memset(cnat_session_hash, 0xff, CNAT_SESSION_HASH_SIZE*sizeof(index_slist_t));
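+ /*
+ * Filling the bucket heads with 0xff bytes sets every index to the
+ * EMPTY value used by the hash-walk code, i.e. all chains start
+ * out empty.
+ */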
+
+ n = CNAT_USER_DB_SIZE * 1.15; /* add 15% LB margin */
+ if (n % NUM_BITS_IN_UWORD)
+ n += (NUM_BITS_IN_UWORD - (n % NUM_BITS_IN_UWORD));
+
+ pool_alloc(cgse_user_db,n);
+ for(i=0; i< n; i++) {
+ pool_get(cgse_user_db, comb_user);
+ }
+
+ for(i=0; i< n; i++) {
+ pool_put(cgse_user_db, cgse_user_db + i);
+ }
+
+ cnat_user_db = &cgse_user_db->nat44_user_db;
+
+ vec_validate(cnat_user_hash, CNAT_USER_HASH_MASK);
+ memset(cnat_user_hash, 0xff, CNAT_USER_HASH_SIZE*sizeof(index_slist_t));
+
+ n = CNAT_TIMEOUT_HASH_SIZE; /* use hash size as db size for LB margin */
+ for(i=0; i< n; i++) {
+ pool_get(cnat_timeout_db, tdb);
+ }
+
+ for(i=0; i< n; i++) {
+ pool_put(cnat_timeout_db, cnat_timeout_db + i);
+ }
+
+ vec_validate(cnat_timeout_hash, CNAT_TIMEOUT_HASH_MASK);
+ memset(cnat_timeout_hash, 0xff, CNAT_TIMEOUT_HASH_SIZE*sizeof(index_slist_t));
+
+#ifdef TOBE_PORTED
+ for (i=0;i<CNAT_MAX_VRFMAP_ENTRIES; i++) {
+ svi_params_array[i].svi_type = CGSE_SVI_TYPE_INFRA;
+ }
+#endif
+
+ cnat_db_v2_main.main_db_lockp =
+ clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+
+ cnat_db_v2_main.user_db_lockp =
+ clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+
+ cnat_db_v2_main.session_db_lockp =
+ clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+
+ ASSERT (pthread_spin_init(cnat_db_v2_main.main_db_lockp,
+ PTHREAD_PROCESS_PRIVATE) == 0);
+ ASSERT (pthread_spin_init(cnat_db_v2_main.user_db_lockp,
+ PTHREAD_PROCESS_PRIVATE) == 0);
+ ASSERT (pthread_spin_init(cnat_db_v2_main.session_db_lockp,
+ PTHREAD_PROCESS_PRIVATE) == 0);
+
+ cnat_db_init_done = 1;
+ printf("CNAT DB init is successful\n");
+ return;
+ //return 0;
+}
diff --git a/plugins/vcgn-plugin/vcgn/cnat_debug_msg_handler.c b/plugins/vcgn-plugin/vcgn/cnat_debug_msg_handler.c
new file mode 100644
index 00000000000..519f4b64939
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_debug_msg_handler.c
@@ -0,0 +1,1780 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_debug_msg_handler.c - debug command
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "cnat_cli.h"
+
+u32 global_debug_flag = CNAT_DEBUG_NONE;
+u16 debug_i_vrf = CNAT_DEBUG_NONE;
+u32 debug_i_flag = CNAT_DEBUG_NONE;
+u32 debug_i_addr_start = CNAT_DEBUG_NONE;
+u32 debug_i_addr_end = CNAT_DEBUG_NONE;
+
+u16 debug_o_vrf = CNAT_DEBUG_NONE;
+u32 debug_o_flag = CNAT_DEBUG_NONE;
+u32 debug_o_addr_start = CNAT_DEBUG_NONE;
+u32 debug_o_addr_end = CNAT_DEBUG_NONE;
+
+u32 udp_inside_checksum_disable = 0;
+u32 udp_outside_checksum_disable = 0;
+u32 udp_inside_packet_dump_enable = 0;
+u32 udp_outside_packet_dump_enable = 0;
+
+u32 tcp_logging_enable_flag = 0;
+
+u32 icmp_debug_flag = 0;
+u32 frag_debug_flag = 0;
+
+u32 nfv9_logging_debug_flag = 0;
+u32 syslog_debug_flag = 0;
+
+u32 summary_stats_debug_flag = 0;
+
+/*
+ * By default, we set the config debug level to 1
+ */
+u32 config_debug_level = 1;
+
+#ifdef TOBE_PORTED
+extern void show_bulk_port_stats();
+extern void clear_bulk_port_stats();
+extern void show_bulk_port_allocation(u16 in_vrfid, u32 inside_ip);
+extern void set_bulk_size_to_all_vrfs(int bulk_size);
+
+u32 *cnat_debug_addr_list;
+
+extern int global_pd_dbg_lvl;
+extern int global_pi_dbg_lvl;
+extern int global_l2_dbg_lvl;
+extern u32 cnat_pptp_debug_flag;
+extern u32 cnat_pcp_debug_flag;
+
+void spp_api_cnat_get_cgn_db_summary
+(spp_api_cnat_generic_command_request_t *);
+
+void spp_api_cnat_v4_debug_dummy_t_handler
+(spp_api_cnat_v4_debug_dummy_t *mp)
+{
+ u32 arr[] = { DEBUG_DUMMY };
+ spp_printf(CNAT_DUMMY_HANDLER_HIT, 1, arr);
+ if(global_pd_dbg_lvl) {
+ PLATFORM_DEBUG_PRINT("\n invalid debug command received: message id is 0\n");
+ }
+ mp->rc = CNAT_ERR_INVALID_MSG_ID;
+
+}
+
+void spp_api_cnat_v4_debug_dummy_max_t_handler
+(spp_api_cnat_v4_debug_dummy_max_t *mp)
+{
+ u32 arr[] = { DEBUG_DUMMY_MAX };
+ spp_printf(CNAT_DUMMY_HANDLER_HIT, 1, arr);
+ if(global_pd_dbg_lvl) {
+ PLATFORM_DEBUG_PRINT("\n invalid debug command received: message id is out of range\n");
+ }
+ mp->rc = CNAT_ERR_INVALID_MSG_ID;
+
+}
+
+
+void spp_api_cnat_v4_debug_global_t_handler
+(spp_api_cnat_v4_debug_global_t *mp)
+{
+ if ((mp->debug_flag == CNAT_DEBUG_GLOBAL_ERR) ||
+ (mp->debug_flag == CNAT_DEBUG_GLOBAL_ALL) ||
+ (mp->debug_flag == CNAT_DEBUG_NONE)) {
+ mp->rc = CNAT_SUCCESS;
+ global_debug_flag = mp->debug_flag;
+ return;
+ }
+
+ mp->rc = CNAT_ERR_PARSER;
+ if(global_pd_dbg_lvl) {
+ PLATFORM_DEBUG_PRINT("invalid global debug flag %x\n",
+ mp->debug_flag);
+ }
+ return;
+}
+
+void spp_node_print_cnat_counters()
+{
+ if (cnat_global_counters.nfv9_downstream_constipation_count) {
+ PLATFORM_DEBUG_PRINT("\nNF downstream constipation count: %llu\n",
+ cnat_global_counters.nfv9_downstream_constipation_count);
+ }
+
+ if (xlat_global_counters.v4_to_v6_frag_invalid_uidb_drop_count ||
+ xlat_global_counters.v6_to_v4_frag_invalid_uidb_drop_count ||
+ xlat_global_counters.v4_to_v6_icmp_invalid_uidb_drop_count ||
+ xlat_global_counters.v6_to_v4_icmp_invalid_uidb_drop_count ||
+ xlat_global_counters.v4_to_v6_tcp_invalid_uidb_drop_count ||
+ xlat_global_counters.v6_to_v4_tcp_invalid_uidb_drop_count ||
+ xlat_global_counters.v4_to_v6_udp_invalid_uidb_drop_count ||
+ xlat_global_counters.v6_to_v4_udp_invalid_uidb_drop_count ||
+ xlat_global_counters.v4_to_v6_udp_crc_zero_invalid_uidb_drop_count) {
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v4_to_v6 frag invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v4_to_v6_frag_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v6_to_v4 frag invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v6_to_v4_frag_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v4_to_v6 icmp invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v4_to_v6_icmp_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v6_to_v4 icmp invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v6_to_v4_icmp_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v4_to_v6 tcp invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v4_to_v6_tcp_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v6_to_v4 tcp invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v6_to_v4_tcp_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v4_to_v6 udp invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v4_to_v6_udp_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v6_to_v4 udp invalid uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v6_to_v4_udp_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\nMy_instance %d: v4_to_v6 udp crc0 invld uidb drop count %lld",
+ my_instance_number,
+ xlat_global_counters.v4_to_v6_udp_crc_zero_invalid_uidb_drop_count);
+
+ PLATFORM_DEBUG_PRINT("\n");
+ }
+
+
+}
+
+void spp_log_p2mp_req(spp_api_cnat_p2mp_debug_request_t *mp)
+{
+ u8 i = 0;
+ u32 num_rec = spp_net_to_host_byte_order_32(&mp->param[i++]);
+ u32 err_c_num_args;
+
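+ /*
+ * Each record in param[] packs the error code in the upper 16 bits
+ * and the argument count in the lower 16 bits of one word, followed
+ * by that many argument words.
+ */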
+ while (num_rec--) {
+ u8 j = 0;
+ u16 err_c;
+ u16 num_args;
+ u16 total_args;
+ u32 argv[32];
+
+ err_c_num_args = spp_net_to_host_byte_order_32(&mp->param[i++]);
+ err_c = (err_c_num_args >> 16) & 0xFFFF;
+ total_args = err_c_num_args & 0xFFFF;
+
+ /* copy at most 32 args; skip over any extras so the next record parses correctly */
+ num_args = (total_args <= 32) ? total_args : 32;
+ while (j < num_args) {
+ argv[j++] = spp_net_to_host_byte_order_32(&mp->param[i++]);
+ }
+
+ i += (total_args > 32) ? (total_args - 32) : 0;
+ spp_printf(err_c, num_args, argv);
+ }
+}
+
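+/*
+ * Ad-hoc self test of the clib vector primitives (vec_add2 /
+ * vec_delete) used for the portmap vector: it adds, deletes and
+ * re-adds entries, dumping the vector contents after each step.
+ */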
+void nat64_debug_addr_pool_add_del()
+{
+ cnat_portmap_v2_t *my_pm = NULL;
+ cnat_portmap_v2_t *pm = NULL;
+ u32 len, i, pm_len;
+
+ PLATFORM_DEBUG_PRINT("\n sizeof port_map =%d\n", sizeof( cnat_portmap_v2_t));
+ len = 10;
+ PLATFORM_DEBUG_PRINT("\n adding 10 entries in vector 1-10\n ");
+ vec_add2(pm, my_pm, len);
+ pm = my_pm;
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ for(i=0;i<len;i++){
+ my_pm->ipv4_address = i+1;
+ my_pm++;
+ }
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+
+ pm_len = vec_len(pm);
+ PLATFORM_DEBUG_PRINT("\n printing vector contents : vec_len = %d \n", pm_len);
+ my_pm = pm;
+ for(i=0;i<pm_len ; i++)
+ {
+ PLATFORM_DEBUG_PRINT(" %d ,",my_pm->ipv4_address);
+ my_pm++;
+ }
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+
+ PLATFORM_DEBUG_PRINT("\n adding 5 entries in vector 11-15\n ");
+ len = 5;
+ vec_add2(pm, my_pm, len);
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ for(i=0;i<len;i++) {
+ my_pm->ipv4_address = 11+i;
+ my_pm++;
+ }
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ pm_len = vec_len(pm);
+ PLATFORM_DEBUG_PRINT("\n printing vector contents : vec_len = %d \n", pm_len);
+ my_pm = pm;
+ for(i=0;i<pm_len ; i++)
+ {
+ PLATFORM_DEBUG_PRINT(" %d ,",my_pm->ipv4_address);
+ my_pm++;
+ }
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+
+ PLATFORM_DEBUG_PRINT("\n adding 6 entries in vector 16-21\n ");
+ len = 6;
+ vec_add2(pm, my_pm, len);
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ for(i=0;i<len;i++) {
+ my_pm->ipv4_address = 16+i;
+ my_pm++;
+ }
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ pm_len = vec_len(pm);
+ PLATFORM_DEBUG_PRINT("\n printing vector contents : vec_len = %d \n", pm_len);
+ my_pm = pm;
+ for(i=0;i<pm_len ; i++)
+ {
+ PLATFORM_DEBUG_PRINT(" %d ,",my_pm->ipv4_address);
+ my_pm++;
+ }
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ PLATFORM_DEBUG_PRINT("\nDeleting 7 entries starting from entry value=8\n");
+ pm_len = vec_len(pm);
+ my_pm = pm;
+ PLATFORM_DEBUG_PRINT(" pm_len =%d\n", pm_len);
+ for(i=0;i<pm_len;i++)
+ {
+ if(my_pm->ipv4_address == 8){
+ PLATFORM_DEBUG_PRINT("\n match found, breaking..\n");
+ break;
+ }
+ my_pm++;
+ }
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p i= %d\n", pm, my_pm, i);
+// vec_delete(pm, 7, my_pm);
+ vec_delete(pm, 7, i);
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+
+ PLATFORM_DEBUG_PRINT(" printing entries after deletion from 8-14\n");
+ pm_len = vec_len(pm);
+ PLATFORM_DEBUG_PRINT("\n printing vector contents : vec_len = %d \n", pm_len);
+ my_pm = pm;
+ for(i=0;i<pm_len ; i++)
+ {
+ PLATFORM_DEBUG_PRINT(" %d ,",my_pm->ipv4_address);
+ my_pm++;
+ }
+
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+
+ PLATFORM_DEBUG_PRINT("\nadding deleted items again 8-14\n");
+ len =7;
+ vec_add2(pm, my_pm, len);
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ for(i=0;i<len;i++) {
+ my_pm->ipv4_address = 8+i;
+ my_pm++;
+ }
+
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ pm_len = vec_len(pm);
+ PLATFORM_DEBUG_PRINT("\n printing vector contents : vec_len = %d \n", pm_len);
+ my_pm = pm;
+ for(i=0;i<pm_len ; i++)
+ {
+ PLATFORM_DEBUG_PRINT(" %d ,",my_pm->ipv4_address);
+ my_pm++;
+ }
+ PLATFORM_DEBUG_PRINT(" pm =%p , my_pm = %p\n", pm, my_pm);
+ PLATFORM_DEBUG_PRINT("\n");
+}
+
+
+void uidb_mapping_dump_timeout() {
+
+ u32 i;
+
+ PLATFORM_DEBUG_PRINT("\nCGSE uidb mapping table \n");
+ for(i = 0;i < 30;i++) {
+ PLATFORM_DEBUG_PRINT("%d ",*(cgse_uidb_index_cgse_id_mapping_ptr + i));
+ }
+
+}
+
+void nat64_debug_dump_info(u32 debug_value)
+{
+
+ switch(debug_value) {
+
+ case 1 :
+ bib_add_v6_entry1();
+ break;
+
+ case 2 :
+ bib_add_v6_entry2();
+ break;
+
+ case 3 :
+ bib_add_v6_entry1_new();
+ break;
+
+ case 4 :
+ bib_add_v6_entry1_new_static();
+ break;
+
+ case 5 :
+ bib_add_v6_entry3();
+ break;
+
+ case 6 :
+ bib_add_v6_entry_new2();
+ break;
+
+ case 7 :
+ nat64_fill_table_entry();
+ break;
+
+ case 10 :
+ nat64_db_dump_main();
+ break;
+
+ case 11 :
+ nat64_db_dump_user();
+ break;
+
+ case 12 :
+ nat64_db_dump_session();
+ break;
+
+ case 13 :
+ nat64_dump_table();
+ break;
+
+ case 14 :
+ bib_del_v6_entry1_static();
+ break;
+
+ case 15 :
+ nat64_debug_addr_pool_add_del();
+ break;
+
+ case 16 :
+ nat64_db_dump_timeout(0);
+ break;
+
+ case 17 :
+ uidb_mapping_dump_timeout();
+ break;
+
+ default : break;
+ }
+}
+
+
+void cnat_debug_flags_set (spp_api_cnat_p2mp_debug_request_t *mp)
+{
+ u32 debug_variable = spp_net_to_host_byte_order_32(&mp->param[0]);
+ u32 debug_value = spp_net_to_host_byte_order_32(&mp->param[1]);
+
+ cnat_key_t t_key;
+
+ switch (debug_variable) {
+
+ case CNAT_DEBUG_FLAG_UDP_INSIDE_CHECKSUM_DISABLE:
+ udp_inside_checksum_disable = debug_value;
+ PLATFORM_DEBUG_PRINT("\nudp_inside_checksum_disable set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_UDP_OUTSIDE_CHECKSUM_DISABLE:
+ udp_outside_checksum_disable = debug_value;
+ PLATFORM_DEBUG_PRINT("\nudp_outside_checksum_disable set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_UDP_OUTSIDE_PKT_DUMP_ENABLE:
+ udp_outside_packet_dump_enable = debug_value;
+ PLATFORM_DEBUG_PRINT("\nudp_outside_packet_dump_enable set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_UDP_INSIDE_PKT_DUMP_ENABLE:
+ udp_inside_packet_dump_enable = debug_value;
+ PLATFORM_DEBUG_PRINT("\nudp_inside_packet_dump_enable set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_ICMP_PKT_DUMP_ENABLE:
+ icmp_debug_flag = debug_value;
+ PLATFORM_DEBUG_PRINT("\nicmp_debug_flag set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_FRAG_PKT_DUMP_ENABLE:
+ frag_debug_flag = debug_value;
+ PLATFORM_DEBUG_PRINT("\nfrag_debug_flag set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_XLAT_CONFIG_DEBUG_ENABLE:
+ xlat_config_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nxlat_config_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_NAT64_CONFIG_DEBUG_ENABLE:
+ nat64_config_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nnat64_config_debug_level set to %d\n", debug_value);
+ nat64_debug_dump_info(debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_NAT64_DATA_PATH_DEBUG_ENABLE:
+ nat64_data_path_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nnat64_data_path_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_DSLITE_CONFIG_DEBUG_ENABLE:
+ ds_lite_config_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nds_lite_config_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_XLAT_DATA_PATH_DEBUG_ENABLE:
+ xlat_data_path_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nxlat_data_path_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_CONFIG_DEBUG_ENABLE:
+ config_debug_level = debug_value;
+
+ PLATFORM_DEBUG_PRINT("\nconfig_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_CONFIG_PPTP_ENABLE:
+ cnat_pptp_debug_flag = debug_value;
+
+ if(debug_value == 0) {
+ pptp_dump_counters();
+ }
+
+ PLATFORM_DEBUG_PRINT("\ncnat_pptp_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_CONFIG_PCP_ENABLE:
+ cnat_pcp_debug_flag = debug_value;
+
+ if(debug_value == 0) {
+ pcp_dump_counters();
+ }
+ PLATFORM_DEBUG_PRINT("\ncnat_pcp_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_GLOBAL_DEBUG_ALL_ENABLE:
+ global_debug_flag = debug_value;
+ PLATFORM_DEBUG_PRINT("\nglobal_debug_flag set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_SUMMARY_STATS_DEBUG_ENABLE:
+ summary_stats_debug_flag = debug_value;
+ PLATFORM_DEBUG_PRINT("\nsummary_stats_debug_flag set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_SHOW_DEBUG_ENABLE:
+ show_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nshow_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_TCP_LOGGING_ENABLE:
+ tcp_debug_logging_enable_disable(debug_value);
+ break;
+ case CNAT_DEBUG_FLAG_V6RD_DATA_PATH_DEBUG_ENABLE:
+ v6rd_data_path_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nv6rd_data_path_debug_level set to %d\n", debug_value);
+ break;
+ case CNAT_DEBUG_FLAG_V6RD_CONFIG_DEBUG_ENABLE:
+ v6rd_config_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nv6rd_config_debug_level set to %d\n", debug_value);
+ break;
+ case CNAT_DEBUG_FLAG_V6RD_DEFRAG_DEBUG_ENABLE:
+ /* set debug to at least 1, so that critical errors are always
+ * enabled
+ */
+ v6rd_defrag_debug_level = debug_value ? debug_value : 1;
+ PLATFORM_DEBUG_PRINT("\nv6rd_defrag_debug_level set to %d\n", debug_value);
+ break;
+
+
+ case CNAT_DEBUG_SET_STATIC_PORT_RANGE:
+ PLATFORM_DEBUG_PRINT("\nChange Static Port Range from %d --> %d\n",
+ cnat_static_port_range, debug_value);
+ cnat_static_port_range = debug_value;
+ break;
+
+ case CNAT_DEBUG_FLAG_DSLITE_DP_ENABLE:
+ PLATFORM_DEBUG_PRINT("\n Changing dslite debug flag from %d --> %d\n",
+ dslite_debug_level, debug_value);
+ dslite_debug_level = debug_value;
+ break;
+
+ case CNAT_DEBUG_FLAG_NFV9_LOGGING_DUMP_ENABLE:
+ nfv9_logging_debug_flag = debug_value;
+ PLATFORM_DEBUG_PRINT("\nnfv9_logging_debug_flag set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_SYSLOG_LOGGING_DUMP_ENABLE:
+ syslog_debug_flag = debug_value;
+ PLATFORM_DEBUG_PRINT("\nsyslog_debug_flag set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_MAPE_CONFIG_DEBUG_ENABLE:
+ mape_config_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nmape_config_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAG_MAPE_DATA_PATH_DEBUG_ENABLE:
+ mape_data_path_debug_level = debug_value;
+ PLATFORM_DEBUG_PRINT("\nmape_data_path_debug_level set to %d\n", debug_value);
+ break;
+
+ case CNAT_DEBUG_FLAGS_DUMP:
+ default:
+ {
+ PLATFORM_DEBUG_PRINT("\nCurrent values of Debug Variables\n");
+ PLATFORM_DEBUG_PRINT("\nTo modify an item choose its index and provide the value\n");
+ PLATFORM_DEBUG_PRINT("\n%d: udp_inside_checksum_disable %d\n",
+ CNAT_DEBUG_FLAG_UDP_INSIDE_CHECKSUM_DISABLE,
+ udp_inside_checksum_disable);
+ PLATFORM_DEBUG_PRINT("%d: udp_outside_checksum_disable %d\n",
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_CHECKSUM_DISABLE,
+ udp_outside_checksum_disable);
+ PLATFORM_DEBUG_PRINT("%d: udp_inside_packet_dump_enable %d\n",
+ CNAT_DEBUG_FLAG_UDP_INSIDE_PKT_DUMP_ENABLE,
+ udp_inside_packet_dump_enable);
+ PLATFORM_DEBUG_PRINT("%d: udp_outside_packet_dump_enable %d\n",
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_PKT_DUMP_ENABLE,
+ udp_outside_packet_dump_enable);
+ PLATFORM_DEBUG_PRINT("%d: icmp_debug_flag %d\n",
+ CNAT_DEBUG_FLAG_ICMP_PKT_DUMP_ENABLE,
+ icmp_debug_flag);
+ PLATFORM_DEBUG_PRINT("%d: frag_debug_flag %d\n",
+ CNAT_DEBUG_FLAG_FRAG_PKT_DUMP_ENABLE,
+ frag_debug_flag);
+ PLATFORM_DEBUG_PRINT("%d: config_debug_level %d\n",
+ CNAT_DEBUG_FLAG_CONFIG_DEBUG_ENABLE,
+ config_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: global_debug_flag %d\n",
+ CNAT_DEBUG_FLAG_GLOBAL_DEBUG_ALL_ENABLE,
+ global_debug_flag);
+ PLATFORM_DEBUG_PRINT("%d: summary_stats_debug_flag %d\n",
+ CNAT_DEBUG_FLAG_SUMMARY_STATS_DEBUG_ENABLE,
+ summary_stats_debug_flag);
+ PLATFORM_DEBUG_PRINT("%d: show_debug_level %d\n",
+ CNAT_DEBUG_FLAG_SHOW_DEBUG_ENABLE,
+ show_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: xlat_config_debug_level %d\n",
+ CNAT_DEBUG_FLAG_XLAT_CONFIG_DEBUG_ENABLE,
+ xlat_config_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: xlat_data_path_debug_level %d\n",
+ CNAT_DEBUG_FLAG_XLAT_DATA_PATH_DEBUG_ENABLE,
+ xlat_data_path_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: tcp_logging_enable_flag %d\n",
+ CNAT_DEBUG_FLAG_TCP_LOGGING_ENABLE,
+ tcp_logging_enable_flag);
+ PLATFORM_DEBUG_PRINT(" tcp_logging_enable_options DISABLE %d, ENABLE %d, PKT_DUMP %d, SUMMARY_DUMP %d\n",
+ TCP_LOGGING_DISABLE, TCP_LOGGING_ENABLE,
+ TCP_LOGGING_PACKET_DUMP, TCP_LOGGING_SUMMARY_DUMP);
+ PLATFORM_DEBUG_PRINT("%d: nfv9_logging_debug_flag %d\n",
+ CNAT_DEBUG_FLAG_NFV9_LOGGING_DUMP_ENABLE,
+ nfv9_logging_debug_flag);
+ PLATFORM_DEBUG_PRINT("%d: syslog_debug_flag %d\n",
+ CNAT_DEBUG_FLAG_SYSLOG_LOGGING_DUMP_ENABLE,
+ syslog_debug_flag);
+ PLATFORM_DEBUG_PRINT("%d: cnat_static_port_range %d\n",
+ CNAT_DEBUG_SET_STATIC_PORT_RANGE,
+ cnat_static_port_range);
+ PLATFORM_DEBUG_PRINT("%d: v6rd_data_path_debug_level %d\n",
+ CNAT_DEBUG_FLAG_V6RD_DATA_PATH_DEBUG_ENABLE,
+ v6rd_data_path_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: v6rd_config_debug_level %d\n",
+ CNAT_DEBUG_FLAG_V6RD_CONFIG_DEBUG_ENABLE,
+ v6rd_config_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: v6rd_defrag_debug_level %d\n",
+ CNAT_DEBUG_FLAG_V6RD_DEFRAG_DEBUG_ENABLE,
+ v6rd_defrag_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: nat64_stful_debug %d\n",
+ CNAT_DEBUG_FLAG_NAT64_CONFIG_DEBUG_ENABLE,
+ nat64_config_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: nat64_data_path_debug_level %d\n",
+ CNAT_DEBUG_FLAG_NAT64_DATA_PATH_DEBUG_ENABLE,
+ nat64_data_path_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: dslite_debug_level %d\n",
+ CNAT_DEBUG_FLAG_DSLITE_DP_ENABLE,
+ dslite_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: ds_lite_config_debug_level %d\n",
+ CNAT_DEBUG_FLAG_DSLITE_CONFIG_DEBUG_ENABLE,
+ ds_lite_config_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: mape_config_debug_level %d\n",
+ CNAT_DEBUG_FLAG_MAPE_CONFIG_DEBUG_ENABLE,
+ mape_config_debug_level);
+ PLATFORM_DEBUG_PRINT("%d: mape_data_path_debug_level %d\n",
+ CNAT_DEBUG_FLAG_MAPE_DATA_PATH_DEBUG_ENABLE,
+ mape_data_path_debug_level);
+ }
+ break;
+ }
+}
+
+extern void dump_cnat_frag_stats(void);
+
+void spp_api_cnat_p2mp_debug_request_t_handler
+(spp_api_cnat_p2mp_debug_request_t *mp)
+{
+ u16 command_type;
+
+/*
+ if (mp->core_num != my_instance_number) {
+ mp->rc = CNAT_NOT_THIS_CORE;
+ return;
+ }
+*/
+
+ command_type = spp_net_to_host_byte_order_16(&mp->dump_type);
+ PLATFORM_DEBUG_PRINT("-->> Core%d: Received debug msg ... cmd type: %d\n",
+ my_instance_number, command_type);
+
+ switch (command_type) {
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_POLICY:
+ PLATFORM_DEBUG_PRINT("Core%d: policy\n", my_instance_number);
+ cnat_db_dump_policy();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_MAIN_DB:
+ PLATFORM_DEBUG_PRINT("Core%d: Main db\n", my_instance_number);
+ cnat_db_dump_main();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_MAIN_DB_SUMMARY:
+ PLATFORM_DEBUG_PRINT("Core%d: Main db Summary\n", my_instance_number);
+ cnat_db_dump_main_summary();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_USER_DB:
+ PLATFORM_DEBUG_PRINT("Core%d: User db\n", my_instance_number);
+ cnat_db_dump_user();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_USER_DB_SUMMARY:
+ PLATFORM_DEBUG_PRINT("Core%d: User db Summary\n", my_instance_number);
+ cnat_db_dump_user_summary();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_HASHES_DB:
+ PLATFORM_DEBUG_PRINT("Core%d: Hashes db\n", my_instance_number);
+ cnat_db_dump_hashes();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_VRF_MAP:
+ PLATFORM_DEBUG_PRINT("Core%d: Vrf map \n", my_instance_number);
+ cnat_db_dump_portmaps();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_SUMMARY_DB:
+ PLATFORM_DEBUG_PRINT("Core%d: dump summary DB \n", my_instance_number);
+ cnat_db_summary();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_STATS:
+ PLATFORM_DEBUG_PRINT("Core%d: dump stats \n", my_instance_number);
+ spp_node_print_stats(1, NULL);
+ break;
+
+ /* Currently does same as clear node ctr, may change */
+ case CNAT_DEBUG_GENERIC_COMMAND_CLEAR_STATS:
+ PLATFORM_DEBUG_PRINT("Core%d: clear stats \n", my_instance_number);
+ spp_node_clear_stats();
+ break;
+
+ case CNAT_DEBUG_SPP_LOG:
+ PLATFORM_DEBUG_PRINT("Core%d: SPP LOG \n", my_instance_number);
+ spp_log_p2mp_req(mp);
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_NODE_COUNTER:
+ PLATFORM_DEBUG_PRINT("Core%d: NODE Counter dump \n", my_instance_number);
+ spp_node_print_counters();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_CLEAR_NODE_COUNTER:
+ PLATFORM_DEBUG_PRINT("Core%d: clear node counter \n", my_instance_number);
+ spp_node_clear_stats();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_CNAT_COUNTER:
+ PLATFORM_DEBUG_PRINT("Core%d: CNAT Counter dump \n", my_instance_number);
+ spp_node_print_cnat_counters();
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_VA:
+ PLATFORM_DEBUG_PRINT("Core%d: VA dump \n", my_instance_number);
+ {
+ int argc = 1;
+ u32 arg[2] = {spp_net_to_host_byte_order_32(&mp->param[0]), 0};
+
+ cnat_va_dump(argc, arg);
+ }
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_SHOW_CONFIG:
+ PLATFORM_DEBUG_PRINT("Core%d: Show config dump \n", my_instance_number);
+ {
+ int argc = 0;
+ unsigned long arg[3];
+
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[0]))) {
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[1]))) {
+ ;
+ } else {
+ argc--;
+ }
+ }
+
+ cnat_show_cdb_command_v2(argc, arg);
+/*
+ xlat_show_config();
+ cnat_alg_show();
+*/
+ v6rd_show_config();
+ dslite_show_config();
+ nat64_dump_table();
+ mape_show_config();
+ }
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_SHOW_NFV9:
+ PLATFORM_DEBUG_PRINT("Core%d: NFv9 dump \n", my_instance_number);
+ #if 0 /* Currently not calling this */
+ cnat_nfv9_show_cmd();
+ #endif
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_SHOW_IVRF:
+ PLATFORM_DEBUG_PRINT("Core%d: IVRF dump \n", my_instance_number);
+ {
+ int argc = 0;
+ unsigned long arg[3];
+
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[0]))) {
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[1]))) {
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[2]))) {
+ ;
+ } else {
+ argc--;
+ }
+ } else {
+ argc--;
+ }
+ }
+
+
+ PLATFORM_DEBUG_PRINT("VRF: %d \n", spp_net_to_host_byte_order_32(&mp->param[0]));
+ PLATFORM_DEBUG_PRINT("2nd arg: %d \n",
+ spp_net_to_host_byte_order_32(&mp->param[1]));
+
+ cnat_show_ivrf_command_v2(argc, arg);
+ }
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_SHOW_OVRF:
+ PLATFORM_DEBUG_PRINT("Core%d: OVRF dump \n", my_instance_number);
+ {
+ int argc = 0;
+ unsigned long arg[3];
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[0]))) {
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[1]))) {
+ if ((arg[argc++] = spp_net_to_host_byte_order_32(&mp->param[2]))) {
+ ;
+ } else {
+ argc--;
+ }
+ } else {
+ argc--;
+ }
+ }
+
+ PLATFORM_DEBUG_PRINT("VRF: %d \n", spp_net_to_host_byte_order_32(&mp->param[0]));
+ PLATFORM_DEBUG_PRINT("2nd arg: %d \n",
+ spp_net_to_host_byte_order_32(&mp->param[1]));
+
+ cnat_show_ovrf_command_v2(argc, arg);
+ }
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DEBUG_OPTIONS:
+ PLATFORM_DEBUG_PRINT("Core%d: Debug option dump \n", my_instance_number);
+ {
+ global_pd_dbg_lvl = 0;
+ global_pi_dbg_lvl = 0;
+ global_l2_dbg_lvl = 0;
+
+ global_pd_dbg_lvl =
+ spp_net_to_host_byte_order_32(&mp->param[0]);
+ global_pi_dbg_lvl =
+ spp_net_to_host_byte_order_32(&mp->param[1]);
+ global_l2_dbg_lvl =
+ spp_net_to_host_byte_order_32(&mp->param[2]);
+
+ PLATFORM_DEBUG_PRINT("global_pd_dbg_lvl: %d, global_pi_dbg_lvl: %d, global_l2_dbg_lvl: %d\n",
+ global_pd_dbg_lvl, global_pi_dbg_lvl, global_l2_dbg_lvl);
+ }
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DUMP_DEBUG_LEVELS:
+ PLATFORM_DEBUG_PRINT("Core%d: PD Debug level: %d \n", my_instance_number, global_pd_dbg_lvl);
+ PLATFORM_DEBUG_PRINT("Core%d: PI Debug level: %d \n", my_instance_number, global_pi_dbg_lvl);
+ PLATFORM_DEBUG_PRINT("Core%d: L2 Debug level: %d \n", my_instance_number, global_l2_dbg_lvl);
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DEBUG_FLAGS:
+ PLATFORM_DEBUG_PRINT("Core%d: Debug flags \n", my_instance_number);
+ cnat_debug_flags_set(mp);
+ break;
+
+ case CNAT_READ_TEMP_SENSORS:
+ PLATFORM_INIT_TEMP_SENSORS();
+ PLATFORM_READ_CPU_SENSORS(TEMPERATURE_SENSOR_TEST_MODE);
+ break;
+
+ case CNAT_BLOCK_OCTEON_SENSOR_READ:
+
+ PLATFORM_SET_TEMP_READ_BLOCK(temperature_read_blocked , mp);
+#ifdef TARGET_RODDICK
+ temperature_read_blocked =
+ spp_net_to_host_byte_order_32(&mp->param[0]);
+#endif
+ break;
+
+ case CNAT_DEBUG_TIMEOUT_DB_SUMMARY:
+ cnat_db_dump_timeout();
+ break;
+
+ /* This option has to be removed later */
+ case CNAT_DEBUG_SET_BULK_SIZE:
+ PLATFORM_DEBUG_PRINT("\nSetting bulk size to %d\n",
+ spp_net_to_host_byte_order_32(&mp->param[0]));
+ set_bulk_size_to_all_vrfs(
+ spp_net_to_host_byte_order_32(&mp->param[0]));
+ break;
+
+ case CNAT_DEBUG_SHOW_BULK_STAT:
+ show_bulk_port_stats();
+ break;
+
+ case CNAT_DEBUG_CLEAR_BULK_STAT:
+ clear_bulk_port_stats();
+ break;
+
+ case CNAT_DEBUG_SHOW_BULK_ALLOC:
+ {
+ u16 in_vrfid = spp_net_to_host_byte_order_32(&mp->param[0]);
+ u32 inside_ip = spp_net_to_host_byte_order_32(&mp->param[1]);
+ show_bulk_port_allocation(in_vrfid, inside_ip);
+ }
+ break;
+
+ case CNAT_DEBUG_NAT44_IN2OUT_FRAG_STATS:
+ dump_cnat_frag_stats();
+ break;
+
+ default:
+ mp->rc = CNAT_ERR_INVALID_MSG_ID;
+ break;
+ }
+
+ mp->rc = CNAT_SUCCESS;
+ return;
+}
+
+
+void spp_api_cnat_v4_debug_in2out_private_addr_t_handler
+(spp_api_cnat_v4_debug_in2out_private_addr_t *mp)
+{
+ u16 i_vrf;
+ u32 debug_flag;
+ u32 start_addr, end_addr;
+
+
+ start_addr =
+ spp_net_to_host_byte_order_32(&mp->start_addr);
+ end_addr =
+ spp_net_to_host_byte_order_32(&mp->end_addr);
+ i_vrf =
+ spp_net_to_host_byte_order_16(&mp->i_vrf);
+ debug_flag =
+ spp_net_to_host_byte_order_32(&mp->debug_flag);
+
+ if ((i_vrf > MAX_UIDX) || (start_addr > end_addr) ||
+ ((debug_flag != CNAT_DEBUG_NONE) &&
+ ((debug_flag & CNAT_DEBUG_ALL) == CNAT_DEBUG_NONE))) {
+ mp->rc = CNAT_ERR_PARSER;
+ PLATFORM_DEBUG_PRINT("invalid debug ivrf 0x%x flag 0x%x "
+ "start addr 0x%x end addr 0x%x\n",
+ i_vrf, debug_flag,
+ start_addr, end_addr);
+ return;
+ }
+
+ PLATFORM_DEBUG_PRINT("debug ivrf 0x%x flag 0x%x "
+ "start addr 0x%x end addr 0x%x\n",
+ i_vrf, debug_flag,
+ start_addr, end_addr);
+
+ mp->rc = CNAT_SUCCESS;
+ debug_i_vrf = i_vrf;
+ debug_i_flag = debug_flag;
+ debug_i_addr_start = start_addr;
+ debug_i_addr_end = end_addr;
+
+}
+
+void spp_api_cnat_v4_debug_out2in_public_addr_t_handler
+(spp_api_cnat_v4_debug_out2in_public_addr_t *mp)
+{
+ u16 o_vrf;
+ u32 debug_flag;
+ u32 start_addr, end_addr;
+
+ start_addr =
+ spp_net_to_host_byte_order_32(&mp->start_addr);
+ end_addr =
+ spp_net_to_host_byte_order_32(&mp->end_addr);
+ o_vrf =
+ spp_net_to_host_byte_order_16(&mp->o_vrf);
+ debug_flag =
+ spp_net_to_host_byte_order_32(&mp->debug_flag);
+
+ if ((o_vrf > MAX_UIDX) || (start_addr > end_addr) ||
+ ((debug_flag != CNAT_DEBUG_NONE) &&
+ ((debug_flag & CNAT_DEBUG_ALL) == CNAT_DEBUG_NONE))) {
+ mp->rc = CNAT_ERR_PARSER;
+ PLATFORM_DEBUG_PRINT("invalid debug ovrf 0x%x flag 0x%x "
+ "start addr 0x%x end addr 0x%x\n",
+ o_vrf, debug_flag,
+ start_addr, end_addr);
+ return;
+ }
+
+ mp->rc = CNAT_SUCCESS;
+ debug_o_vrf = o_vrf;
+ debug_o_flag = debug_flag;
+ debug_o_addr_start = start_addr;
+ debug_o_addr_end = end_addr;
+
+ PLATFORM_DEBUG_PRINT(" o2i debug currently is not supported\n");
+}
+
+void nat64_reset_session_expiry(nat64_bib_entry_t *db)
+{
+ NAT64_STFUL_DEBUG_PRINT(3, " invoking nat64_clean_bib_db_entry\n " );
+ nat64_clean_bib_db_entry(db);
+ NAT64_STFUL_DEBUG_PRINT(3, "done with clean_bib_db_entry\n " );
+}
+
+void spp_api_nat64_clear_db_request_t_handler
+(spp_api_nat64_clear_db_request_t *mp)
+{
+ u16 port, proto, flag;
+ u32 index;
+ u32 i;
+ nat64_bib_entry_t* db;
+ nat64_v6_key_t ki;
+ nat64_table_entry_t *my_nat64_table_db_ptr;
+ u16 nat64_id;
+
+ NAT64_STFUL_FUNC_ENTER;
+ NAT64_STFUL_DEBUG_DUMP_MSG(mp);
+
+ nat64_id = spp_net_to_host_byte_order_16(&mp->nat64_id);
+ my_nat64_table_db_ptr = nat64_table_ptr + nat64_id;
+
+ port = spp_net_to_host_byte_order_16(&mp->port_num);
+ proto = mp->protocol;
+
+ ki.vrf = nat64_id;
+ ki.vrf |= ((u16)proto << CNAT_PRO_SHIFT);
+
+ for(i =0 ; i< 4 ; i++)
+ ki.ipv6[i] = spp_net_to_host_byte_order_32(&mp->ip_addr[i]);
+
+ ki.port = port;
+
+ flag = mp->flags;
+
+ mp->rc = CNAT_SUCCESS;
+
+ NAT64_STFUL_DEBUG_PRINT(3, "\n Nat64_id = %d, port =%d, \
+ proto =%d, flags=0x%08X",\
+ nat64_id, port, proto, flag);
+
+ NAT64_STFUL_DEBUG_PRINT(3, "\n IPv6 Addr = %08X : %08X: %08X: %08X",\
+ ki.ipv6[0], ki.ipv6[1], ki.ipv6[2], ki.ipv6[3]);
+
+ if (flag == CNAT_DB_CLEAR_SPECIFIC) {
+ NAT64_STFUL_DEBUG_PRINT(3, "\n clear specific \n");
+
+ db = nat64_bib_db_lookup_entry(&ki);
+ if (db == NULL) {
+ NAT64_STFUL_DEBUG_PRINT(3, "\n clear specific - not present\n");
+ mp->rc = CNAT_NOT_FOUND_ANY;
+ return;
+ }
+
+ if( !(db->flags & CNAT_DB_NAT64_FLAG) ||
+ (db->nat64_inst_id != nat64_id))
+ return;
+
+
+ nat64_reset_session_expiry(db);
+ return;
+ }
+
+ pool_header_t *p = pool_header(nat64_bib_db);
+
+ for(index = 0; index < vec_len(nat64_bib_db); index++) {
+
+ /* check whether it is a nat44 entry; if yes skip it (this needs to be done in nat44 as well) */
+
+ if (PREDICT_FALSE(!clib_bitmap_get(p->free_bitmap, index))) {
+ db = nat64_bib_db + index;
+
+ if( !(db->flags & CNAT_DB_NAT64_FLAG) ||
+ (db->nat64_inst_id != nat64_id))
+ continue;
+
+ if (flag == CNAT_DB_CLEAR_ALL) {
+ nat64_reset_session_expiry(db);
+ continue;
+ }
+
+ if (flag & CNAT_DB_CLEAR_ADDR) {
+ if ((db->v6_in_key.ipv6[0] != ki.ipv6[0]) ||
+ (db->v6_in_key.ipv6[1] != ki.ipv6[1]) ||
+ (db->v6_in_key.ipv6[2] != ki.ipv6[2]) ||
+ (db->v6_in_key.ipv6[3] != ki.ipv6[3])){
+ NAT64_STFUL_DEBUG_PRINT(3, "\n%s:%d\n", __FUNCTION__, \
+ __LINE__ );
+ continue;
+ }
+ }
+
+ if (flag & CNAT_DB_CLEAR_PROTO) {
+ if (((db->v6_in_key.vrf & CNAT_PRO_MASK) >> CNAT_PRO_SHIFT)
+ != proto) {
+ NAT64_STFUL_DEBUG_PRINT(3, "\n%s:%d\n", __FUNCTION__, \
+ __LINE__ );
+ continue;
+ }
+ }
+
+ if (flag & CNAT_DB_CLEAR_PORT) {
+ if (db->v6_in_key.port != port) {
+ NAT64_STFUL_DEBUG_PRINT(3, "\n%s:%d\n", __FUNCTION__, \
+ __LINE__ );
+ continue;
+ }
+ }
+
+ NAT64_STFUL_DEBUG_PRINT(3, "\n%s:%d\n", __FUNCTION__, \
+ __LINE__ );
+ /*
+ * The entry matches all the requested criteria - reset its session expiry.
+ */
+ nat64_reset_session_expiry(db);
+ }
+ }
+}
+
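+/*
+ * Walk the circular session list hanging off a main db entry (headed
+ * at session_head_index) and zero each session's entry_expires so the
+ * periodic DB scanner reclaims the sessions on its next pass.
+ */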
+void inline cnat_clear_session_db(cnat_main_db_entry_t *db)
+{
+ if(PREDICT_FALSE(db->nsessions > 1)) {
+ u32 session_index = db->session_head_index;
+ cnat_session_entry_t *sdb;
+ do {
+ sdb = cnat_session_db + session_index;
+ if(PREDICT_FALSE(!sdb)) {
+ //TO DO: Debug msg?
+ break;
+ }
+ sdb->entry_expires = 0;
+ session_index = sdb->main_list.next;
+ } while(session_index != db->session_head_index
+ && db->session_head_index != EMPTY);
+ }
+ return;
+}
+
+#ifdef CGSE_DS_LITE
+extern dslite_table_entry_t dslite_table_array[];
+
+void spp_api_ds_lite_clear_db_request_t_handler
+(spp_api_ds_lite_clear_db_request_t *mp)
+{
+ u16 port, proto, flag;
+ u32 index;
+ u32 i;
+ cnat_main_db_entry_t *db;
+ cnat_user_db_entry_t *udb;
+ dslite_key_t ki;
+ dslite_table_entry_t *my_table_db_ptr;
+ u16 id;
+ u16 i_vrf;
+
+
+ id = spp_net_to_host_byte_order_16(&mp->ds_lite_id);
+ id = DS_LITE_CONFIG_TO_ARRAY_ID(id);
+
+ my_table_db_ptr = &dslite_table_array[id];
+ i_vrf = my_table_db_ptr->i_vrf;
+
+ port = spp_net_to_host_byte_order_16(&mp->port_num);
+ proto = mp->protocol;
+
+ ki.ipv4_key.k.vrf = i_vrf;
+ ki.ipv4_key.k.vrf |= ((u16)proto << CNAT_PRO_SHIFT);
+
+ for(i =0 ; i< 4 ; i++)
+ ki.ipv6[i] = spp_net_to_host_byte_order_32(&mp->ip_addr[i]);
+
+ ki.ipv4_key.k.port = port;
+
+ flag = mp->flags;
+
+ mp->rc = CNAT_SUCCESS;
+
+ DSLITE_PRINTF(3, "\n dslite id = %d, port =%d"
+ "proto =%d, flags=0x%08X",\
+ id, port, proto, flag);
+
+ DSLITE_PRINTF(3, "\n IPv6 Addr = %08X : %08X: %08X: %08X",\
+ ki.ipv6[0], ki.ipv6[1], ki.ipv6[2], ki.ipv6[3]);
+
+ if (flag == CNAT_DB_CLEAR_SPECIFIC) {
+ DSLITE_PRINTF(3, "\n Clear specific NOT supported for DS Lite \n");
+ return;
+ }
+
+ pool_header_t *p = pool_header(cnat_main_db);
+
+ for(index = 0; index < vec_len(cnat_main_db); index++) {
+
+ /* check whether it is a dslite entry; if not, skip it */
+
+ if (PREDICT_FALSE(!clib_bitmap_get(p->free_bitmap, index))) {
+ db = cnat_main_db + index;
+
+ if( !(db->flags & CNAT_DB_DSLITE_FLAG) ||
+ ((db->in2out_key.k.vrf & CNAT_VRF_MASK) != i_vrf) ||
+ (db->flags & CNAT_DB_FLAG_STATIC_PORT)) {
+ continue;
+ }
+
+ if (flag == CNAT_DB_CLEAR_ALL) {
+
+ /*
+ * Set the entry time to very old (0), and wait
+ * for a timeout to auto-expire the entry.
+ */
+ db->entry_expires = 0;
+ /* Handle sessions as well.. */
+ cnat_clear_session_db(db);
+ continue;
+ }
+
+ if (flag & CNAT_DB_CLEAR_ADDR) {
+ udb = cnat_user_db + db->user_index;
+ if(PREDICT_FALSE(!udb)) {
+ continue;
+ }
+ if ((udb->ipv6[0] != ki.ipv6[0]) ||
+ (udb->ipv6[1] != ki.ipv6[1]) ||
+ (udb->ipv6[2] != ki.ipv6[2]) ||
+ (udb->ipv6[3] != ki.ipv6[3])) {
+ continue;
+ }
+ }
+
+ if (flag & CNAT_DB_CLEAR_PROTO) {
+ if (((db->in2out_key.k.vrf & CNAT_PRO_MASK) >> CNAT_PRO_SHIFT)
+ != proto) {
+ continue;
+ }
+ }
+
+ if (flag & CNAT_DB_CLEAR_PORT) {
+ if (db->in2out_key.k.port != port) {
+ continue;
+ }
+ }
+
+ /*
+ * Mark for expiry in the next round of DB scan
+ */
+ db->entry_expires = 0;
+ /* Handle sessions as well.. */
+ cnat_clear_session_db(db);
+ }
+ }
+}
+#endif /* #ifdef CGSE_DS_LITE */
+
+void spp_api_cnat_clear_db_request_t_handler
+(spp_api_cnat_clear_db_request_t *mp)
+{
+ u16 i_vrf, port, proto, flag;
+ u32 ip_addr, index;
+ u64 a,b,c;
+ cnat_main_db_entry_t * db;
+ cnat_db_key_bucket_t ki;
+
+#if defined(TARGET_LINUX_UDVR) || defined(CNAT_PG)
+ i_vrf = mp->inside_vrf;
+ ip_addr = mp->ip_addr;
+ port = mp->port_num;
+ proto = mp->protocol;
+#else
+ i_vrf = spp_net_to_host_byte_order_16(&mp->inside_vrf);
+ ip_addr = spp_net_to_host_byte_order_32(&mp->ip_addr);
+ port = spp_net_to_host_byte_order_16(&mp->port_num);
+ proto = spp_net_to_host_byte_order_16(&mp->protocol);
+#endif
+
+
+
+ ki.k.k.vrf = i_vrf;
+ ki.k.k.vrf |= ((u16)proto << CNAT_PRO_SHIFT);
+ ki.k.k.ipv4 = ip_addr;
+ ki.k.k.port = port;
+
+ flag = mp->wildcard;
+
+ mp->rc = CNAT_SUCCESS;
+
+ if (flag == CNAT_DB_CLEAR_SPECIFIC) {
+ CNAT_V4_GET_HASH(ki.k.key64,
+ ki.bucket,
+ CNAT_MAIN_HASH_MASK);
+ index = cnat_in2out_hash[ki.bucket].next;
+ if (PREDICT_TRUE(index == EMPTY)) {
+ mp->rc = CNAT_NOT_FOUND_ANY;
+ return;
+ }
+
+ do {
+ db = cnat_main_db + index;
+
+ /*
+ * Delete if the db entry matches and it is not a
+ * STATIC port entry
+ */
+ if ((db->in2out_key.key64 == ki.k.key64) &&
+ !(db->flags & CNAT_DB_FLAG_STATIC_PORT) &&
+ !(db->flags & CNAT_DB_NAT64_FLAG) &&
+ !(db->flags & CNAT_DB_DSLITE_FLAG)) {
+
+ /*
+ * Set the entry time to very old (0), and wait
+ * for a timeout to auto-expire the entry.
+ */
+ db->entry_expires = 0;
+ /* Handle sessions as well.. */
+ cnat_clear_session_db(db);
+ return;
+ }
+ index = db->in2out_hash.next;
+ } while (index != EMPTY);
+
+ mp->rc = CNAT_NOT_FOUND_ANY;
+ return;
+ }
+
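+ /*
+ * Wildcard clear: walk every allocated slot in the main db pool
+ * (free_bitmap marks the free ones) and apply each requested
+ * filter in turn before expiring the entry.
+ */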
+ pool_header_t *p = vec_header(cnat_main_db, sizeof(pool_header_t));
+
+ for(index = 0; index < vec_len(cnat_main_db); index++) {
+
+ if (PREDICT_TRUE(!clib_bitmap_get(p->free_bitmap, index))) {
+ db = cnat_main_db + index;
+
+ if(PREDICT_FALSE(db->flags & CNAT_DB_NAT64_FLAG)) {
+ continue;
+ }
+
+ if(PREDICT_FALSE(db->flags & CNAT_DB_DSLITE_FLAG)) {
+ continue;
+ }
+
+ if (flag == CNAT_DB_CLEAR_ALL) {
+ if (!(db->flags & CNAT_DB_FLAG_STATIC_PORT)) {
+ db->entry_expires = 0;
+ /* Handle sessions as well.. */
+ cnat_clear_session_db(db);
+ }
+ continue;
+ }
+
+ if (flag & CNAT_DB_CLEAR_VRF) {
+ if (((db->in2out_key.k.vrf & CNAT_VRF_MASK) != i_vrf)) {
+ continue;
+ }
+ }
+
+ if (flag & CNAT_DB_CLEAR_ADDR) {
+ if ((db->in2out_key.k.ipv4 != ip_addr)) {
+ continue;
+ }
+ }
+
+ if (flag & CNAT_DB_CLEAR_PROTO) {
+ if (((db->in2out_key.k.vrf & CNAT_PRO_MASK) >> CNAT_PRO_SHIFT)
+ != proto) {
+ continue;
+ }
+ }
+
+ if (flag & CNAT_DB_CLEAR_PORT) {
+ if (db->in2out_key.k.port != port) {
+ continue;
+ }
+ }
+
+ /*
+ * Delete if the db entry matches and it is not a
+ * STATIC port entry
+ */
+ if (!(db->flags & CNAT_DB_FLAG_STATIC_PORT)) {
+ db->entry_expires = 0;
+ /* Handle sessions as well.. */
+ cnat_clear_session_db(db);
+ }
+ }
+ }
+}
+
+void
+spp_api_cnat_generic_command_debug (cnat_generic_command_resp *mp_resp)
+{
+#ifdef SHOW_DEBUG
+ u32 i, j;
+
+ i = spp_net_to_host_byte_order_32(&(mp_resp->num_bytes));
+
+ PLATFORM_DEBUG_PRINT("\nNum_Bytes %d\n", i);
+
+ for (j = 0; j < i; j++) {
+ PLATFORM_DEBUG_PRINT("0x%02X ", mp_resp->raw_data[j]);
+ if ((j % 16) == 15) {
+ PLATFORM_DEBUG_PRINT("\n");
+ }
+ }
+#endif
+}
+
+/*
+ * The following command dumps the
+ *    user-db information
+ *    port-map information
+ * for a given user source IP address
+ *
+ * The format of the output is:
+ * Word 0: Address of udb
+ * Word 1: udb->translation_list_head_index
+ * Word 2:
+ * Bytes 0..1: udb->ntranslations
+ *      Bytes 2..2: udb->icmp_msg_count
+ * Bytes 3..3: udb->unused
+ * Word 3: udb->portmap_index
+ * Word 4: udb->key.k.ipv4
+ * Word 5:
+ * Bytes 0..1: udb->key.k.port = 0
+ * Bytes 2..3: udb->key.k.vrf
+ * Word 6: udb->user_hash
+ * Word 7: Address of my_pm
+ * Word 8: my_pm->status
+ * Word 9: my_pm->inuse
+ * Word A: my_pm->delete_time
+ * Word B: my_pm->ipv4_address
+ */
+void spp_api_cnat_generic_command_user_db_pm
+(spp_api_cnat_generic_command_request_t *mp)
+{
+ u32 i;
+ cnat_db_key_bucket_t u_ki;
+ u16 my_vrfmap_index;
+ u32 *result_array;
+ cnat_generic_command_resp *mp_resp;
+ cnat_user_db_entry_t *udb;
+ cnat_user_db_entry_t *mp_udb;
+ cnat_vrfmap_t *my_vrfmap;
+ cnat_portmap_v2_t *pm;
+ cnat_portmap_v2_t *my_pm;
+
+ /*
+ * Request structure is used to send the response
+ */
+ mp_resp = (cnat_generic_command_resp *) mp;
+
+ u_ki.k.k.vrf = spp_net_to_host_byte_order_32(&mp->params[1]);
+ u_ki.k.k.ipv4 = spp_net_to_host_byte_order_32(&mp->params[2]);
+ u_ki.k.k.port = 0;
+
+ udb = cnat_user_db_lookup_entry(&u_ki);
+
+ if (!udb) {
+ mp_resp->num_bytes = spp_host_to_net_byte_order_32(0);
+ goto no_udb_found;
+ }
+
+ result_array = (u32 *) (&(mp_resp->raw_data[0]));
+
+ i = 0;
+ result_array[i++] = spp_host_to_net_byte_order_32((u32) udb);
+
+ mp_udb = (cnat_user_db_entry_t *) &(result_array[i]);
+
+ /*
+     * Advance the 32-bit word index past the copied udb entry
+     * (its size rounded up to whole 32-bit words)
+ */
+ i = i + ((sizeof(cnat_user_db_entry_t)+3)/4);
+
+ /*
+ * Fill in the UDB information
+ */
+ mp_udb->translation_list_head_index =
+ spp_host_to_net_byte_order_32(udb->translation_list_head_index);
+ mp_udb->ntranslations =
+ spp_host_to_net_byte_order_16(udb->ntranslations);
+ mp_udb->icmp_msg_count = udb->icmp_msg_count;
+ mp_udb->flags = udb->flags;
+ mp_udb->portmap_index =
+ spp_host_to_net_byte_order_32(udb->portmap_index);
+ mp_udb->key.k.ipv4 =
+ spp_host_to_net_byte_order_32(udb->key.k.ipv4);
+ mp_udb->key.k.port =
+ spp_host_to_net_byte_order_16(udb->key.k.port);
+ mp_udb->key.k.vrf =
+ spp_host_to_net_byte_order_16(udb->key.k.vrf);
+ mp_udb->user_hash.next =
+ spp_host_to_net_byte_order_32(udb->user_hash.next);
+
+ my_vrfmap_index = vrf_map_array[u_ki.k.k.vrf];
+ my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;
+ pm = my_vrfmap->portmap_list;
+ my_pm = pm + udb->portmap_index;
+
+ /*
+ * Fill in the port_map information
+ */
+ result_array[i++] = spp_host_to_net_byte_order_32((u32) my_pm);
+ result_array[i++] = spp_host_to_net_byte_order_32(my_pm->inuse);
+ result_array[i++] = spp_host_to_net_byte_order_32(my_pm->delete_time);
+ result_array[i++] = spp_host_to_net_byte_order_32(my_pm->ipv4_address);
+
+ mp_resp->num_bytes = spp_host_to_net_byte_order_32(i*4);
+
+no_udb_found:
+ spp_api_cnat_generic_command_debug(mp_resp);
+}
+
+/*
+ * The following command dumps the
+ * DB usage stats for
+ * main-db
+ * user-db
+ * in2out hash
+ * out2in hash
+ *
+ * The format of the output is:
+ * Word 0: Main-DB - Total
+ * Word 1: Main-DB - Active
+ * Word 2: Main-DB - Free
+ * Word 3: User-DB - Total
+ * Word 4: User-DB - Active
+ * Word 5: User-DB - Free
+ * Word 6: Hash In2Out - Size
+ * Word 7: Hash In2Out - Used
+ * Word 8: Hash In2Out - Used Percentage
+ * Word 9: Hash Out2In - Size
+ * Word A: Hash Out2In - Used
+ * Word B: Hash Out2In - Used Percentage
+ */
+void spp_api_cnat_generic_command_db_summary
+(spp_api_cnat_generic_command_request_t *mp)
+{
+ u32 count1, count2, count3;
+ u32 i = 0;
+ u32 k = 0;
+ cnat_generic_command_resp *mp_resp;
+ u32 *result_array;
+
+ /*
+ * Request structure is used to send the response
+ */
+ mp_resp = (cnat_generic_command_resp *) mp;
+ result_array = (u32 *) (&(mp_resp->raw_data[0]));
+
+ /*
+ * Find entries free and used in main-db
+ */
+ count1 = vec_len(cnat_main_db);
+ count2 = db_free_entry(cnat_main_db);
+ count3 = count1 - count2;
+
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count1);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count3);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count2);
+
+ /*
+ * Find entries free and used in user-db
+ */
+ count1 = vec_len(cnat_user_db);
+ count2 = db_free_entry(cnat_user_db);
+ count3 = count1 - count2;
+
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count1);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count3);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count2);
+
+ /*
+ * Find entries used in in2out and out2in hash tables
+ * and percentage utilization.
+ */
+ count1 = count2 = 0;
+ for (k = 0; k < CNAT_MAIN_HASH_SIZE; k++) {
+ if(cnat_in2out_hash[k].next != ~0) count1++;
+ if(cnat_out2in_hash[k].next != ~0) count2++;
+
+ }
+
+ count3 = count1*100/CNAT_MAIN_HASH_SIZE;
+
+ *(result_array + i++) = spp_host_to_net_byte_order_32(CNAT_MAIN_HASH_SIZE);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count1);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count3);
+
+ count3 = count2*100/CNAT_MAIN_HASH_SIZE;
+
+ *(result_array + i++) = spp_host_to_net_byte_order_32(CNAT_MAIN_HASH_SIZE);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count2);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(count3);
+
+ mp_resp->num_bytes = spp_host_to_net_byte_order_32(i*4);
+
+ spp_api_cnat_generic_command_debug(mp_resp);
+}
+
+/*
+ * The following handler implements generic debug commands such as:
+ *
+ * Command 1:
+ * Reads num_bytes octets from a start_locn
+ * generic command <core_num> <cmd_type=1> <start_locn> <num_bytes> 0 0 0 0 0
+ *
+ * Command 2:
+ *   Writes up to 8 octets starting at start_locn
+ * generic command <core_num> <cmd_type=2> <start_locn> <num_bytes> 0 0 0 0 0
+ *
+ * Command 3:
+ * Dump the db summary stats
+ * generic command <core_num> <cmd_type=3>
+ *
+ * Command 4:
+ * Dump the user db entry
+ * generic command <core_num> <cmd_type=4> <vrf_id> <src_ip_addr>
+ *
+ * The following structures are referenced by this command:
+ * typedef struct _spp_api_cnat_generic_command_request {
+ * u16 _spp_msg_id;
+ * u8 rc;
+ * u8 core_num;
+ * u32 params[8];
+ * } spp_api_cnat_generic_command_request_t;
+ *
+ * typedef struct {
+ * u16 spp_msg_id;
+ * u8 rc;
+ * u8 core;
+ * u32 num_bytes;
+ * u8 raw_data[0];
+ * } cnat_generic_command_resp;
+ *
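+ * For example (values are illustrative), "generic command 0 3" requests
+ * the DB summary dump for core 0.
+ *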
+ */
+void spp_api_cnat_generic_command_request_t_handler
+(spp_api_cnat_generic_command_request_t *mp)
+{
+ cnat_generic_command_resp *resp_ptr;
+ u32 command_type, start_locn, num_bytes;
+
+ command_type = spp_net_to_host_byte_order_32(&mp->params[0]);
+ resp_ptr = (cnat_generic_command_resp *) mp;
+
+ switch (command_type) {
+ case CNAT_DEBUG_GENERIC_COMMAND_READ_MEM:
+ start_locn = spp_net_to_host_byte_order_32(&mp->params[1]);
+ num_bytes = spp_net_to_host_byte_order_32(&mp->params[2]);
+ clib_memcpy(&(resp_ptr->raw_data[0]), (u8 *) start_locn, num_bytes);
+ resp_ptr->num_bytes = spp_host_to_net_byte_order_32(num_bytes);
+
+#ifdef SHOW_DEBUG
+ {
+ u32 i;
+
+ for (i = 0; i < num_bytes; i++) {
+ PLATFORM_DEBUG_PRINT("0x%02X ", resp_ptr->raw_data[i]);
+ if ((i % 16) == 15) {
+ PLATFORM_DEBUG_PRINT("\n");
+ }
+ }
+ }
+#endif
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_WRITE_MEM:
+ start_locn = spp_net_to_host_byte_order_32(&mp->params[1]);
+ num_bytes = spp_net_to_host_byte_order_32(&mp->params[2]);
+
+ if (num_bytes > sizeof(u64)) {
+ mp->rc = CNAT_ERR_INVALID_MSG_SIZE;
+ return;
+ }
+
+ clib_memcpy((u8 *) start_locn, &(mp->params[3]), num_bytes);
+ resp_ptr->num_bytes = 0;
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_DB_SUMMARY:
+ spp_api_cnat_generic_command_db_summary(mp);
+ break;
+
+ case CNAT_DEBUG_GENERIC_COMMAND_USER_DB_PM:
+ spp_api_cnat_generic_command_user_db_pm(mp);
+ break;
+
+ case CNAT_DEBUG_GET_CGN_DB_SUMMARY:
+ spp_api_cnat_get_cgn_db_summary(mp);
+ break;
+
+ default:
+ mp->rc = CNAT_ERR_INVALID_MSG_ID;
+ break;
+ }
+}
+
+
+static int cnat_debug_init (void *notused)
+{
+ spp_msg_api_set_handler(SPP_API_CNAT_V4_DEBUG_DUMMY,
+ spp_api_cnat_v4_debug_dummy_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_V4_DEBUG_DUMMY_MAX,
+ spp_api_cnat_v4_debug_dummy_max_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_V4_DEBUG_GLOBAL,
+ spp_api_cnat_v4_debug_global_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_V4_DEBUG_IN2OUT_PRIVATE_ADDR,
+ spp_api_cnat_v4_debug_in2out_private_addr_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_V4_DEBUG_OUT2IN_PUBLIC_ADDR,
+ spp_api_cnat_v4_debug_out2in_public_addr_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_CLEAR_DB_REQUEST,
+ spp_api_cnat_clear_db_request_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_GENERIC_COMMAND_REQUEST,
+ spp_api_cnat_generic_command_request_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_P2MP_DEBUG_REQUEST,
+ spp_api_cnat_p2mp_debug_request_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_NAT64_CLEAR_DB_REQUEST,
+ spp_api_nat64_clear_db_request_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_DS_LITE_CLEAR_DB_REQUEST,
+ spp_api_ds_lite_clear_db_request_t_handler);
+
+ return 0;
+}
+
+/*
+************************
+* spp_api_cnat_get_cgn_db_summary
+* This is for finding out, per CPU core, the number of users and the DB utilization
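+* Response layout (six 32-bit words, network byte order):
+*   main-db total/active/free followed by user-db total/active/free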
+************************
+*/
+
+void spp_api_cnat_get_cgn_db_summary
+(spp_api_cnat_generic_command_request_t *mp)
+{
+ u32 total_db_entries, total_free_entries, used_entries;
+ u32 i = 0;
+ cnat_generic_command_resp *mp_resp;
+ u32 *result_array;
+
+ /*
+ * Request structure is used to send the response
+ */
+ mp_resp = (cnat_generic_command_resp *) mp;
+ result_array = (u32 *) (&(mp_resp->raw_data[0]));
+
+ /*
+ * Find entries free and used in main-db
+ */
+ total_db_entries = vec_len(cnat_main_db);
+ total_free_entries = db_free_entry(cnat_main_db);
+ used_entries = total_db_entries - total_free_entries;
+
+ *(result_array + i++) = spp_host_to_net_byte_order_32(total_db_entries);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(used_entries);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(total_free_entries);
+
+ /*
+ * Find entries free and used in user-db
+ */
+ total_db_entries = vec_len(cnat_user_db);
+ total_free_entries = db_free_entry(cnat_user_db);
+ used_entries = total_db_entries - total_free_entries;
+
+ *(result_array + i++) = spp_host_to_net_byte_order_32(total_db_entries);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(used_entries);
+ *(result_array + i++) = spp_host_to_net_byte_order_32(total_free_entries);
+
+ mp_resp->num_bytes = spp_host_to_net_byte_order_32(i*sizeof(u32));
+}
+
+SPP_INIT_FUNCTION(cnat_debug_init);
+#endif /* TOBE_PORTED */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_global.c b/plugins/vcgn-plugin/vcgn/cnat_global.c
new file mode 100644
index 00000000000..7177083466e
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_global.c
@@ -0,0 +1,79 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_global.c - global variables
+ *
+ * Copyright (c) 2008-2009, 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+/* global variables */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "dslite_defs.h"
+#include "tcp_header_definitions.h"
+u32 cnat_current_time;
+u8 nfv9_configured = 0;
+/* ctx/sf alloc error counters */
+u32 null_enq_pkt;
+u32 null_deq_pkt;
+
+u32 null_enq_ctx;
+u32 null_deq_ctx;
+
+u32 null_enq_wqe;
+u32 null_deq_wqe;
+
+u32 ctx_alloc_errs;
+u32 sf_alloc_errs;
+
+u32 rcv_pkt_errs;
+
+/* TOBE_PORTED : Remove following once we bring DSLite */
+u32 dslite_config_debug_level = 1;
+u32 dslite_data_path_debug_level = 1;
+u32 dslite_defrag_debug_level = 1;
+u32 dslite_debug_level = 1;
+
+dslite_table_entry_t *dslite_table_db_ptr;
+
+/*
+ * ipv4_decr_ttl_n_calc_csum()
+ * - It decrements the TTL and calculates the incremental IPv4 checksum
+ */
+
+/* TOBE_PORTED: Following is in cnat_util.c */
+always_inline __attribute__((unused))
+void ipv4_decr_ttl_n_calc_csum(ipv4_header *ipv4)
+{
+    u32 checksum;
+    u16 old_word;
+    u16 new_word;
+
+    /* TTL shares its 16-bit checksum word with the protocol field */
+    old_word = ((u16)ipv4->ttl << 8) | ipv4->protocol;
+
+    /* Decrement TTL */
+    ipv4->ttl--;
+
+    new_word = ((u16)ipv4->ttl << 8) | ipv4->protocol;
+
+    /* Incremental checksum update: HC' = HC + old_word + ~new_word */
+    checksum = old_word + (~new_word & 0xFFFF);
+    checksum += clib_net_to_host_u16(ipv4->checksum);
+    checksum = (checksum & 0xFFFF) + (checksum >> 16);
+    ipv4->checksum = clib_host_to_net_u16(checksum + (checksum >> 16));
+}
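+/*
+ * For example, with TTL 64 and protocol 6 (TCP) the ttl/protocol word
+ * changes from 0x4006 to 0x3F06, so the stored checksum increases by
+ * 0x0100 (with end-around carry), leaving the header checksum valid.
+ */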
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_global.h b/plugins/vcgn-plugin/vcgn/cnat_global.h
new file mode 100644
index 00000000000..823a47974d4
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_global.h
@@ -0,0 +1,87 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_global.h - global definition and variables
+ * to be used by non cnat files
+ *
+ * Copyright (c) 2007-2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_GLOBAL_H__
+#define __CNAT_GLOBAL_H__
+
+/* global variables */
+
+extern u8 cnat_db_init_done;
+extern u32 cnat_current_time;
+extern u64 in2out_drops_port_limit_exceeded;
+extern u64 in2out_drops_system_limit_reached;
+extern u64 in2out_drops_resource_depletion;
+extern u64 no_translation_entry_drops;
+extern u8 nfv9_configured;
+extern u32 translation_create_count;
+extern u32 translation_create_rate;
+
+extern u32 translation_delete_count;
+extern u32 translation_delete_rate;
+
+extern u32 in2out_forwarding_count;
+extern u32 in2out_forwarding_rate;
+
+extern u32 out2in_forwarding_count;
+extern u32 out2in_forwarding_rate;
+
+extern u32 total_address_pool_allocated;
+
+extern u32 nat44_active_translations;
+
+#if 1 //DSLITE_DEF
+extern u32 dslite_translation_create_rate;
+extern u32 dslite_translation_delete_rate;
+extern u32 dslite_translation_create_count;
+extern u32 dslite_in2out_forwarding_count;
+extern u32 dslite_in2out_forwarding_count;
+extern u32 dslite_out2in_forwarding_rate;
+#endif
+/* sf/ctx allocation error collection declarations */
+#define COLLECT_FREQ_FACTOR 100
+#define NUM_SECONDS_TO_WAIT 10
+#define COUNTER_BUFFER_SIZE 25
+
+extern u32 null_enq_pkt;
+extern u32 null_deq_pkt;
+
+extern u32 null_enq_ctx;
+extern u32 null_deq_ctx;
+
+extern u32 null_enq_wqe;
+extern u32 null_deq_wqe;
+
+extern u32 ctx_alloc_errs;
+extern u32 sf_alloc_errs;
+
+extern u32 rcv_pkt_errs;
+
+struct counter_array_t {
+ u32 sf_error_counter;
+ u32 ctx_error_counter;
+ u32 timestamp;
+} counter_array_t;
+
+#define COUNTER_BUFFER_SIZE 25
+struct counter_array_t err_cnt_arr[COUNTER_BUFFER_SIZE];
+
+//#define DISABLE_ICMP_THROTTLE_FOR_DEBUG_PURPOSE
+
+#endif /*__CNAT_GLOBAL_H__*/
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp.h b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp.h
new file mode 100644
index 00000000000..664b62ac3dc
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp.h
@@ -0,0 +1,60 @@
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * Filename: cnat_ipv4_icmp.h
+ *
+ * Description: common functions for icmp node
+ *
+ * Assumptions and Constraints:
+ *
+ * Copyright (c) 2000-2009, 2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *-----------------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_IPV4_ICMP_H__
+#define __CNAT_IPV4_ICMP_H__
+
+#include "tcp_header_definitions.h"
+#include "cnat_db.h"
+#include "cnat_v4_functions.h"
+#include "cnat_global.h"
+#include "cnat_config.h"
+
+typedef struct {
+ icmp_v4_t *icmp;
+ ipv4_header *em_ip;
+ u16 *em_port;
+ u16 *em_l4_checksum;
+} icmp_em_ip_info;
+
+extern void swap_ip_src_icmp_id(ipv4_header *ip,
+ icmp_v4_t *icmp,
+ cnat_main_db_entry_t *db,
+ u16 vrf);
+
+extern void swap_ip_dst_icmp_id(ipv4_header *ip,
+ icmp_v4_t *icmp,
+ cnat_main_db_entry_t *db,
+ u16 vrf);
+
+extern void swap_ip_src_emip_dst(ipv4_header *ip,
+ icmp_em_ip_info *icmp_info,
+ cnat_main_db_entry_t *db, u16 vrf);
+
+extern void swap_ip_dst_emip_src(ipv4_header *ip,
+ icmp_em_ip_info *icmp_info,
+ cnat_main_db_entry_t *db, u16 vrf);
+
+
+#endif /* __CNAT_IPV4_ICMP_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_inside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_inside_input.c
new file mode 100644
index 00000000000..218d7e538fa
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_inside_input.c
@@ -0,0 +1,476 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_icmp_error_inside_input.c - cnat_ipv4_icmp_error_inside_input node pipeline stage functions
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_ipv4_icmp.h"
+
+#define foreach_cnat_ipv4_icmp_e_inside_input_error \
+_(CNAT_V4_ICMP_E_I2O_T_PKT, "cnat v4 icmp_e i2o packet transmit") \
+_(CNAT_V4_ICMP_E_I2O_D_PKT, "cnat v4 icmp_e i2o packet drop") \
+_(CNAT_V4_ICMP_E_I2O_TTL_DROP, "cnat v4 icmp_e i2o ttl drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_icmp_e_inside_input_error
+#undef _
+ CNAT_IPV4_ICMP_E_INSIDE_INPUT_N_ERROR,
+} cnat_ipv4_icmp_e_inside_input_t;
+
+static char * cnat_ipv4_icmp_e_inside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_icmp_e_inside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_icmp_e_inside_input_main_t;
+
+typedef enum {
+ CNAT_V4_ICMP_E_I2O_T,
+ CNAT_V4_ICMP_E_I2O_D,
+ CNAT_V4_ICMP_E_I2O_NEXT,
+} cnat_ipv4_icmp_e_inside_input_next_t;
+
+cnat_ipv4_icmp_e_inside_input_main_t cnat_ipv4_icmp_e_inside_input_main;
+vlib_node_registration_t cnat_ipv4_icmp_e_inside_input_node;
+
+#define NSTAGES 5
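+/*
+ * Five-stage pipeline (see the stage functions below): stage0 prefetches
+ * buffer metadata and packet data, stage1 builds the in2out lookup key
+ * from the embedded packet and prefetches the hash bucket, stage2
+ * prefetches the main-db entry, stage3 walks the collision chain, and
+ * last_stage rewrites the packet and updates the node counters.
+ */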
+
+inline void swap_ip_src_emip_dst(ipv4_header *ip,
+ icmp_em_ip_info *icmp_info,
+ cnat_main_db_entry_t *db, u16 vrf)
+{
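+    /*
+     * in2out ICMP error: the embedded (original) packet carries the
+     * inside address/port as its destination and the outer header
+     * carries it as the source.  Both are rewritten to the out2in
+     * mapping below, with the IP and ICMP checksums updated
+     * incrementally.
+     */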
+ icmp_v4_t *icmp;
+ ipv4_header *em_ip;
+ u16 *em_port;
+ u32 old_ip;
+ u16 old_port;
+ u16 old_ip_checksum;
+
+ /*
+     * declare variables
+ */
+ CNAT_UPDATE_L3_CHECKSUM_DECLARE
+ CNAT_UPDATE_ICMP_ERR_CHECKSUM_DECLARE
+
+ /*
+ * fix inner layer ip & l4 checksum
+ */
+ em_ip = icmp_info->em_ip;
+ em_port = icmp_info->em_port;
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)),
+ (clib_net_to_host_u16(em_ip->checksum)),
+ ((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)))
+
+ old_ip = clib_net_to_host_u32(em_ip->dest_addr);
+ old_port = clib_net_to_host_u16(*em_port);
+ old_ip_checksum = clib_net_to_host_u16(em_ip->checksum);
+
+ em_ip->dest_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+ em_ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+ *em_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+
+ /*
+     * fix outer layer ip & icmp checksum
+ */
+ icmp = icmp_info->icmp;
+ CNAT_UPDATE_ICMP_ERR_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (old_port),
+ (old_ip_checksum),
+ (clib_net_to_host_u16(icmp->checksum)),
+ ((u16)(db->out2in_key.k.ipv4 & 0xffff)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)),
+ ((u16)(db->out2in_key.k.port)),
+ ((u16)(new_l3_c)))
+
+ icmp->checksum =
+ clib_host_to_net_u16(new_icmp_c);
+
+ old_ip = clib_net_to_host_u32(ip->src_addr);
+
+ ip->src_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (clib_net_to_host_u16(ip->checksum)),
+ ((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)))
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+
+#if 0
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ /*
+ * fix inner layer ip & l4 checksum
+ */
+ em_snat_ip = icmp_info->em_ip;
+ em_snat_port = icmp_info->em_port;
+
+ old_ip = spp_net_to_host_byte_order_32(&(em_snat_ip->src_addr));
+ old_port = spp_net_to_host_byte_order_16(em_snat_port);
+ old_ip_checksum = spp_net_to_host_byte_order_16(&(em_snat_ip->checksum));
+ direction = 0;
+ if(cnat_static_dest_db_get_translation(em_snat_ip->src_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(em_snat_ip->checksum))),
+ ((u16)(old_postmap_ip)),
+ ((u16)(old_postmap_ip >> 16)))
+ em_snat_ip->src_addr = postmap_ip;
+ em_snat_ip->checksum =
+ spp_host_to_net_byte_order_16(new_l3_c);
+
+ /*
+             * fix outer layer ip & icmp checksum
+ */
+ icmp = icmp_info->icmp;
+ CNAT_UPDATE_ICMP_ERR_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (old_port),
+ (old_ip_checksum),
+ (spp_net_to_host_byte_order_16(&(icmp->checksum))),
+ ((u16)(old_postmap_ip & 0xffff)),
+ ((u16)(old_postmap_ip >> 16)),
+ ((u16)(old_port)),
+ ((u16)(new_l3_c)))
+
+ icmp->checksum =
+ spp_host_to_net_byte_order_16(new_icmp_c);
+
+ }
+ }
+
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ direction = 0;
+ if(cnat_static_dest_db_get_translation(ip->dest_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+
+ old_ip = spp_net_to_host_byte_order_32(&(ip->dest_addr));
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(ip->checksum))),
+ ((u16)(old_postmap_ip & 0xFFFF)),
+ ((u16)(old_postmap_ip >> 16)))
+ ip->dest_addr = postmap_ip;
+
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+ }
+ }
+#endif /* if 0 */
+
+}
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+ ipv4_header *em_ip = (ipv4_header*)((u8*)icmp + 8); /* embedded pkt's v4 hdr */
+ u8 em_ip_hdr_len = (em_ip->version_hdr_len_words & 0xf) << 2;
+
+ u64 tmp = 0;
+ u32 protocol = CNAT_ICMP;
+
+ /* Check L4 header for embedded packet */
+ if (em_ip->protocol == TCP_PROT) {
+ tcp_hdr_type *tcp = (tcp_hdr_type*)((u8 *)em_ip + em_ip_hdr_len);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16(tcp->dest_port);
+ protocol = CNAT_TCP;
+
+ } else if (em_ip->protocol == UDP_PROT) {
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)((u8 *)em_ip + em_ip_hdr_len);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16(udp->dest_port);
+ protocol = CNAT_UDP;
+
+ } else {
+ icmp_v4_t *icmp = (icmp_v4_t*)((u8 *)em_ip + em_ip_hdr_len);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16(icmp->identifier);
+
+ if (PREDICT_FALSE((icmp->type != ICMPV4_ECHOREPLY) &&
+ (icmp->type != ICMPV4_ECHO))) {
+ /*
+ * Try to set invalid protocol for these cases, so that
+ * hash lookup does not return valid main_db. This approach
+ * may optimize the regular cases with valid protocols
+ * as it avoids one more check for regular cases in stage3
+ */
+ protocol = CNAT_INVALID_PROTO;
+ }
+ }
+
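+    /*
+     * Pack the 64-bit in2out lookup key: bits 0..31 carry the embedded
+     * packet's destination IPv4 address, bits 32..47 the L4 port (or
+     * ICMP id), and bits 48..63 the VRF field, into which
+     * PLATFORM_CNAT_SET_RX_VRF also encodes the protocol.
+     */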
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(em_ip->dest_addr);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ protocol)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_in2out_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_in2out_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, LOAD);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ }
+}
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->in2out_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->in2out_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ int disposition = CNAT_V4_ICMP_E_I2O_T;
+ int counter = CNAT_V4_ICMP_E_I2O_T_PKT;
+
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+ ipv4_header *em_ip = (ipv4_header*)((u8*)icmp + 8); /* embedded pkt's v4 hdr */
+ u8 em_ip_hdr_len = (em_ip->version_hdr_len_words & 0xf) << 2;
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_icmp_e_inside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_main_db_entry_t *db = NULL;
+ icmp_em_ip_info icmp_info;
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ if (PREDICT_FALSE(ip->ttl <= 1)) {
+ /*
+             * As this is an ICMP error packet with TTL <= 1,
+             * let's drop the packet (no need to generate
+ * another ICMP error).
+ */
+
+ disposition = CNAT_V4_ICMP_E_I2O_D;
+ counter = CNAT_V4_ICMP_E_I2O_TTL_DROP;
+
+ goto drop_pkt;
+ }
+ }
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ icmp_info.em_ip = em_ip;
+ icmp_info.icmp = icmp;
+ //icmp_info.em_port = vnet_buffer(b0)->vcgn_uii.key.k.port;
+
+        /* Note: This could have been done in stage1 itself, but that
+         * would require adding a u16 * to vnet_buffer_opaque_t.  Since
+         * this flow is expected to be very rare in actual deployment
+         * scenarios, we can afford to do these steps here instead.
+         * Let's confirm during code review. */
+ if (em_ip->protocol == TCP_PROT) {
+ tcp_hdr_type *tcp = (tcp_hdr_type*)((u8 *)em_ip + em_ip_hdr_len);
+ icmp_info.em_port = &(tcp->dest_port);
+ } else if (em_ip->protocol == UDP_PROT) {
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)
+ ((u8 *)em_ip + em_ip_hdr_len);
+ icmp_info.em_port = &(udp->dest_port);
+ } else {
+ icmp_v4_t *icmp_inner = (icmp_v4_t*)((u8 *)em_ip + em_ip_hdr_len);
+ icmp_info.em_port = &(icmp_inner->identifier);
+ }
+
+ db = cnat_main_db + db_index;
+ /*
+ * 1. update dst addr:dst port of embedded ip pkt
+ * update src addr of icmp pkt
+ * 2. fix udp/tcp/ip checksum of embedded pkt
+         *    fix icmp and ip checksums of icmp pkt
+         *    no need to update the timer
+ */
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT BEFORE\n");
+ print_icmp_pkt(ip);
+ }
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ swap_ip_src_emip_dst(ip, &icmp_info,
+ db, db->in2out_key.k.vrf);
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT AFTER\n");
+ print_icmp_pkt(ip);
+ }
+ in2out_forwarding_count++;
+
+ } else {
+ disposition = CNAT_V4_ICMP_E_I2O_D;
+ counter = CNAT_V4_ICMP_E_I2O_D_PKT;
+ }
+
+drop_pkt:
+
+ em->counters[node_counter_base_index + counter] += 1;
+ return disposition;
+}
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_icmp_e_inside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_icmp_e_inside_input_node) = {
+ .function = cnat_ipv4_icmp_e_inside_input_node_fn,
+ .name = "vcgn-v4-icmp-e-i2o",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_icmp_e_inside_input_error_strings),
+ .error_strings = cnat_ipv4_icmp_e_inside_input_error_strings,
+
+ .n_next_nodes = CNAT_V4_ICMP_E_I2O_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_ICMP_E_I2O_T] = "ip4-input",
+ [CNAT_V4_ICMP_E_I2O_D] = "error-drop",
+ },
+};
+
+clib_error_t *cnat_ipv4_icmp_e_inside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_icmp_e_inside_input_main_t * mp = &cnat_ipv4_icmp_e_inside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_icmp_e_inside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_outside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_outside_input.c
new file mode 100644
index 00000000000..f25f4d022c7
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_error_outside_input.c
@@ -0,0 +1,452 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_icmp_error_outside_input.c - cnat_ipv4_icmp_error_outside_input node pipeline stage functions
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_ipv4_icmp.h"
+
+#define foreach_cnat_ipv4_icmp_e_outside_input_error \
+_(CNAT_V4_ICMP_E_O2I_T_PKT, "cnat v4 icmp_e o2i packet transmit") \
+_(CNAT_V4_ICMP_E_O2I_D_PKT, "cnat v4 icmp_e o2i packet drop") \
+_(CNAT_V4_ICMP_E_O2I_TTL_DROP, "cnat v4 icmp_e o2i ttl drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_icmp_e_outside_input_error
+#undef _
+ CNAT_IPV4_ICMP_E_OUTSIDE_INPUT_N_ERROR,
+} cnat_ipv4_icmp_e_outside_input_t;
+
+static char * cnat_ipv4_icmp_e_outside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_icmp_e_outside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_icmp_e_outside_input_main_t;
+
+typedef enum {
+ CNAT_V4_ICMP_E_O2I_T,
+ CNAT_V4_ICMP_E_O2I_D,
+ CNAT_V4_ICMP_E_O2I_NEXT,
+} cnat_ipv4_icmp_e_outside_input_next_t;
+
+cnat_ipv4_icmp_e_outside_input_main_t cnat_ipv4_icmp_e_outside_input_main;
+vlib_node_registration_t cnat_ipv4_icmp_e_outside_input_node;
+
+#define NSTAGES 5
+
+inline void swap_ip_dst_emip_src(ipv4_header *ip,
+ icmp_em_ip_info *icmp_info,
+ cnat_main_db_entry_t *db, u16 vrf)
+{
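+    /*
+     * out2in ICMP error: the reverse of the in2out case - the embedded
+     * packet's source and the outer destination are rewritten back to
+     * the inside (in2out) address/port, with incremental checksum
+     * updates.
+     */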
+ icmp_v4_t *icmp;
+ ipv4_header *em_ip;
+ u16 *em_port;
+ u32 old_ip;
+ u16 old_port;
+ u16 old_ip_checksum;
+
+ /*
+     * declare variables
+ */
+ CNAT_UPDATE_L3_CHECKSUM_DECLARE
+ CNAT_UPDATE_ICMP_ERR_CHECKSUM_DECLARE
+
+ /*
+ * fix inner layer ip & l4 checksum
+ */
+ em_ip = icmp_info->em_ip;
+ em_port = icmp_info->em_port;
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)),
+ (clib_net_to_host_u16(em_ip->checksum)),
+ ((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)))
+
+ old_ip = clib_net_to_host_u32(em_ip->src_addr);
+ old_port = clib_net_to_host_u16(*em_port);
+ old_ip_checksum = clib_net_to_host_u16(em_ip->checksum);
+
+ em_ip->src_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ em_ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+ *em_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+
+ /*
+     * fix outer layer ip & icmp checksum
+ */
+ icmp = icmp_info->icmp;
+ CNAT_UPDATE_ICMP_ERR_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (old_port),
+ (old_ip_checksum),
+ (clib_net_to_host_u16(icmp->checksum)),
+ ((u16)(db->in2out_key.k.ipv4 & 0xffff)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)),
+ ((u16)(db->in2out_key.k.port)),
+ ((u16)(new_l3_c)))
+
+ icmp->checksum =
+ clib_host_to_net_u16(new_icmp_c);
+
+ old_ip = clib_net_to_host_u32(ip->dest_addr);
+
+ ip->dest_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (clib_net_to_host_u16(ip->checksum)),
+ ((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)))
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+
+#if 0
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ /*
+ * fix inner layer ip & l4 checksum
+ */
+ em_snat_ip = icmp_info->em_ip;
+ em_snat_port = icmp_info->em_port;
+
+ old_ip = spp_net_to_host_byte_order_32(&(em_snat_ip->dest_addr));
+ old_port = spp_net_to_host_byte_order_16(em_snat_port);
+ old_ip_checksum = spp_net_to_host_byte_order_16(&(em_snat_ip->checksum));
+ direction = 1;
+ if(cnat_static_dest_db_get_translation(em_snat_ip->dest_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(em_snat_ip->checksum))),
+ ((u16)(old_postmap_ip)),
+ ((u16)(old_postmap_ip >> 16)))
+ em_snat_ip->dest_addr = postmap_ip;
+ em_snat_ip->checksum =
+ spp_host_to_net_byte_order_16(new_l3_c);
+
+ /*
+             * fix outer layer ip & icmp checksum
+ */
+ icmp = icmp_info->icmp;
+ CNAT_UPDATE_ICMP_ERR_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (old_port),
+ (old_ip_checksum),
+ (spp_net_to_host_byte_order_16(&(icmp->checksum))),
+ ((u16)(old_postmap_ip & 0xffff)),
+ ((u16)(old_postmap_ip >> 16)),
+ ((u16)(old_port)),
+ ((u16)(new_l3_c)))
+
+ icmp->checksum =
+ spp_host_to_net_byte_order_16(new_icmp_c);
+
+ }
+ }
+
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ direction = 1;
+ if(cnat_static_dest_db_get_translation(ip->src_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+ CNAT_UPDATE_L3_CHECKSUM_DECLARE
+
+ old_ip = spp_net_to_host_byte_order_32(&(ip->src_addr));
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(ip->checksum))),
+ ((u16)(old_postmap_ip & 0xFFFF)),
+ ((u16)(old_postmap_ip >> 16)))
+ ip->checksum =
+ spp_host_to_net_byte_order_16(new_l3_c);
+ ip->src_addr = postmap_ip;
+ }
+ }
+#endif /* if 0 */
+}
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+ ipv4_header *em_ip = (ipv4_header*)((u8*)icmp + 8); /* embedded pkt's v4 hdr */
+ u8 em_ip_hdr_len = (em_ip->version_hdr_len_words & 0xf) << 2;
+
+ u64 tmp = 0;
+ u32 protocol = CNAT_ICMP;
+
+ /* Check L4 header for embedded packet */
+ if (em_ip->protocol == TCP_PROT) {
+ tcp_hdr_type *tcp = (tcp_hdr_type*)((u8 *)em_ip + em_ip_hdr_len);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16(tcp->src_port);
+ protocol = CNAT_TCP;
+
+ } else if (em_ip->protocol == UDP_PROT) {
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)((u8 *)em_ip + em_ip_hdr_len);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16(udp->src_port);
+ protocol = CNAT_UDP;
+
+ } else {
+ icmp_v4_t *icmp = (icmp_v4_t*)((u8 *)em_ip + em_ip_hdr_len);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16(icmp->identifier);
+
+ if (PREDICT_FALSE((icmp->type != ICMPV4_ECHOREPLY) &&
+ (icmp->type != ICMPV4_ECHO))) {
+ /*
+ * Try to set invalid protocol for these cases, so that
+ * hash lookup does not return valid main_db. This approach
+ * may optimize the regular cases with valid protocols
+ * as it avoids one more check for regular cases in stage3
+ */
+ protocol = CNAT_INVALID_PROTO;
+ }
+ }
+
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(em_ip->src_addr);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ protocol)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_out2in_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_out2in_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, LOAD);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ }
+}
+
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->out2in_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->out2in_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ int disposition = CNAT_V4_ICMP_E_O2I_T;
+ int counter = CNAT_V4_ICMP_E_O2I_T_PKT;
+
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+ ipv4_header *em_ip = (ipv4_header*)((u8*)icmp + 8); /* embedded pkt's v4 hdr */
+ u8 em_ip_hdr_len = (em_ip->version_hdr_len_words & 0xf) << 2;
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_icmp_e_outside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_main_db_entry_t *db = NULL;
+ icmp_em_ip_info icmp_info;
+
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ icmp_info.em_ip = em_ip;
+ icmp_info.icmp = icmp;
+
+        /* Note: This could have been done in stage1 itself, but that
+         * would require adding a u16 * to vnet_buffer_opaque_t.  Since
+         * this flow is expected to be very rare in actual deployment
+         * scenarios, we can afford to do these steps here instead.
+         * Let's confirm during code review. */
+
+ if (em_ip->protocol == TCP_PROT) {
+ tcp_hdr_type *tcp = (tcp_hdr_type*)((u8 *)em_ip + em_ip_hdr_len);
+ icmp_info.em_port = &(tcp->src_port);
+ } else if (em_ip->protocol == UDP_PROT) {
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)
+ ((u8 *)em_ip + em_ip_hdr_len);
+ icmp_info.em_port = &(udp->src_port);
+ } else {
+ icmp_v4_t *icmp_inner = (icmp_v4_t*)((u8 *)em_ip + em_ip_hdr_len);
+ icmp_info.em_port = &(icmp_inner->identifier);
+ }
+
+ db = cnat_main_db + db_index;
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT BEFORE\n");
+ print_icmp_pkt(ip);
+ }
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ swap_ip_dst_emip_src(ip, &icmp_info,
+ db, db->in2out_key.k.vrf);
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT AFTER\n");
+ print_icmp_pkt(ip);
+ }
+
+ } else {
+ disposition = CNAT_V4_ICMP_E_O2I_D;
+ counter = CNAT_V4_ICMP_E_O2I_D_PKT;
+ }
+
+ em->counters[node_counter_base_index + counter] += 1;
+ return disposition;
+}
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_icmp_e_outside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_icmp_e_outside_input_node) = {
+ .function = cnat_ipv4_icmp_e_outside_input_node_fn,
+ .name = "vcgn-v4-icmp-e-o2i",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_icmp_e_outside_input_error_strings),
+ .error_strings = cnat_ipv4_icmp_e_outside_input_error_strings,
+
+ .n_next_nodes = CNAT_V4_ICMP_E_O2I_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_ICMP_E_O2I_T] = "ip4-input",
+ [CNAT_V4_ICMP_E_O2I_D] = "error-drop",
+ },
+};
+
+clib_error_t *cnat_ipv4_icmp_e_outside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_icmp_e_outside_input_main_t * mp = &cnat_ipv4_icmp_e_outside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_icmp_e_outside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input.c
new file mode 100644
index 00000000000..1b9f0266d71
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input.c
@@ -0,0 +1,404 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_icmp_query_inside_input.c - cnat_ipv4_icmp_query_inside_input node pipeline stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_ipv4_icmp.h"
+
+#define foreach_cnat_ipv4_icmp_q_inside_input_error \
+_(CNAT_V4_ICMP_Q_I2O_T_PKT, "cnat v4 icmp_q i2o packet transmit") \
+_(CNAT_V4_ICMP_Q_I2O_MISS_PKT, "cnat v4 icmp_q i2o db miss") \
+_(CNAT_V4_ICMP_Q_I2O_TTL_GEN, "cnat v4 icmp_q i2o ttl generate") \
+_(CNAT_V4_ICMP_Q_I2O_TTL_DROP, "cnat v4 icmp_q i2o ttl drop") \
+_(CNAT_V4_ICMP_Q_I2O_NO_SESSION_DROP, "cnat v4 icmp_q i2o no session drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_icmp_q_inside_input_error
+#undef _
+ CNAT_IPV4_ICMP_Q_INSIDE_INPUT_N_ERROR,
+} cnat_ipv4_icmp_q_inside_input_t;
+
+static char * cnat_ipv4_icmp_q_inside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_icmp_q_inside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_icmp_q_inside_input_main_t;
+
+typedef enum {
+ CNAT_V4_ICMP_Q_I2O_T,
+ CNAT_V4_ICMP_Q_I2O_E,
+ CNAT_V4_ICMP_Q_I2O_D,
+ CNAT_V4_ICMP_Q_I2O_NEXT,
+} cnat_ipv4_icmp_q_inside_input_next_t;
+
+cnat_ipv4_icmp_q_inside_input_main_t cnat_ipv4_icmp_q_inside_input_main;
+vlib_node_registration_t cnat_ipv4_icmp_q_inside_input_node;
+
+#define NSTAGES 5
+
+inline void swap_ip_src_icmp_id(ipv4_header *ip,
+ icmp_v4_t *icmp,
+ cnat_main_db_entry_t *db, u16 vrf)
+{
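+    /*
+     * ICMP query in2out translation: rewrite the source address and the
+     * ICMP identifier to the out2in mapping; the combined macro below
+     * updates the IPv4 and ICMP checksums in a single incremental pass.
+     */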
+#if 0
+ u32 postmap_ip;
+ u8 direction;
+ u32 old_ip;
+ u32 old_postmap_ip;
+
+
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ direction = 0;
+ if(cnat_static_dest_db_get_translation(ip->dest_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+ CNAT_UPDATE_L3_CHECKSUM_DECLARE
+
+ old_ip = spp_net_to_host_byte_order_32(&(ip->dest_addr));
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(ip->checksum))),
+ ((u16)(old_postmap_ip & 0xFFFF)),
+ ((u16)(old_postmap_ip >> 16)))
+ ip->dest_addr = postmap_ip;
+
+ ip->checksum =
+ spp_host_to_net_byte_order_16(new_l3_c);
+ }
+ }
+#endif /* if 0 */
+ /*
+ * declare variable
+ */
+ CNAT_UPDATE_L3_L4_CHECKSUM_DECLARE
+ /*
+ * calculate checksum
+ */
+ CNAT_UPDATE_L3_ICMP_CHECKSUM(((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)),
+ (db->in2out_key.k.port),
+ (clib_net_to_host_u16(ip->checksum)),
+ (clib_net_to_host_u16(icmp->checksum)),
+ ((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)),
+ (db->out2in_key.k.port))
+ //set ip header
+ ip->src_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+
+ //set icmp header
+ icmp->identifier =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+ icmp->checksum =
+ clib_host_to_net_u16(new_l4_c);
+}
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+
+ u64 tmp = 0;
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(ip->src_addr);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16 (icmp->identifier);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ CNAT_ICMP)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_in2out_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_in2out_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, LOAD);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ }
+}
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->in2out_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->in2out_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ spp_ctx_t *ctx = (spp_ctx_t *) &vnet_buffer(b0)->vcgn_uii;
+ int disposition = CNAT_V4_ICMP_Q_I2O_T;
+ int counter = CNAT_V4_ICMP_Q_I2O_T_PKT;
+
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_icmp_q_inside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_session_entry_t *session_db = NULL;
+ cnat_main_db_entry_t *db = NULL;
+ cnat_key_t dest_info;
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ if (PREDICT_FALSE(ip->ttl <= 1)) {
+ /* Try to generate ICMP error msg, as TTL is <= 1 */
+
+ if (icmpv4_generate_with_throttling
+ (ctx, ip, ctx->ru.rx.uidb_index)) {
+
+ /* Generated ICMP */
+ disposition = CNAT_V4_ICMP_Q_I2O_T;
+ counter = CNAT_V4_ICMP_Q_I2O_TTL_GEN;
+ } else {
+                /* Could not generate ICMP - drop the packet */
+ disposition = CNAT_V4_ICMP_Q_I2O_D;
+ counter = CNAT_V4_ICMP_Q_I2O_TTL_DROP;
+ }
+ goto drop_pkt;
+ }
+ }
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ db = cnat_main_db + db_index;
+ dest_info.k.port = 0;
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->dest_addr);
+
+ if(PREDICT_TRUE(!PLATFORM_DBL_SUPPORT)) {
+
+ /* No DBL support, so just update the destn and proceed */
+ db->dst_ipv4 = dest_info.k.ipv4;
+ db->dst_port = dest_info.k.port;
+ goto update_pkt;
+ }
+
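+        /*
+         * Destination-based session handling: if the destination differs
+         * from the one recorded in the main-db entry, either promote the
+         * single existing destination to session entries, log the first
+         * destination for a static entry with no sessions, or look up /
+         * create a session-db entry for the new destination.
+         */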
+ if(PREDICT_FALSE(db->dst_ipv4 != dest_info.k.ipv4)) {
+ if(PREDICT_TRUE(db->nsessions == 1)) {
+                /* Handle the one-to-two destination scenario */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ session_db = cnat_handle_1to2_session(db, &dest_info);
+
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_ICMP_Q_I2O_D;
+ counter = CNAT_V4_ICMP_Q_I2O_NO_SESSION_DROP;
+ goto drop_pkt;
+ }
+ } else if (PREDICT_FALSE(db->nsessions == 0)) {
+ /* Should be a static entry
+                 * Record this destination as the first session and log it
+ */
+ cnat_add_dest_n_log(db, &dest_info);
+ } else { /* Many translations exist already */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ /* If the session already exists,
+ * cnat_create_session_db_entry returns the existing db;
+ * otherwise it creates a new one.
+ * If it cannot create one, it returns NULL.
+ */
+ session_db = cnat_create_session_db_entry(&dest_info,
+ db, TRUE);
+
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_ICMP_Q_I2O_D;
+ counter = CNAT_V4_ICMP_Q_I2O_NO_SESSION_DROP;
+ goto drop_pkt;
+ }
+ }
+ }
+
+update_pkt:
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT BEFORE\n");
+ print_icmp_pkt(ip);
+ }
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ /*
+ * 1. update src ipv4 addr and src icmp identifier
+ * 2. update ipv4 checksum and icmp checksum
+ */
+ swap_ip_src_icmp_id(ip, icmp, db, db->in2out_key.k.vrf);
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT AFTER\n");
+ print_icmp_pkt(ip);
+ }
+
+ /*
+ * update db counter, timer
+ */
+
+ if(PREDICT_FALSE(session_db != 0)) {
+ CNAT_DB_TIMEOUT_RST(session_db);
+ } else {
+ CNAT_DB_TIMEOUT_RST(db);
+ }
+ db->in2out_pkts++;
+ in2out_forwarding_count++;
+
+ } else {
+ disposition = CNAT_V4_ICMP_Q_I2O_E;
+ counter = CNAT_V4_ICMP_Q_I2O_MISS_PKT;
+ }
+
+drop_pkt:
+
+ em->counters[node_counter_base_index + counter] += 1;
+ return disposition;
+}
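+
+/*
+ * Illustrative sketch only: one common way to implement the "decrement TTL
+ * and update IPv4 checksum" step referenced above is an RFC 1141-style
+ * incremental update rather than a full header re-checksum.  This is a
+ * hypothetical helper, not the project's ipv4_decr_ttl_n_calc_csum().
+ */
+static inline void sketch_decr_ttl_update_csum (ipv4_header *ip)
+{
+    u32 sum;
+
+    ip->ttl -= 1;
+    /* TTL is the high byte of its 16-bit header word, so that word drops by
+     * 0x0100; add the same amount back into the one's-complement checksum. */
+    sum = (u32) clib_net_to_host_u16 (ip->checksum) + 0x0100;
+    sum = (sum & 0xffff) + (sum >> 16);          /* fold the carry */
+    ip->checksum = clib_host_to_net_u16 ((u16) sum);
+}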
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_icmp_q_inside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_icmp_q_inside_input_node) = {
+ .function = cnat_ipv4_icmp_q_inside_input_node_fn,
+ .name = "vcgn-v4-icmp-q-i2o",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_icmp_q_inside_input_error_strings),
+ .error_strings = cnat_ipv4_icmp_q_inside_input_error_strings,
+
+ .n_next_nodes = CNAT_V4_ICMP_Q_I2O_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_ICMP_Q_I2O_E] = "vcgn-v4-icmp-q-i2o-e",
+ [CNAT_V4_ICMP_Q_I2O_T] = "ip4-input",
+ [CNAT_V4_ICMP_Q_I2O_D] = "error-drop",
+ },
+};
+
+clib_error_t *cnat_ipv4_icmp_q_inside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_icmp_q_inside_input_main_t * mp = &cnat_ipv4_icmp_q_inside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_icmp_q_inside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input_exception.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input_exception.c
new file mode 100644
index 00000000000..9b5e280e571
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_inside_input_exception.c
@@ -0,0 +1,235 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_icmp_query_inside_input_exception.c - cnat_ipv4_icmp_query_inside_input_exception node pipeline stage functions
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_ipv4_icmp.h"
+
+#define foreach_cnat_ipv4_icmp_q_inside_input_exc_error \
+_(CNAT_V4_ICMP_Q_I2O_E_T_PKT, "v4 icmp query i2o-e transmit") \
+_(CNAT_V4_ICMP_Q_I2O_E_G_PKT, "v4 icmp query i2o-e gen icmp msg") \
+_(CNAT_V4_ICMP_Q_I2O_E_D_PKT, "v4 icmp query i2o-e pkt drop") \
+_(CNAT_V4_ICMP_Q_I2O_E_DC_PKT, "v4 icmp query i2o-e drop (no config)") \
+_(CNAT_V4_ICMP_Q_I2O_E_DR_PKT, "v4 icmp query i2o-e drop (not in run state)") \
+_(CNAT_V4_ICMP_Q_I2O_E_DD_PKT, "v4 icmp query i2o-e drop (no direct port)") \
+_(CNAT_V4_ICMP_Q_I2O_E_DA_PKT, "v4 icmp query i2o-e drop (no any port)") \
+_(CNAT_V4_ICMP_Q_I2O_E_DO_PKT, "v4 icmp query i2o-e drop (out of port limit)") \
+_(CNAT_V4_ICMP_Q_I2O_E_DS_PKT, "v4 icmp query i2o-e drop (out of session db)")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_icmp_q_inside_input_exc_error
+#undef _
+ CNAT_IPV4_ICMP_Q_INSIDE_INPUT_EXCEPTIONS_N_ERROR,
+} cnat_ipv4_icmp_q_inside_input_exc_error_t;
+
+
+static char * cnat_ipv4_icmp_q_inside_input_exc_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_icmp_q_inside_input_exc_error
+#undef _
+};
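+
+/*
+ * Illustrative aside: the foreach_.../#define _() construct above is the
+ * usual X-macro idiom -- a single list expands once into the error enum and
+ * once into the matching string table, so the two cannot drift apart.  A
+ * minimal stand-alone example with hypothetical names:
+ */
+#define foreach_demo_error _(DEMO_OK, "ok") _(DEMO_DROP, "dropped")
+
+typedef enum {
+#define _(sym,str) sym,
+    foreach_demo_error
+#undef _
+    DEMO_N_ERROR,
+} demo_error_t;
+
+static char *demo_error_strings[] __attribute__((unused)) = {
+#define _(sym,str) str,
+    foreach_demo_error
+#undef _
+};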
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_icmp_q_inside_input_exc_main_t;
+
+typedef enum {
+ CNAT_V4_ICMP_Q_E_I2O_T,
+ //CNAT_V4_ICMP_Q_E_I2O_GEN,
+ CNAT_V4_ICMP_Q_E_I2O_D,
+ CNAT_V4_ICMP_Q_E_I2O_NEXT,
+} cnat_ipv4_icmp_q_inside_input_exc_next_t;
+
+#define CNAT_V4_ICMP_Q_E_I2O_GEN CNAT_V4_ICMP_Q_E_I2O_T
+
+cnat_ipv4_icmp_q_inside_input_exc_main_t cnat_ipv4_icmp_q_inside_input_exc_main;
+vlib_node_registration_t cnat_ipv4_icmp_q_inside_input_exc_node;
+
+#define NSTAGES 2
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ int disposition = CNAT_V4_ICMP_Q_E_I2O_T;
+ int counter = CNAT_V4_ICMP_Q_I2O_E_T_PKT;
+
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_icmp_q_inside_input_exc_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+
+ cnat_key_t dest_info;
+ cnat_gen_icmp_info info;
+ cnat_db_key_bucket_t ki;
+ cnat_main_db_entry_t *db = NULL;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ ki.k.k.vrf, CNAT_ICMP)
+
+ ki.k.k.ipv4 =
+ clib_net_to_host_u32(ip->src_addr);
+ ki.k.k.port =
+ clib_net_to_host_u16(icmp->identifier);
+
+ dest_info.k.port = 0;
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->dest_addr);
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ dest_info.k.vrf, CNAT_ICMP)
+
+ db = cnat_get_main_db_entry_v2(&ki, PORT_SINGLE, PORT_TYPE_DYNAMIC,
+ &info, &dest_info);
+ if (PREDICT_TRUE(db != 0)) {
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT BEFORE\n");
+ print_icmp_pkt(ip);
+ }
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ /*
+ * step 6 do nat before fwd pkt
+ */
+ swap_ip_src_icmp_id(ip, icmp, db, db->in2out_key.k.vrf);
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT AFTER\n");
+ print_icmp_pkt(ip);
+ }
+
+ /*
+ * update db for this pkt
+ */
+ CNAT_DB_UPDATE_IN2OUT_TIMER
+ in2out_forwarding_count++;
+
+ } else {
+ switch (info.error) {
+ case (CNAT_NO_VRF_RUN):
+ counter = CNAT_V4_ICMP_Q_I2O_E_DR_PKT;
+ break;
+ case (CNAT_OUT_LIMIT):
+ counter = CNAT_V4_ICMP_Q_I2O_E_DO_PKT;
+ break;
+ case (CNAT_NO_PORT_ANY):
+ case (CNAT_NO_POOL_ANY):
+ case (CNAT_BAD_INUSE_ANY):
+ case (CNAT_NOT_FOUND_ANY):
+ counter = CNAT_V4_ICMP_Q_I2O_E_DA_PKT;
+ break;
+ case (CNAT_INV_PORT_DIRECT):
+ case (CNAT_DEL_PORT_DIRECT):
+ case (CNAT_BAD_INUSE_DIRECT):
+ case (CNAT_NOT_FOUND_DIRECT):
+ counter = CNAT_V4_ICMP_Q_I2O_E_DD_PKT;
+ break;
+ case (CNAT_ERR_NO_SESSION_DB):
+ counter = CNAT_V4_ICMP_Q_I2O_E_DS_PKT;
+ break;
+ default:
+ counter = CNAT_V4_ICMP_Q_I2O_E_DC_PKT;
+ break;
+ }
+ /*
+ * send to icmp msg generate node
+ */
+ if (info.gen_icmp_msg == CNAT_ICMP_MSG) {
+ #if 0
+ u32 *fd = (u32*)ctx->feature_data;
+ fd[0] = info.svi_addr;
+ fd[1] = CNAT_ICMP_DEST_UNREACHABLE;
+ #endif
+ disposition = CNAT_V4_ICMP_Q_E_I2O_GEN;
+ counter = CNAT_V4_ICMP_Q_I2O_E_G_PKT;
+ } else {
+ disposition = CNAT_V4_ICMP_Q_E_I2O_D;
+ counter = CNAT_V4_ICMP_Q_I2O_E_D_PKT;
+ }
+ DEBUG_I2O_DROP(CNAT_DEBUG_DROP_ICMP)
+ }
+
+ em->counters[node_counter_base_index + counter] += 1;
+
+ return disposition;
+}
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_icmp_q_inside_input_exc_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+VLIB_REGISTER_NODE (cnat_ipv4_icmp_q_inside_input_exc_node) = {
+ .function = cnat_ipv4_icmp_q_inside_input_exc_node_fn,
+ .name = "vcgn-v4-icmp-q-i2o-e",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_icmp_q_inside_input_exc_error_strings),
+ .error_strings = cnat_ipv4_icmp_q_inside_input_exc_error_strings,
+
+ .n_next_nodes = CNAT_V4_ICMP_Q_E_I2O_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ //[CNAT_V4_ICMP_Q_E_I2O_GEN] = "icmp_msg_gen", /* Currently it will go
+ //to ip4-input node. We have to port icmp msg generator node */
+ [CNAT_V4_ICMP_Q_E_I2O_T] = "ip4-input",
+ [CNAT_V4_ICMP_Q_E_I2O_D] = "error-drop",
+ },
+};
+
+
+clib_error_t *cnat_ipv4_icmp_q_inside_input_exc_init (vlib_main_t *vm)
+{
+ cnat_ipv4_icmp_q_inside_input_exc_main_t * mp = &cnat_ipv4_icmp_q_inside_input_exc_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_icmp_q_inside_input_exc_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_outside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_outside_input.c
new file mode 100644
index 00000000000..2c05e0b400e
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_icmp_query_outside_input.c
@@ -0,0 +1,381 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_icmp_query_outside_input.c - cnat_ipv4_icmp_query_outside_input node pipeline stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_ipv4_icmp.h"
+
+#define foreach_cnat_ipv4_icmp_q_outside_input_error \
+_(CNAT_V4_ICMP_Q_O2I_T_PKT, "cnat v4 icmp_q o2i packet transmit") \
+_(CNAT_V4_ICMP_Q_O2I_MISS_PKT, "cnat v4 icmp_q o2i drop") \
+_(CNAT_V4_ICMP_Q_O2I_TTL_GEN, "cnat v4 icmp_q o2i ttl generate") \
+_(CNAT_V4_ICMP_Q_O2I_TTL_DROP, "cnat v4 icmp_q o2i ttl drop") \
+_(CNAT_V4_ICMP_Q_O2I_NO_SESSION_DROP, "cnat v4 icmp_q o2i no session drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_icmp_q_outside_input_error
+#undef _
+ CNAT_IPV4_ICMP_Q_OUTSIDE_INPUT_N_ERROR,
+} cnat_ipv4_icmp_q_outside_input_t;
+
+static char * cnat_ipv4_icmp_q_outside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_icmp_q_outside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_icmp_q_outside_input_main_t;
+
+typedef enum {
+ CNAT_V4_ICMP_Q_O2I_T,
+ CNAT_V4_ICMP_Q_O2I_D,
+ CNAT_V4_ICMP_Q_O2I_NEXT,
+} cnat_ipv4_icmp_q_outside_input_next_t;
+
+cnat_ipv4_icmp_q_outside_input_main_t cnat_ipv4_icmp_q_outside_input_main;
+vlib_node_registration_t cnat_ipv4_icmp_q_outside_input_node;
+
+#define NSTAGES 5
+
+inline void swap_ip_dst_icmp_id(ipv4_header *ip,
+ icmp_v4_t *icmp,
+ cnat_main_db_entry_t *db, u16 vrf)
+{
+#if 0
+ u32 postmap_ip;
+ u8 direction;
+ u32 old_ip;
+ u32 old_postmap_ip;
+
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ direction = 1;
+ if(cnat_static_dest_db_get_translation(ip->src_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+ CNAT_UPDATE_L3_CHECKSUM_DECLARE
+
+ old_ip = spp_net_to_host_byte_order_32(&(ip->src_addr));
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(ip->checksum))),
+ ((u16)(old_postmap_ip & 0xFFFF)),
+ ((u16)(old_postmap_ip >> 16)))
+ ip->checksum =
+ spp_host_to_net_byte_order_16(new_l3_c);
+ ip->src_addr = postmap_ip;
+ }
+ }
+#endif /* if 0 */
+ /*
+ * declare variable
+ */
+ CNAT_UPDATE_L3_L4_CHECKSUM_DECLARE
+ /*
+ * calculate checksum
+ */
+ CNAT_UPDATE_L3_ICMP_CHECKSUM(((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)),
+ (db->out2in_key.k.port),
+ (clib_net_to_host_u16(ip->checksum)),
+ (clib_net_to_host_u16(icmp->checksum)),
+ ((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)),
+ (db->in2out_key.k.port))
+ //set ip header
+ ip->dest_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+
+ //set icmp header
+ icmp->identifier =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+ icmp->checksum =
+ clib_host_to_net_u16(new_l4_c);
+}
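+
+/*
+ * Illustrative sketch only: the CNAT_UPDATE_L3_ICMP_CHECKSUM macro used above
+ * folds the address/identifier rewrite into the existing IPv4 and ICMP
+ * checksums instead of recomputing them from scratch.  The hypothetical
+ * helper below shows the underlying RFC 1624 update for one 16-bit field
+ * changing from old_val to new_val (host byte order): HC' = ~(~HC + ~m + m').
+ */
+static inline u16 sketch_csum_update16 (u16 csum, u16 old_val, u16 new_val)
+{
+    u32 sum = (u32) (u16) ~csum + (u32) (u16) ~old_val + new_val;
+    sum = (sum & 0xffff) + (sum >> 16);   /* fold carries back into 16 bits */
+    sum = (sum & 0xffff) + (sum >> 16);
+    return (u16) ~sum;
+}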
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+
+ u64 tmp = 0;
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(ip->dest_addr);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16 (icmp->identifier);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ CNAT_ICMP)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_out2in_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
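+
+/*
+ * Illustrative sketch only: stage1 above packs the (ipv4, port, vrf) triple
+ * into a single u64 so the flow key can be hashed once and later matched
+ * with one 64-bit compare (the key64 check in stage3).  The helper name is
+ * hypothetical; the bit layout mirrors the shifts used above.
+ */
+static inline u64 sketch_pack_flow_key (u32 ipv4, u16 port, u16 vrf)
+{
+    return (u64) ipv4 | ((u64) port << 32) | ((u64) vrf << 48);
+}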
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_out2in_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, LOAD);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ }
+}
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->out2in_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->out2in_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ int disposition = CNAT_V4_ICMP_Q_O2I_T;
+ int counter = CNAT_V4_ICMP_Q_O2I_T_PKT;
+
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ icmp_v4_t *icmp = (icmp_v4_t *)((u8*)ip + ipv4_hdr_len);
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_icmp_q_outside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_session_entry_t *session_db = NULL;
+ cnat_main_db_entry_t *db = NULL;
+ cnat_key_t dest_info;
+ cnat_vrfmap_t * vrf_map_p __attribute__((unused)) = NULL;
+ u32 vrf_index __attribute__((unused)) = 0;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ db = cnat_main_db + db_index;
+ dest_info.k.port = 0;
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->src_addr);
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT BEFORE\n");
+ print_icmp_pkt(ip);
+ }
+
+ vrf_map_p = cnat_map_by_vrf + db->vrfmap_index;
+ vrf_index = (db->in2out_key.k.vrf & CNAT_VRF_MASK);
+
+ if(PREDICT_TRUE(!PLATFORM_DBL_SUPPORT)) {
+
+ /* No DBL support, so just update the destination and proceed */
+ db->dst_ipv4 = dest_info.k.ipv4;
+ db->dst_port = dest_info.k.port;
+ goto update_pkt;
+ }
+
+ if(PREDICT_FALSE(db->dst_ipv4 != dest_info.k.ipv4)) {
+
+ if(PREDICT_TRUE(db->nsessions == 1)) {
+ /* Handle the one-to-two destination scenario */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ session_db = cnat_handle_1to2_session(db, &dest_info);
+
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_ICMP_Q_O2I_D;
+ counter = CNAT_V4_ICMP_Q_O2I_NO_SESSION_DROP;
+ goto drop_pkt;
+ }
+ } else if (PREDICT_FALSE(db->nsessions == 0)) {
+ /* Should be a static entry.
+ * Record this as the first session and log it.
+ */
+ cnat_add_dest_n_log(db, &dest_info);
+ } else { /* Many translations exist already */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ /* If the session already exists,
+ * cnat_create_session_db_entry returns the existing db;
+ * otherwise it creates a new one.
+ * If it cannot create one, it returns NULL.
+ */
+ session_db = cnat_create_session_db_entry(&dest_info,
+ db, TRUE);
+
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_ICMP_Q_O2I_D;
+ counter = CNAT_V4_ICMP_Q_O2I_NO_SESSION_DROP;
+ goto drop_pkt;
+ }
+ }
+ }
+
+update_pkt:
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ /*
+ * 1. update dest ipv4 addr and icmp id
+ * 2. update ipv4 checksum and icmp checksum
+ */
+ swap_ip_dst_icmp_id(ip, icmp, db, db->in2out_key.k.vrf);
+
+ if (PREDICT_FALSE(icmp_debug_flag)) {
+ printf("\nDUMPING ICMP PKT AFTER\n");
+ print_icmp_pkt(ip);
+ }
+
+ db->out2in_pkts++;
+
+ //nat44_dslite_global_stats[dslite_flag].out2in_forwarding_count++;
+
+ } else {
+ disposition = CNAT_V4_ICMP_Q_O2I_D;
+ counter = CNAT_V4_ICMP_Q_O2I_MISS_PKT;
+ }
+
+drop_pkt:
+ em->counters[node_counter_base_index + counter] += 1;
+ return disposition;
+
+}
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_icmp_q_outside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_icmp_q_outside_input_node) = {
+ .function = cnat_ipv4_icmp_q_outside_input_node_fn,
+ .name = "vcgn-v4-icmp-q-o2i",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_icmp_q_outside_input_error_strings),
+ .error_strings = cnat_ipv4_icmp_q_outside_input_error_strings,
+
+ .n_next_nodes = CNAT_V4_ICMP_Q_O2I_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_ICMP_Q_O2I_T] = "ip4-input",
+ [CNAT_V4_ICMP_Q_O2I_D] = "error-drop",
+ },
+};
+
+clib_error_t *cnat_ipv4_icmp_q_outside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_icmp_q_outside_input_main_t * mp = &cnat_ipv4_icmp_q_outside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_icmp_q_outside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input.c
new file mode 100644
index 00000000000..5bea707385c
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input.c
@@ -0,0 +1,424 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_tcp_inside_input.c - cnat_ipv4_tcp_inside_input node pipeline
+ * stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_db.h"
+#include "tcp_header_definitions.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_v4_functions.h"
+
+#define foreach_cnat_ipv4_tcp_inside_input_error \
+_(CNAT_V4_TCP_I2O_PKT_IN, "tcp i2o packets received") \
+_(CNAT_V4_TCP_I2O_PKT_T, "tcp i2o packets natted") \
+_(CNAT_V4_TCP_I2O_EXCEPTION, "packets to tcp i2o exception") \
+_(CNAT_V4_TCP_I2O_TTL_GEN, "generated TTL expiry ICMP packets") \
+_(CNAT_V4_TCP_I2O_TTL_GEN_DROP, "could not generate TTL expiry ICMP packets") \
+_(CNAT_V4_TCP_I2O_SESSION_DROP, "could not generate session") \
+_(CNAT_V4_UDP_I2O_FRAG_DROP, "non-first fragment drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_tcp_inside_input_error
+#undef _
+ CNAT_IPV4_TCP_INSIDE_INPUT_N_ERROR,
+} cnat_ipv4_tcp_inside_input_t;
+
+static char * cnat_ipv4_tcp_inside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_tcp_inside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_tcp_inside_input_main_t;
+
+typedef enum {
+ CNAT_V4_TCP_I2O_E,
+ CNAT_V4_TCP_I2O_T,
+ CNAT_V4_TCP_I2O_D,
+ CNAT_V4_TCP_I2O_NEXT,
+} cnat_ipv4_tcp_inside_input_next_t;
+
+#define CNAT_REWRITE_OUTPUT CNAT_V4_TCP_I2O_T
+#define CNAT_V4_ICMP_GEN CNAT_V4_TCP_I2O_D
+
+//#define CNAT_V4_TCP_I2O_E CNAT_V4_TCP_I2O_D //remove it once exception node is created
+cnat_ipv4_tcp_inside_input_main_t cnat_ipv4_tcp_inside_input_main;
+vlib_node_registration_t cnat_ipv4_tcp_inside_input_node;
+
+#define NSTAGES 6
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+ //cnat_feature_data_t *fd = (cnat_feature_data_t *)ctx->feature_data;
+
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ tcp_hdr_type *tcp = (tcp_hdr_type *)((u8*)ip + ipv4_hdr_len);
+
+ u64 tmp = 0;
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(ip->src_addr);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16 (tcp->src_port);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ CNAT_TCP)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_in2out_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{ /* nothing */ }
+
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_in2out_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, LOAD);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ }
+}
+
+static inline void
+stage4(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->in2out_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->in2out_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ spp_ctx_t *ctx = (spp_ctx_t *) &vnet_buffer(b0)->vcgn_uii;
+ int disposition = CNAT_V4_TCP_I2O_T;
+ int counter = CNAT_V4_TCP_I2O_PKT_T;
+
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ tcp_hdr_type *tcp = (tcp_hdr_type *)((u8*)ip + ipv4_hdr_len);
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_tcp_inside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_session_entry_t *session_db = NULL;
+ cnat_main_db_entry_t *db = NULL;
+ cnat_key_t dest_info;
+ u32 window;
+ u8 scale;
+
+
+ INCREMENT_NODE_COUNTER(CNAT_V4_TCP_I2O_PKT_IN);
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ if (PREDICT_FALSE(ip->ttl <= 1)) {
+ /* Try to generate ICMP error msg, as TTL is <= 1 */
+
+ if (icmpv4_generate_with_throttling
+ (ctx, ip, ctx->ru.rx.uidb_index)) {
+
+ /* Generated ICMP */
+ disposition = CNAT_REWRITE_OUTPUT;
+ counter = CNAT_V4_TCP_I2O_TTL_GEN;
+ } else {
+ /* Could not generate ICMP - drop the packet */
+ disposition = CNAT_V4_TCP_I2O_D;
+ counter = CNAT_V4_TCP_I2O_TTL_GEN_DROP;
+ }
+ goto drop_pkt;
+ }
+ }
+
+ if (PREDICT_FALSE(db_index == EMPTY)) {
+ /* Deleted fragment code from here */
+ disposition = CNAT_V4_TCP_I2O_E;
+ counter = CNAT_V4_TCP_I2O_EXCEPTION;
+ } else {
+ db = cnat_main_db + db_index;
+
+ /* Handle destination sessions */
+ dest_info.k.port = clib_net_to_host_u16(tcp->dest_port);
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->dest_addr);
+
+ if(PREDICT_TRUE(!PLATFORM_DBL_SUPPORT)) {
+
+ /* No DBL support, so just update the destination and proceed */
+ db->dst_ipv4 = dest_info.k.ipv4;
+ db->dst_port = dest_info.k.port;
+ goto update_pkt;
+ }
+
+ if(PREDICT_FALSE(db->dst_ipv4 != dest_info.k.ipv4 ||
+ db->dst_port != dest_info.k.port)) {
+ if(PREDICT_TRUE(db->nsessions == 0)) {
+ /* Should be a static entry.
+ * Record this as the first session and log it.
+ */
+ cnat_add_dest_n_log(db, &dest_info);
+ } else if(PREDICT_FALSE(db->nsessions == 1)) {
+ /* Destination is not the same as in the main db:
+ * multiple-session scenario
+ */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ session_db = cnat_handle_1to2_session(db, &dest_info);
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_TCP_I2O_D;
+ counter = CNAT_V4_TCP_I2O_SESSION_DROP;
+ goto drop_pkt;
+ }
+ } else { /* There are already multiple destinations */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ /* If the session already exists,
+ * cnat_create_session_db_entry returns the existing db;
+ * otherwise it creates a new one.
+ * If it cannot create one, it returns NULL.
+ */
+ session_db = cnat_create_session_db_entry(&dest_info,
+ db, TRUE);
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_TCP_I2O_D;
+ counter = CNAT_V4_TCP_I2O_SESSION_DROP;
+ goto drop_pkt;
+ }
+ }
+ if(PREDICT_TRUE(session_db != 0)) {
+ /* Have to repeat the window size check for new destinations */
+ window = (u32)clib_net_to_host_u16(tcp->window_size);
+ window = window << session_db->scale;
+ if(PREDICT_TRUE(!session_db->window)) {
+ calculate_window_scale(tcp, &scale);
+ session_db->scale = scale;
+ session_db->window = window;
+ } else if (PREDICT_FALSE(session_db->window <
+ window)) {
+ /* Update the db entry with window option from packet */
+ session_db->window = window;
+ } else {
+ /* Do nothing */
+ }
+ session_db->tcp_seq_num = clib_net_to_host_u32(tcp->seq_num);
+ session_db->ack_no = clib_net_to_host_u32(tcp->ack_num);
+#if DEBUG > 1
+ printf("\n In2out SDB stages seq no = %u,"
+ " ack no = %u, window = %u\n",
+ session_db->tcp_seq_num,
+ session_db->ack_no,
+ session_db->window);
+#endif
+
+ }
+ } else {
+ //Update the seq no and ack no for subsequent communication
+ //after connection establishment
+ //No need to update window here. Window is already updated
+ //during connection establishment
+ window = (u32)clib_net_to_host_u16(tcp->window_size);
+ window = window << db->scale;
+ if(PREDICT_FALSE(!ALG_ENABLED_DB(db))) {
+ //This check is needed because proto_data is part of a union in the
+ //main db entry
+ db->proto_data.tcp_seq_chk.seq_no =
+ clib_net_to_host_u32(tcp->seq_num);
+ db->proto_data.tcp_seq_chk.ack_no =
+ clib_net_to_host_u32(tcp->ack_num);
+ }
+ if (PREDICT_FALSE(db->diff_window < window)) {
+ /* Update the db entry with window option from packet */
+ db->diff_window = window;
+ }
+#if DEBUG > 1
+ printf("\n In2out MainDB seq no = %u,"
+ "\n ack no = %u\n",
+ db->proto_data.tcp_seq_chk.seq_no,
+ db->proto_data.tcp_seq_chk.ack_no);
+ printf("\n In2out MAINDB window = %u\n",
+ db->diff_window);
+#endif
+ }
+update_pkt:
+
+ counter = CNAT_V4_TCP_I2O_PKT_T;
+ disposition = CNAT_V4_TCP_I2O_T;
+
+ /* NO FRAGMENT & ALG HANDLING. DELETING THE CODE */
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ tcp_in2out_nat_mss_n_checksum(ip,
+ tcp,
+ db->out2in_key.k.ipv4,
+ db->out2in_key.k.port,
+ db
+ /*, db->in2out_key.k.vrf */);
+
+ /* update translation counters */
+ db->in2out_pkts++;
+ in2out_forwarding_count++;
+
+ /* update the timer for good mode, or evil mode dst_ip match */
+
+ if(PREDICT_FALSE(session_db != NULL)) {
+ V4_TCP_UPDATE_SESSION_DB_FLAG(session_db, tcp);
+ CNAT_DB_TIMEOUT_RST(session_db);
+ } else {
+ V4_TCP_UPDATE_SESSION_FLAG(db, tcp);
+ CNAT_DB_TIMEOUT_RST(db);
+ }
+ }
+
+drop_pkt:
+
+ em->counters[node_counter_base_index + counter] += 1;
+ return disposition;
+}
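+
+/*
+ * Illustrative sketch only: the calculate_window_scale()/scale handling above
+ * relies on the TCP window-scale option (WSopt, kind 3, RFC 7323), which is
+ * only carried on SYN segments.  The stand-alone parser below is a
+ * hypothetical example of extracting that shift count, not the project's
+ * implementation.
+ */
+static inline u8 sketch_tcp_window_scale (u8 *opts, int opt_len)
+{
+    int i = 0;
+    while (i < opt_len) {
+        u8 kind = opts[i];
+        if (kind == 0)                          /* end of option list */
+            break;
+        if (kind == 1) {                        /* NOP is a single byte */
+            i++;
+            continue;
+        }
+        if (i + 1 >= opt_len || opts[i + 1] < 2)
+            break;                              /* malformed option */
+        if (kind == 3 && opts[i + 1] == 3 && i + 2 < opt_len)
+            return opts[i + 2];                 /* window-scale shift count */
+        i += opts[i + 1];
+    }
+    return 0;                                   /* no WSopt => no scaling */
+}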
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_tcp_inside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_tcp_inside_input_node) = {
+ .function = cnat_ipv4_tcp_inside_input_node_fn,
+ .name = "vcgn-v4-tcp-i2o",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_tcp_inside_input_error_strings),
+ .error_strings = cnat_ipv4_tcp_inside_input_error_strings,
+
+ .n_next_nodes = CNAT_V4_TCP_I2O_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_TCP_I2O_E] = "vcgn-v4-tcp-i2o-e",
+ [CNAT_V4_TCP_I2O_T] = "ip4-input",
+ [CNAT_V4_TCP_I2O_D] = "error-drop",
+ },
+};
+
+clib_error_t *cnat_ipv4_tcp_inside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_tcp_inside_input_main_t * mp = &cnat_ipv4_tcp_inside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_tcp_inside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input_exceptions.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input_exceptions.c
new file mode 100644
index 00000000000..bc1bebb04ba
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_inside_input_exceptions.c
@@ -0,0 +1,314 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_tcp_inside_input_exceptions.c -
+ * cnat_ipv4_tcp_inside_input_exceptions node pipeline stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_db.h"
+#include "tcp_header_definitions.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_v4_functions.h"
+
+
+#define foreach_cnat_ipv4_tcp_inside_input_exc_error \
+_(CNAT_V4_TCP_I2O_E_T_PKT, "v4 tcp i2o-e transmit natted pkt") \
+_(CNAT_V4_TCP_I2O_E_D_NON_SYN_PKT, "v4 tcp i2o-e non syn drop") \
+_(CNAT_V4_TCP_I2O_E_D_INVALID_PKT, "v4 tcp i2o-e invalid pkt drop") \
+_(CNAT_V4_TCP_I2O_E_DROP, "v4 tcp i2o-e drop") \
+_(CNAT_V4_TCP_I2O_E_GEN_ICMP, "v4 tcp i2o-e gen icmp msg") \
+_(CNAT_V4_TCP_I2O_E_D_NO_SESSION, "v4 tcp i2o-e no session db entry drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_tcp_inside_input_exc_error
+#undef _
+ CNAT_IPV4_TCP_INSIDE_INPUT_EXCEPTIONS_N_ERROR,
+} cnat_ipv4_tcp_inside_input_exc_error_t;
+
+
+static char * cnat_ipv4_tcp_inside_input_exc_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_tcp_inside_input_exc_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_tcp_inside_input_exc_main_t;
+
+typedef enum {
+ CNAT_V4_TCP_I2O_E_T,
+ //CNAT_V4_TCP_I2O_E_ICMP,
+ CNAT_V4_TCP_I2O_E_D,
+ CNAT_V4_TCP_I2O_E_NEXT,
+} cnat_ipv4_udp_inside_input_exc_next_t;
+
+#define CNAT_V4_TCP_I2O_E_ICMP CNAT_V4_TCP_I2O_E_D
+
+cnat_ipv4_tcp_inside_input_exc_main_t cnat_ipv4_tcp_inside_input_exc_main;
+vlib_node_registration_t cnat_ipv4_tcp_inside_input_exc_node;
+
+#define NSTAGES 2
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ vlib_node_t *n =
+ vlib_get_node (vm, cnat_ipv4_tcp_inside_input_exc_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+
+ cnat_gen_icmp_info info;
+ cnat_db_key_bucket_t ki;
+ cnat_main_db_entry_t *db = NULL;
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ tcp_hdr_type *tcp = (tcp_hdr_type *)((u8*)ip + ipv4_hdr_len);
+ int disposition = CNAT_V4_TCP_I2O_E_T;
+ int counter = CNAT_V4_TCP_I2O_E_T_PKT;
+ cnat_key_t dest_info;
+ u32 window;
+ u8 scale;
+
+ window = (u32)clib_net_to_host_u16(tcp->window_size);
+ calculate_window_scale(tcp, &scale);
+
+ dest_info.k.port = clib_net_to_host_u16(tcp->dest_port);
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->dest_addr);
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ dest_info.k.vrf, CNAT_TCP)
+
+ /* for TCP if not SYN or if src_port is 0, silently drop the packet */
+ if (PREDICT_FALSE(!((tcp->flags & TCP_FLAG_SYN) && (tcp->src_port)))) {
+
+ /*
+ * If the packet is dropped due to both reasons,
+ * count it as invalid packet drop
+ */
+ if (!tcp->src_port) {
+ counter = CNAT_V4_TCP_I2O_E_D_INVALID_PKT;
+ } else {
+ counter = CNAT_V4_TCP_I2O_E_D_NON_SYN_PKT;
+ }
+ disposition = CNAT_V4_TCP_I2O_E_D;
+ goto in2out_e;
+ }
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ ki.k.k.vrf, CNAT_TCP)
+
+ ki.k.k.ipv4 = clib_net_to_host_u32(ip->src_addr);
+ ki.k.k.port = clib_net_to_host_u16(tcp->src_port);
+
+ db = cnat_get_main_db_entry_v2(&ki, PORT_SINGLE, PORT_TYPE_DYNAMIC, &info,
+ &dest_info);
+
+
+#if DEBUG > 1
+ if(PREDICT_TRUE(db)) {
+ printf("create db %x ip %x->%x port %x->%x dst_ip %x\n", db,
+ db->in2out_key.k.ipv4, db->out2in_key.k.ipv4,
+ db->in2out_key.k.port, db->out2in_key.k.port, db->dst_ipv4);
+ }
+#endif
+
+
+ if (PREDICT_FALSE(db == 0)) {
+ /* Failed to create a new db entry, either because no more ports are
+ * available or because the user limit was reached; generate an ICMP
+ * type=3, code=13 message here.
+ */
+
+ /*
+ * We rate-limit ICMP messages per private user so that a user is not
+ * flooded with ICMP when the per-user port limit is reached
+ * (see the illustrative throttling sketch after this function).
+ */
+ if (PREDICT_TRUE(info.gen_icmp_msg == CNAT_ICMP_MSG)) {
+ /* KEEPING THINGS COMMENTED HERE..MAY NEED TO REVISIT AGAIN */
+ #if 0
+ u32 *fd = (u32*)ctx->feature_data;
+ fd[0] = info.svi_addr;
+ fd[1] = CNAT_ICMP_DEST_UNREACHABLE;
+
+ /*
+ * Let's reverse the direction from i2o to o2i.
+ * This will help using the correct VRF in the fib lookup (AVSM)
+ * especially for the o2i_vrf_override case
+ */
+ ctx->ru.rx.direction = 0; // 0 - o2i, 1 - i2o
+ #endif
+ disposition = CNAT_V4_TCP_I2O_E_ICMP;
+ counter = CNAT_V4_TCP_I2O_E_GEN_ICMP;
+
+ } else {
+ disposition = CNAT_V4_TCP_I2O_E_D;
+ counter = CNAT_V4_TCP_I2O_E_DROP;
+ }
+ //DEBUG_I2O_DROP(CNAT_DEBUG_DROP_TCP)
+ } else {
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ /* NAT the packet and fix checksum */
+
+ tcp_in2out_nat_mss_n_checksum(ip,
+ tcp,
+ db->out2in_key.k.ipv4,
+ db->out2in_key.k.port,
+ db
+ /*, db->in2out_key.k.vrf */);
+
+ /* this must be inside to outside SYN, do mss here */
+
+ /* update translation counters */
+ db->in2out_pkts++;
+
+ /* set keepalive timer */
+
+ if(PREDICT_TRUE((dest_info.k.ipv4 == db->dst_ipv4) &&
+ (dest_info.k.port == db->dst_port))) {
+ if(PREDICT_FALSE(!ALG_ENABLED_DB(db))) {
+ //This check is needed because proto_data is part of a union in the
+ //main db entry
+
+ db->proto_data.tcp_seq_chk.seq_no =
+ clib_net_to_host_u32(tcp->seq_num);
+ db->proto_data.tcp_seq_chk.ack_no =
+ clib_net_to_host_u32(tcp->ack_num);
+ db->scale = scale;
+ db->diff_window = window;
+ }
+#if DEBUG > 1
+ PLATFORM_DEBUG_PRINT("\nMain DB seq no = %u,"
+ "ack no = %u, window = %u,"
+ "scale = %u",
+ db->proto_data.tcp_seq_chk.seq_no,
+ db->proto_data.tcp_seq_chk.ack_no,
+ db->diff_window,
+ db->scale);
+#endif
+ V4_TCP_UPDATE_SESSION_FLAG(db, tcp);
+ /* Check timeout db if there is config for this */
+ (void) query_and_update_db_timeout((void *)db, MAIN_DB_TYPE);
+ db->entry_expires = cnat_current_time;
+ } else {
+ /* Need to find the session entry corresponding to this destination. */
+ cnat_session_entry_t *sdb;
+ sdb = cnat_session_db_lookup_entry(
+ &dest_info, db - cnat_main_db);
+ if(PREDICT_FALSE(sdb == NULL)) {
+ disposition = CNAT_V4_TCP_I2O_E_D;
+ counter = CNAT_V4_TCP_I2O_E_D_NO_SESSION;
+ goto in2out_e;
+ }
+ sdb->tcp_seq_num = clib_net_to_host_u32(tcp->seq_num);
+ sdb->ack_no = clib_net_to_host_u32(tcp->ack_num);
+ sdb->scale = scale;
+ sdb->window = window;
+
+#if DEBUG > 1
+ PLATFORM_DEBUG_PRINT("\nSDB seq no = %u, ack no = %u, window = %u"
+ "\nSDB scale = %u" ,
+ sdb->tcp_seq_num,
+ sdb->ack_no,
+ sdb->window,
+ sdb->scale);
+#endif
+ V4_TCP_UPDATE_SESSION_DB_FLAG(sdb, tcp);
+ /* Check timeout db if there is config for this */
+ (void) query_and_update_db_timeout((void *)sdb, SESSION_DB_TYPE);
+ sdb->entry_expires = cnat_current_time;
+ }
+
+ //PLATFORM_CNAT_SET_TX_VRF(ctx,db->out2in_key.k.vrf)
+
+ counter = CNAT_V4_TCP_I2O_E_T_PKT;
+ in2out_forwarding_count++;
+ }
+
+in2out_e:
+
+ em->counters[node_counter_base_index + counter] += 1;
+
+ return disposition;
+}
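+
+/*
+ * Illustrative sketch only: the per-user ICMP rate limiting mentioned above
+ * lives inside icmpv4_generate_with_throttling().  The hypothetical helper
+ * below shows one simple way to cap such messages at 'burst' per 'interval'
+ * timer ticks for a given user; it is not the project's implementation.
+ */
+typedef struct {
+    u32 window_start;     /* tick at which the current interval began */
+    u32 sent_in_window;   /* ICMP errors generated in this interval */
+} sketch_icmp_throttle_t;
+
+static inline int sketch_icmp_allowed (sketch_icmp_throttle_t *t,
+                                       u32 now, u32 interval, u32 burst)
+{
+    if (now - t->window_start >= interval) {
+        t->window_start = now;                  /* start a new interval */
+        t->sent_in_window = 0;
+    }
+    if (t->sent_in_window < burst) {
+        t->sent_in_window++;
+        return 1;                               /* OK to generate ICMP */
+    }
+    return 0;                                   /* throttled */
+}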
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_tcp_inside_input_exc_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+VLIB_REGISTER_NODE (cnat_ipv4_tcp_inside_input_exc_node) = {
+ .function = cnat_ipv4_tcp_inside_input_exc_node_fn,
+ .name = "vcgn-v4-tcp-i2o-e",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_tcp_inside_input_exc_error_strings),
+ .error_strings = cnat_ipv4_tcp_inside_input_exc_error_strings,
+
+ .n_next_nodes = CNAT_V4_TCP_I2O_E_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_TCP_I2O_E_T] = "ip4-input",
+ [CNAT_V4_TCP_I2O_E_D] = "error-drop",
+ },
+};
+
+
+clib_error_t *cnat_ipv4_tcp_inside_input_exc_init (vlib_main_t *vm)
+{
+ cnat_ipv4_tcp_inside_input_exc_main_t * mp = &cnat_ipv4_tcp_inside_input_exc_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_tcp_inside_input_exc_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_outside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_outside_input.c
new file mode 100644
index 00000000000..bcf132b1dd7
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_tcp_outside_input.c
@@ -0,0 +1,382 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_tcp_outside_input.c - cnat_v4_tcp_out2in node pipeline stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_db.h"
+#include "tcp_header_definitions.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_ipv4_udp.h"
+#include "cnat_v4_functions.h"
+
+
+#define foreach_cnat_ipv4_tcp_outside_input_error \
+_(CNAT_V4_TCP_O2I_R_PKT, "v4 tcp o2i pkt received") \
+_(CNAT_V4_TCP_O2I_T_PKT, "v4 tcp o2i pkt natted & transmitted") \
+_(CNAT_V4_TCP_O2I_LOOKUP_FAILED, "v4 tcp o2i lookup failed") \
+_(CNAT_V4_TCP_O2I_TTL_GEN, "v4 tcp o2i generated TTL Expiry ICMP packet") \
+_(CNAT_V4_TCP_O2I_TTL_DROP, "v4 tcp o2i drop due to failure in creating TTL expiry ICMP msg") \
+_(CNAT_V4_TCP_O2I_PTB_GEN, "v4 tcp o2i PTB ICMP pkt generation") \
+_(CNAT_V4_UDP_O2I_PTB_DROP, "v4 tcp o2i drop due to failure in creating PTB ICMP pkt") \
+_(CNAT_V4_TCP_O2I_SESSION_DROP, "v4 tcp o2i drop due to failure in creating session db") \
+_(CNAT_V4_TCP_O2I_SEQ_MISMATCH_DROP, "v4 tcp o2i drop due to TCP sequence mismatch") \
+_(CNAT_V4_TCP_O2I_FILTER_DROP, "v4 tcp o2i drop due to endpoint filtering") \
+_(CNAT_V4_TCP_O2I_NON_SYN_RST_DROP, "v4 tcp o2i drop due to no syn/rst flag") \
+_(CNAT_V4_TCP_O2I_FIRST_FRAG_DROP, "v4 tcp o2i first fragment drop") \
+_(CNAT_V4_TCP_O2I_SUB_FRAG_NO_DB_DROP, "v4 tcp o2i subsequent frag no DB drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_tcp_outside_input_error
+#undef _
+ CNAT_IPV4_TCP_OUTSIDE_INPUT_N_ERROR,
+} cnat_ipv4_tcp_outside_input_t;
+
+static char * cnat_ipv4_tcp_outside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_tcp_outside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_tcp_outside_input_main_t;
+
+typedef enum {
+ //CNAT_V4_TCP_O2I_E,
+ CNAT_V4_TCP_O2I_T,
+ CNAT_V4_TCP_O2I_D,
+ CNAT_V4_TCP_O2I_NEXT,
+} cnat_ipv4_tcp_outside_input_next_t;
+
+cnat_ipv4_tcp_outside_input_main_t cnat_ipv4_tcp_outside_input_main;
+vlib_node_registration_t cnat_ipv4_tcp_outside_input_node;
+
+#define NSTAGES 6
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ tcp_hdr_type *tcp = (tcp_hdr_type *)((u8*)ip + ipv4_hdr_len);
+
+ u64 tmp = 0;
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(ip->dest_addr);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16 (tcp->dest_port);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ CNAT_TCP)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_out2in_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{ /* nothing */ }
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_out2in_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, STORE);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+ }
+}
+
+static inline void
+stage4(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->out2in_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->out2in_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ spp_ctx_t *ctx = (spp_ctx_t *) &vnet_buffer(b0)->vcgn_uii;
+ int disposition = CNAT_V4_TCP_O2I_T;
+ int counter = CNAT_V4_TCP_O2I_T_PKT;
+
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ tcp_hdr_type *tcp = (tcp_hdr_type *)((u8*)ip + ipv4_hdr_len);
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_tcp_outside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_session_entry_t *session_db = NULL;
+ cnat_main_db_entry_t *db = NULL;
+ cnat_key_t dest_info;
+
+ INCREMENT_NODE_COUNTER(CNAT_V4_TCP_O2I_R_PKT);
+
+ if (PREDICT_FALSE(db_index == EMPTY)) {
+ nat44_dslite_common_stats[0].no_translation_entry_drops ++;
+ counter = CNAT_V4_TCP_O2I_LOOKUP_FAILED;
+ disposition = CNAT_V4_TCP_O2I_D;
+ } else {
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ if (PREDICT_FALSE(ip->ttl <= 1)) {
+ /* Try to generate ICMP error msg, as TTL is <= 1 */
+ if (icmpv4_generate_with_throttling(ctx,
+ ip, ctx->ru.rx.uidb_index)) {
+ /* Generated ICMP */
+ disposition = CNAT_V4_TCP_O2I_T; //CNAT_REWRITE_OUTPUT;
+ counter = CNAT_V4_TCP_O2I_TTL_GEN;
+ } else {
+ /* Could not generate ICMP - drop the packet */
+ disposition = CNAT_V4_TCP_O2I_D;
+ counter = CNAT_V4_TCP_O2I_TTL_DROP;
+ }
+ goto drop_pkt;
+ }
+ }
+ db = cnat_main_db + db_index;
+#if 0
+ window = db->diff_window;
+ stored_seq_no = db->proto_data.tcp_seq_chk.seq_no;
+ stored_ack_no = db->proto_data.tcp_seq_chk.ack_no;
+ vrf_map_p = cnat_map_by_vrf + db->vrfmap_index;
+ vrf_index = (db->in2out_key.k.vrf & CNAT_VRF_MASK);
+#endif
+ /* For Out2In packet, the dest info is src address and port */
+ dest_info.k.port = clib_net_to_host_u16(tcp->src_port);
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->src_addr);
+
+ if(PREDICT_TRUE(!PLATFORM_DBL_SUPPORT)) {
+
+ /* No DBL support, so just update the destination and proceed */
+ db->dst_ipv4 = dest_info.k.ipv4;
+ db->dst_port = dest_info.k.port;
+ goto update_pkt;
+ }
+
+
+ if(PREDICT_FALSE(db->dst_ipv4 != dest_info.k.ipv4 ||
+ db->dst_port != dest_info.k.port)) {
+
+ if(PREDICT_TRUE(db->nsessions == 0)) {
+ /* Should be a static entry.
+ * Record this as the first session and log it.
+ */
+ cnat_add_dest_n_log(db, &dest_info);
+ //goto packet_upd;
+ } else if(PREDICT_FALSE(db->nsessions == 1)) {
+ /* Destination is not the same as in the main db:
+ * multiple-session scenario
+ */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ session_db = cnat_handle_1to2_session(db, &dest_info);
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_TCP_O2I_D;
+ counter = CNAT_V4_TCP_O2I_SESSION_DROP;
+ goto drop_pkt;
+ }
+ } else { /* There are already multiple destinations */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ /* If the session already exists,
+ * cnat_create_session_db_entry returns the existing db;
+ * otherwise it creates a new one.
+ * If it cannot create one, it returns NULL.
+ */
+ session_db = cnat_create_session_db_entry(&dest_info, db, TRUE);
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_TCP_O2I_D;
+ counter = CNAT_V4_TCP_O2I_SESSION_DROP;
+ goto drop_pkt;
+ }
+ }
+ /* useful for ALG only */
+ #if 0
+ if(PREDICT_TRUE(session_db)) {
+ stored_seq_no = session_db->tcp_seq_num;
+ stored_ack_no = session_db->ack_no;
+ window = session_db->window;
+ }
+ #endif
+ }
+
+
+update_pkt:
+
+ counter = CNAT_V4_TCP_O2I_T_PKT;
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ /* update ip checksum, newchecksum = ~(~oldchecksum + ~old + new) */
+ cnat_v4_recalculate_tcp_checksum(ip, tcp,
+ &(ip->dest_addr),
+ &(tcp->dest_port),
+ db->in2out_key.k.ipv4,
+ db->in2out_key.k.port);
+
+ /* CNAT_PPTP_ALG_SUPPORT */
+ db->out2in_pkts++;
+
+ nat44_dslite_global_stats[0].out2in_forwarding_count++;
+
+ V4_TCP_UPDATE_SESSION_FLAG(db, tcp);
+
+
+ if(PREDICT_FALSE(session_db != NULL)) {
+ V4_TCP_UPDATE_SESSION_DB_FLAG(session_db, tcp);
+ CNAT_DB_TIMEOUT_RST(session_db);
+ } else {
+ V4_TCP_UPDATE_SESSION_FLAG(db, tcp);
+ CNAT_DB_TIMEOUT_RST(db);
+ }
+
+ }
+
+drop_pkt:
+ em->counters[node_counter_base_index + counter] += 1;
+ return disposition;
+}
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_tcp_outside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_tcp_outside_input_node) = {
+ .function = cnat_ipv4_tcp_outside_input_node_fn,
+ .name = "vcgn-v4-tcp-o2i",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_tcp_outside_input_error_strings),
+ .error_strings = cnat_ipv4_tcp_outside_input_error_strings,
+
+ .n_next_nodes = CNAT_V4_TCP_O2I_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ //[CNAT_V4_TCP_O2I_E] = "vcgn-v4-tcp-o2i-e",
+ [CNAT_V4_TCP_O2I_T] = "ip4-input",
+ [CNAT_V4_TCP_O2I_D] = "error-drop",
+ },
+};
+
+clib_error_t *cnat_ipv4_tcp_outside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_tcp_outside_input_main_t * mp = &cnat_ipv4_tcp_outside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_tcp_outside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp.h b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp.h
new file mode 100644
index 00000000000..1ccf74a004e
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp.h
@@ -0,0 +1,41 @@
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * Filename: cnat_ipv4_udp.h
+ *
+ * Description: common functions for udp node
+ *
+ * Assumptions and Constraints:
+ *
+ * Copyright (c) 2000-2009 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *-----------------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_IPV4_UDP_H__
+#define __CNAT_IPV4_UDP_H__
+
+#include "tcp_header_definitions.h"
+#include "cnat_db.h"
+#include "cnat_v4_functions.h"
+#include "cnat_global.h"
+#include "cnat_config.h"
+
+extern void swap_ip_src_udp_port(ipv4_header *ip,
+ udp_hdr_type_t *udp,
+ cnat_main_db_entry_t *db);
+extern void swap_ip_dst_udp_port(ipv4_header *ip,
+ udp_hdr_type_t *udp,
+ cnat_main_db_entry_t *db,
+ u16 vrf);
+#endif /* __CNAT_IPV4_UDP_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input.c
new file mode 100644
index 00000000000..657c5f1e64e
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input.c
@@ -0,0 +1,508 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_udp_inside_input.c - cnat_ipv4_udp_inside_input node functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_global.h"
+#include "cnat_db.h"
+#include "cnat_ipv4_udp.h"
+#include "cnat_pcp_server.h"
+
+
+#define foreach_cnat_ipv4_udp_inside_input_error \
+_(CNAT_V4_UDP_I2O_T_PKT, "v4 udp i2o transmit") \
+_(CNAT_V4_UDP_I2O_MISS_PKT, "v4 udp i2o db miss") \
+_(CNAT_V4_UDP_I2O_TTL_GEN, "v4 udp i2o TTL gen") \
+_(CNAT_V4_UDP_I2O_TTL_DROP, "v4 udp i2o TTL drop") \
+_(CNAT_V4_PCP_PKT, "v4 pcp pkt") \
+_(CNAT_V4_UDP_I2O_SESSION_DROP, "v4 udp i2o session drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_udp_inside_input_error
+#undef _
+ CNAT_IPV4_UDP_INSIDE_INPUT_N_ERROR,
+} cnat_ipv4_udp_inside_input_t;
+
+static char * cnat_ipv4_udp_inside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_udp_inside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_udp_inside_input_main_t;
+
+typedef enum {
+ CNAT_V4_I2O_FIXME,
+ CNAT_V4_UDP_I2O_E,
+ CNAT_REWRITE_OUTPUT,
+ CNAT_V4_UDP_I2O_T = CNAT_REWRITE_OUTPUT,
+ CNAT_N_NEXT,
+} cnat_ipv4_udp_inside_input_next_t;
+
+#define CNAT_V4_UDP_I2O_D CNAT_V4_I2O_FIXME
+#define CNAT_V4_PCP_T CNAT_V4_I2O_FIXME
+
+cnat_ipv4_udp_inside_input_main_t cnat_ipv4_udp_inside_input_main;
+vlib_node_registration_t cnat_ipv4_udp_inside_input_node;
+
+#define NSTAGES 6
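+
+/*
+ * Pipeline layout (NSTAGES = 6):
+ *   stage0     - generic buffer metadata + packet data prefetch
+ *   stage1     - build the in2out hash key and prefetch the hash bucket
+ *   stage2     - no work (lets the stage1 prefetch land)
+ *   stage3     - read the bucket and prefetch the main DB entry
+ *   stage4     - walk the collision chain to find the matching entry
+ *   last_stage - perform the translation and pick the next node
+ */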
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+#ifndef TOBE_PORTED
+static inline u32
+is_pcp_pkt(u32 addr, u16 port)
+{
+ return CNAT_NO_CONFIG;
+}
+#else
+static inline u32
+is_pcp_pkt(spp_ctx_t *ctx, u32 addr, u16 port)
+{
+ cnat_vrfmap_t *my_vrfmap = NULL;
+ u16 my_vrfmap_index;
+
+ my_vrfmap_index = vrf_map_array[ctx->ru.rx.uidb_index];
+
+ if (PREDICT_TRUE(my_vrfmap_index != VRF_MAP_ENTRY_EMPTY)) {
+
+ my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;
+
+ if (PREDICT_FALSE( port == my_vrfmap->pcp_server_port)) {
+ if(PREDICT_TRUE(addr == my_vrfmap->pcp_server_addr)) {
+ return CNAT_SUCCESS;
+ }
+ }
+ }
+
+ return CNAT_NO_CONFIG;
+}
+#endif
+
+inline void swap_ip_src_udp_port(ipv4_header *ip,
+ udp_hdr_type_t *udp,
+ cnat_main_db_entry_t *db)
+{
+ /*
+     * declare variables
+ */
+ CNAT_UPDATE_L3_L4_CHECKSUM_DECLARE
+ /*
+ * calculate checksum
+ */
+ CNAT_UPDATE_L3_L4_CHECKSUM(((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)),
+ (db->in2out_key.k.port),
+ (clib_net_to_host_u16(ip->checksum)),
+ (clib_net_to_host_u16(udp->udp_checksum)),
+ ((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)),
+ (db->out2in_key.k.port))
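+    /*
+     * The DECLARE/UPDATE macro pair computes the incrementally updated
+     * IPv4 header checksum (new_l3_c) and UDP checksum (new_l4_c) that
+     * are written into the headers below.
+     */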
+
+/* #define UDP_PACKET_DEBUG 1 */
+
+// Temporary debugs which will be suppressed later
+#ifdef UDP_PACKET_DEBUG
+ if (PREDICT_FALSE(udp_inside_packet_dump_enable)) {
+ printf("\nIn2Out UDP packet before translation");
+ print_udp_pkt(ip);
+ }
+#endif
+
+ //set ip header
+ ip->src_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+
+ u16 frag_offset =
+ clib_net_to_host_u16(ip->frag_flags_offset);
+
+ if(PREDICT_FALSE(frag_offset & IP_FRAG_OFFSET_MASK)) {
+ return; /* No need to update UDP fields */
+ }
+ //set udp header
+ udp->src_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+
+ /*
+ * No easy way to avoid this if check except by using
+ * complex logic - may not be worth it.
+ */
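+    /*
+     * A UDP checksum of zero means "no checksum was computed" over IPv4,
+     * so a zero checksum is deliberately left untouched here.
+     */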
+ if (PREDICT_TRUE(udp->udp_checksum)) {
+ udp->udp_checksum =
+ clib_host_to_net_u16(new_l4_c);
+ }
+
+// Temporary debugs which will be suppressed later
+#ifdef UDP_PACKET_DEBUG
+ if (PREDICT_FALSE(udp_inside_checksum_disable)) {
+ printf("\nIn2Out UDP checksum 0x%x disabled by force", new_l4_c);
+ udp->udp_checksum = 0;
+ }
+ if (PREDICT_FALSE(udp_inside_packet_dump_enable)) {
+ printf("\nIn2Out UDP packet after translation");
+ print_udp_pkt(ip);
+ }
+#endif
+}
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)((u8*)ip + ipv4_hdr_len);
+
+ u64 tmp = 0;
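+    /*
+     * Build the 64-bit in2out lookup key: bits 0..31 = source IPv4,
+     * bits 32..47 = source port, bits 48..63 = RX VRF; the key is hashed
+     * to select the bucket prefetched below.
+     */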
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(ip->src_addr);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16 (udp->src_port);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ CNAT_UDP)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_in2out_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{ /* nothing */ }
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_in2out_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, LOAD);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ }
+}
+
+static inline void
+stage4(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->in2out_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->in2out_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+static u64 pkt_num = 0;
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ spp_ctx_t *ctx = (spp_ctx_t *) &vnet_buffer(b0)->vcgn_uii;
+ int disposition = CNAT_V4_UDP_I2O_T;
+ int counter = CNAT_V4_UDP_I2O_T_PKT;
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)((u8*)ip + ipv4_hdr_len);
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_udp_inside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_session_entry_t *session_db = NULL;
+ cnat_key_t dest_info;
+
+ pkt_num++;
+
+ if(PREDICT_FALSE(is_pcp_pkt(ip->dest_addr, udp->dest_port) ==
+ CNAT_SUCCESS))
+ {
+ PCP_INCR(input);
+ disposition = CNAT_V4_PCP_T;
+ counter = CNAT_V4_PCP_PKT;
+
+ goto pcp_pkt;
+ }
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ if (PREDICT_FALSE(ip->ttl <= 1)) {
+ /* Try to generate ICMP error msg, as TTL is <= 1 */
+
+ if (icmpv4_generate_with_throttling
+ (ctx, ip, ctx->ru.rx.uidb_index)) {
+ /* Generated ICMP */
+ disposition = CNAT_REWRITE_OUTPUT;
+ counter = CNAT_V4_UDP_I2O_TTL_GEN;
+ } else {
+                /* Could not generate ICMP - drop the packet */
+ disposition = CNAT_V4_UDP_I2O_D;
+ counter = CNAT_V4_UDP_I2O_TTL_DROP;
+ }
+ goto drop_pkt;
+ }
+ }
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ cnat_main_db_entry_t *db = cnat_main_db + db_index;
+
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->dest_addr);
+
+        /* MUST revisit: it seems the frag flag is set to 1 for a few packets,
+         * and because of this the port is not updated and becomes 0. Temporarily
+         * commenting out this fragment check and setting dst port from the UDP
+         * dest port value. */
+ dest_info.k.port = clib_net_to_host_u16(udp->dest_port);
+        #if 0 // DO NOT REMOVE THIS #if 0
+ if(PREDICT_FALSE(ctx->ru.rx.frag)) {
+#ifdef TOBE_PORTED
+ /* Must have routed through cnat_v4_frag_in2out node */
+ u16 *feature_data_ports = (u16 *)&ctx->feature_data[4];
+ dest_info.k.port = *feature_data_ports;
+#endif
+ } else {
+ dest_info.k.port = clib_net_to_host_u16(udp->dest_port);
+ }
+ #endif
+
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ if(PREDICT_TRUE(!PLATFORM_DBL_SUPPORT)) {
+
+ /* No DBL support, so just update the destn and proceed */
+ db->dst_ipv4 = dest_info.k.ipv4;
+ db->dst_port = dest_info.k.port;
+ CNAT_DB_TIMEOUT_RST(db);
+ goto update_pkt;
+ }
+
+ if(PREDICT_TRUE((db->dst_ipv4 == dest_info.k.ipv4) &&
+ (db->dst_port == dest_info.k.port))) {
+
+ CNAT_DB_TIMEOUT_RST(db);
+ goto update_pkt;
+ } else {
+ if (PREDICT_FALSE(db->nsessions == 0)) {
+ /* Should be a static entry
+ * Note this session as the first session and log
+ */
+ cnat_add_dest_n_log(db, &dest_info);
+ /*
+ * update db counter, timer
+ */
+
+ CNAT_DB_TIMEOUT_RST(db);
+
+ } else if(PREDICT_TRUE(db->nsessions == 1)) {
+                /* Destination is not the same as in the main db.
+                 * Multiple-session scenario.
+                 */
+ //printf(">>> [pkt# %lu] src_ip: 0x%x, db ip: 0x%x, db port: %u; dest ip: 0x%x, dest port: %u\n",
+ // pkt_num, ntohl(ip->src_addr), db->dst_ipv4, db->dst_port, dest_info.k.ipv4, dest_info.k.port);
+
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ session_db = cnat_handle_1to2_session(db, &dest_info);
+
+ if(PREDICT_TRUE(session_db != NULL)) {
+ /* session exists */
+ CNAT_DB_TIMEOUT_RST(session_db);
+ } else {
+ /* could not create session db - drop packet */
+ disposition = CNAT_V4_UDP_I2O_D;
+ counter = CNAT_V4_UDP_I2O_SESSION_DROP;
+ goto drop_pkt;
+ }
+
+ } else {
+                /* More than two sessions exist */
+
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+
+ /* If session already exists,
+ * cnat_create_session_db_entry will return the existing db
+ * else create a new db
+ * If could not create, return NULL
+ */
+ session_db = cnat_create_session_db_entry(&dest_info,
+ db, TRUE);
+
+ if(PREDICT_FALSE(session_db != NULL)) {
+ /* session exists */
+ CNAT_DB_TIMEOUT_RST(session_db);
+ } else {
+ /* could not create session db - drop packet */
+ disposition = CNAT_V4_UDP_I2O_D;
+ counter = CNAT_V4_UDP_I2O_SESSION_DROP;
+ goto drop_pkt;
+ }
+ }
+ }
+
+update_pkt:
+ /*
+ * 1. update src ipv4 addr and src udp port
+ * 2. update ipv4 checksum and udp checksum
+ */
+ swap_ip_src_udp_port(ip, udp, db);
+ /*
+ * update db counter, timer
+ */
+
+ db->in2out_pkts++;
+
+ /*
+ * need to set outside vrf
+ * from db->out2in_key.k.vrf
+ */
+
+ /* Temporarily keeping this commented */
+ //PLATFORM_CNAT_SET_TX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_TX],
+ // db->out2in_key.k.vrf)
+
+ in2out_forwarding_count++;
+
+ } else {
+ disposition = CNAT_V4_UDP_I2O_E;
+ counter = CNAT_V4_UDP_I2O_MISS_PKT;
+ }
+
+drop_pkt:
+pcp_pkt:
+
+ em->counters[node_counter_base_index + counter] += 1;
+
+ return disposition;
+}
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_udp_inside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_udp_inside_input_node) = {
+ .function = cnat_ipv4_udp_inside_input_node_fn,
+ .name = "vcgn-v4-udp-i2o",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_udp_inside_input_error_strings),
+ .error_strings = cnat_ipv4_udp_inside_input_error_strings,
+
+ .n_next_nodes = CNAT_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_I2O_FIXME] = "error-drop",
+ // [CNAT_V4_UDP_I2O_T] = "ip4-input",
+ [CNAT_V4_UDP_I2O_E] = "vcgn-v4-udp-i2o-e",
+ [CNAT_REWRITE_OUTPUT] = "ip4-input",
+ },
+};
+
+clib_error_t *cnat_ipv4_udp_inside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_udp_inside_input_main_t * mp = &cnat_ipv4_udp_inside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_udp_inside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input_exceptions.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input_exceptions.c
new file mode 100644
index 00000000000..f078c8d4391
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_inside_input_exceptions.c
@@ -0,0 +1,283 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_udp_inside_input_exception_stages.c - cnat_ipv4_udp_inside_input_exception node pipeline stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_global.h"
+#include "cnat_db.h"
+#include "cnat_ipv4_udp.h"
+
+/*
+ * Dump these counters via the "show error" CLI command
+ */
+
+#define foreach_cnat_ipv4_udp_inside_input_exc_error \
+_(CNAT_V4_UDP_I2O_T_PKT, "v4 udp i2o transmit") \
+_(CNAT_V4_UDP_I2O_D_PKT, "v4 udp i2o drop") \
+_(CNAT_V4_ICMP_G_I2O_T_PKT, "v4 udp i2o icmp msg gen") \
+_(CNAT_V4_UDP_I2O_DC_PKT, "v4 udp i2o (no config) drop") \
+_(CNAT_V4_UDP_I2O_DR_PKT, "v4 udp i2o (not in run state) drop") \
+_(CNAT_V4_UDP_I2O_DD_PKT, "v4 udp i2o (no direct port) drop") \
+_(CNAT_V4_UDP_I2O_DA_PKT, "v4 udp i2o (no any port) drop") \
+_(CNAT_V4_UDP_I2O_DO_PKT, "v4 udp i2o (out of port limit) drop") \
+_(CNAT_V4_UDP_I2O_DI_PKT, "v4 udp i2o (invalid packet) drop") \
+_(CNAT_V4_UDP_I2O_DS_PKT, "v4 udp i2o (no session db) drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_udp_inside_input_exc_error
+#undef _
+ CNAT_IPV4_UDP_INSIDE_INPUT_EXCEPTIONS_N_ERROR,
+} cnat_ipv4_udp_inside_input_exc_error_t;
+
+
+static char * cnat_ipv4_udp_inside_input_exc_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_udp_inside_input_exc_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_udp_inside_input_exc_main_t;
+
+typedef enum {
+ CNAT_V4_UDP_I2O_T,
+ CNAT_V4_UDP_I2O_D,
+ CNAT_V4_ICMP_G_I2O_T = CNAT_V4_UDP_I2O_D, /* TOBE_PORTED */
+ CNAT_V4_UDP_INSIDE_INPUT_EXC_N_NEXT,
+} cnat_ipv4_udp_inside_input_exc_next_t;
+
+cnat_ipv4_udp_inside_input_exc_main_t cnat_ipv4_udp_inside_input_exc_main;
+vlib_node_registration_t cnat_ipv4_udp_inside_input_exc_node;
+
+#define NSTAGES 2
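+
+/*
+ * The exception path is a short two-stage pipeline: stage0 does the generic
+ * metadata/packet prefetch and last_stage allocates (or finds) a main DB
+ * entry via cnat_get_main_db_entry_v2(), translating on success and
+ * counting the drop or flagging the packet for ICMP generation otherwise.
+ */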
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ vlib_node_t *n =
+ vlib_get_node (vm, cnat_ipv4_udp_inside_input_exc_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+
+ cnat_gen_icmp_info info;
+ cnat_db_key_bucket_t ki;
+ spp_ctx_t *ctx __attribute__((unused))
+ = (spp_ctx_t *) &vnet_buffer(b0)->vcgn_uii;
+ cnat_main_db_entry_t *db = NULL;
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)((u8*)ip + ipv4_hdr_len);
+ int disposition = CNAT_V4_UDP_I2O_T;
+ int counter = CNAT_V4_UDP_I2O_T_PKT;
+
+ cnat_key_t dest_info;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ CNAT_UDP)
+
+ vnet_buffer(b0)->vcgn_uii.key.k.ipv4 = clib_net_to_host_u32(ip->src_addr);
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ ki.k.k.vrf, CNAT_UDP)
+
+ ki.k.k.ipv4 = clib_net_to_host_u32(ip->src_addr);
+
+
+    /* MUST REVISIT: commenting out the frag check. Unconditional destination
+     * port update. DO NOT remove this #if 0 */
+ ki.k.k.port =
+ clib_net_to_host_u16(udp->src_port);
+ dest_info.k.port =
+ clib_net_to_host_u16(udp->dest_port);
+#if 0
+ if(PREDICT_FALSE(ctx->ru.rx.frag)) {
+#ifdef TOBE_PORTED
+ /* Must have routed through cnat_v4_frag_in2out node */
+ u16 *feature_data_ports = (u16 *)&ctx->feature_data[2];
+ ki.k.k.port = *feature_data_ports;
+ feature_data_ports++;
+ dest_info.k.port = *feature_data_ports;
+#endif
+ } else {
+ ki.k.k.port =
+ clib_net_to_host_u16(udp->src_port);
+ dest_info.k.port =
+ clib_net_to_host_u16(udp->dest_port);
+ }
+#endif /* if 0 */
+
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->dest_addr);
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ dest_info.k.vrf, CNAT_UDP)
+
+ if (PREDICT_TRUE(ki.k.k.port)) {
+ if (ki.k.k.port & 0x1) {
+ db = cnat_get_main_db_entry_v2(&ki, PORT_S_ODD, PORT_TYPE_DYNAMIC,
+ &info, &dest_info);
+ } else {
+ db = cnat_get_main_db_entry_v2(&ki, PORT_S_EVEN, PORT_TYPE_DYNAMIC,
+ &info, &dest_info);
+ }
+ } else {
+ /*
+         * A UDP source port of 0 is not valid - drop the packet
+ */
+ db = NULL;
+ info.error = CNAT_ERR_BAD_TCP_UDP_PORT;
+ }
+
+ if (PREDICT_TRUE((u64)db)) {
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ /*
+         * step 6: do NAT before forwarding the packet
+ */
+ swap_ip_src_udp_port(ip, udp, db);
+ /*
+ * update db for this pkt
+ */
+ CNAT_DB_UPDATE_IN2OUT_TIMER
+
+ /* Check timeout db if there is config for this */
+ (void) query_and_update_db_timeout((void *)db, MAIN_DB_TYPE);
+
+/* Temporarily keeping it commented */
+ //PLATFORM_CNAT_SET_TX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_TX],
+ // db->out2in_key.k.vrf)
+ in2out_forwarding_count++;
+
+ } else {
+ switch (info.error) {
+ case (CNAT_NO_VRF_RUN):
+ em->counters[node_counter_base_index + CNAT_V4_UDP_I2O_DR_PKT] += 1;
+ break;
+ case (CNAT_OUT_LIMIT):
+ em->counters[node_counter_base_index + CNAT_V4_UDP_I2O_DO_PKT] += 1;
+ break;
+ case (CNAT_NO_PORT_ANY):
+ case (CNAT_NO_POOL_ANY):
+ case (CNAT_BAD_INUSE_ANY):
+ case (CNAT_NOT_FOUND_ANY):
+ em->counters[node_counter_base_index + CNAT_V4_UDP_I2O_DA_PKT] += 1;
+ break;
+ case (CNAT_INV_PORT_DIRECT):
+ case (CNAT_DEL_PORT_DIRECT):
+ case (CNAT_BAD_INUSE_DIRECT):
+ case (CNAT_NOT_FOUND_DIRECT):
+ em->counters[node_counter_base_index + CNAT_V4_UDP_I2O_DD_PKT] += 1;
+ break;
+ case (CNAT_ERR_BAD_TCP_UDP_PORT):
+ em->counters[node_counter_base_index + CNAT_V4_UDP_I2O_DI_PKT] += 1;
+ break;
+ case (CNAT_ERR_NO_SESSION_DB):
+ em->counters[node_counter_base_index + CNAT_V4_UDP_I2O_DS_PKT] += 1;
+ break;
+ default:
+ em->counters[node_counter_base_index + CNAT_V4_UDP_I2O_DC_PKT] += 1;
+ break;
+ }
+ /*
+ * send to icmp msg generate node
+ */
+ if (info.gen_icmp_msg == CNAT_ICMP_MSG) {
+#ifdef TOBE_PORTED
+ u32 *fd = (u32*)ctx->feature_data;
+ fd[0] = info.svi_addr;
+ fd[1] = CNAT_ICMP_DEST_UNREACHABLE;
+#endif
+ disposition = CNAT_V4_ICMP_G_I2O_T;
+ counter = CNAT_V4_ICMP_G_I2O_T_PKT;
+ } else {
+ disposition = CNAT_V4_UDP_I2O_D;
+ counter = CNAT_V4_UDP_I2O_D_PKT;
+ }
+ DEBUG_I2O_DROP(CNAT_DEBUG_DROP_UDP)
+ }
+
+ em->counters[node_counter_base_index + counter] += 1;
+
+ return disposition;
+}
+
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_udp_inside_input_exc_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+VLIB_REGISTER_NODE (cnat_ipv4_udp_inside_input_exc_node) = {
+ .function = cnat_ipv4_udp_inside_input_exc_node_fn,
+ .name = "vcgn-v4-udp-i2o-e",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_udp_inside_input_exc_error_strings),
+ .error_strings = cnat_ipv4_udp_inside_input_exc_error_strings,
+
+ .n_next_nodes = CNAT_V4_UDP_INSIDE_INPUT_EXC_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [CNAT_V4_UDP_I2O_T] = "ip4-input",
+ [CNAT_V4_UDP_I2O_D] = "error-drop",
+ },
+};
+
+
+clib_error_t *cnat_ipv4_udp_inside_input_exc_init (vlib_main_t *vm)
+{
+ cnat_ipv4_udp_inside_input_exc_main_t * mp = &cnat_ipv4_udp_inside_input_exc_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_udp_inside_input_exc_init);
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_outside_input.c b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_outside_input.c
new file mode 100644
index 00000000000..5a24a111f75
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ipv4_udp_outside_input.c
@@ -0,0 +1,605 @@
+
+/*
+ *---------------------------------------------------------------------------
+ * cnat_ipv4_udp_outside_input_stages.c - cnat_ipv4_udp_outside_input node pipeline stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_ipv4_udp.h"
+#include "dslite_db.h"
+#include "cnat_db.h"
+#include "cnat_v4_functions.h"
+
+//#include <dslite_v6_functions.h>
+//#include <pool.h>
+//#include "cnat_va_db.h"
+
+#define foreach_cnat_ipv4_udp_outside_input_error \
+_(CNAT_V4_UDP_O2I_T_PKT, "v4 udp o2i transmit") \
+_(CNAT_V4_DSLITE_ENCAP_CTR, "to dslite encap") \
+_(CNAT_V4_UDP_O2I_MISS_PKT, "v4 udp o2i db miss drop") \
+_(CNAT_V4_UDP_O2I_TTL_GEN, "v4 udp o2i TTL gen") \
+_(CNAT_V4_UDP_O2I_TTL_DROP, "v4 udp o2i TTL drop") \
+_(CNAT_V4_UDP_O2I_PTB_GEN, "v4 ptb gen") \
+_(CNAT_V4_UDP_O2I_PTB_DROP, "v4 ptb throttle drop") \
+_(CNAT_V4_UDP_O2I_SESSION_DROP, "v4 udp o2i session drop") \
+_(CNAT_V4_UDP_O2I_FILTER_DROP, "v4 udp o2i drop: end point filtering") \
+_(CNAT_V4_UDP_O2I_SUB_FRAG_NO_DB_DROP, "v4 udp o2i subsequent frag no DB drop") \
+_(CNAT_V4_UDP_O2I_1ST_FRAG_FILTER_DROP, "v4 udp i2o 1st frag filter drop")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_udp_outside_input_error
+#undef _
+ CNAT_IPV4_UDP_OUTSIDE_INPUT_N_ERROR,
+} cnat_ipv4_udp_outside_input_t;
+
+static char * cnat_ipv4_udp_outside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_udp_outside_input_error
+#undef _
+};
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ipv4_udp_outside_input_main_t;
+
+typedef enum {
+ //CNAT_V4_O2I_FIXME,
+ CNAT_V4_UDP_O2I_E,
+ CNAT_V4_UDP_O2I_T,
+ CNAT_V4_UDP_O2I_NEXT,
+} cnat_ipv4_udp_outside_input_next_t;
+
+//#define CNAT_V4_DSLITE_ENCAP CNAT_V4_O2I_FIXME
+//#define CNAT_V4_UDP_O2I_E CNAT_V4_O2I_FIXME
+
+cnat_ipv4_udp_outside_input_main_t cnat_ipv4_udp_outside_input_main;
+vlib_node_registration_t cnat_ipv4_udp_outside_input_node;
+
+#define NSTAGES 6
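+
+/*
+ * Same six-stage pipeline as the in2out node, except that the lookup key
+ * is built from the destination (outside) address/port and resolved via
+ * cnat_out2in_hash.
+ */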
+
+/*
+ * Use the generic buffer metadata + first line of packet data prefetch
+ * stage function from <api/pipeline.h>. This is usually a Good Idea.
+ */
+#define stage0 generic_stage0
+
+
+#if 0
+typedef struct cnat_ipv4_udp_outside_input_pipeline_data_ {
+ //spp_node_main_vector_t *nmv;
+ dslite_common_pipeline_data_t common_data;
+ /* Add additional pipeline stage data here... */
+ u32 bucket;
+#ifdef DSLITE_DEF
+ u32 user_bucket;
+ dslite_v4_to_v6_udp_counter_t *udp_counter;
+ dslite_icmp_gen_counter_t *icmp_gen_counter;
+
+#endif
+ cnat_key_t ki;
+ udp_hdr_type_t *udp;
+ u8 frag_pkt;
+} cnat_ipv4_udp_outside_input_pipeline_data_t;
+
+#endif
+
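+/*
+ * Refresh the DB timer for outside-to-inside UDP traffic: the first packet
+ * that marks the entry UDP-active always resets the timer; after that the
+ * timer is refreshed only when mapping refresh in both directions is
+ * configured (per DS-Lite instance, or globally for NAT44).
+ */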
+#define CNAT_UDP_OUTSIDE_UPDATE_FLAG_TIMER(db,dslite_nat44_inst_id) \
+ if (PREDICT_FALSE(!(db->flags & CNAT_DB_FLAG_UDP_ACTIVE))) { \
+ db->flags |= CNAT_DB_FLAG_UDP_ACTIVE; \
+ CNAT_DB_TIMEOUT_RST(db); \
+ } else if (PREDICT_FALSE(db->flags & CNAT_DB_DSLITE_FLAG)) { \
+ if (PREDICT_TRUE(dslite_table_db_ptr[dslite_nat44_inst_id].mapping_refresh_both_direction)) { \
+ CNAT_DB_TIMEOUT_RST(db); \
+ } \
+ } else if (PREDICT_TRUE(mapping_refresh_both_direction)) { \
+ CNAT_DB_TIMEOUT_RST(db); \
+    }
+
+#if 0
+static cnat_ipv4_udp_outside_input_pipeline_data_t pctx_data[SPP_MAXDISPATCH];
+#define EXTRA_PIPELINE_ARGS_PROTO , cnat_ipv4_udp_outside_input_pipeline_data_t *pctx
+#define EXTRA_PIPELINE_ARGS , pctx
+
+#endif
+
+/*inline u32
+is_static_dest_nat_enabled(u16 vrf)
+{
+ if(static_dest_vrf_map_array[vrf] == 1) {
+ return CNAT_SUCCESS;
+ }
+ return CNAT_NO_CONFIG;
+}*/
+
+static inline void __attribute__((unused))
+swap_ip_dst(ipv4_header *ip, cnat_main_db_entry_t *db, u16 vrf)
+{
+
+ CNAT_UPDATE_L3_CHECKSUM_DECLARE
+ /*
+ * calculate checksum
+ */
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)),
+ (clib_host_to_net_u16(ip->checksum)),
+ ((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)))
+ //set ip header
+ ip->dest_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+
+#if 0
+
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ direction = 1;
+ if(cnat_static_dest_db_get_translation(ip->src_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+ old_ip = spp_net_to_host_byte_order_32(&(ip->src_addr));
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(ip->checksum))),
+ ((u16)(old_postmap_ip & 0xFFFF)),
+ ((u16)(old_postmap_ip >> 16)))
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+ ip->src_addr = postmap_ip;
+ }
+ }
+#endif
+}
+
+inline void swap_ip_dst_udp_port(ipv4_header *ip,
+ udp_hdr_type_t *udp,
+ cnat_main_db_entry_t *db, u16 vrf)
+{
+
+#define UDP_PACKET_DEBUG 1
+
+// Temporary debugs which will be suppressed later
+#ifdef UDP_PACKET_DEBUG
+ if (PREDICT_FALSE(udp_outside_packet_dump_enable)) {
+ printf("\nOut2In UDP packet before translation");
+ print_udp_pkt(ip);
+ }
+#endif
+
+#if 0
+ if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
+ direction = 1;
+ if(cnat_static_dest_db_get_translation(ip->src_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
+
+ CNAT_UPDATE_L3_L4_CHECKSUM_DECLARE
+
+ old_ip = spp_net_to_host_byte_order_32(&(ip->src_addr));
+ old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);
+
+ CNAT_UPDATE_L3_L4_CHECKSUM(((u16)(old_ip & 0xFFFF)),
+ ((u16)(old_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(udp->src_port))),
+ (spp_net_to_host_byte_order_16(&(ip->checksum))),
+ (spp_net_to_host_byte_order_16(&(udp->udp_checksum))),
+ ((u16)(old_postmap_ip & 0xFFFF)),
+ ((u16)(old_postmap_ip >> 16)),
+ (spp_net_to_host_byte_order_16(&(udp->src_port))))
+
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+ ip->src_addr = postmap_ip;
+ if (PREDICT_TRUE(udp->udp_checksum)) {
+ udp->udp_checksum = clib_host_to_net_u16(new_l4_c);
+ }
+ }
+ }
+#endif
+ /*
+     * declare variables
+ */
+ CNAT_UPDATE_L3_L4_CHECKSUM_DECLARE
+ /*
+ * calculate checksum
+ */
+ CNAT_UPDATE_L3_L4_CHECKSUM(((u16)(db->out2in_key.k.ipv4)),
+ ((u16)(db->out2in_key.k.ipv4 >> 16)),
+ (db->out2in_key.k.port),
+ (clib_net_to_host_u16(ip->checksum)),
+ (clib_net_to_host_u16(udp->udp_checksum)),
+ ((u16)(db->in2out_key.k.ipv4)),
+ ((u16)(db->in2out_key.k.ipv4 >> 16)),
+ (db->in2out_key.k.port))
+
+
+
+
+ //set ip header
+ ip->dest_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ ip->checksum =
+ clib_host_to_net_u16(new_l3_c);
+
+ //set udp header
+ udp->dest_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+
+ /*
+ * No easy way to avoid this if check except by using
+ * complex logic - may not be worth it.
+ */
+ if (PREDICT_TRUE(udp->udp_checksum)) {
+ udp->udp_checksum = clib_host_to_net_u16(new_l4_c);
+ }
+
+
+
+// Temporary debugs which will be suppressed later
+#ifdef UDP_PACKET_DEBUG
+ if (PREDICT_FALSE(udp_outside_checksum_disable)) {
+ printf("\nOut2In UDP checksum 0x%x disabled by force", new_l4_c);
+ udp->udp_checksum = 0;
+ }
+ if (PREDICT_FALSE(udp_outside_packet_dump_enable)) {
+ printf("\nOut2In UDP packet after translation");
+ print_udp_pkt(ip);
+ }
+#endif
+}
+
+static inline void
+stage1(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ u64 a, b, c;
+ u32 bucket;
+ u8 *prefetch_target;
+
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, buffer_index);
+ ipv4_header *ip = vlib_buffer_get_current (b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)((u8*)ip + ipv4_hdr_len);
+
+ u64 tmp = 0;
+ tmp = vnet_buffer(b0)->vcgn_uii.key.k.ipv4 =
+ clib_net_to_host_u32(ip->dest_addr);
+ vnet_buffer(b0)->vcgn_uii.key.k.port =
+ clib_net_to_host_u16 (udp->dest_port);
+
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.port) << 32;
+
+ PLATFORM_CNAT_SET_RX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_RX],
+ vnet_buffer(b0)->vcgn_uii.key.k.vrf,
+ CNAT_UDP)
+ tmp |= ((u64)vnet_buffer(b0)->vcgn_uii.key.k.vrf) << 48;
+
+ CNAT_V4_GET_HASH(tmp, bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_out2in_hash[bucket]);
+ vnet_buffer(b0)->vcgn_uii.bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ CLIB_PREFETCH(prefetch_target, CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static inline void
+stage2(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{ /* nothing */ }
+
+#define SPP_LOG2_CACHE_LINE_BYTES 6
+#define SPP_CACHE_LINE_BYTES (1 << SPP_LOG2_CACHE_LINE_BYTES)
+
+static inline void
+stage3(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ uword prefetch_target0, prefetch_target1;
+ u32 bucket = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /* read the hash bucket */
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket
+ = cnat_out2in_hash[bucket].next;
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+ CLIB_PREFETCH((void*)prefetch_target0, CLIB_CACHE_LINE_BYTES, STORE);
+ /* Just beyond DB key #2 */
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+ /* If the targets are in different lines, do the second prefetch */
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+ CLIB_PREFETCH((void *)prefetch_target1, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+ }
+}
+
+static inline void
+stage4(vlib_main_t * vm, vlib_node_runtime_t * node, u32 buffer_index)
+{
+ cnat_main_db_entry_t *db;
+ vlib_buffer_t * b0 = vlib_get_buffer(vm, buffer_index);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->out2in_key.key64 ==
+ vnet_buffer(b0)->vcgn_uii.key.key64)) {
+ break;
+ }
+ db_index = db->out2in_hash.next;
+ } while (db_index != EMPTY);
+
+ /* Stick the answer back into the pipeline context structure */
+ vnet_buffer(b0)->vcgn_uii.bucket = db_index;
+ }
+}
+
+#if 0
+
+ALWAYS_INLINE(
+static inline void
+stage5(spp_ctx_t **ctxs, int index, spp_node_t *np,
+ u8 *disp_used EXTRA_PIPELINE_ARGS_PROTO))
+{
+ spp_ctx_t *ctx = ctxs[index];
+ u32 db_index = pctx[index].bucket;
+ /* for nat44, dslite_id will be 1 */
+ u16 dslite_id = *(pctx[index].common_data.dslite_id_ptr);
+
+ DSLITE_PREFETCH_COUNTER(pctx[index].udp_counter,
+ &dslite_all_counters[dslite_id].v46_udp_counters,
+ dslite_v4_to_v6_udp_counter_t,
+ v4_to_v6_udp_output_count,
+ "V4_TO_V6_UDP")
+
+ DSLITE_PREFETCH_COUNTER(pctx[index].icmp_gen_counter,
+ &dslite_all_counters[dslite_id].dslite_icmp_gen_counters,
+ dslite_icmp_gen_counter_t,
+ v6_icmp_gen_count,
+ "V4_TO_V6_icmp")
+
+if (PREDICT_TRUE(db_index != EMPTY)) {
+ cnat_main_db_entry_t *db = cnat_main_db + db_index;
+
+ u32 user_db_index = db->user_index;
+ DSLITE_PRINTF(1, "UDP o2i, db entry found %u %u %u\n",
+ db_index, user_db_index,
+ db->dslite_nat44_inst_id);
+ uword prefetch_target0 = (uword)(cnat_user_db + user_db_index);
+ SPP_PREFETCH(prefetch_target0, 0, LOAD);
+ pctx[index].user_bucket = user_db_index;
+ DSLITE_PRINTF(1, "UDP: Done with prefetch..\n");
+} else {
+ DSLITE_PRINTF(1, "UDP: Stage 5, db_index empty...\n");
+}
+}
+
+#endif
+
+
+static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 bi)
+{
+
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi);
+ u32 db_index = vnet_buffer(b0)->vcgn_uii.bucket;
+ //spp_ctx_t *ctx = (spp_ctx_t *) &vnet_buffer(b0)->vcgn_uii;
+ int disposition = CNAT_V4_UDP_O2I_T;
+ int counter = CNAT_V4_UDP_O2I_T_PKT;
+ ipv4_header *ip = (ipv4_header *)vlib_buffer_get_current(b0);
+ u8 ipv4_hdr_len = (ip->version_hdr_len_words & 0xf) << 2;
+ udp_hdr_type_t *udp = (udp_hdr_type_t *)((u8*)ip + ipv4_hdr_len);
+ vlib_node_t *n = vlib_get_node (vm, cnat_ipv4_udp_outside_input_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ cnat_session_entry_t *session_db = NULL;
+ cnat_main_db_entry_t *db = NULL;
+ cnat_key_t dest_info;
+ u16 dslite_nat44_inst_id __attribute__((unused)) = 0;
+
+ dest_info.k.port = clib_net_to_host_u16(udp->src_port);
+ dest_info.k.ipv4 = clib_net_to_host_u32(ip->src_addr);
+
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+        /* TTL generation was disabled for NAT44 earlier,
+         * but since DS-Lite has been integrated into this node,
+         * TTL generation is now enabled.
+         */
+
+ db = cnat_main_db + db_index;
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+ if(PREDICT_TRUE(!PLATFORM_DBL_SUPPORT)) {
+
+ /* No DBL support, so just update the destn and proceed */
+ db->dst_ipv4 = dest_info.k.ipv4;
+ db->dst_port = dest_info.k.port;
+ CNAT_UDP_OUTSIDE_UPDATE_FLAG_TIMER(db, 0)
+ goto update_pkt;
+ }
+
+
+ if(PREDICT_TRUE((db->dst_ipv4 == dest_info.k.ipv4) &&
+ (db->dst_port == dest_info.k.port))) {
+
+ CNAT_UDP_OUTSIDE_UPDATE_FLAG_TIMER(db, 0)
+ goto update_pkt;
+ } else {
+            /* The session entries belonging to this entry are checked to find
+             * one whose destination IP and port match the source IP and port
+             * of the packet being processed.
+             */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+
+ if (PREDICT_FALSE(db->nsessions == 0)) {
+ /* Should be a static entry
+ * Note this session as the first session and log
+ */
+ cnat_add_dest_n_log(db, &dest_info);
+ CNAT_UDP_OUTSIDE_UPDATE_FLAG_TIMER(db, 0)
+
+ } else if(PREDICT_TRUE(db->nsessions == 1)) {
+
+                /* Destination is not the same as in the main db.
+                 * Multiple-session scenario.
+                 */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ session_db = cnat_handle_1to2_session(db, &dest_info);
+
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_V4_UDP_O2I_E;
+ counter = CNAT_V4_UDP_O2I_SESSION_DROP;
+ goto drop_pkt;
+ }
+
+ /* update session_db(cur packet) timer */
+ CNAT_UDP_OUTSIDE_UPDATE_FLAG_TIMER(session_db, 0)
+ } else {
+                /* More than two sessions exist */
+
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+
+ /* If session already exists,
+ * cnat_create_session_db_entry will return the existing db
+ * else create a new db
+ * If could not create, return NULL
+ */
+ session_db = cnat_create_session_db_entry(&dest_info,
+ db, TRUE);
+
+ if(PREDICT_FALSE(session_db != NULL)) {
+ /* session exists */
+ CNAT_UDP_OUTSIDE_UPDATE_FLAG_TIMER(session_db, 0)
+ } else {
+ /* could not create session db - drop packet */
+ disposition = CNAT_V4_UDP_O2I_E;
+ counter = CNAT_V4_UDP_O2I_SESSION_DROP;
+ goto drop_pkt;
+ }
+ }
+ }
+
+update_pkt:
+
+ /*
+ * 1. update dest ipv4 addr and dest udp port
+ * 2. update ipv4 checksum and udp checksum
+ */
+ //swap_ip_dst(ip, db, db->in2out_key.k.vrf);
+ swap_ip_dst_udp_port(ip, udp, db, db->in2out_key.k.vrf);
+ //DSLITE_PRINTF(1, "Done with swap_ip_dst_udp_port..\n");
+
+ db->out2in_pkts++;
+
+ nat44_dslite_global_stats[0].out2in_forwarding_count++;
+
+ /* #### Temporarily COMMENTED FOR IP ROUTE LOOKUP ISSUE #### */
+
+ //PLATFORM_CNAT_SET_TX_VRF(vnet_buffer(b0)->sw_if_index[VLIB_TX],
+ // db->in2out_key.k.vrf)
+ } else {
+ disposition = CNAT_V4_UDP_O2I_E;
+ counter = CNAT_V4_UDP_O2I_MISS_PKT;
+ /* for NAT44 dslite_id would be 1 */
+ nat44_dslite_common_stats[0].no_translation_entry_drops ++;
+ }
+
+drop_pkt:
+
+ em->counters[node_counter_base_index + counter] += 1;
+ return disposition;
+}
+
+#include <vnet/pipeline.h>
+
+static uword cnat_ipv4_udp_outside_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return dispatch_pipeline (vm, node, frame);
+}
+
+
+VLIB_REGISTER_NODE (cnat_ipv4_udp_outside_input_node) = {
+ .function = cnat_ipv4_udp_outside_input_node_fn,
+ .name = "vcgn-v4-udp-o2i",
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(cnat_ipv4_udp_outside_input_error_strings),
+ .error_strings = cnat_ipv4_udp_outside_input_error_strings,
+
+ .n_next_nodes = CNAT_V4_UDP_O2I_NEXT,
+
+ /* edit / add dispositions here */
+#if 0
+ .next_nodes = {
+ //[CNAT_V4_O2I_FIXME] = "error-drop",
+ //[CNAT_V4_UDP_O2I_E] = "vcgn-v4-udp-o2i-e",
+ [CNAT_V4_UDP_O2I_E] = "vcgn-v4-udp-o2i-e",
+ [CNAT_V4_UDP_O2I_T] = "ip4-input",
+ },
+#endif
+ .next_nodes = {
+ [CNAT_V4_UDP_O2I_E] = "error-drop",
+ [CNAT_V4_UDP_O2I_T] = "ip4-input",
+ },
+
+};
+
+clib_error_t *cnat_ipv4_udp_outside_input_init (vlib_main_t *vm)
+{
+ cnat_ipv4_udp_outside_input_main_t * mp = &cnat_ipv4_udp_outside_input_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (cnat_ipv4_udp_outside_input_init);
diff --git a/plugins/vcgn-plugin/vcgn/cnat_log_api.h b/plugins/vcgn-plugin/vcgn/cnat_log_api.h
new file mode 100644
index 00000000000..60cf683697d
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_log_api.h
@@ -0,0 +1,114 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_log_api.h
+ * Declares the common APIs for logging (both syslog and NFV9)
+ * Copyright (c) 2013, 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_LOG_API_H__
+#define __CNAT_LOG_API_H__
+
+#include "cnat_logging.h"
+
+static inline void cnat_log_ds_lite_mapping_delete(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ return;
+}
+
+static inline void cnat_log_ds_lite_mapping_create(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ return;
+}
+
+static inline void cnat_log_ds_lite_port_limit_exceeded(
+ dslite_key_t * key,
+ dslite_table_entry_t *dslite_entry_ptr)
+{
+ return;
+
+}
+
+static inline void cnat_log_nat44_port_limit_exceeded(
+ cnat_key_t * key,
+ cnat_vrfmap_t *vrfmap)
+{
+ return;
+}
+static inline void cnat_log_nat44_mapping_create(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ return;
+}
+
+static inline void cnat_log_nat44_mapping_delete(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ return;
+}
+
+/* Session Logging API for nat44 */
+static inline void cnat_session_log_nat44_mapping_create (
+ cnat_main_db_entry_t *db,
+ cnat_session_entry_t *sdb,
+ cnat_vrfmap_t *vrfmap )
+{
+ return;
+}
+
+static inline void cnat_session_log_nat44_mapping_delete (
+ cnat_main_db_entry_t *db,
+ cnat_session_entry_t *sdb,
+ cnat_vrfmap_t *vrfmap )
+{
+ return;
+}
+
+/* Session Logging API for dslite */
+static inline void cnat_session_log_ds_lite_mapping_create (
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry,
+ cnat_session_entry_t *sdb )
+{
+ return;
+}
+
+static inline void cnat_session_log_ds_lite_mapping_delete (
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry,
+ cnat_session_entry_t *sdb )
+{
+ return;
+}
+
+#endif /* #ifndef __CNAT_LOG_API_H__ */
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_log_common.h b/plugins/vcgn-plugin/vcgn/cnat_log_common.h
new file mode 100644
index 00000000000..52731bc0028
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_log_common.h
@@ -0,0 +1,79 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_log_common.h
+ * Contains macros and definitions that are common to both syslog and nfv9
+ * Copyright (c) 2011-2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_LOG_COMMON_H__
+#define __CNAT_LOG_COMMON_H__
+
+#include <stdio.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "cnat_db.h"
+#include "nat64_db.h"
+#include "spp_timers.h"
+#include "spp_ctx.h"
+
+/*
+ * This corresponds to the length of the IMETRO SHIM Header for RODDICK
+ * For non-roddick cases, introduce an Ethernet header as well
+ */
+#if 0
+ #if defined(TARGET_RODDICK)
+ #define CNAT_NFV9_SHIM_HDR_OFFSET 8
+ #define CNAT_NFV9_L2_ENCAPS_OFFSET 0
+ #else
+ #define CNAT_NFV9_SHIM_HDR_OFFSET 0
+ #define CNAT_NFV9_L2_ENCAPS_OFFSET 16
+ #endif
+#endif
+
+ #define CNAT_NFV9_IP_HDR_OFFSET 0
+
+ #define CNAT_NFV9_UDP_HDR_OFFSET \
+ (CNAT_NFV9_IP_HDR_OFFSET + sizeof(ipv4_header))
+
+ #define CNAT_NFV9_HDR_OFFSET \
+ (CNAT_NFV9_UDP_HDR_OFFSET + sizeof(udp_hdr_type_t))
+
+u32 cnat_get_sys_up_time_in_ms(void);
+u32 cnat_get_unix_time_in_seconds(void);
+void cnat_dump_time_change_logs(void);
+void cnat_handle_sys_time_change (time_t current_unix_time);
+/*
+ * Maximum number of time log changes we maintain
+ */
+
+#define MAX_TIME_CHANGE_LOGS (8)
+
+typedef struct {
+ /*
+     * A timer used to periodically flush NFv9 & syslog logging packets
+     * that have been waiting to fill up for a long time. This ensures
+     * add/delete events are not delayed too long before they are sent
+     * to the collector.
+ */
+ spp_timer_t log_timer;
+
+ /*
+ * Whether we have initialized the NFv9 information
+ */
+ u8 cnat_log_init_done;
+} cnat_log_global_info_t;
+
+#endif /* __CNAT_LOG_COMMON_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_logging.c b/plugins/vcgn-plugin/vcgn/cnat_logging.c
new file mode 100644
index 00000000000..50805d118ae
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_logging.c
@@ -0,0 +1,3518 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_logging.c
+ *
+ * Copyright (c) 2009-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/udp.h>
+
+
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_v4_functions.h"
+#include "tcp_header_definitions.h"
+#include "cnat_v4_ftp_alg.h"
+#include "cnat_logging.h"
+#include "platform_common.h"
+
+#define CNAT_NFV9_DEBUG_CODE 2
+#if CNAT_NFV9_DEBUG_CODE > 3
+
+#define NFV9_COND if ((my_instance_number != 0) && (my_instance_number != 15))
+
+#define NFV9_DEBUG_PRINTF1(a) NFV9_COND printf(a);
+#define NFV9_DEBUG_PRINTF2(a, b) NFV9_COND printf(a, b);
+#define NFV9_DEBUG_PRINTF3(a, b, c) NFV9_COND printf(a, b, c);
+#define NFV9_DEBUG_PRINTF4(a, b, c, d) NFV9_COND printf(a, b, c, d);
+
+#else
+
+#define NFV9_DEBUG_PRINTF1(a)
+#define NFV9_DEBUG_PRINTF2(a, b)
+#define NFV9_DEBUG_PRINTF3(a, b, c)
+#define NFV9_DEBUG_PRINTF4(a, b, c, d)
+
+#endif
+
+static void cnat_nfv9_insert_ingress_vrfid_name_record(cnat_nfv9_logging_info_t *nfv9_logging_info, u16 index);
+void cnat_nfv9_ingress_vrfid_name_mapping_create(
+ cnat_nfv9_logging_info_t *nfv9_logging_info);
+
+
+cnat_nfv9_global_info_t cnat_nfv9_global_info;
+
+cnat_nfv9_template_t cnat_nfv9_template_info;
+
+#define CNAT_NFV9_OPTION_TEMPLATE cnat_nfv9_template_info.cnat_nfv9_option_template
+
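+/*
+ * NetFlow v9 template ID advertised for each logging record type
+ * (index 0 is unused; the bulk-logging IDs are compiled in only when
+ * NO_BULK_LOGGING is not defined).
+ */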
+u16 cnat_template_id[MAX_RECORDS] =
+ {0, CNAT_NFV9_ADD_TEMPLATE_ID, CNAT_NFV9_DEL_TEMPLATE_ID,
+ CNAT_NFV9_NAT64_ADD_BIB_TEMPLATE_ID,CNAT_NFV9_NAT64_DEL_BIB_TEMPLATE_ID,
+ CNAT_NFV9_NAT64_ADD_SESSION_TEMPLATE_ID,
+ CNAT_NFV9_NAT64_DEL_SESSION_TEMPLATE_ID,
+ CNAT_NFV9_DS_LITE_ADD_TEMPLATE_ID,
+ CNAT_NFV9_DS_LITE_DEL_TEMPLATE_ID
+#ifndef NO_BULK_LOGGING
+ , CNAT_NFV9_NAT44_BULK_ADD_TEMPLATE_ID,
+ CNAT_NFV9_NAT44_BULK_DEL_TEMPLATE_ID,
+ CNAT_NFV9_DS_LITE_BULK_ADD_TEMPLATE_ID,
+ CNAT_NFV9_DS_LITE_BULK_DEL_TEMPLATE_ID
+#endif /* #ifndef NO_BULK_LOGGING */
+ , CNAT_NFV9_INGRESS_VRF_ID_NAME_TEMPLATE_ID,
+ CNAT_NFV9_NAT44_ADD_SESSION_TEMPLATE_ID,
+ CNAT_NFV9_NAT44_DEL_SESSION_TEMPLATE_ID,
+ CNAT_NFV9_DS_LITE_ADD_SESSION_TEMPLATE_ID,
+ CNAT_NFV9_DS_LITE_DEL_SESSION_TEMPLATE_ID
+ };
+
+/*
+ * Logging information structures
+ */
+cnat_nfv9_logging_info_t cnat_default_nfv9_logging_info;
+cnat_nfv9_logging_info_t *cnat_nfv9_logging_info_pool;
+#define NFV9_SERVER_POOL_SIZE 16
+nfv9_server_info_t *nfv9_server_info_pool;
+
+u32 nfv9_src_id = 0;
+
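+/*
+ * Note: vlib_time_now() reports seconds (as a double), so despite the
+ * _in_ms name this returns the uptime truncated to whole seconds.
+ */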
+u32
+cnat_get_sys_up_time_in_ms (void)
+{
+ vlib_main_t * vm = vlib_get_main();
+ u32 cnat_curr_time;
+
+ cnat_curr_time = (u32)vlib_time_now (vm);
+ return cnat_curr_time;
+}
+
+void
+cnat_dump_time_change_logs (void)
+{
+ return;
+}
+
+inline void cnat_nfv9_handle_sys_time_change(time_t current_unix_time)
+{
+ return;
+ #if 0
+ cnat_handle_sys_time_change(current_unix_time);
+ #endif
+}
+
+void cnat_nfv9_update_sys_time_change()
+{
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info = NULL;
+ pool_foreach (my_nfv9_logging_info, cnat_nfv9_logging_info_pool, ({
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ my_nfv9_logging_info->server_index;
+ server->last_template_sent_time = 0;
+ }));
+}
+
+void nfv9_params_show(u32 logging_index)
+{
+ cnat_nfv9_logging_info_t *log_info;
+ if(logging_index == EMPTY) {
+ PLATFORM_DEBUG_PRINT("\nNetflow logging not configured\n");
+ return;
+ }
+
+ log_info = cnat_nfv9_logging_info_pool + logging_index;
+ nfv9_server_info_t *server __attribute__((unused))
+ = nfv9_server_info_pool + log_info->server_index;
+
+
+ PLATFORM_DEBUG_PRINT("\nNetflow parameters --\n");
+ PLATFORM_DEBUG_PRINT("Server index %d IPV4 address: %x, port %d, max log size %d\n",
+ log_info->server_index, server->ipv4_address,
+ server->port, log_info->max_length_minus_max_record_size);
+
+ PLATFORM_DEBUG_PRINT("Server ref count %d Refresh rate %d timeout rate %d\n",
+ server->ref_count, server->refresh_rate,
+ server->timeout_rate);
+
+}
+
+/*
+ * Code to dump NFV9 packets before they are sent
+ */
+void
+cnat_nfv9_dump_logging_context (u32 value1,
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ u32 value2)
+{
+ u8 *pkt_ptr;
+ u32 i;
+ u32 next_nfv9_template_data_index = 0xffff;
+ u32 next_data_flow_index = 0xffff;
+ u32 next_data_record = 0xffff;
+ u32 data_record_size = 0;
+ vlib_main_t *vm = vlib_get_main();
+
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_logging_info->server_index;
+
+ vlib_cli_output(vm,"\nDumping %s packet at locn %d: time 0x%x",
+ (value2 == 1) ? "CURRENT" : "QUEUED",
+ value1,
+ cnat_nfv9_get_unix_time_in_seconds());
+
+ vlib_cli_output(vm, "\ni_vrf 0x%x, ip_address 0x%x, port %d",
+ nfv9_logging_info->i_vrf,
+ server->ipv4_address,
+ server->port);
+
+ vlib_cli_output(vm,"\nseq_num %d",
+ server->sequence_num);
+
+ vlib_cli_output(vm,"\nlast_template_sent time 0x%x, pkts_since_last_template %d",
+ server->last_template_sent_time,
+ server->pkts_since_last_template);
+
+ vlib_cli_output(vm, "\npkt_len %d, add_rec_len %d, del_rec_len %d, total_rec_count %d",
+ nfv9_logging_info->pkt_length,
+ nfv9_logging_info->record_length[NAT44_ADD_RECORD],
+ nfv9_logging_info->record_length[NAT44_DEL_RECORD],
+ nfv9_logging_info->total_record_count);
+
+ vlib_cli_output(vm,"\nbulk_add_rec_len %d, bulk_del_rec_len %d",
+ nfv9_logging_info->record_length[NAT44_BULK_ADD_RECORD],
+ nfv9_logging_info->record_length[NAT44_BULK_DEL_RECORD]);
+
+ vlib_cli_output(vm,"\ncurr_logging_ctx 0x%p, timestamp 0x%x, queued_logging_ctx 0x%p",
+ nfv9_logging_info->current_logging_context,
+ nfv9_logging_info->current_logging_context_timestamp,
+ nfv9_logging_info->queued_logging_context);
+
+ vlib_cli_output(vm,"\nnfv9_hdr 0x%p, tmpl_hdr 0x%p, dataflow_hdr 0x%p",
+ nfv9_logging_info->nfv9_header,
+ nfv9_logging_info->nfv9_template_header,
+ nfv9_logging_info->dataflow_header);
+
+ vlib_cli_output(vm,"\nadd_rec 0x%p, del_rec 0x%p, next_data_ptr 0x%p",
+ nfv9_logging_info->record[NAT44_ADD_RECORD],
+ nfv9_logging_info->record[NAT44_DEL_RECORD],
+ nfv9_logging_info->next_data_ptr);
+
+ vlib_cli_output(vm,"\n");
+
+ pkt_ptr = vlib_buffer_get_current(nfv9_logging_info->current_logging_context);
+ /*
+ * Dump along with 8 bytes of SHIM header
+ */
+ for (i = 0; i < (nfv9_logging_info->pkt_length + CNAT_NFV9_IP_HDR_OFFSET);
+ i = i + 1) {
+ u8 c1, c2, c3;
+ if (i == CNAT_NFV9_IP_HDR_OFFSET) {
+ vlib_cli_output(vm,"\nIP_HEADER: \n");
+ } else if (i == CNAT_NFV9_UDP_HDR_OFFSET) {
+ vlib_cli_output(vm,"\nUDP_HEADER: \n");
+ } else if (i == CNAT_NFV9_HDR_OFFSET) {
+ vlib_cli_output(vm,"\nNFV9 Header: Version:Count: \n");
+ } else if (i == (CNAT_NFV9_HDR_OFFSET+4)) {
+ vlib_cli_output(vm,"\nBoot_Up_Time_In_ms: \n");
+ } else if (i == (CNAT_NFV9_HDR_OFFSET+8)) {
+ vlib_cli_output(vm, "\nUNIX_Time: \n");
+ } else if (i == (CNAT_NFV9_HDR_OFFSET+12)) {
+ vlib_cli_output(vm,"\nSeq_Num: \n");
+ } else if (i == (CNAT_NFV9_HDR_OFFSET+16)) {
+ vlib_cli_output(vm,"\nSource ID: \n");
+ } else if (i == (CNAT_NFV9_HDR_OFFSET+20)) {
+ if (nfv9_logging_info->nfv9_template_header) {
+ vlib_cli_output(vm,"\nNFV9 TEMPLATE HDR: \n");
+ next_nfv9_template_data_index = i + 4;
+ } else {
+ next_data_flow_index = i;
+ }
+ } else if (i == (CNAT_NFV9_TEMPLATE_OFFSET+CNAT_NFV9_TEMPLATE_LENGTH)) {
+ if (nfv9_logging_info->nfv9_template_header) {
+ next_data_flow_index = i;
+ }
+ }
+
+ if (i == next_nfv9_template_data_index) {
+ vlib_cli_output(vm,"\nNFV9 TEMPLATE DATA: \n");
+ } else if (i == next_data_flow_index) {
+ if (*(pkt_ptr + i) == 0x01) {
+ if (*(pkt_ptr + i + 1) == 0x00) {
+ data_record_size = 21;
+ next_data_record = i + 4;
+ next_data_flow_index = i + *(pkt_ptr + i + 3) +
+ *(pkt_ptr + i + 2)*0x100;
+ vlib_cli_output(vm,"\nADD_RECORD (total %d): next_data_flow_index (%d->%d)\n", (next_data_flow_index - i), i, next_data_flow_index);
+ } else if (*(pkt_ptr + i + 1) == 0x01) {
+ data_record_size = 11;
+ next_data_record = i + 4;
+ next_data_flow_index = i + *(pkt_ptr + i + 3) +
+ *(pkt_ptr + i + 2)*0x100;
+ vlib_cli_output(vm,"\nDEL_RECORD (total %d) : next_data_flow_index (%d->%d)\n", (next_data_flow_index - i), i, next_data_flow_index);
+ } else if (*(pkt_ptr + i + 1) == 0x09) {
+ data_record_size = 20;
+ next_data_record = i + 4;
+ next_data_flow_index = i + *(pkt_ptr + i + 3) +
+ *(pkt_ptr + i + 2)*0x100;
+ vlib_cli_output(vm,"\nBULK_ADD_RECORD (total %d) : next_data_flow_index (%d->%d)\n", (next_data_flow_index - i), i, next_data_flow_index);
+ } else if (*(pkt_ptr + i + 1) == 0x0a) {
+ data_record_size = 10;
+ next_data_record = i + 4;
+ next_data_flow_index = i + *(pkt_ptr + i + 3) +
+ *(pkt_ptr + i + 2)*0x100;
+ vlib_cli_output(vm,"\nBULK_DEL_RECORD (total %d) : next_data_flow_index (%d->%d)\n", (next_data_flow_index - i), i, next_data_flow_index);
+ }
+
+ }
+ } else if (i == next_data_record) {
+ vlib_cli_output(vm,"\n");
+ next_data_record += data_record_size;
+ }
+
+ c3 = *(pkt_ptr + i);
+
+ c2 = c3 & 0xf;
+ c1 = (c3 >> 4) & 0xf;
+
+
+ vlib_cli_output(vm,"%c%c ",
+ ((c1 <= 9) ? (c1 + '0') : (c1 - 10 + 'a')),
+ ((c2 <= 9) ? (c2 + '0') : (c2 - 10 + 'a')));
+
+ }
+ vlib_cli_output(vm,"\n");
+}
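+
+/*
+ * Note on the dump above (illustrative): every byte of the logging packet
+ * is printed as two hex nibbles (e.g. 0x45 -> "45"), and banner lines such
+ * as "IP_HEADER:", "UDP_HEADER:" and "NFV9 Header:" mark where each header
+ * begins within the dump.
+ */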
+
+/*
+ * edt: * * cnat_nfv9_pad_added_to_an_addr
+ *
+ * Returns the difference (number of bytes) between new_addr
+ * and org_addr
+ *
+ * Argument: u8 *new_addr, u8 *org_addr
+ * Returns: the number of padding bytes added (new_addr - org_addr)
+ */
+
+static inline
+int cnat_nfv9_pad_added_to_an_addr(u8 *new_addr, u8 *org_addr)
+{
+ uword addr1 = (uword) new_addr;
+ uword addr2 = (uword) org_addr;
+ return (addr1 - addr2);
+}
+
+/*
+ * edt: * * cnat_nfv9_add_end_of_record_padding
+ *
+ * Tries to add padding to data_ptr to ensure it is word aligned
+ *
+ * Argument: u8 * data_ptr
+ * pointer to the data pointer
+ */
+
+static inline
+u8 *cnat_nfv9_add_end_of_record_padding (u8 *data_ptr)
+{
+ uword tmp = (uword) data_ptr;
+ uword pad_value = (uword) NFV9_PAD_VALUE;
+
+ tmp = (tmp + pad_value) & (~pad_value);
+
+ return ((u8 *) tmp);
+}
+
+/*
+ * edt: * * cnat_nfv9_pad_end_of_record_length
+ *
+ * Rounds record_length up so that it is word aligned
+ *
+ * Argument: u16 record_length
+ *           length of the record to be padded
+ */
+
+static inline
+u16 cnat_nfv9_pad_end_of_record_length (u16 record_length)
+{
+ u16 pad_value = NFV9_PAD_VALUE;
+
+ return ((record_length + pad_value) & (~pad_value));
+}
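+
+/*
+ * Worked example for the two padding helpers above (illustrative only,
+ * assuming NFV9_PAD_VALUE is 3, i.e. the mask for 4-byte alignment; the
+ * real value is defined elsewhere):
+ *
+ *   cnat_nfv9_pad_end_of_record_length(21) == (21 + 3) & ~3 == 24
+ *   cnat_nfv9_pad_end_of_record_length(24) == (24 + 3) & ~3 == 24
+ *
+ * cnat_nfv9_add_end_of_record_padding() rounds a data pointer up to the
+ * same boundary, so cnat_nfv9_pad_added_to_an_addr() then reports 0..3
+ * bytes of padding.
+ */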
+
+/* get first interface address */
+static ip4_address_t *
+ip4_interface_first_address (ip4_main_t * im, u32 sw_if_index)
+{
+ ip_lookup_main_t * lm = &im->lookup_main;
+ ip_interface_address_t * ia = 0;
+ ip4_address_t * result = 0;
+
+ foreach_ip_interface_address (lm, ia, sw_if_index,
+ 1 /* honor unnumbered */,
+ ({
+ ip4_address_t * a = ip_interface_address_get_address (lm, ia);
+ result = a;
+ break;
+ }));
+ return result;
+}
+
+void fill_ip_n_udp_hdr (u32 ipv4_addr, u16 port,
+ cnat_nfv9_logging_info_t *nfv9_logging_info)
+{
+ vlib_buffer_t * b0 = nfv9_logging_info->current_logging_context;
+ ipv4_header *ip_header = vlib_buffer_get_current(b0);
+ udp_hdr_type_t *udp_header = (udp_hdr_type_t *)((u8*)ip_header + sizeof(ipv4_header));
+ vlib_main_t *vm = vlib_get_main();
+ u16 ip_length __attribute__((unused));
+ u16 pkt_len = nfv9_logging_info->pkt_length;
+ ip4_address_t *ia0 = 0;
+ u16 src_port = 0x0a0a;
+
+ /*
+     * Clear the SHIM header fields. The PD nodes will set them
+     * appropriately.
+ */
+ PLATFORM_MEMSET_CNAT_LOG_PKT_DATA
+
+ /*
+ * Don't need a special define for 0x45 - IP version and hdr len
+ */
+ ip_header->version_hdr_len_words = 0x45;
+ ip_header->tos = 0;
+ ip_header->frag_flags_offset = 0;
+ ip_header->ttl = 0xff;
+ ip_header->protocol = UDP_PROT;
+ ip_header->dest_addr = clib_host_to_net_u32(ipv4_addr);
+ ip_length = vlib_buffer_length_in_chain (vm, b0);
+ ip_header->total_len_bytes = clib_host_to_net_u16(pkt_len);
+ ia0 = ip4_interface_first_address(&ip4_main, nfv9_logging_info->i_vrf_id);
+ ip_header->src_addr = ia0->as_u32;
+ udp_header->src_port = clib_host_to_net_u16(src_port);
+ udp_header->dest_port = clib_host_to_net_u16(port);
+ udp_header->udp_checksum = 0;
+ udp_header->udp_length =
+ clib_host_to_net_u16(pkt_len - sizeof(ipv4_header));
+ ip_header->checksum = ip4_header_checksum((ip4_header_t *)ip_header);
+}
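+
+/*
+ * Length bookkeeping in fill_ip_n_udp_hdr() (illustrative): pkt_length
+ * counts from the start of the IPv4 header, so
+ *
+ *   ip_header->total_len_bytes = pkt_len
+ *   udp_header->udp_length     = pkt_len - sizeof(ipv4_header)
+ *
+ * e.g. a 20-byte IP header, an 8-byte UDP header and 72 bytes of NFv9
+ * payload give pkt_len == 100 and udp_length == 80 (made-up sizes, for
+ * illustration only). Note also that ip4_interface_first_address() can
+ * return NULL when no address is configured; the code above assumes one
+ * exists.
+ */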
+
+/*
+ * edt: * * cnat_nfv9_fill_nfv9_ip_header
+ *
+ * Tries to fill the fields of the IP header before it
+ * is sent to the L3 infra node.
+ *
+ * Argument: cnat_nfv9_logging_info_t *nfv9_logging_info
+ * structure that contains the packet context
+ */
+
+static inline
+void cnat_nfv9_fill_nfv9_ip_header (cnat_nfv9_logging_info_t *nfv9_logging_info)
+{
+ u16 new_record_length = 0;
+ u16 orig_record_length = 0;
+ vlib_buffer_t * b0 = nfv9_logging_info->current_logging_context;
+
+ /*
+ * Fill in the IP header and port number of the Netflow collector
+ * The L3 Infra node will fill in the rest of the fields
+ */
+
+ nfv9_logging_info->nfv9_header->count =
+ clib_host_to_net_u16(nfv9_logging_info->total_record_count);
+
+ /*
+ * Pad the last add/del record to ensure multiple of 4 bytes
+ */
+
+ if(nfv9_logging_info->last_record != RECORD_INVALID) {
+
+ orig_record_length =
+ nfv9_logging_info->record_length[nfv9_logging_info->last_record];
+
+ new_record_length = cnat_nfv9_pad_end_of_record_length(
+ orig_record_length);
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u16(new_record_length);
+ }
+
+ /*
+ * If the record is padded, ensure the padded bytes are ZERO
+ */
+ if (PREDICT_TRUE(new_record_length - orig_record_length)) {
+        u8 *pkt_ptr = (u8 *) vlib_buffer_get_current(b0) +
+                          nfv9_logging_info->pkt_length;
+
+ /*
+ * Blindly copy 3 bytes of data to Zero to avoid for loops
+ * We have sufficient padding bytes for safety and we won't
+ * go over buffer limits
+ */
+ *pkt_ptr++ = 0;
+ *pkt_ptr++ = 0;
+ *pkt_ptr++ = 0;
+
+ nfv9_logging_info->pkt_length +=
+ (new_record_length - orig_record_length);
+ }
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_logging_info->server_index;
+ fill_ip_n_udp_hdr(server->ipv4_address,
+ server->port, nfv9_logging_info);
+ /*
+ * It is important to set the sw_if_index for the new buffer create
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
+
+}
+
+/*
+ * edt: * * cnat_nfv9_send_queued_pkt
+ *
+ * Tries to send a logging pkt that has been queued earlier
+ * because it could not be sent due to downstream constipation
+ *
+ * Argument: cnat_nfv9_logging_info_t *nfv9_logging_info
+ * structure that contains the packet context
+ */
+
+static inline
+void cnat_nfv9_send_queued_pkt (cnat_nfv9_logging_info_t *nfv9_logging_info)
+{
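+    /*
+     * Note: currently a no-op; the queued logging context is not actually
+     * transmitted here, so callers will still see queued_logging_context
+     * set after this call.
+     */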
+ return;
+}
+
+/*
+ * edt: * * cnat_nfv9_send_pkt
+ *
+ * Tries to send a logging pkt. If the packet cannot be sent
+ * because the rewrite_output node cannot process it, queue
+ * it temporarily and try to send it later.
+ *
+ * Argument: cnat_nfv9_logging_info_t *nfv9_logging_info
+ * structure that contains the packet context
+ */
+
+static inline
+void cnat_nfv9_send_pkt (cnat_nfv9_logging_info_t *nfv9_logging_info)
+{
+ cnat_nfv9_fill_nfv9_ip_header(nfv9_logging_info);
+
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_logging_info->server_index;
+
+    /* Update the sequence number just before sending,
+     * so that multiple NAT44/NAT64/DSLite instances sharing a
+     * single server instance can stamp the sequence number
+     * in the right order (as seen by the server).
+     */
+ server->sequence_num += 1;
+ nfv9_logging_info->nfv9_header->sequence_num =
+ clib_host_to_net_u32(server->sequence_num);
+
+#if DEBUG
+ cnat_nfv9_dump_logging_context (2, nfv9_logging_info, 1);
+#endif
+#if 0 /* call disabled; send_vpp3_nfv9_pkt() is defined below */
+ send_vpp3_nfv9_pkt(nfv9_logging_info);
+#endif
+ nfv9_logging_info->current_logging_context = NULL;
+ /*
+ * Increase last packet sent count
+ */
+ server->pkts_since_last_template++;
+
+ /*
+     * If we are sending an nfv9 template with this packet,
+     * log this timestamp
+ */
+ if (nfv9_logging_info->nfv9_template_header) {
+ server->last_template_sent_time =
+ cnat_nfv9_get_unix_time_in_seconds();
+ server->pkts_since_last_template = 0;
+ }
+
+ return;
+}
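+
+/*
+ * Note: with the send_vpp3_nfv9_pkt() call disabled above (#if 0),
+ * cnat_nfv9_send_pkt() clears current_logging_context without handing the
+ * buffer to ip4-lookup; in the code here the enabled transmit path is
+ * cnat_nfv9_send_pkt_always_success().
+ */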
+
+/*
+ * send_vpp3_nfv9_pkt: hand the logging buffer b0 to the ip4-lookup node
+ * (currently one buffer per frame)
+ */
+
+static inline
+void send_vpp3_nfv9_pkt (cnat_nfv9_logging_info_t *nfv9_logging_info)
+{
+ vlib_node_t *output_node;
+ vlib_main_t *vm = vlib_get_main();
+ vlib_frame_t *f;
+ vlib_buffer_t *b0;
+ u32 *to_next;
+ u32 bi=0;
+ ipv4_header *ip;
+
+  // Let's check and send it to the ip4-lookup node
+ output_node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+ f = vlib_get_frame_to_node (vm, output_node->index);
+
+ if ( nfv9_logging_info->current_logging_context != NULL) {
+ /* Build a pkt from whole cloth */
+ b0 = nfv9_logging_info->current_logging_context;
+ ip = vlib_buffer_get_current(b0);
+ to_next = vlib_frame_vector_args (f);
+ bi = vlib_get_buffer_index (vm, b0);
+ to_next[0] = bi;
+
+ f->n_vectors = 1;
+ b0->current_length = clib_net_to_host_u16(ip->total_len_bytes);
+ vlib_put_frame_to_node (vm, output_node->index, f);
+ }
+ return;
+}
+/*
+ * edt: * * cnat_nfv9_send_pkt_always_success
+ *
+ * Tries to send a logging pkt. This cannot fail due to downstream
+ * constipation because we have already checked if the rewrite_output
+ * node can accept it.
+ *
+ * Argument: cnat_nfv9_logging_info_t *nfv9_logging_info
+ * structure that contains the packet context
+ *
+ * Argument: vlib_node_t *output_node
+ * vlib_node_t structure for rewrite_output node
+ */
+
+static inline
+void cnat_nfv9_send_pkt_always_success (
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ vlib_node_t *output_node)
+{
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_logging_info->server_index;
+ vlib_main_t * vm = vlib_get_main();
+
+ /*
+ * At this point we either have a current or queued logging context
+ */
+ if (PREDICT_TRUE(nfv9_logging_info->current_logging_context != NULL)) {
+ server->sequence_num += 1;
+ nfv9_logging_info->nfv9_header->sequence_num =
+ clib_host_to_net_u32(server->sequence_num);
+ cnat_nfv9_fill_nfv9_ip_header(nfv9_logging_info);
+
+ nfv9_logging_info->current_logging_context->current_length =
+ nfv9_logging_info->pkt_length;
+ vlib_cli_output(vm, "\nNFV9: 3. Sending Current packet\n");
+#if DEBUG
+ cnat_nfv9_dump_logging_context (3, nfv9_logging_info, 1);
+#endif
+ send_vpp3_nfv9_pkt(nfv9_logging_info);
+ nfv9_logging_info->current_logging_context = NULL;
+ } else {
+ /*
+         * For a queued logging context, nfv9_header->count is already set
+ */
+ nfv9_logging_info->queued_logging_context->current_length =
+ nfv9_logging_info->pkt_length;
+ vlib_cli_output(vm,"\nNFV9: 4. Sending Queued packet\n");
+#if DEBUG
+ cnat_nfv9_dump_logging_context (4, nfv9_logging_info, 2);
+#endif
+ send_vpp3_nfv9_pkt(nfv9_logging_info);
+
+ nfv9_logging_info->queued_logging_context = NULL;
+ }
+
+ /*
+ * NF Logging info already deleted, just free it and return
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->deleted)) {
+ pool_put(cnat_nfv9_logging_info_pool, nfv9_logging_info);
+ return;
+ }
+
+ /*
+ * Increase last packet sent count and timestamp
+ */
+ server->pkts_since_last_template++;
+
+ /*
+     * If we are sending an nfv9 template with this packet,
+     * log this timestamp
+ */
+ if (nfv9_logging_info->nfv9_template_header) {
+ server->last_template_sent_time =
+ cnat_nfv9_get_unix_time_in_seconds();
+ server->pkts_since_last_template = 0;
+ }
+}
+
+/*
+ * edt: * * cnat_nfv9_create_logging_context
+ *
+ * Tries to create a logging context with packet buffer
+ * to send a new logging packet
+ *
+ * Argument: cnat_nfv9_logging_info_t *nfv9_logging_info
+ * structure that contains the nfv9 logging info and will store
+ * the packet context as well.
+ */
+
+static inline
+void cnat_nfv9_create_logging_context (
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_nfv9_template_add_flag_t template_flag)
+{
+ vlib_main_t *vm = vlib_get_main();
+ vlib_buffer_t *b0;
+ static u32 bi;
+ u8 i;
+
+ /*
+     * If queued_logging_context is non-NULL, we already have a logging
+     * packet queued to be sent.  First try sending this before allocating
+     * a new context.  We can have only one active packet context per
+     * nfv9_logging_info structure.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->queued_logging_context != NULL)) {
+ cnat_nfv9_send_queued_pkt(nfv9_logging_info);
+ /*
+ * If we cannot still send the queued pkt, just return
+ * Downstream Constipation count would have increased anyway
+ */
+ if (nfv9_logging_info->queued_logging_context != NULL) {
+ cnat_global_counters.nfv9_logging_context_creation_deferred_count++;
+ return;
+ }
+ }
+
+
+ /*
+     * If no buffer can be allocated, return silently;
+     * the calling routine will handle updating the error counters
+ */
+ if (vlib_buffer_alloc (vm, &bi, 1) != 1) {
+ vlib_cli_output(vm, "buffer allocation failure");
+ return;
+ }
+ /* Build a pkt from whole cloth */
+ b0 = vlib_get_buffer (vm, bi);
+ b0->current_data = 0;
+
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_logging_info->server_index;
+
+ nfv9_logging_info->current_logging_context = b0;
+ nfv9_logging_info->current_logging_context_timestamp =
+ cnat_nfv9_get_sys_up_time_in_ms();
+
+
+ nfv9_logging_info->nfv9_header =
+ (nfv9_header_t *) (vlib_buffer_get_current(b0) +
+ (sizeof(ipv4_header)) +
+ (sizeof(udp_hdr_type_t)));
+
+ nfv9_logging_info->nfv9_header->version =
+ clib_host_to_net_u16(CNAT_NFV9_VERSION_NUMBER);
+
+ nfv9_logging_info->nfv9_header->sys_up_time =
+ clib_host_to_net_u32(cnat_nfv9_get_sys_up_time_in_ms());
+
+ nfv9_logging_info->nfv9_header->timestamp =
+ clib_host_to_net_u32(cnat_nfv9_get_unix_time_in_seconds());
+
+
+ nfv9_logging_info->nfv9_header->source_id =
+ clib_host_to_net_u32(nfv9_src_id);
+
+ nfv9_logging_info->dataflow_header = 0;
+
+ for(i = 0; i < MAX_RECORDS;i++) {
+ nfv9_logging_info->record[i] = NULL;
+ nfv9_logging_info->record_length[i] = 0;
+ }
+ nfv9_logging_info->last_record = 0;
+
+
+ nfv9_logging_info->nfv9_template_header = 0;
+ nfv9_logging_info->next_data_ptr =
+ (u8 *) (vlib_buffer_get_current(b0) +
+ sizeof(ipv4_header) + sizeof(udp_hdr_type_t) +
+ sizeof(nfv9_header_t));
+
+ nfv9_logging_info->pkt_length = (CNAT_NFV9_TEMPLATE_OFFSET -
+ CNAT_NFV9_IP_HDR_OFFSET);
+
+
+ /*
+ * Now we have 0 records to start with
+ */
+
+ nfv9_logging_info->total_record_count = 0;
+
+ if ((template_flag == cnat_nfv9_template_add_always) ||
+ (server->pkts_since_last_template >
+ server->refresh_rate) ||
+ ((cnat_nfv9_get_unix_time_in_seconds() -
+ server->last_template_sent_time) >
+ server->timeout_rate)) {
+
+ /*
+ * Send a new template
+ */
+ nfv9_logging_info->nfv9_template_header =
+ (cnat_nfv9_template_t *) nfv9_logging_info->next_data_ptr;
+
+ clib_memcpy(nfv9_logging_info->nfv9_template_header,
+ &cnat_nfv9_template_info,
+ sizeof(cnat_nfv9_template_info));
+
+ /*
+ * Templates are sent irrespective of particular service-type config
+ */
+ nfv9_logging_info->total_record_count = MAX_RECORDS - 1;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_TEMPLATE_LENGTH;
+
+ /*
+ * Set the data pointer beyond the template field
+ */
+ nfv9_logging_info->next_data_ptr =
+ (u8 *) (nfv9_logging_info->nfv9_template_header + 1);
+ /*
+         * Set the template_sent flag to TRUE; this will be checked in
+         * handle_vrfid_name_mapping()
+ */
+ server->template_sent = TEMPLATE_SENT_TRUE;
+ }
+}
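+
+/*
+ * Layout of the export packet assembled above (symbolic, derived from the
+ * offsets used; no byte values implied):
+ *
+ *   ipv4_header | udp_hdr_type_t | nfv9_header_t |
+ *   [template flowset] | data flowset(s) (dataflow header + records + pad)
+ *
+ * The template flowset is included only when template_flag forces it, or
+ * when the server's refresh_rate / timeout_rate thresholds have been
+ * exceeded.
+ */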
+
+void cnat_nfv9_record_create (
+ cnat_nfv9_logging_info_t *nfv9_logging_info, u16 cur_record)
+{
+ int byte_diff = 0;
+ u16 last_record = nfv9_logging_info->last_record;
+
+ if(last_record != 0 && last_record != cur_record) {
+ u16 orig_length, new_length;
+
+ orig_length = nfv9_logging_info->record_length[last_record];
+ new_length = cnat_nfv9_pad_end_of_record_length(orig_length);
+
+ /*
+ * The padding bytes are required after the last record
+ * Ensure length of last record accounts for padding bytes
+ */
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u16(new_length);
+
+ /*
+         * We are starting a different record type, so clear the state
+         * kept for the previous (last) record
+ */
+ nfv9_logging_info->record[last_record] = 0;
+
+ nfv9_logging_info->record_length[last_record] = 0;
+
+ nfv9_logging_info->last_record = 0;
+ }
+
+ nfv9_logging_info->last_record = cur_record;
+
+ /*
+ * The padding bytes are required after the last record
+ * Ensure that we skip over the padding bytes
+ */
+ nfv9_logging_info->dataflow_header = (nfv9_dataflow_record_header_t *)
+ cnat_nfv9_add_end_of_record_padding(nfv9_logging_info->next_data_ptr);
+ /*
+ * Get the difference
+ */
+ byte_diff = cnat_nfv9_pad_added_to_an_addr(
+ (u8 *)nfv9_logging_info->dataflow_header,
+ nfv9_logging_info->next_data_ptr);
+ if(byte_diff > 0) {
+ /*
+ * Update the packet length to account for the pad bytes
+ */
+ nfv9_logging_info->pkt_length += byte_diff;
+ u8 *pkt_ptr = nfv9_logging_info->next_data_ptr;
+
+ /*
+ * Blindly copy 3 bytes of data to Zero to avoid for loops
+ * We have sufficient padding bytes for safety and we won't
+ * go over buffer limits
+ */
+ *pkt_ptr++ = 0;
+ *pkt_ptr++ = 0;
+ *pkt_ptr++ = 0;
+ }
+ /*
+     * Initialize the template_id and the length of the current record
+ */
+ nfv9_logging_info->dataflow_header->dataflow_template_id =
+ clib_host_to_net_u16(cnat_template_id[cur_record]);
+
+ nfv9_logging_info->record[cur_record] =
+ (u8 *) (nfv9_logging_info->dataflow_header + 1);
+
+ nfv9_logging_info->record_length[cur_record] =
+ CNAT_NFV9_DATAFLOW_RECORD_HEADER_LENGTH;
+
+ /*
+ * Update the length of the total NFV9 record
+ */
+ nfv9_logging_info->pkt_length +=
+ CNAT_NFV9_DATAFLOW_RECORD_HEADER_LENGTH;
+
+ /*
+ * Set the data pointer beyond the dataflow header field
+ */
+ nfv9_logging_info->next_data_ptr =
+ (u8 *) (nfv9_logging_info->dataflow_header + 1);
+
+}
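+
+/*
+ * Bookkeeping sketch (illustrative; the CNAT_NFV9_* constants are defined
+ * elsewhere): after cnat_nfv9_record_create(info, NAT44_ADD_RECORD),
+ *
+ *   record_length[NAT44_ADD_RECORD] == CNAT_NFV9_DATAFLOW_RECORD_HEADER_LENGTH
+ *
+ * and each cnat_nfv9_insert_add_record() call below then adds
+ * CNAT_NFV9_ADD_RECORD_LENGTH to both record_length[] and pkt_length and
+ * rewrites dataflow_header->dataflow_length with the running total.
+ */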
+
+static void cnat_nfv9_insert_add_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap)
+{
+ u16 my_proto_mask;
+ u8 my_protocol;
+ nfv9_add_record_t nfv9_logging_add_record;
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT44_ADD_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, NAT44_ADD_RECORD);
+ }
+
+ /*
+ * We should definitely have add_record now, no need to sanitize
+ */
+
+ nfv9_logging_add_record.inside_vrf_id =
+ clib_host_to_net_u32(vrfmap->i_vrf_id);
+
+ nfv9_logging_add_record.outside_vrf_id =
+ clib_host_to_net_u32(vrfmap->o_vrf_id);
+
+ nfv9_logging_add_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ nfv9_logging_add_record.outside_ip_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+
+ nfv9_logging_add_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+ nfv9_logging_add_record.outside_ip_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : GRE_PROT)));
+
+ nfv9_logging_add_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[NAT44_ADD_RECORD],
+ &nfv9_logging_add_record, CNAT_NFV9_ADD_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT44_ADD_RECORD]
+ += CNAT_NFV9_ADD_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_ADD_RECORD_LENGTH;
+
+ nfv9_logging_info->record[NAT44_ADD_RECORD]
+ += CNAT_NFV9_ADD_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT44_ADD_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT44_ADD_RECORD]);
+
+}
+
+
+static void cnat_nfv9_ds_lite_insert_add_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry)
+{
+
+ nfv9_ds_lite_add_record_t nfv9_logging_add_record = {0};
+ cnat_user_db_entry_t *udb = NULL;
+ u16 my_proto_mask;
+ u8 my_protocol;
+
+ udb = cnat_user_db + db->user_index;
+ if (PREDICT_FALSE(!udb)) {
+ return;
+ }
+ if (PREDICT_FALSE(nfv9_logging_info->record[DS_LITE_ADD_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, DS_LITE_ADD_RECORD);
+ }
+ /*
+ * We should definitely have add_record now, no need to sanitize
+ */
+ nfv9_logging_add_record.inside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->i_vrf_id);
+ nfv9_logging_add_record.outside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->o_vrf_id);
+
+#ifdef DSLITE_USER_IPV4
+ nfv9_logging_add_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+#else
+ /*
+     * The inside IPv4 address is sent as 0.0.0.0 for the ds-lite case,
+     * as IPv6 is used here.
+ */
+ nfv9_logging_add_record.inside_ip_addr = 0;
+#endif
+
+ nfv9_logging_add_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(udb->ipv6[0]);
+ nfv9_logging_add_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(udb->ipv6[1]);
+ nfv9_logging_add_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(udb->ipv6[2]);
+ nfv9_logging_add_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(udb->ipv6[3]);
+
+ nfv9_logging_add_record.outside_ip_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+
+ nfv9_logging_add_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+ nfv9_logging_add_record.outside_ip_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : 0)));
+ nfv9_logging_add_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[DS_LITE_ADD_RECORD],
+ &nfv9_logging_add_record, CNAT_NFV9_DS_LITE_ADD_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[DS_LITE_ADD_RECORD]
+ += CNAT_NFV9_DS_LITE_ADD_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_DS_LITE_ADD_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[DS_LITE_ADD_RECORD]
+ += CNAT_NFV9_DS_LITE_ADD_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[DS_LITE_ADD_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[DS_LITE_ADD_RECORD]);
+}
+
+
+static void cnat_nfv9_ds_lite_insert_del_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry)
+{
+
+ nfv9_ds_lite_del_record_t nfv9_logging_del_record = {0};
+ cnat_user_db_entry_t *udb = NULL;
+ u16 my_proto_mask;
+ u8 my_protocol;
+
+ udb = cnat_user_db + db->user_index;
+ if (PREDICT_FALSE(!udb)) {
+ return;
+ }
+ if (PREDICT_FALSE(nfv9_logging_info->record[DS_LITE_DEL_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, DS_LITE_DEL_RECORD);
+ }
+ /*
+ * We should definitely have a del record now.
+ * No need to sanitize
+ */
+ nfv9_logging_del_record.inside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->i_vrf_id);
+
+#ifdef DSLITE_USER_IPV4
+ nfv9_logging_del_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+#else
+ /*
+     * The inside IPv4 address is sent as 0.0.0.0 for the ds-lite case,
+     * as IPv6 is used here.
+ */
+ nfv9_logging_del_record.inside_ip_addr = 0;
+#endif
+
+ nfv9_logging_del_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(udb->ipv6[0]);
+ nfv9_logging_del_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(udb->ipv6[1]);
+ nfv9_logging_del_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(udb->ipv6[2]);
+ nfv9_logging_del_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(udb->ipv6[3]);
+
+ nfv9_logging_del_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : 0)));
+ nfv9_logging_del_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[DS_LITE_DEL_RECORD],
+ &nfv9_logging_del_record, CNAT_NFV9_DS_LITE_DEL_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[DS_LITE_DEL_RECORD] +=
+ CNAT_NFV9_DS_LITE_DEL_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_DS_LITE_DEL_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[DS_LITE_DEL_RECORD]
+ += CNAT_NFV9_DS_LITE_DEL_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[DS_LITE_DEL_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[DS_LITE_DEL_RECORD]);
+}
+
+#ifndef NO_BULK_LOGGING
+static void cnat_nfv9_insert_bulk_add_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap,
+ int bulk_alloc_start_port)
+{
+ nfv9_bulk_add_record_t nfv9_logging_bulk_add_record;
+ bulk_alloc_size_t bulk_size = BULKSIZE_FROM_VRFMAP(vrfmap);
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT44_BULK_ADD_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, NAT44_BULK_ADD_RECORD);
+ }
+
+ /*
+ * We should definitely have add_record now, no need to sanitize
+ */
+
+ nfv9_logging_bulk_add_record.inside_vrf_id =
+ clib_host_to_net_u32(vrfmap->i_vrf_id);
+ nfv9_logging_bulk_add_record.outside_vrf_id =
+ clib_host_to_net_u32(vrfmap->o_vrf_id);
+
+ nfv9_logging_bulk_add_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ nfv9_logging_bulk_add_record.outside_ip_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+
+ nfv9_logging_bulk_add_record.outside_ip_port_start =
+ clib_host_to_net_u16(bulk_alloc_start_port);
+ nfv9_logging_bulk_add_record.outside_ip_port_end =
+ clib_host_to_net_u16(bulk_alloc_start_port + bulk_size -1);
+
+ clib_memcpy(nfv9_logging_info->record[NAT44_BULK_ADD_RECORD],
+ &nfv9_logging_bulk_add_record, CNAT_NFV9_BULK_ADD_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT44_BULK_ADD_RECORD]
+ += CNAT_NFV9_BULK_ADD_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_BULK_ADD_RECORD_LENGTH;
+
+ nfv9_logging_info->record[NAT44_BULK_ADD_RECORD]
+ += CNAT_NFV9_BULK_ADD_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT44_BULK_ADD_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT44_BULK_ADD_RECORD]);
+
+}
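+
+/*
+ * Example (made-up numbers, for illustration only): with
+ * BULKSIZE_FROM_VRFMAP(vrfmap) == 64 and bulk_alloc_start_port == 1024,
+ * the record above advertises the port block 1024..1087
+ * (outside_ip_port_start == 1024, outside_ip_port_end == 1024 + 64 - 1).
+ */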
+
+
+static void cnat_nfv9_ds_lite_insert_bulk_add_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry,
+ int bulk_alloc_start_port)
+{
+
+ nfv9_ds_lite_bulk_add_record_t nfv9_logging_bulk_add_record = {0};
+ cnat_user_db_entry_t *udb = NULL;
+ bulk_alloc_size_t bulk_size = BULKSIZE_FROM_VRFMAP(dslite_entry);
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[DS_LITE_BULK_ADD_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, DS_LITE_BULK_ADD_RECORD);
+ }
+ udb = cnat_user_db + db->user_index;
+ if (PREDICT_FALSE(!udb)) {
+ return;
+ }
+ /*
+ * We should definitely have add_record now, no need to sanitize
+ */
+
+ nfv9_logging_bulk_add_record.inside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->i_vrf_id);
+ nfv9_logging_bulk_add_record.outside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->o_vrf_id);
+
+#ifdef DSLITE_USER_IPV4
+ nfv9_logging_bulk_add_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+#else
+ /*
+     * The inside IPv4 address is sent as 0.0.0.0 for the ds-lite case,
+     * as IPv6 is used here.
+ */
+ nfv9_logging_bulk_add_record.inside_ip_addr = 0;
+#endif
+
+ nfv9_logging_bulk_add_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(udb->ipv6[0]);
+ nfv9_logging_bulk_add_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(udb->ipv6[1]);
+ nfv9_logging_bulk_add_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(udb->ipv6[2]);
+ nfv9_logging_bulk_add_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(udb->ipv6[3]);
+
+ nfv9_logging_bulk_add_record.outside_ip_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+
+ nfv9_logging_bulk_add_record.outside_ip_port_start =
+ clib_host_to_net_u16(bulk_alloc_start_port);
+ nfv9_logging_bulk_add_record.outside_ip_port_end =
+ clib_host_to_net_u16(bulk_alloc_start_port + bulk_size -1);
+
+ clib_memcpy(nfv9_logging_info->record[DS_LITE_BULK_ADD_RECORD],
+ &nfv9_logging_bulk_add_record, CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[DS_LITE_BULK_ADD_RECORD]
+ += CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+ nfv9_logging_info->record[DS_LITE_BULK_ADD_RECORD]
+ += CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[DS_LITE_BULK_ADD_RECORD];
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[DS_LITE_BULK_ADD_RECORD]);
+}
+
+
+static void cnat_nfv9_ds_lite_insert_bulk_del_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry,
+ int bulk_alloc_start_port)
+{
+
+ nfv9_ds_lite_bulk_del_record_t nfv9_logging_bulk_del_record = {0};
+ cnat_user_db_entry_t *udb = NULL;
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[DS_LITE_BULK_DEL_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, DS_LITE_BULK_DEL_RECORD);
+ }
+ udb = cnat_user_db + db->user_index;
+ if (PREDICT_FALSE(!udb)) {
+ return;
+ }
+ /*
+     * We should definitely have the bulk del record now, no need to sanitize
+ */
+
+ nfv9_logging_bulk_del_record.inside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->i_vrf_id);
+
+#ifdef DSLITE_USER_IPV4
+ nfv9_logging_bulk_del_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+#else
+ nfv9_logging_bulk_del_record.inside_ip_addr =
+ clib_host_to_net_u32(0);
+#endif
+
+ nfv9_logging_bulk_del_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(udb->ipv6[0]);
+ nfv9_logging_bulk_del_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(udb->ipv6[1]);
+ nfv9_logging_bulk_del_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(udb->ipv6[2]);
+ nfv9_logging_bulk_del_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(udb->ipv6[3]);
+
+ nfv9_logging_bulk_del_record.outside_ip_port_start =
+ clib_host_to_net_u16(bulk_alloc_start_port);
+
+ clib_memcpy(nfv9_logging_info->record[DS_LITE_BULK_DEL_RECORD],
+ &nfv9_logging_bulk_del_record,
+ CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_LENGTH);
+ nfv9_logging_info->record_length[DS_LITE_BULK_DEL_RECORD] +=
+ CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_LENGTH;
+ nfv9_logging_info->pkt_length +=
+ CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+ nfv9_logging_info->record[DS_LITE_BULK_DEL_RECORD] +=
+ CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[DS_LITE_BULK_DEL_RECORD];
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[DS_LITE_BULK_DEL_RECORD]);
+}
+#endif /* #ifndef NO_BULK_LOGGING */
+
+static void cnat_nfv9_insert_del_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap)
+{
+ u16 my_proto_mask;
+ u8 my_protocol;
+ nfv9_del_record_t nfv9_logging_del_record;
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT44_DEL_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, NAT44_DEL_RECORD);
+ }
+
+ /*
+     * We should definitely have the del record now, no need to sanitize
+ */
+
+ nfv9_logging_del_record.inside_vrf_id =
+ clib_host_to_net_u32(vrfmap->i_vrf_id);
+
+ nfv9_logging_del_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+
+ nfv9_logging_del_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : GRE_PROT)));
+
+ nfv9_logging_del_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[NAT44_DEL_RECORD],
+ &nfv9_logging_del_record, CNAT_NFV9_DEL_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT44_DEL_RECORD]
+ += CNAT_NFV9_DEL_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_DEL_RECORD_LENGTH;
+
+ nfv9_logging_info->record[NAT44_DEL_RECORD]
+ += CNAT_NFV9_DEL_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT44_DEL_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT44_DEL_RECORD]);
+
+}
+
+#ifndef NO_BULK_LOGGING
+static void cnat_nfv9_insert_bulk_del_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info,
+ cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap,
+ int bulk_alloc_start_port)
+{
+ nfv9_bulk_del_record_t nfv9_logging_bulk_del_record;
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT44_BULK_DEL_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, NAT44_BULK_DEL_RECORD);
+ }
+
+ /*
+     * We should definitely have the bulk del record now, no need to sanitize
+ */
+
+ nfv9_logging_bulk_del_record.inside_vrf_id =
+ clib_host_to_net_u32(vrfmap->i_vrf_id);
+
+ nfv9_logging_bulk_del_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+
+ nfv9_logging_bulk_del_record.outside_ip_port_start =
+ clib_host_to_net_u16(bulk_alloc_start_port);
+
+ clib_memcpy(nfv9_logging_info->record[NAT44_BULK_DEL_RECORD],
+ &nfv9_logging_bulk_del_record, CNAT_NFV9_BULK_DEL_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT44_BULK_DEL_RECORD]
+ += CNAT_NFV9_BULK_DEL_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_BULK_DEL_RECORD_LENGTH;
+
+ nfv9_logging_info->record[NAT44_BULK_DEL_RECORD]
+ += CNAT_NFV9_BULK_DEL_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT44_BULK_DEL_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT44_BULK_DEL_RECORD]);
+
+}
+
+#endif /* #ifndef NO_BULK_LOGGING */
+/*
+ * edt: * * cnat_nfv9_log_mapping_create
+ *
+ * Tries to log a creation of mapping record
+ *
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being created
+ *
+ * Argument: cnat_vrfmap_t *vrfmap
+ * VRF Map for the Main DB entry being created
+ */
+void cnat_nfv9_log_mapping_create (cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+ vlib_main_t * vm = vlib_get_main();
+
+ if (PREDICT_FALSE(vrfmap->nfv9_logging_index == EMPTY)) {
+
+ //vlib_cli_output(vm, "\n1. Log Mapping failed");
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ if (cnat_nfv9_logging_info_pool == NULL) {
+ vlib_cli_output(vm, "%s: info_pool pointer is NULL !!!!\n", __func__);
+ return;
+ }
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + vrfmap->nfv9_logging_index;
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ //vlib_cli_output(vm, "\n2. Log Mapping failed");
+ return;
+ }
+
+ }
+
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ cnat_nfv9_insert_bulk_add_record(nfv9_logging_info, db, vrfmap,
+ bulk_alloc);
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED)
+ return; /* No logging required.. bulk port usage */
+ else /* Individual logging .. fall back to old method */
+#endif
+ cnat_nfv9_insert_add_record(nfv9_logging_info, db, vrfmap);
+
+ nfv9_logging_info->total_record_count += 1;
+
+ /*
+ * If we have exceeded the packet length, let us send the
+     * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
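+
+/*
+ * Typical call site (illustrative sketch only; variable names are
+ * placeholders, not code from this plugin):
+ *
+ *   cnat_nfv9_log_mapping_create (db, my_vrfmap
+ * #ifndef NO_BULK_LOGGING
+ *                                 , bulk_start_port
+ * #endif
+ *                                );
+ *
+ * where bulk_start_port is either a positive bulk start port (bulk add
+ * template), CACHE_ALLOC_NO_LOG_REQUIRED (no logging needed), or any other
+ * value to fall back to an individual add record.
+ */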
+
+/*
+ * edt: * * cnat_nfv9_log_mapping_delete
+ *
+ * Tries to log a deletion of mapping record
+ *
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being deleted
+ *
+ * Argument: cnat_vrfmap_t *vrfmap
+ * VRF Map for the Main DB entry being deleted
+ */
+void cnat_nfv9_log_mapping_delete (cnat_main_db_entry_t * db,
+ cnat_vrfmap_t *vrfmap
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+
+ if (PREDICT_FALSE(vrfmap->nfv9_logging_index == EMPTY)) {
+ //vlib_cli_output(vm, "\n3. Log Mapping failed");
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + vrfmap->nfv9_logging_index;
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ //vlib_cli_output(vm, "\n4. Log Mapping failed");
+ return;
+ }
+ }
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ cnat_nfv9_insert_bulk_del_record(nfv9_logging_info, db, vrfmap,
+ bulk_alloc);
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED)
+ return; /* No logging required.. bulk port usage */
+ else /* Individual logging .. fall back to old method */
+#endif
+ cnat_nfv9_insert_del_record(nfv9_logging_info, db, vrfmap);
+
+ nfv9_logging_info->total_record_count += 1;
+
+ /*
+ * If we have exceeded the packet length, let us send the
+     * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+
+/* NAT64 Related routines */
+
+/*
+ * edt: * * cnat_nfv9_bib_mapping_create
+ *
+ * Tries to log a creation of Bib mapping record
+ *
+ * Argument: nat64_bib_entry_t *db
+ * BIB DB entry being created
+ *
+ * Argument: nat64_table_entry_t *nat64_entry
+ * NAT64 Instance where this BIB belongs
+ */
+void cnat_nfv9_bib_mapping_create (nat64_bib_entry_t *db,
+ nat64_table_entry_t *nat64_entry)
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ nfv9_nat64_add_bib_record_t nfv9_logging_add_record;
+
+ if (PREDICT_FALSE(nat64_entry->logging_index == EMPTY)) {
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + nat64_entry->logging_index;
+
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ return;
+ }
+ }
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT64_ADD_BIB_RECORD] == NULL)){
+ cnat_nfv9_record_create(nfv9_logging_info,NAT64_ADD_BIB_RECORD);
+ }
+
+
+ nfv9_logging_add_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[0]);
+ nfv9_logging_add_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[1]);
+ nfv9_logging_add_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[2]);
+ nfv9_logging_add_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[3]);
+
+
+ nfv9_logging_add_record.outside_v4_src_addr =
+ clib_host_to_net_u32(db->v4_out_key.k.ipv4);
+
+ nfv9_logging_add_record.inside_src_port =
+ clib_host_to_net_u16(db->v6_in_key.port);
+ nfv9_logging_add_record.outside_src_port =
+ clib_host_to_net_u16(db->v4_out_key.k.port);
+
+ my_proto_mask = db->v6_in_key.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? IPV6_PROTO_ICMPV6 : 0)));
+ nfv9_logging_add_record.protocol = my_protocol;
+
+
+ clib_memcpy(nfv9_logging_info->record[NAT64_ADD_BIB_RECORD],
+ &nfv9_logging_add_record, CNAT_NFV9_NAT64_ADD_BIB_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT64_ADD_BIB_RECORD] +=
+ CNAT_NFV9_NAT64_ADD_BIB_RECORD_LENGTH;
+ nfv9_logging_info->pkt_length += CNAT_NFV9_NAT64_ADD_BIB_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[NAT64_ADD_BIB_RECORD]
+ += CNAT_NFV9_NAT64_ADD_BIB_RECORD_LENGTH;
+
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT64_ADD_BIB_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT64_ADD_BIB_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+     * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+
+/*
+ * edt: * * cnat_nfv9_session_mapping_create
+ *
+ * Tries to log the creation of a NAT64 session mapping record
+ *
+ * Argument: nat64_bib_entry_t *bdb
+ * BIB DB entry for the session that is created
+ *
+ * Argument: nat64_session_entry_t *sdb
+ * Session DB entry being created
+ *
+ * Argument: nat64_table_entry_t *nat64_entry
+ * NAT64 Instance where this BIB and Session belongs
+ */
+void cnat_nfv9_session_mapping_create (nat64_bib_entry_t *bdb,
+ nat64_session_entry_t *sdb,
+ nat64_table_entry_t *nat64_entry_ptr)
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ u32 dest_v6[4];
+ nfv9_nat64_add_session_record_t nfv9_logging_add_record;
+ u8 *ipv6_addr_ptr;
+ u8 *ipv4_addr_ptr;
+
+
+ if (PREDICT_FALSE(nat64_entry_ptr->logging_index == EMPTY)) {
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + nat64_entry_ptr->logging_index;
+
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)){
+ return;
+ }
+ }
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT64_ADD_SESSION_RECORD]
+ == NULL)){
+ cnat_nfv9_record_create(nfv9_logging_info, NAT64_ADD_SESSION_RECORD);
+ }
+
+
+ nfv9_logging_add_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[0]);
+ nfv9_logging_add_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[1]);
+ nfv9_logging_add_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[2]);
+ nfv9_logging_add_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[3]);
+
+
+ nfv9_logging_add_record.outside_v4_src_addr =
+ clib_host_to_net_u32(bdb->v4_out_key.k.ipv4);
+
+
+ nfv9_logging_add_record.outside_v4_dest_addr =
+ clib_host_to_net_u32(sdb->v4_dest_key.k.ipv4);
+
+ /* Need to create the V6 address using prefix */
+ dest_v6[0] = nat64_entry_ptr->v6_prefix[0];
+ dest_v6[1] = nat64_entry_ptr->v6_prefix[1];
+ dest_v6[2] = nat64_entry_ptr->v6_prefix[2];
+ dest_v6[3] = nat64_entry_ptr->v6_prefix[3];
+
+ ipv6_addr_ptr = (u8 *) (&(dest_v6[0]));
+ ipv4_addr_ptr = (u8 *) (&(sdb->v4_dest_key.k.ipv4));
+
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet0_position) = *(ipv4_addr_ptr);
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet1_position) = *(ipv4_addr_ptr + 1);
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet2_position) = *(ipv4_addr_ptr + 2);
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet3_position) = *(ipv4_addr_ptr + 3);
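+    /*
+     * Example (assumption, for illustration only): with a /96 NAT64
+     * prefix per RFC 6052 the IPv4 address occupies the last four octets,
+     * so octet0_position..octet3_position would be 12..15 and
+     * 192.0.2.1 behind 64:ff9b::/96 becomes 64:ff9b::c000:201.
+     */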
+
+ nfv9_logging_add_record.inside_v6_dest_addr[0] =
+ clib_host_to_net_u32(dest_v6[0]);
+ nfv9_logging_add_record.inside_v6_dest_addr[1] =
+ clib_host_to_net_u32(dest_v6[1]);
+ nfv9_logging_add_record.inside_v6_dest_addr[2] =
+ clib_host_to_net_u32(dest_v6[2]);
+ nfv9_logging_add_record.inside_v6_dest_addr[3] =
+ clib_host_to_net_u32(dest_v6[3]);
+
+ nfv9_logging_add_record.outside_v4_dest_addr =
+ clib_host_to_net_u32(sdb->v4_dest_key.k.ipv4);
+
+ nfv9_logging_add_record.inside_src_port =
+ clib_host_to_net_u16(bdb->v6_in_key.port);
+ nfv9_logging_add_record.outside_src_port =
+ clib_host_to_net_u16(bdb->v4_out_key.k.port);
+
+ nfv9_logging_add_record.dest_port =
+ clib_host_to_net_u16(sdb->v4_dest_key.k.port);
+
+
+ my_proto_mask = bdb->v6_in_key.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? IPV6_PROTO_ICMPV6 : 0)));
+ nfv9_logging_add_record.protocol = my_protocol;
+
+
+ clib_memcpy(nfv9_logging_info->record[NAT64_ADD_SESSION_RECORD],
+ &nfv9_logging_add_record, CNAT_NFV9_NAT64_ADD_SESSION_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT64_ADD_SESSION_RECORD] +=
+ CNAT_NFV9_NAT64_ADD_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->pkt_length += CNAT_NFV9_NAT64_ADD_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[NAT64_ADD_SESSION_RECORD]
+ += CNAT_NFV9_NAT64_ADD_SESSION_RECORD_LENGTH;
+
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT64_ADD_SESSION_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT64_ADD_SESSION_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+     * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+
+/*
+ * edt: * * cnat_nfv9_bib_mapping_delete
+ *
+ * Tries to log a deletion of Bib mapping record
+ *
+ * Argument: nat64_bib_entry_t *db
+ *           BIB DB entry being deleted
+ *
+ * Argument: nat64_table_entry_t *nat64_entry
+ * NAT64 Instance where this BIB belongs
+ */
+void cnat_nfv9_bib_mapping_delete (nat64_bib_entry_t *db,
+ nat64_table_entry_t *nat64_entry)
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ nfv9_nat64_del_bib_record_t nfv9_logging_del_record;
+ if (PREDICT_FALSE(nat64_entry->logging_index == EMPTY)) {
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + nat64_entry->logging_index;
+
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)){
+ return;
+ }
+ }
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT64_DEL_BIB_RECORD] == NULL)){
+ cnat_nfv9_record_create(nfv9_logging_info,NAT64_DEL_BIB_RECORD);
+ }
+
+
+ nfv9_logging_del_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[0]);
+ nfv9_logging_del_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[1]);
+ nfv9_logging_del_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[2]);
+ nfv9_logging_del_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(db->v6_in_key.ipv6[3]);
+
+
+ nfv9_logging_del_record.inside_src_port =
+ clib_host_to_net_u16(db->v6_in_key.port);
+
+ my_proto_mask = db->v6_in_key.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? IPV6_PROTO_ICMPV6 : 0)));
+ nfv9_logging_del_record.protocol = my_protocol;
+
+
+ clib_memcpy(nfv9_logging_info->record[NAT64_DEL_BIB_RECORD],
+ &nfv9_logging_del_record, CNAT_NFV9_NAT64_DEL_BIB_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT64_DEL_BIB_RECORD] +=
+ CNAT_NFV9_NAT64_DEL_BIB_RECORD_LENGTH;
+ nfv9_logging_info->pkt_length += CNAT_NFV9_NAT64_DEL_BIB_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[NAT64_DEL_BIB_RECORD]
+ += CNAT_NFV9_NAT64_DEL_BIB_RECORD_LENGTH;
+
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT64_DEL_BIB_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT64_DEL_BIB_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+     * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+
+/*
+ * edt: * * cnat_nfv9_session_mapping_delete
+ *
+ * Tries to log the deletion of a NAT64 session mapping record
+ *
+ * Argument: nat64_bib_entry_t *bdb
+ *           BIB DB entry for the session that is being deleted
+ *
+ * Argument: nat64_session_entry_t *sdb
+ *           Session DB entry being deleted
+ *
+ * Argument: nat64_table_entry_t *nat64_entry
+ * NAT64 Instance where this BIB and Session belongs
+ */
+void cnat_nfv9_session_mapping_delete (nat64_bib_entry_t *bdb,
+ nat64_session_entry_t *sdb,
+ nat64_table_entry_t *nat64_entry_ptr)
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ u32 dest_v6[4];
+ nfv9_nat64_del_session_record_t nfv9_logging_del_record;
+ u8 *ipv6_addr_ptr;
+ u8 *ipv4_addr_ptr;
+
+ if (PREDICT_FALSE(nat64_entry_ptr->logging_index == EMPTY)) {
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + nat64_entry_ptr->logging_index;
+
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)){
+ return;
+ }
+ }
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[NAT64_DEL_SESSION_RECORD]
+ == NULL)){
+ cnat_nfv9_record_create(nfv9_logging_info, NAT64_DEL_SESSION_RECORD);
+ }
+
+
+ nfv9_logging_del_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[0]);
+ nfv9_logging_del_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[1]);
+ nfv9_logging_del_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[2]);
+ nfv9_logging_del_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(bdb->v6_in_key.ipv6[3]);
+
+ /* Need to create the V6 address using prefix */
+ dest_v6[0] = nat64_entry_ptr->v6_prefix[0];
+ dest_v6[1] = nat64_entry_ptr->v6_prefix[1];
+ dest_v6[2] = nat64_entry_ptr->v6_prefix[2];
+ dest_v6[3] = nat64_entry_ptr->v6_prefix[3];
+
+ ipv6_addr_ptr = (u8 *) (&(dest_v6[0]));
+ ipv4_addr_ptr = (u8 *) (&(sdb->v4_dest_key.k.ipv4));
+
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet0_position) = *(ipv4_addr_ptr);
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet1_position) = *(ipv4_addr_ptr + 1);
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet2_position) = *(ipv4_addr_ptr + 2);
+ *(ipv6_addr_ptr + nat64_entry_ptr->octet3_position) = *(ipv4_addr_ptr + 3);
+
+ nfv9_logging_del_record.inside_v6_dest_addr[0] =
+ clib_host_to_net_u32(dest_v6[0]);
+ nfv9_logging_del_record.inside_v6_dest_addr[1] =
+ clib_host_to_net_u32(dest_v6[1]);
+ nfv9_logging_del_record.inside_v6_dest_addr[2] =
+ clib_host_to_net_u32(dest_v6[2]);
+ nfv9_logging_del_record.inside_v6_dest_addr[3] =
+ clib_host_to_net_u32(dest_v6[3]);
+
+ nfv9_logging_del_record.inside_src_port =
+ clib_host_to_net_u16(bdb->v6_in_key.port);
+
+ nfv9_logging_del_record.dest_port =
+ clib_host_to_net_u16(sdb->v4_dest_key.k.port);
+
+
+ my_proto_mask = bdb->v6_in_key.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? IPV6_PROTO_ICMPV6 : 0)));
+ nfv9_logging_del_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[NAT64_DEL_SESSION_RECORD],
+ &nfv9_logging_del_record, CNAT_NFV9_NAT64_DEL_SESSION_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT64_DEL_SESSION_RECORD] +=
+ CNAT_NFV9_NAT64_DEL_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->pkt_length += CNAT_NFV9_NAT64_DEL_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[NAT64_DEL_SESSION_RECORD]
+ += CNAT_NFV9_NAT64_DEL_SESSION_RECORD_LENGTH;
+
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT64_DEL_SESSION_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT64_DEL_SESSION_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+     * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+/*
+ * edt: * * cnat_nfv9_nat44_log_session_create
+ *
+ * Tries to log a creation of mapping record (session based)
+ *
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being created
+ * Argument: cnat_session_entry_t *sdb
+ * Session DB entry if the destination is not the first dest
+ * Argument: cnat_vrfmap_t *vrfmap
+ * VRF Map for the Main DB entry being created
+ */
+
+void cnat_nfv9_nat44_log_session_create(cnat_main_db_entry_t *db,
+ cnat_session_entry_t *sdb,
+ cnat_vrfmap_t *vrfmap)
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ nfv9_add_session_record_t nfv9_logging_add_session_record;
+
+ if (PREDICT_FALSE(vrfmap->nfv9_logging_index == EMPTY)) {
+ //vlib_cli_output(vm,"\n1. Log Mapping failed");
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + vrfmap->nfv9_logging_index;
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ //vlib_cli_output(vm,"\n2. Log Mapping failed");
+ return;
+ }
+ }
+
+ if(PREDICT_FALSE(nfv9_logging_info->record[
+ NAT44_ADD_SESSION_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, NAT44_ADD_SESSION_RECORD);
+ }
+
+ /*
+ * We should definitely have add_record now, no need to sanitize
+ */
+ nfv9_logging_add_session_record.inside_vrf_id =
+ clib_host_to_net_u32(vrfmap->i_vrf_id);
+ nfv9_logging_add_session_record.outside_vrf_id =
+ clib_host_to_net_u32(vrfmap->o_vrf_id);
+
+ nfv9_logging_add_session_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+ nfv9_logging_add_session_record.outside_ip_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+
+    /* If sdb is null, it is assumed that logging is being done
+     * for the first destination which is held in the main db
+     * itself
+     */
+ if(PREDICT_TRUE(sdb == NULL)) {
+ nfv9_logging_add_session_record.dest_ip_addr =
+ clib_host_to_net_u32(db->dst_ipv4);
+ nfv9_logging_add_session_record.dest_port =
+ clib_host_to_net_u16(db->dst_port);
+ } else {
+ nfv9_logging_add_session_record.dest_ip_addr =
+ clib_host_to_net_u32(sdb->v4_dest_key.k.ipv4);
+ nfv9_logging_add_session_record.dest_port =
+ clib_host_to_net_u16(sdb->v4_dest_key.k.port);
+ }
+
+ nfv9_logging_add_session_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+ nfv9_logging_add_session_record.outside_ip_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : GRE_PROT)));
+ nfv9_logging_add_session_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[NAT44_ADD_SESSION_RECORD],
+ &nfv9_logging_add_session_record,
+ CNAT_NFV9_NAT44_ADD_SESSION_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT44_ADD_SESSION_RECORD]
+ += CNAT_NFV9_NAT44_ADD_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->pkt_length += CNAT_NFV9_NAT44_ADD_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+
+ nfv9_logging_info->record[NAT44_ADD_SESSION_RECORD]
+ += CNAT_NFV9_NAT44_ADD_SESSION_RECORD_LENGTH;
+
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT44_ADD_SESSION_RECORD];
+
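+ /* Keep the data flowset length in step with the records added so far */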
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT44_ADD_SESSION_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+/*
+ * edt: * * cnat_nfv9_nat44_log_session_delete
+ *
+ * Tries to log a deletion of mapping record (session based)
+ *
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being deleted
+ * Argument: cnat_session_entry_t *sdb
+ * Session DB entry if the destination is not the first dest
+ * Argument: cnat_vrfmap_t *vrfmap
+ * VRF Map for the Main DB entry being deleted
+ */
+
+void cnat_nfv9_nat44_log_session_delete(cnat_main_db_entry_t *db,
+ cnat_session_entry_t *sdb,
+ cnat_vrfmap_t *vrfmap)
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ nfv9_del_session_record_t nfv9_logging_del_session_record;
+
+ if (PREDICT_FALSE(vrfmap->nfv9_logging_index == EMPTY)) {
+ //vlib_cli_output(vm, "\n1. Log Mapping failed");
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + vrfmap->nfv9_logging_index;
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ //vlib_cli_output(vm, "\n2. Log Mapping failed");
+ return;
+ }
+ }
+
+ if(PREDICT_FALSE(nfv9_logging_info->record[
+ NAT44_DEL_SESSION_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, NAT44_DEL_SESSION_RECORD);
+ }
+
+ /*
+ * We should definitely have the del record now, no need to sanitize
+ */
+ nfv9_logging_del_session_record.inside_vrf_id =
+ clib_host_to_net_u32(vrfmap->i_vrf_id);
+
+ nfv9_logging_del_session_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+
+ /* If sdb is null, it is assumed that logging is being done
+ * for the first destination which is held in the main db
+ * itself
+ */
+ if(PREDICT_TRUE(sdb == NULL)) {
+ nfv9_logging_del_session_record.dest_ip_addr =
+ clib_host_to_net_u32(db->dst_ipv4);
+ nfv9_logging_del_session_record.dest_port =
+ clib_host_to_net_u16(db->dst_port);
+ } else {
+ nfv9_logging_del_session_record.dest_ip_addr =
+ clib_host_to_net_u32(sdb->v4_dest_key.k.ipv4);
+ nfv9_logging_del_session_record.dest_port =
+ clib_host_to_net_u16(sdb->v4_dest_key.k.port);
+ }
+
+ nfv9_logging_del_session_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : GRE_PROT)));
+
+ nfv9_logging_del_session_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[NAT44_DEL_SESSION_RECORD],
+ &nfv9_logging_del_session_record,
+ CNAT_NFV9_NAT44_DEL_SESSION_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[NAT44_DEL_SESSION_RECORD]
+ += CNAT_NFV9_NAT44_DEL_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->pkt_length += CNAT_NFV9_NAT44_DEL_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[NAT44_DEL_SESSION_RECORD]
+ += CNAT_NFV9_NAT44_DEL_SESSION_RECORD_LENGTH;
+
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[NAT44_DEL_SESSION_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[NAT44_DEL_SESSION_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+/*
+ * DS-Lite APIs for netflow logging
+ */
+
+/*
+ * edt: * * cnat_nfv9_ds_lite_mapping_create
+ *
+ * Tries to log a creation of mapping record
+ *
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being created
+ *
+ * Argument: dslite_table_entry_t *dslite_entry
+ * ds-lite instance for the Main DB entry being created
+ */
+void cnat_nfv9_ds_lite_mapping_create(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+
+ cnat_nfv9_logging_info_t *nfv9_logging_info = NULL;
+
+ if (PREDICT_FALSE(!(db && dslite_entry))) {
+ return;
+ }
+ if (PREDICT_FALSE(dslite_entry->nfv9_logging_index == EMPTY)) {
+ /*
+ * no logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + dslite_entry->nfv9_logging_index;
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ return;
+ }
+ }
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ cnat_nfv9_ds_lite_insert_bulk_add_record(nfv9_logging_info,
+ db, dslite_entry, bulk_alloc);
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED)
+ return; /* No logging required.. bulk port usage */
+ else /* Individual logging .. fall back to old method */
+#endif /*NO_BULK_LOGGING*/
+ cnat_nfv9_ds_lite_insert_add_record(nfv9_logging_info, db, dslite_entry);
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+/*
+ * edt: * * cnat_nfv9_ds_lite_mapping_delete
+ *
+ * Tries to log a deletion of mapping record
+ *
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being deleted
+ *
+ * Argument: dslite_table_entry_t *dslite_entry
+ * ds-lite instance for the Main DB entry being deleted
+ */
+void cnat_nfv9_ds_lite_mapping_delete(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+
+ cnat_nfv9_logging_info_t *nfv9_logging_info = NULL;
+ if (PREDICT_FALSE(!(db && dslite_entry))) {
+ return;
+ }
+ if (PREDICT_FALSE(dslite_entry->nfv9_logging_index == EMPTY)) {
+ /*
+ * No logging configured, silently return
+ */
+ return;
+ }
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + dslite_entry->nfv9_logging_index;
+
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ return;
+ }
+ }
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ cnat_nfv9_ds_lite_insert_bulk_del_record(nfv9_logging_info,
+ db, dslite_entry, bulk_alloc);
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED)
+ return; /* No logging required.. bulk port usage */
+ else /* Individual logging .. fall back to old method */
+#endif /*NO_BULK_LOGGING*/
+ cnat_nfv9_ds_lite_insert_del_record(nfv9_logging_info, db, dslite_entry);
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+}
+
+/*
+ * edt: * * cnat_nfv9_dslite_log_session_create
+ *
+ * Tries to log a creation of mapping record (session based)
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being created
+ * Argument: cnat_session_entry_t *sdb
+ * Session DB entry if the destination is not the first dest
+ * Argument: dslite_table_entry_t *dslite_entry,
+ * dslite table entry for dslite instance
+ */
+
+void cnat_nfv9_ds_lite_log_session_create(
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry,
+ cnat_session_entry_t *sdb)
+{
+
+ nfv9_ds_lite_add_session_record_t nfv9_logging_add_record ;
+ cnat_user_db_entry_t *udb = NULL;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ cnat_nfv9_logging_info_t *nfv9_logging_info = 0;
+
+ if (PREDICT_FALSE(dslite_entry->nfv9_logging_index == EMPTY)) {
+ /*
+ * no logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + dslite_entry->nfv9_logging_index;
+ udb = cnat_user_db + db->user_index;
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ return;
+ }
+ }
+
+ udb = cnat_user_db + db->user_index;
+ if (PREDICT_FALSE(!udb)) {
+ return;
+ }
+ if (PREDICT_FALSE(nfv9_logging_info->record[DS_LITE_ADD_SESSION_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, DS_LITE_ADD_SESSION_RECORD);
+ }
+ /*
+ * We should definitely have add_record now, no need to sanitize
+ */
+ nfv9_logging_add_record.inside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->i_vrf_id);
+ nfv9_logging_add_record.outside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->o_vrf_id);
+
+ nfv9_logging_add_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+
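+ /* DS-Lite session records also carry the subscriber's IPv6 source
+ * address, taken from the user DB entry */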
+ nfv9_logging_add_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(udb->ipv6[0]);
+ nfv9_logging_add_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(udb->ipv6[1]);
+ nfv9_logging_add_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(udb->ipv6[2]);
+ nfv9_logging_add_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(udb->ipv6[3]);
+
+ nfv9_logging_add_record.outside_ip_addr =
+ clib_host_to_net_u32(db->out2in_key.k.ipv4);
+
+ nfv9_logging_add_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+ nfv9_logging_add_record.outside_ip_port =
+ clib_host_to_net_u16(db->out2in_key.k.port);
+
+ /* If sdb is null, it is assumed that logging is being done
+ * for the first destination, which is held in the main db itself
+ */
+ if(PREDICT_TRUE(sdb == NULL)) {
+ nfv9_logging_add_record.dest_ip_addr =
+ clib_host_to_net_u32(db->dst_ipv4);
+ nfv9_logging_add_record.dest_port =
+ clib_host_to_net_u16(db->dst_port);
+ } else {
+ nfv9_logging_add_record.dest_ip_addr =
+ clib_host_to_net_u32(sdb->v4_dest_key.k.ipv4);
+ nfv9_logging_add_record.dest_port =
+ clib_host_to_net_u16(sdb->v4_dest_key.k.port);
+ }
+
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : 0)));
+ nfv9_logging_add_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[DS_LITE_ADD_SESSION_RECORD],
+ &nfv9_logging_add_record, CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[DS_LITE_ADD_SESSION_RECORD]
+ += CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[DS_LITE_ADD_SESSION_RECORD]
+ += CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[DS_LITE_ADD_SESSION_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[DS_LITE_ADD_SESSION_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+
+}
+
+/*
+ * edt: * * cnat_nfv9_dslite_log_session_delete
+ *
+ * Tries to log a deletion of mapping record (session based)
+ * Argument: cnat_main_db_entry_t *db
+ * Main DB entry being deleted
+ * Argument: cnat_session_entry_t *sdb
+ * Session DB entry if the destination is not the first dest
+ * Argument: dslite_table_entry_t *dslite_entry,
+ * dslite table entry for dslite instance
+ */
+
+void cnat_nfv9_ds_lite_log_session_delete(
+ cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry,
+ cnat_session_entry_t *sdb)
+{
+
+ nfv9_ds_lite_del_session_record_t nfv9_logging_add_record = {0};
+ cnat_user_db_entry_t *udb = NULL;
+ u16 my_proto_mask;
+ u8 my_protocol;
+ cnat_nfv9_logging_info_t *nfv9_logging_info = NULL;
+
+ if (PREDICT_FALSE(dslite_entry->nfv9_logging_index == EMPTY)) {
+ /*
+ * no logging configured, silently return
+ */
+ return;
+ }
+
+ nfv9_logging_info =
+ cnat_nfv9_logging_info_pool + dslite_entry->nfv9_logging_index;
+ udb = cnat_user_db + db->user_index;
+
+ if (PREDICT_FALSE(!udb)) {
+ return;
+ }
+
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+
+ /*
+ * If still empty, return after increasing the count
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->current_logging_context == NULL)) {
+ return;
+ }
+ }
+
+ if (PREDICT_FALSE(nfv9_logging_info->record[DS_LITE_DEL_SESSION_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, DS_LITE_DEL_SESSION_RECORD);
+ }
+ /*
+ * We should definitely have the del record now, no need to sanitize
+ */
+ nfv9_logging_add_record.inside_vrf_id =
+ clib_host_to_net_u32(dslite_entry->i_vrf_id);
+
+ nfv9_logging_add_record.inside_ip_addr =
+ clib_host_to_net_u32(db->in2out_key.k.ipv4);
+
+ nfv9_logging_add_record.inside_v6_src_addr[0] =
+ clib_host_to_net_u32(udb->ipv6[0]);
+ nfv9_logging_add_record.inside_v6_src_addr[1] =
+ clib_host_to_net_u32(udb->ipv6[1]);
+ nfv9_logging_add_record.inside_v6_src_addr[2] =
+ clib_host_to_net_u32(udb->ipv6[2]);
+ nfv9_logging_add_record.inside_v6_src_addr[3] =
+ clib_host_to_net_u32(udb->ipv6[3]);
+
+ nfv9_logging_add_record.inside_ip_port =
+ clib_host_to_net_u16(db->in2out_key.k.port);
+
+ /* If sdb is null, it is assumed that logging is being done
+ * for the first destination which is held in the main db
+ * itself
+ */
+ if(PREDICT_TRUE(sdb == NULL)) {
+ nfv9_logging_add_record.dest_ip_addr =
+ clib_host_to_net_u32(db->dst_ipv4);
+ nfv9_logging_add_record.dest_port =
+ clib_host_to_net_u16(db->dst_port);
+ } else {
+ nfv9_logging_add_record.dest_ip_addr =
+ clib_host_to_net_u32(sdb->v4_dest_key.k.ipv4);
+ nfv9_logging_add_record.dest_port =
+ clib_host_to_net_u16(sdb->v4_dest_key.k.port);
+ }
+
+
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+
+ my_protocol = ((my_proto_mask == CNAT_UDP) ? UDP_PROT :
+ ((my_proto_mask == CNAT_TCP) ? TCP_PROT :
+ ((my_proto_mask == CNAT_ICMP) ? ICMP_PROT : 0)));
+ nfv9_logging_add_record.protocol = my_protocol;
+
+ clib_memcpy(nfv9_logging_info->record[DS_LITE_DEL_SESSION_RECORD],
+ &nfv9_logging_add_record, CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[DS_LITE_DEL_SESSION_RECORD]
+ += CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length += CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[DS_LITE_DEL_SESSION_RECORD]
+ += CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_LENGTH;
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[DS_LITE_DEL_SESSION_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[DS_LITE_DEL_SESSION_RECORD]);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is a buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+
+}
+
+
+/*
+ * netflow logging API for ingress vrf_id to name mapping
+ */
+
+/*
+ * edt: * * handle_vrfid_name_mapping
+ * Searches the netflow pool for a valid entry and, once one is
+ * found, sends all vrfid-name mapping info using that entry
+ */
+
+
+static inline __attribute__((unused))
+void handle_vrfid_name_mapping(void)
+{
+ cnat_nfv9_logging_info_t *nfv9_logging_info = NULL;
+
+ pool_foreach (nfv9_logging_info, cnat_nfv9_logging_info_pool, ({
+ if(PREDICT_FALSE(nfv9_logging_info == NULL)) {
+ continue;
+ }
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_logging_info->server_index;
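+ /* Emit the vrfid-name option records once right after a template has
+ * been sent, then clear the flag until the next template refresh */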
+ if(server->template_sent == TEMPLATE_SENT_TRUE) {
+ cnat_nfv9_ingress_vrfid_name_mapping_create(nfv9_logging_info);
+ server->template_sent = TEMPLATE_SENT_FALSE;
+ }
+ }));
+}
+
+/*
+ * edt: * * cnat_nfv9_ingress_vrfid_name_mapping_create
+ *
+ * Tries to log vrfid-name mapping record
+ * Argument: nfv9_logging_info - pointer to the netflow logging instance
+ */
+
+
+void cnat_nfv9_ingress_vrfid_name_mapping_create(
+ cnat_nfv9_logging_info_t *nfv9_logging_info)
+{
+ u16 index = 0;
+
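+ /* Walk all vrfid-name slots, skipping unused entries; flush the packet
+ * whenever it fills up or the last slot has been processed */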
+ for (index = 0; index < MAX_VRFID; index++) {
+ if(vrfid_name_map[index].ref_count == 0) {
+ continue;
+ }
+ if (PREDICT_FALSE(
+ nfv9_logging_info->current_logging_context == NULL)) {
+ cnat_nfv9_create_logging_context(nfv9_logging_info,
+ cnat_nfv9_template_add_default);
+ }
+ cnat_nfv9_insert_ingress_vrfid_name_record(
+ nfv9_logging_info,index);
+ if (PREDICT_FALSE(nfv9_logging_info->pkt_length >
+ nfv9_logging_info->max_length_minus_max_record_size) ||
+ PREDICT_FALSE(index == MAX_VRFID - 1)) {
+ if (PREDICT_TRUE(nfv9_logging_info->current_logging_context
+ != NULL)) {
+ cnat_nfv9_send_pkt(nfv9_logging_info);
+ }
+ }
+ }/*for()*/
+ return;
+}
+
+static void cnat_nfv9_insert_ingress_vrfid_name_record(
+ cnat_nfv9_logging_info_t *nfv9_logging_info, u16 index)
+{
+ nfv9_ingress_vrfid_name_record_t nfv9_ingress_vrfid_name_record = {0};
+
+ if (PREDICT_FALSE(
+ nfv9_logging_info->record[INGRESS_VRF_ID_NAME_RECORD] == NULL)) {
+ cnat_nfv9_record_create(nfv9_logging_info, INGRESS_VRF_ID_NAME_RECORD);
+ }
+ nfv9_ingress_vrfid_name_record.ingress_vrf_id =
+ clib_host_to_net_u32(vrfid_name_map[index].vrf_id);
+
+ clib_memcpy(nfv9_ingress_vrfid_name_record.ingress_vrf_name,
+ vrfid_name_map[index].vrf_name, NFV9_VRF_NAME_LEN);
+
+ clib_memcpy(nfv9_logging_info->record[INGRESS_VRF_ID_NAME_RECORD],
+ &nfv9_ingress_vrfid_name_record,
+ CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_LENGTH);
+
+ nfv9_logging_info->record_length[INGRESS_VRF_ID_NAME_RECORD]
+ += CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_LENGTH;
+
+ nfv9_logging_info->pkt_length +=
+ CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_LENGTH;
+
+ nfv9_logging_info->total_record_count += 1;
+
+ nfv9_logging_info->record[INGRESS_VRF_ID_NAME_RECORD]
+ += CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_LENGTH;
+
+ nfv9_logging_info->next_data_ptr =
+ nfv9_logging_info->record[INGRESS_VRF_ID_NAME_RECORD];
+
+ nfv9_logging_info->dataflow_header->dataflow_length =
+ clib_host_to_net_u32(
+ nfv9_logging_info->record_length[INGRESS_VRF_ID_NAME_RECORD]);
+ return;
+}
+/*
+ * edt: * * handle_pending_nfv9_pkts
+ *
+ * Timer handler for sending any pending NFV9 records
+ *
+ * Takes no arguments; invoked periodically to flush logging contexts
+ * that have been pending for too long
+ */
+void handle_pending_nfv9_pkts()
+{
+ vlib_node_t *output_node;
+ vlib_main_t * vm = vlib_get_main();
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info = 0;
+ u32 current_timestamp = cnat_nfv9_get_sys_up_time_in_ms();
+ u32 current_unix_time_in_seconds = cnat_nfv9_get_unix_time_in_seconds();
+
+ output_node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+
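+ /* Walk every logging instance: flush contexts that are queued or have
+ * been pending for more than a second, and re-send the template when
+ * its refresh interval (timeout_rate) has elapsed */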
+ pool_foreach (my_nfv9_logging_info, cnat_nfv9_logging_info_pool, ({
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ my_nfv9_logging_info->server_index;
+ if (my_nfv9_logging_info->queued_logging_context ||
+ (my_nfv9_logging_info->current_logging_context &&
+ (current_timestamp -
+ my_nfv9_logging_info->current_logging_context_timestamp)
+ > 1000)) {
+ /*
+ * If there is a current logging context and timestamp
+ * indicates it is pending for long, send it out
+ * Also if there is a queued context send it out as well
+ */
+ vlib_cli_output(vm, "\nNFV9_TIMER: queued %p, curr %p",
+ my_nfv9_logging_info->queued_logging_context,
+ my_nfv9_logging_info->current_logging_context);
+
+
+ cnat_nfv9_send_pkt_always_success(my_nfv9_logging_info,
+ output_node);
+ } else {
+ /*
+ * If the last_template_sent_time is too far back in time
+ * send the template even if there is no NFv9 records to send
+ */
+ if ((my_nfv9_logging_info->queued_logging_context == NULL) &&
+ (my_nfv9_logging_info->current_logging_context == NULL) &&
+ ((current_unix_time_in_seconds -
+ server->last_template_sent_time) >
+ server->timeout_rate)) {
+ cnat_nfv9_create_logging_context(my_nfv9_logging_info,
+ cnat_nfv9_template_add_always);
+ if (PREDICT_TRUE(my_nfv9_logging_info->current_logging_context
+ != NULL)) {
+ cnat_nfv9_send_pkt(my_nfv9_logging_info);
+ }
+ }
+ }
+ }));
+}
+
+/*
+ * Code to initialize the NFV9 template. This is done when NFV9 logging is
+ * enabled. It is done only once and later used when sending NFV9 template records.
+ */
+static void
+cnat_nfv9_template_init (void)
+{
+ cnat_nfv9_template_info.flowset_id =
+ clib_host_to_net_u16(CNAT_NFV9_TEMPLATE_FLOWSET_ID);
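+ /* The template flowset length excludes the option template, which is
+ * emitted with its own flowset header further below */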
+ cnat_nfv9_template_info.length =
+ clib_host_to_net_u16(CNAT_NFV9_TEMPLATE_LENGTH -
+ CNAT_NFV9_OPTION_TEMPLATE_LENGTH);
+ /*
+ * Create the add Template
+ */
+ cnat_nfv9_template_info.add_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_ADD_TEMPLATE_ID);
+ cnat_nfv9_template_info.add_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_ADD_FIELD_COUNT);
+
+ cnat_nfv9_template_info.add_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.add_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_outside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.add_outside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_outside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_outside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.add_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_outside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.add_outside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.add_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+ /*
+ * Create the delete Template
+ */
+ cnat_nfv9_template_info.del_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_DEL_TEMPLATE_ID);
+ cnat_nfv9_template_info.del_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_DEL_FIELD_COUNT);
+
+ cnat_nfv9_template_info.del_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.del_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.del_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.del_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.del_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+
+ /* Create NAT64 BIB Add template */
+#if 0
+ cnat_nfv9_template_info.nat64_add_bib_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_ADD_BIB_TEMPLATE_ID);
+ cnat_nfv9_template_info.nat64_add_bib_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_ADD_BIB_FIELD_COUNT);
+
+
+ cnat_nfv9_template_info.nat64_add_bib_inside_ipv6_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_bib_inside_ipv6_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_add_bib_outside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_bib_outside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_add_bib_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_bib_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_add_bib_outside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_bib_outside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_add_bib_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_bib_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+
+ /* NAT64 BIB Delete */
+ cnat_nfv9_template_info.nat64_del_bib_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_DEL_BIB_TEMPLATE_ID);
+ cnat_nfv9_template_info.nat64_del_bib_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_DEL_BIB_FIELD_COUNT);
+
+ cnat_nfv9_template_info.nat64_del_bib_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_bib_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_del_bib_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_bib_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_del_bib_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_bib_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+
+ /* NAT64 Session Add */
+
+ cnat_nfv9_template_info.nat64_add_session_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_ADD_SESSION_TEMPLATE_ID);
+ cnat_nfv9_template_info.nat64_add_session_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_ADD_SESSION_FIELD_COUNT);
+
+
+ cnat_nfv9_template_info.nat64_add_session_inside_ipv6_src_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_inside_ipv6_src_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_add_session_outside_ip_src_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_outside_ip_src_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+
+ cnat_nfv9_template_info.nat64_add_session_inside_ipv6_dst_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_DST_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_inside_ipv6_dst_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_DST_ADDR_FIELD_SIZE);
+
+
+ cnat_nfv9_template_info.nat64_add_session_outside_ip_dst_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_DST_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_outside_ip_dst_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_DST_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_add_session_inside_ip_src_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_inside_ip_src_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+
+ cnat_nfv9_template_info.nat64_add_session_outside_ip_src_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_outside_ip_src_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_SIZE);
+
+
+ cnat_nfv9_template_info.nat64_add_session_ip_dest_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_ip_dest_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_add_session_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_add_session_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+
+
+ /* Session Delete */
+ cnat_nfv9_template_info.nat64_del_session_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_DEL_SESSION_TEMPLATE_ID);
+ cnat_nfv9_template_info.nat64_del_session_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT64_DEL_SESSION_FIELD_COUNT);
+
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_src_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_src_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_dst_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_DST_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_dst_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_DST_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_src_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_src_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_dst_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_session_inside_ip_dst_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat64_del_session_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.nat64_del_session_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+#endif
+ /*
+ * Create the nat44 session add Template
+ */
+ cnat_nfv9_template_info.nat44_session_add_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_ADD_SESSION_TEMPLATE_ID);
+ cnat_nfv9_template_info.nat44_session_add_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_ADD_SESSION_FIELD_COUNT);
+
+ cnat_nfv9_template_info.nat44_session_add_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_outside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_outside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_outside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_outside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_outside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_outside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_dest_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_dest_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_dest_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_dest_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_add_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_add_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+ /*
+ * Create the nat44 session del Template
+ */
+ cnat_nfv9_template_info.nat44_session_del_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_DEL_SESSION_TEMPLATE_ID);
+ cnat_nfv9_template_info.nat44_session_del_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_DEL_SESSION_FIELD_COUNT);
+
+ cnat_nfv9_template_info.nat44_session_del_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_del_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_del_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_del_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_del_dest_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_del_dest_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_del_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_del_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_del_dest_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_del_dest_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.nat44_session_del_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.nat44_session_del_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+ /*
+ * Ds-lite add template
+ */
+#if 0
+ cnat_nfv9_template_info.add_dslite_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_ADD_TEMPLATE_ID);
+ cnat_nfv9_template_info.add_dslite_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_ADD_FIELD_COUNT);
+
+ cnat_nfv9_template_info.add_dslite_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_outside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_outside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_inside_ipv6_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_inside_ipv6_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_outside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_outside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_outside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_outside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+ /*
+ * Ds-lite delete template
+ */
+ cnat_nfv9_template_info.del_dslite_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_DEL_TEMPLATE_ID);
+ cnat_nfv9_template_info.del_dslite_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_DEL_FIELD_COUNT);
+
+ cnat_nfv9_template_info.del_dslite_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_inside_ipv6_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_inside_ipv6_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+ /*
+ * Ds-lite session add template
+ */
+
+ cnat_nfv9_template_info.add_dslite_session_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_ADD_SESSION_TEMPLATE_ID);
+ cnat_nfv9_template_info.add_dslite_session_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_ADD_SESSION_FIELD_COUNT);
+
+ cnat_nfv9_template_info.add_dslite_session_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_outside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_outside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_inside_ipv6_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_inside_ipv6_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_outside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_outside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_outside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_outside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_dest_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_dest_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_dest_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_dest_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.add_dslite_session_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.add_dslite_session_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+ /*
+ * Ds-lite session delete template
+ */
+ cnat_nfv9_template_info.del_dslite_session_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_DEL_SESSION_TEMPLATE_ID);
+ cnat_nfv9_template_info.del_dslite_session_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_DEL_SESSION_FIELD_COUNT);
+
+ cnat_nfv9_template_info.del_dslite_session_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_session_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_session_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_session_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_session_inside_ipv6_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_session_inside_ipv6_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_session_inside_ip_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_session_inside_ip_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_session_dest_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_session_dest_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_session_dest_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_session_dest_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_DST_PORT_FIELD_SIZE);
+
+ cnat_nfv9_template_info.del_dslite_session_protocol_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_TYPE);
+ cnat_nfv9_template_info.del_dslite_session_protocol_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_PROTOCOL_FIELD_SIZE);
+
+ /* Create add bulk template */
+ cnat_nfv9_template_info.bulk_add_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_BULK_ADD_TEMPLATE_ID);
+ cnat_nfv9_template_info.bulk_add_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_BULK_ADD_FIELD_COUNT);
+
+ cnat_nfv9_template_info.bulk_add_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_add_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_add_outside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_add_outside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_add_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_add_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_add_outside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_add_outside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_add_outside_start_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_add_outside_start_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_add_outside_end_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_END_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_add_outside_end_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_END_FIELD_SIZE);
+
+ /*
+ * Create the bulk delete Template
+ */
+ cnat_nfv9_template_info.bulk_del_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_BULK_DEL_TEMPLATE_ID);
+ cnat_nfv9_template_info.bulk_del_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_NAT44_BULK_DEL_FIELD_COUNT);
+
+ cnat_nfv9_template_info.bulk_del_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_del_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_del_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_del_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_del_outside_start_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_del_outside_start_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_SIZE);
+
+ /*
+ * Ds-lite bulk add template
+ */
+ cnat_nfv9_template_info.bulk_dslite_add_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_BULK_ADD_TEMPLATE_ID);
+ cnat_nfv9_template_info.bulk_dslite_add_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_BULK_ADD_FIELD_COUNT);
+
+ cnat_nfv9_template_info.bulk_dslite_add_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_add_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_add_outside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_add_outside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_add_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_add_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_add_inside_ipv6_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_add_inside_ipv6_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_add_outside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_add_outside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_add_outside_start_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_add_outside_start_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_add_outside_end_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_END_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_add_outside_end_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_END_FIELD_SIZE);
+
+ /*
+ * Ds-lite bulk delete template
+ */
+
+ cnat_nfv9_template_info.bulk_dslite_del_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_BULK_DEL_TEMPLATE_ID);
+ cnat_nfv9_template_info.bulk_dslite_del_field_count =
+ clib_host_to_net_u16(CNAT_NFV9_DS_LITE_BULK_DEL_FIELD_COUNT);
+
+ cnat_nfv9_template_info.bulk_dslite_del_inside_vrf_id_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_del_inside_vrf_id_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_del_inside_ip_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_del_inside_ip_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_del_inside_ipv6_addr_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_del_inside_ipv6_addr_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE);
+
+ cnat_nfv9_template_info.bulk_dslite_del_outside_start_port_field_type =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_TYPE);
+ cnat_nfv9_template_info.bulk_dslite_del_outside_start_port_field_size =
+ clib_host_to_net_u16(CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_SIZE);
+
+#endif /* if 0 */
+
+ /*
+ * Ingress vrfid - name mapping
+ */
+ CNAT_NFV9_OPTION_TEMPLATE.flowset_id =
+ clib_host_to_net_u16(CNAT_NFV9_OPTION_TEMPLATE_FLOWSET_ID);
+ CNAT_NFV9_OPTION_TEMPLATE.length =
+ clib_host_to_net_u16(CNAT_NFV9_OPTION_TEMPLATE_LENGTH);
+
+ CNAT_NFV9_OPTION_TEMPLATE.ingress_vrfid_name_map_template_id =
+ clib_host_to_net_u16(CNAT_NFV9_INGRESS_VRF_ID_NAME_TEMPLATE_ID);
+ /* currently no scope field supported */
+ CNAT_NFV9_OPTION_TEMPLATE.ingress_vrfid_name_map_scope_len = 0;
+ CNAT_NFV9_OPTION_TEMPLATE.ingress_vrfid_name_map_option_len =
+ clib_host_to_net_u16(CNAT_NFV9_INGRESS_VRF_ID_NAME_OPTION_LEN);
+ CNAT_NFV9_OPTION_TEMPLATE.ingress_vrfid_name_map_vrfid_option_type =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE);
+ CNAT_NFV9_OPTION_TEMPLATE.ingress_vrfid_name_map_vrfid_option_len =
+ clib_host_to_net_u16(CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE);
+ CNAT_NFV9_OPTION_TEMPLATE.ingress_vrfid_name_map_vrfname_option_type =
+ clib_host_to_net_u16(CNAT_NFV9_INGRESS_VRF_NAME_FIELD_TYPE);
+ CNAT_NFV9_OPTION_TEMPLATE.ingress_vrfid_name_map_vrfname_option_len =
+ clib_host_to_net_u16(CNAT_NFV9_INGRESS_VRF_NAME_FIELD_SIZE);
+
+ /*
+ * Set the padding (which was added to make the template size a
+ * multiple of 4) to zero
+ */
+ CNAT_NFV9_OPTION_TEMPLATE.padding1 = 0;
+}
+
+/*
+ * One-time initialization function; must be called at init time
+ */
+void cnat_nfv9_logging_init()
+{
+ if (!cnat_nfv9_global_info.cnat_nfv9_init_done) {
+ cnat_nfv9_template_init();
+
+ /* Pre-allocate NFV9_SERVER_POOL_SIZE server entries; this should
+ * be sufficient for most deployments
+ */
+ pool_alloc(nfv9_server_info_pool, NFV9_SERVER_POOL_SIZE);
+ int i;
+ nfv9_server_info_t *server __attribute__((unused));
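+ /* Cycle pool_get/pool_put over the pre-allocated slots so the pool's
+ * free-index vector is fully populated up front */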
+ for(i = 0; i < NFV9_SERVER_POOL_SIZE; i++) {
+ pool_get(nfv9_server_info_pool, server);
+ }
+
+ for(i = 0; i < NFV9_SERVER_POOL_SIZE; i++) {
+ pool_put(nfv9_server_info_pool, nfv9_server_info_pool + i);
+ }
+
+ memset(&cnat_nfv9_global_info, 0 , sizeof(cnat_nfv9_global_info_t));
+ ASSERT(cnat_nfv9_global_info.cnat_nfv9_disp_node_index != (u16)~0);
+
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index = EMPTY;
+ cnat_nfv9_global_info.cnat_nfv9_init_done = 1;
+
+ /*
+ * src id is set to the instance number (originally the infra IPv4
+ * address + octeon core number)
+ */
+ nfv9_src_id = my_instance_number;
+ }
+}
diff --git a/plugins/vcgn-plugin/vcgn/cnat_logging.h b/plugins/vcgn-plugin/vcgn/cnat_logging.h
new file mode 100644
index 00000000000..7bd43ecf21e
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_logging.h
@@ -0,0 +1,1091 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_logging.h
+ *
+ * Copyright (c) 2009, 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_LOGGING_H__
+#define __CNAT_LOGGING_H__
+
+#include <stdio.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+
+#include "nat64_db.h"
+#include "cnat_log_common.h"
+#include "dslite_defs.h"
+
+#define NFV9_DEF_PATH_MTU 1500
+#define NFV9_VRF_NAME_LEN 12
+
+/* one time call at the beginning */
+void cnat_nfv9_logging_init();
+
+/*
+ * unconditional call
+ * will check logging config inside
+ */
+void cnat_nfv9_log_mapping_create(cnat_main_db_entry_t * db,
+ cnat_vrfmap_t *vrfmap
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ );
+
+void cnat_nfv9_nat44_log_session_create(cnat_main_db_entry_t * db,
+ cnat_session_entry_t * sdb,
+ cnat_vrfmap_t *vrfmap);
+
+void cnat_nfv9_nat44_log_session_delete(cnat_main_db_entry_t * db,
+ cnat_session_entry_t * sdb,
+ cnat_vrfmap_t *vrfmap);
+
+
+/*
+ * unconditional call
+ * will check logging config inside
+ */
+void cnat_nfv9_log_mapping_delete(cnat_main_db_entry_t * db,
+ cnat_vrfmap_t *vrfmap
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ );
+
+/* nat44 syslog APIs */
+void cnat_syslog_nat44_mapping_create(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap, cnat_session_entry_t * sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ );
+
+void cnat_syslog_nat44_mapping_delete(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap, cnat_session_entry_t *sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ );
+
+/*
+ * dslite
+ */
+void cnat_nfv9_ds_lite_mapping_create(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ );
+
+void cnat_nfv9_ds_lite_mapping_delete(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ );
+void cnat_nfv9_ds_lite_log_session_create(cnat_main_db_entry_t * db,
+ dslite_table_entry_t *dslite_entry,
+ cnat_session_entry_t * sdb);
+
+void cnat_nfv9_ds_lite_log_session_delete(cnat_main_db_entry_t * db,
+ dslite_table_entry_t *dslite_entry,
+ cnat_session_entry_t * sdb);
+
+/*
+ * nat64
+ */
+
+void cnat_nfv9_bib_mapping_create (nat64_bib_entry_t *db,
+ nat64_table_entry_t *nat64_entry);
+
+void cnat_nfv9_session_mapping_create (nat64_bib_entry_t *bdb,
+ nat64_session_entry_t *sdb,
+ nat64_table_entry_t *nat64_entry_ptr);
+
+void cnat_nfv9_bib_mapping_delete (nat64_bib_entry_t *db,
+ nat64_table_entry_t *nat64_entry);
+
+void cnat_nfv9_session_mapping_delete (nat64_bib_entry_t *bdb,
+ nat64_session_entry_t *sdb,
+ nat64_table_entry_t *nat64_entry_ptr);
+
+typedef enum {
+ RECORD_INVALID = 0,
+ NAT44_ADD_RECORD,
+ NAT44_DEL_RECORD,
+ NAT64_ADD_BIB_RECORD,
+ NAT64_DEL_BIB_RECORD,
+ NAT64_ADD_SESSION_RECORD,
+ NAT64_DEL_SESSION_RECORD,
+ DS_LITE_ADD_RECORD,
+ DS_LITE_DEL_RECORD,
+ NAT44_BULK_ADD_RECORD,
+ NAT44_BULK_DEL_RECORD,
+ DS_LITE_BULK_ADD_RECORD,
+ DS_LITE_BULK_DEL_RECORD,
+ INGRESS_VRF_ID_NAME_RECORD,
+ NAT44_ADD_SESSION_RECORD,
+ NAT44_DEL_SESSION_RECORD,
+ DS_LITE_ADD_SESSION_RECORD,
+ DS_LITE_DEL_SESSION_RECORD,
+ MAX_RECORDS
+} netflow_record;
+
+typedef enum {
+ TEMPLATE_SENT_FALSE = 0,
+ TEMPLATE_SENT_TRUE = 1
+} netflow_template_sent;
+
+#define cnat_nfv9_get_sys_up_time_in_ms cnat_get_sys_up_time_in_ms
+
+#define cnat_nfv9_get_unix_time_in_seconds cnat_get_unix_time_in_seconds
+
+#define cnat_nfv9_dump_time_change_logs cnat_dump_time_change_logs
+
+
+/*
+ * Netflow V9 Specific Defines and structures
+ */
+
+#define CNAT_NFV9_VERSION_NUMBER 9
+
+#define CNAT_NFV9_TEMPLATE_FLOWSET_ID 0
+#define CNAT_NFV9_OPTION_TEMPLATE_FLOWSET_ID 1
+
+#define CNAT_NFV9_ADD_FIELD_COUNT 7
+#define CNAT_NFV9_DEL_FIELD_COUNT 4
+#define CNAT_NFV9_DS_LITE_ADD_FIELD_COUNT 8
+#define CNAT_NFV9_DS_LITE_DEL_FIELD_COUNT 5
+#define CNAT_NFV9_NAT64_ADD_BIB_FIELD_COUNT 5
+#define CNAT_NFV9_NAT64_DEL_BIB_FIELD_COUNT 3
+#define CNAT_NFV9_NAT64_ADD_SESSION_FIELD_COUNT 8
+#define CNAT_NFV9_NAT64_DEL_SESSION_FIELD_COUNT 5
+#define CNAT_NFV9_NAT44_ADD_SESSION_FIELD_COUNT 9
+#define CNAT_NFV9_NAT44_DEL_SESSION_FIELD_COUNT 6
+#define CNAT_NFV9_DS_LITE_ADD_SESSION_FIELD_COUNT 10
+#define CNAT_NFV9_DS_LITE_DEL_SESSION_FIELD_COUNT 7
+
+#define CNAT_NFV9_ADD_TEMPLATE_ID 256
+#define CNAT_NFV9_DEL_TEMPLATE_ID 257
+#define CNAT_NFV9_NAT64_ADD_BIB_TEMPLATE_ID 258
+#define CNAT_NFV9_NAT64_DEL_BIB_TEMPLATE_ID 259
+#define CNAT_NFV9_NAT64_ADD_SESSION_TEMPLATE_ID 260
+#define CNAT_NFV9_NAT64_DEL_SESSION_TEMPLATE_ID 261
+#define CNAT_NFV9_INGRESS_VRF_ID_NAME_TEMPLATE_ID 262
+#define CNAT_NFV9_DS_LITE_ADD_TEMPLATE_ID 267
+#define CNAT_NFV9_DS_LITE_DEL_TEMPLATE_ID 268
+#define CNAT_NFV9_NAT44_ADD_SESSION_TEMPLATE_ID 271
+#define CNAT_NFV9_NAT44_DEL_SESSION_TEMPLATE_ID 272
+#define CNAT_NFV9_DS_LITE_ADD_SESSION_TEMPLATE_ID 273
+#define CNAT_NFV9_DS_LITE_DEL_SESSION_TEMPLATE_ID 274
+
+#ifndef NO_BULK_LOGGING
+#define CNAT_NFV9_NAT44_BULK_ADD_TEMPLATE_ID 265
+#define CNAT_NFV9_NAT44_BULK_DEL_TEMPLATE_ID 266
+#define CNAT_NFV9_DS_LITE_BULK_ADD_TEMPLATE_ID 269
+#define CNAT_NFV9_DS_LITE_BULK_DEL_TEMPLATE_ID 270
+
+#define CNAT_NFV9_NAT44_BULK_ADD_FIELD_COUNT 6
+#define CNAT_NFV9_NAT44_BULK_DEL_FIELD_COUNT 3
+#define CNAT_NFV9_DS_LITE_BULK_ADD_FIELD_COUNT 7
+#define CNAT_NFV9_DS_LITE_BULK_DEL_FIELD_COUNT 4
+
+#define CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_TYPE 361
+#define CNAT_NFV9_OUTSIDE_IP_PORT_START_FIELD_SIZE 2
+
+#define CNAT_NFV9_OUTSIDE_IP_PORT_END_FIELD_TYPE 362
+#define CNAT_NFV9_OUTSIDE_IP_PORT_END_FIELD_SIZE 2
+
+#endif /* #ifndef NO_BULK_LOGGING */
+
+#define CNAT_NFV9_INGRESS_VRF_NAME_FIELD_TYPE 236
+#define CNAT_NFV9_INGRESS_VRF_NAME_FIELD_SIZE 12
+/* 4 byte for vrf_id + 4 byte for vrf_name (option fields) */
+#define CNAT_NFV9_INGRESS_VRF_ID_NAME_OPTION_LEN 8
+extern u16 cnat_template_id[MAX_RECORDS];
+
+#define CNAT_NFV9_INSIDE_VRFID_FIELD_TYPE 234
+#define CNAT_NFV9_INSIDE_VRFID_FIELD_SIZE 4
+
+#define CNAT_NFV9_OUTSIDE_VRFID_FIELD_TYPE 235
+#define CNAT_NFV9_OUTSIDE_VRFID_FIELD_SIZE 4
+
+#define CNAT_NFV9_INSIDE_IP_ADDR_FIELD_TYPE 8
+#define CNAT_NFV9_INSIDE_IP_ADDR_FIELD_SIZE 4
+
+#define CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_TYPE 225
+#define CNAT_NFV9_OUTSIDE_IP_ADDR_FIELD_SIZE 4
+
+#define CNAT_NFV9_INSIDE_IP_PORT_FIELD_TYPE 7
+#define CNAT_NFV9_INSIDE_IP_PORT_FIELD_SIZE 2
+
+#define CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_TYPE 227
+#define CNAT_NFV9_OUTSIDE_IP_PORT_FIELD_SIZE 2
+
+#define CNAT_NFV9_PROTOCOL_FIELD_TYPE 4
+#define CNAT_NFV9_PROTOCOL_FIELD_SIZE 1
+
+/* IPv6 related info */
+
+#define CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_TYPE 27
+#define CNAT_NFV9_INSIDE_IPV6_SRC_ADDR_FIELD_SIZE 16
+
+#define CNAT_NFV9_INSIDE_IPV6_DST_ADDR_FIELD_TYPE 28
+#define CNAT_NFV9_INSIDE_IPV6_DST_ADDR_FIELD_SIZE 16
+
+#define CNAT_NFV9_OUTSIDE_IP_DST_ADDR_FIELD_TYPE 226
+#define CNAT_NFV9_OUTSIDE_IP_DST_ADDR_FIELD_SIZE 4
+
+#define CNAT_NFV9_INSIDE_DST_PORT_FIELD_TYPE 11
+#define CNAT_NFV9_INSIDE_DST_PORT_FIELD_SIZE 2
+
+#define CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_TYPE 12
+#define CNAT_NFV9_DESTINATION_IP_ADDR_FIELD_SIZE 4
+
+
+typedef struct {
+ u16 version;
+ u16 count;
+ u32 sys_up_time; /* time in ms since system was booted */
+ u32 timestamp; /* UNIX time in seconds since 1970 */
+ u32 sequence_num;
+ u32 source_id;
+} nfv9_header_t;
+
+/*
+ * Hardcoded - needs to be fixed
+ */
+#define CNAT_NFV9_SOURCE_ID_VALUE 0x1234
+
+typedef struct {
+ u16 flowset_id;
+ u16 length;
+
+ u16 ingress_vrfid_name_map_template_id;
+ u16 ingress_vrfid_name_map_scope_len;
+ u16 ingress_vrfid_name_map_option_len;
+ u16 ingress_vrfid_name_map_vrfid_option_type;
+ u16 ingress_vrfid_name_map_vrfid_option_len;
+ u16 ingress_vrfid_name_map_vrfname_option_type;
+ u16 ingress_vrfid_name_map_vrfname_option_len;
+ /*
+ * Padding added so that the template
+ * structure ends on a 4-byte boundary
+ */
+ u16 padding1;
+
+} cnat_nfv9_option_template_t;
+
+/*
+ * The following structure defines the Netflow Template that
+ * will be exported to the Netflow Collector
+ */
+
+typedef struct {
+ u16 flowset_id;
+ u16 length;
+
+ u16 add_template_id;
+ u16 add_field_count;
+ u16 add_inside_vrf_id_field_type;
+ u16 add_inside_vrf_id_field_size;
+ u16 add_outside_vrf_id_field_type;
+ u16 add_outside_vrf_id_field_size;
+ u16 add_inside_ip_addr_field_type;
+ u16 add_inside_ip_addr_field_size;
+ u16 add_outside_ip_addr_field_type;
+ u16 add_outside_ip_addr_field_size;
+ u16 add_inside_ip_port_field_type;
+ u16 add_inside_ip_port_field_size;
+ u16 add_outside_ip_port_field_type;
+ u16 add_outside_ip_port_field_size;
+ u16 add_protocol_field_type;
+ u16 add_protocol_field_size;
+
+ u16 del_template_id;
+ u16 del_field_count;
+ u16 del_inside_vrf_id_field_type;
+ u16 del_inside_vrf_id_field_size;
+ u16 del_inside_ip_addr_field_type;
+ u16 del_inside_ip_addr_field_size;
+ u16 del_inside_ip_port_field_type;
+ u16 del_inside_ip_port_field_size;
+ u16 del_protocol_field_type;
+ u16 del_protocol_field_size;
+#if 0
+ /* NAT64 related info */
+ u16 nat64_add_bib_template_id;
+ u16 nat64_add_bib_field_count;
+ u16 nat64_add_bib_inside_ipv6_addr_field_type;
+ u16 nat64_add_bib_inside_ipv6_addr_field_size;
+ u16 nat64_add_bib_outside_ip_addr_field_type;
+ u16 nat64_add_bib_outside_ip_addr_field_size;
+ u16 nat64_add_bib_inside_ip_port_field_type;
+ u16 nat64_add_bib_inside_ip_port_field_size;
+ u16 nat64_add_bib_outside_ip_port_field_type;
+ u16 nat64_add_bib_outside_ip_port_field_size;
+ u16 nat64_add_bib_protocol_field_type;
+ u16 nat64_add_bib_protocol_field_size;
+
+ u16 nat64_del_bib_template_id;
+ u16 nat64_del_bib_field_count;
+ u16 nat64_del_bib_inside_ip_addr_field_type;
+ u16 nat64_del_bib_inside_ip_addr_field_size;
+ u16 nat64_del_bib_inside_ip_port_field_type;
+ u16 nat64_del_bib_inside_ip_port_field_size;
+ u16 nat64_del_bib_protocol_field_type;
+ u16 nat64_del_bib_protocol_field_size;
+
+
+ u16 nat64_add_session_template_id;
+ u16 nat64_add_session_field_count;
+ u16 nat64_add_session_inside_ipv6_src_addr_field_type;
+ u16 nat64_add_session_inside_ipv6_src_addr_field_size;
+ u16 nat64_add_session_outside_ip_src_addr_field_type;
+ u16 nat64_add_session_outside_ip_src_addr_field_size;
+ u16 nat64_add_session_inside_ipv6_dst_addr_field_type;
+ u16 nat64_add_session_inside_ipv6_dst_addr_field_size;
+ u16 nat64_add_session_outside_ip_dst_addr_field_type;
+ u16 nat64_add_session_outside_ip_dst_addr_field_size;
+ u16 nat64_add_session_inside_ip_src_port_field_type;
+ u16 nat64_add_session_inside_ip_src_port_field_size;
+ u16 nat64_add_session_outside_ip_src_port_field_type;
+ u16 nat64_add_session_outside_ip_src_port_field_size;
+ u16 nat64_add_session_ip_dest_port_field_type;
+ u16 nat64_add_session_ip_dest_port_field_size;
+ u16 nat64_add_session_protocol_field_type;
+ u16 nat64_add_session_protocol_field_size;
+
+ u16 nat64_del_session_template_id;
+ u16 nat64_del_session_field_count;
+ u16 nat64_del_session_inside_ip_src_addr_field_type;
+ u16 nat64_del_session_inside_ip_src_addr_field_size;
+ u16 nat64_del_session_inside_ip_dst_addr_field_type;
+ u16 nat64_del_session_inside_ip_dst_addr_field_size;
+ u16 nat64_del_session_inside_ip_src_port_field_type;
+ u16 nat64_del_session_inside_ip_src_port_field_size;
+ u16 nat64_del_session_inside_ip_dst_port_field_type;
+ u16 nat64_del_session_inside_ip_dst_port_field_size;
+ u16 nat64_del_session_protocol_field_type;
+ u16 nat64_del_session_protocol_field_size;
+
+ /*
+ * Ds-Lite specific info
+ */
+ u16 add_dslite_template_id;
+ u16 add_dslite_field_count;
+ u16 add_dslite_inside_vrf_id_field_type;
+ u16 add_dslite_inside_vrf_id_field_size;
+ u16 add_dslite_outside_vrf_id_field_type;
+ u16 add_dslite_outside_vrf_id_field_size;
+ u16 add_dslite_inside_ip_addr_field_type;
+ u16 add_dslite_inside_ip_addr_field_size;
+ u16 add_dslite_inside_ipv6_addr_field_type;
+ u16 add_dslite_inside_ipv6_addr_field_size;
+ u16 add_dslite_outside_ip_addr_field_type;
+ u16 add_dslite_outside_ip_addr_field_size;
+ u16 add_dslite_inside_ip_port_field_type;
+ u16 add_dslite_inside_ip_port_field_size;
+ u16 add_dslite_outside_ip_port_field_type;
+ u16 add_dslite_outside_ip_port_field_size;
+ u16 add_dslite_protocol_field_type;
+ u16 add_dslite_protocol_field_size;
+
+ u16 del_dslite_template_id;
+ u16 del_dslite_field_count;
+ u16 del_dslite_inside_vrf_id_field_type;
+ u16 del_dslite_inside_vrf_id_field_size;
+ u16 del_dslite_inside_ip_addr_field_type;
+ u16 del_dslite_inside_ip_addr_field_size;
+ u16 del_dslite_inside_ipv6_addr_field_type;
+ u16 del_dslite_inside_ipv6_addr_field_size;
+ u16 del_dslite_inside_ip_port_field_type;
+ u16 del_dslite_inside_ip_port_field_size;
+ u16 del_dslite_protocol_field_type;
+ u16 del_dslite_protocol_field_size;
+#endif
+
+//#ifndef NO_BULK_LOGGING /* commented out for the time being */
+#if 0
+ u16 bulk_add_template_id;
+ u16 bulk_add_field_count;
+ u16 bulk_add_inside_vrf_id_field_type;
+ u16 bulk_add_inside_vrf_id_field_size;
+ u16 bulk_add_outside_vrf_id_field_type;
+ u16 bulk_add_outside_vrf_id_field_size;
+ u16 bulk_add_inside_ip_addr_field_type;
+ u16 bulk_add_inside_ip_addr_field_size;
+ u16 bulk_add_outside_ip_addr_field_type;
+ u16 bulk_add_outside_ip_addr_field_size;
+ u16 bulk_add_outside_start_port_field_type;
+ u16 bulk_add_outside_start_port_field_size;
+ u16 bulk_add_outside_end_port_field_type;
+ u16 bulk_add_outside_end_port_field_size;
+
+ u16 bulk_del_template_id;
+ u16 bulk_del_field_count;
+ u16 bulk_del_inside_vrf_id_field_type;
+ u16 bulk_del_inside_vrf_id_field_size;
+ u16 bulk_del_inside_ip_addr_field_type;
+ u16 bulk_del_inside_ip_addr_field_size;
+ u16 bulk_del_outside_start_port_field_type;
+ u16 bulk_del_outside_start_port_field_size;
+
+ /* ds-lite bulk logging create delete event */
+
+ u16 bulk_dslite_add_template_id;
+ u16 bulk_dslite_add_field_count;
+ u16 bulk_dslite_add_inside_vrf_id_field_type;
+ u16 bulk_dslite_add_inside_vrf_id_field_size;
+ u16 bulk_dslite_add_outside_vrf_id_field_type;
+ u16 bulk_dslite_add_outside_vrf_id_field_size;
+ u16 bulk_dslite_add_inside_ip_addr_field_type;
+ u16 bulk_dslite_add_inside_ip_addr_field_size;
+ u16 bulk_dslite_add_inside_ipv6_addr_field_type;
+ u16 bulk_dslite_add_inside_ipv6_addr_field_size;
+ u16 bulk_dslite_add_outside_ip_addr_field_type;
+ u16 bulk_dslite_add_outside_ip_addr_field_size;
+ u16 bulk_dslite_add_outside_start_port_field_type;
+ u16 bulk_dslite_add_outside_start_port_field_size;
+ u16 bulk_dslite_add_outside_end_port_field_type;
+ u16 bulk_dslite_add_outside_end_port_field_size;
+
+ u16 bulk_dslite_del_template_id;
+ u16 bulk_dslite_del_field_count;
+ u16 bulk_dslite_del_inside_vrf_id_field_type;
+ u16 bulk_dslite_del_inside_vrf_id_field_size;
+ u16 bulk_dslite_del_inside_ip_addr_field_type;
+ u16 bulk_dslite_del_inside_ip_addr_field_size;
+ u16 bulk_dslite_del_inside_ipv6_addr_field_type;
+ u16 bulk_dslite_del_inside_ipv6_addr_field_size;
+ u16 bulk_dslite_del_outside_start_port_field_type;
+ u16 bulk_dslite_del_outside_start_port_field_size;
+
+#endif /* NO_BULK_LOGGING */
+
+ u16 nat44_session_add_template_id;
+ u16 nat44_session_add_field_count;
+ u16 nat44_session_add_inside_vrf_id_field_type;
+ u16 nat44_session_add_inside_vrf_id_field_size;
+ u16 nat44_session_add_outside_vrf_id_field_type;
+ u16 nat44_session_add_outside_vrf_id_field_size;
+ u16 nat44_session_add_inside_ip_addr_field_type;
+ u16 nat44_session_add_inside_ip_addr_field_size;
+ u16 nat44_session_add_outside_ip_addr_field_type;
+ u16 nat44_session_add_outside_ip_addr_field_size;
+ u16 nat44_session_add_inside_ip_port_field_type;
+ u16 nat44_session_add_inside_ip_port_field_size;
+ u16 nat44_session_add_outside_ip_port_field_type;
+ u16 nat44_session_add_outside_ip_port_field_size;
+ u16 nat44_session_add_dest_ip_addr_field_type;
+ u16 nat44_session_add_dest_ip_addr_field_size;
+ u16 nat44_session_add_dest_port_field_type;
+ u16 nat44_session_add_dest_port_field_size;
+ u16 nat44_session_add_protocol_field_type;
+ u16 nat44_session_add_protocol_field_size;
+
+ u16 nat44_session_del_template_id;
+ u16 nat44_session_del_field_count;
+ u16 nat44_session_del_inside_vrf_id_field_type;
+ u16 nat44_session_del_inside_vrf_id_field_size;
+ u16 nat44_session_del_inside_ip_addr_field_type;
+ u16 nat44_session_del_inside_ip_addr_field_size;
+ u16 nat44_session_del_dest_ip_addr_field_type;
+ u16 nat44_session_del_dest_ip_addr_field_size;
+ u16 nat44_session_del_inside_ip_port_field_type;
+ u16 nat44_session_del_inside_ip_port_field_size;
+ u16 nat44_session_del_dest_port_field_type;
+ u16 nat44_session_del_dest_port_field_size;
+ u16 nat44_session_del_protocol_field_type;
+ u16 nat44_session_del_protocol_field_size;
+
+#if 0
+ u16 add_dslite_session_template_id;
+ u16 add_dslite_session_field_count;
+ u16 add_dslite_session_inside_vrf_id_field_type;
+ u16 add_dslite_session_inside_vrf_id_field_size;
+ u16 add_dslite_session_outside_vrf_id_field_type;
+ u16 add_dslite_session_outside_vrf_id_field_size;
+ u16 add_dslite_session_inside_ip_addr_field_type;
+ u16 add_dslite_session_inside_ip_addr_field_size;
+ u16 add_dslite_session_inside_ipv6_addr_field_type;
+ u16 add_dslite_session_inside_ipv6_addr_field_size;
+ u16 add_dslite_session_outside_ip_addr_field_type;
+ u16 add_dslite_session_outside_ip_addr_field_size;
+ u16 add_dslite_session_inside_ip_port_field_type;
+ u16 add_dslite_session_inside_ip_port_field_size;
+ u16 add_dslite_session_outside_ip_port_field_type;
+ u16 add_dslite_session_outside_ip_port_field_size;
+ u16 add_dslite_session_dest_ip_addr_field_type;
+ u16 add_dslite_session_dest_ip_addr_field_size;
+ u16 add_dslite_session_dest_port_field_type;
+ u16 add_dslite_session_dest_port_field_size;
+ u16 add_dslite_session_protocol_field_type;
+ u16 add_dslite_session_protocol_field_size;
+
+ u16 del_dslite_session_template_id;
+ u16 del_dslite_session_field_count;
+ u16 del_dslite_session_inside_vrf_id_field_type;
+ u16 del_dslite_session_inside_vrf_id_field_size;
+ u16 del_dslite_session_inside_ip_addr_field_type;
+ u16 del_dslite_session_inside_ip_addr_field_size;
+ u16 del_dslite_session_inside_ipv6_addr_field_type;
+ u16 del_dslite_session_inside_ipv6_addr_field_size;
+ u16 del_dslite_session_dest_ip_addr_field_type;
+ u16 del_dslite_session_dest_ip_addr_field_size;
+ u16 del_dslite_session_inside_ip_port_field_type;
+ u16 del_dslite_session_inside_ip_port_field_size;
+ u16 del_dslite_session_dest_port_field_type;
+ u16 del_dslite_session_dest_port_field_size;
+ u16 del_dslite_session_protocol_field_type;
+ u16 del_dslite_session_protocol_field_size;
+#endif
+
+ /*
+ * Netflow option template
+ * Ingress VRF ID - Name mapping
+ * This template will be sent under flowset id 1
+ */
+ cnat_nfv9_option_template_t cnat_nfv9_option_template;
+} cnat_nfv9_template_t;
+
+/*
+ * The Dataflow header for each add/delete record group
+ */
+typedef struct {
+ u16 dataflow_template_id;
+ u16 dataflow_length;
+} nfv9_dataflow_record_header_t;
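+/*
+ * In the exported packet, each group of data records is preceded by one
+ * such header: dataflow_template_id names the template that describes
+ * the records that follow, and dataflow_length is the total length of
+ * that flowset (this header plus the records).
+ */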
+
+/*
+ * NFv9 Add record definition
+ */
+
+/*
+ * pad bytes needed to make the structure a multiple of 4 bytes
+ */
+#define CNAT_NFV9_ADD_RECORD_PAD_BYTES (3)
+#define CNAT_NFV9_DEL_RECORD_PAD_BYTES (1)
+
+#define CNAT_NFV9_NAT64_ADD_BIB_RECORD_PAD_BYTES (3)
+#define CNAT_NFV9_NAT64_DEL_BIB_RECORD_PAD_BYTES (1)
+#define CNAT_NFV9_NAT64_ADD_SESSION_RECORD_PAD_BYTES (1)
+#define CNAT_NFV9_NAT64_DEL_SESSION_RECORD_PAD_BYTES (3)
+#define CNAT_NFV9_NAT44_ADD_SESSION_RECORD_PAD_BYTES (1)
+#define CNAT_NFV9_NAT44_DEL_SESSION_RECORD_PAD_BYTES (3)
+
+#define CNAT_NFV9_DS_LITE_ADD_RECORD_PAD_BYTES (3)
+#define CNAT_NFV9_DS_LITE_DEL_RECORD_PAD_BYTES (1)
+#define CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_PAD_BYTES (1)
+#define CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_PAD_BYTES (3)
+
+#define CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_PAD_BYTES (0)
+
+typedef struct {
+ u32 inside_vrf_id;
+ u32 outside_vrf_id;
+ u32 inside_ip_addr;
+ u32 outside_ip_addr;
+ u16 inside_ip_port;
+ u16 outside_ip_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_ADD_RECORD_PAD_BYTES];
+} nfv9_add_record_t;
+
+/*
+ * NFv9 Delete record definition
+ */
+typedef struct {
+ u32 inside_vrf_id;
+ u32 inside_ip_addr;
+ u16 inside_ip_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_DEL_RECORD_PAD_BYTES];
+} nfv9_del_record_t;
+
+#ifndef NO_BULK_LOGGING
+
+#define CNAT_NFV9_BULK_ADD_RECORD_PAD_BYTES (0)
+#define CNAT_NFV9_BULK_DEL_RECORD_PAD_BYTES (2)
+
+typedef struct {
+ u32 inside_vrf_id;
+ u32 outside_vrf_id;
+ u32 inside_ip_addr;
+ u32 outside_ip_addr;
+ u16 outside_ip_port_start;
+ u16 outside_ip_port_end;
+ u8 pad[CNAT_NFV9_BULK_ADD_RECORD_PAD_BYTES];
+} nfv9_bulk_add_record_t;
+
+/*
+ * NFv9 Delete record definition
+ */
+typedef struct {
+ u32 inside_vrf_id;
+ u32 inside_ip_addr;
+ u16 outside_ip_port_start;
+ u8 pad[CNAT_NFV9_BULK_DEL_RECORD_PAD_BYTES];
+} nfv9_bulk_del_record_t;
+
+/*
+ * DS-lite bulk port (user based) add record definition
+ */
+
+#define CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_PAD_BYTES (0)
+#define CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_PAD_BYTES (2)
+
+typedef struct {
+ u32 inside_vrf_id;
+ u32 outside_vrf_id;
+ u32 inside_ip_addr;
+ u32 inside_v6_src_addr[4];
+ u32 outside_ip_addr;
+ u16 outside_ip_port_start;
+ u16 outside_ip_port_end;
+ u8 pad[CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_PAD_BYTES];
+} nfv9_ds_lite_bulk_add_record_t;
+
+
+/*
+ * DS-lite bulk port (user based) delete record definition
+ */
+
+typedef struct {
+ u32 inside_vrf_id;
+ u32 inside_ip_addr;
+ u32 inside_v6_src_addr[4];
+ u16 outside_ip_port_start;
+ u8 pad[CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_PAD_BYTES];
+} nfv9_ds_lite_bulk_del_record_t;
+
+#endif /* NO_BULK_LOGGING */
+
+/* NAT64 related structures */
+
+typedef struct {
+ u32 inside_v6_src_addr[4];
+ u32 outside_v4_src_addr;
+ u16 inside_src_port;
+ u16 outside_src_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_NAT64_ADD_BIB_RECORD_PAD_BYTES];
+} nfv9_nat64_add_bib_record_t;
+
+
+typedef struct {
+ u32 inside_v6_src_addr[4];
+ u32 outside_v4_src_addr;
+ u32 inside_v6_dest_addr[4];
+ u32 outside_v4_dest_addr;
+ u16 inside_src_port;
+ u16 outside_src_port;
+ u16 dest_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_NAT64_ADD_SESSION_RECORD_PAD_BYTES];
+} nfv9_nat64_add_session_record_t;
+
+
+typedef struct {
+ u32 inside_v6_src_addr[4];
+ u16 inside_src_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_NAT64_DEL_BIB_RECORD_PAD_BYTES];
+} nfv9_nat64_del_bib_record_t;
+
+
+typedef struct {
+ u32 inside_v6_src_addr[4];
+ u32 inside_v6_dest_addr[4];
+ u16 inside_src_port;
+ u16 dest_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_NAT64_DEL_SESSION_RECORD_PAD_BYTES];
+} nfv9_nat64_del_session_record_t;
+
+/*
+ * NFv9 Session based Add record definition
+ */
+typedef struct {
+ u32 inside_vrf_id;
+ u32 outside_vrf_id;
+ u32 inside_ip_addr;
+ u32 outside_ip_addr;
+ u16 inside_ip_port;
+ u16 outside_ip_port;
+ u32 dest_ip_addr;
+ u16 dest_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_NAT44_ADD_SESSION_RECORD_PAD_BYTES];
+} nfv9_add_session_record_t;
+
+/*
+ * NFv9 Session based del record definition
+ */
+typedef struct {
+ u32 inside_vrf_id;
+ u32 inside_ip_addr;
+ u32 dest_ip_addr;
+ u16 inside_ip_port;
+ u16 dest_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_NAT44_DEL_SESSION_RECORD_PAD_BYTES];
+} nfv9_del_session_record_t;
+
+/*
+ * DS-lite NFv9 create record structure
+ */
+typedef struct {
+ u32 inside_vrf_id;
+ u32 outside_vrf_id;
+ u32 inside_ip_addr;
+ u32 inside_v6_src_addr[4];
+ u32 outside_ip_addr;
+ u16 inside_ip_port;
+ u16 outside_ip_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_DS_LITE_ADD_RECORD_PAD_BYTES];
+} nfv9_ds_lite_add_record_t;
+
+typedef struct {
+ u32 inside_vrf_id;
+ u32 inside_ip_addr;
+ u32 inside_v6_src_addr[4];
+ u16 inside_ip_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_DS_LITE_DEL_RECORD_PAD_BYTES];
+} nfv9_ds_lite_del_record_t;
+
+/*
+ * NFv9 Session based Add record definition
+ */
+typedef struct {
+ u32 inside_vrf_id;
+ u32 outside_vrf_id;
+ u32 inside_ip_addr;
+ u32 inside_v6_src_addr[4];
+ u32 outside_ip_addr;
+ u16 inside_ip_port;
+ u16 outside_ip_port;
+ u32 dest_ip_addr;
+ u16 dest_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_PAD_BYTES];
+} nfv9_ds_lite_add_session_record_t;
+
+/*
+ * NFv9 Session based del record definition
+ */
+typedef struct {
+ u32 inside_vrf_id;
+ u32 inside_ip_addr;
+ u32 inside_v6_src_addr[4];
+ u32 dest_ip_addr;
+ u16 inside_ip_port;
+ u16 dest_port;
+ u8 protocol;
+ u8 pad[CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_PAD_BYTES];
+} nfv9_ds_lite_del_session_record_t;
+
+
+typedef struct {
+ u32 ingress_vrf_id;
+ u8 ingress_vrf_name[NFV9_VRF_NAME_LEN];
+ u8 pad[CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_PAD_BYTES];
+} nfv9_ingress_vrfid_name_record_t;
+
+#define CNAT_NFV9_TEMPLATE_OFFSET \
+ (CNAT_NFV9_HDR_OFFSET + sizeof(nfv9_header_t))
+
+#define CNAT_NFV9_TEMPLATE_LENGTH (sizeof(cnat_nfv9_template_t))
+#define CNAT_NFV9_OPTION_TEMPLATE_LENGTH (sizeof(cnat_nfv9_option_template_t))
+
+#define CNAT_NFV9_DATAFLOW_RECORD_HEADER_LENGTH \
+ (sizeof(nfv9_dataflow_record_header_t))
+
+/*
+ * The pad bytes are not sent on the wire, so subtract them from the
+ * add/delete record lengths
+ */
+
+#define CNAT_NFV9_ADD_RECORD_LENGTH (sizeof(nfv9_add_record_t) - \
+ CNAT_NFV9_ADD_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_DEL_RECORD_LENGTH (sizeof(nfv9_del_record_t) - \
+ CNAT_NFV9_DEL_RECORD_PAD_BYTES)
+
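+/*
+ * Illustrative example: nfv9_add_record_t packs 4 x u32 + 2 x u16 +
+ * 1 x u8 = 21 bytes of data plus 3 pad bytes, so sizeof() is 24 and
+ * CNAT_NFV9_ADD_RECORD_LENGTH works out to 21, the number of bytes
+ * actually written into the export packet for a NAT44 add record.
+ */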
+#define CNAT_NFV9_DS_LITE_ADD_RECORD_LENGTH (sizeof(nfv9_ds_lite_add_record_t) - \
+ CNAT_NFV9_DS_LITE_ADD_RECORD_PAD_BYTES)
+#define CNAT_NFV9_DS_LITE_DEL_RECORD_LENGTH (sizeof(nfv9_ds_lite_del_record_t) - \
+ CNAT_NFV9_DS_LITE_DEL_RECORD_PAD_BYTES)
+#ifndef NO_BULK_LOGGING
+#define CNAT_NFV9_BULK_ADD_RECORD_LENGTH (sizeof(nfv9_bulk_add_record_t) - \
+ CNAT_NFV9_BULK_ADD_RECORD_PAD_BYTES)
+#define CNAT_NFV9_BULK_DEL_RECORD_LENGTH (sizeof(nfv9_bulk_del_record_t) - \
+ CNAT_NFV9_BULK_DEL_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_LENGTH (sizeof(nfv9_ds_lite_bulk_add_record_t) - \
+ CNAT_NFV9_DS_LITE_BULK_ADD_RECORD_PAD_BYTES)
+#define CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_LENGTH (sizeof(nfv9_ds_lite_bulk_del_record_t) - \
+ CNAT_NFV9_DS_LITE_BULK_DEL_RECORD_PAD_BYTES)
+
+
+#endif /* NO_BULK_LOGGING */
+
+#define CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_LENGTH (sizeof(nfv9_ingress_vrfid_name_record_t) - \
+ CNAT_NFV9_INGRESS_VRFID_NAME_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_NAT64_ADD_BIB_RECORD_LENGTH \
+ (sizeof(nfv9_nat64_add_bib_record_t) - \
+ CNAT_NFV9_NAT64_ADD_BIB_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_NAT64_DEL_BIB_RECORD_LENGTH \
+ (sizeof(nfv9_nat64_del_bib_record_t) - \
+ CNAT_NFV9_NAT64_DEL_BIB_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_NAT64_ADD_SESSION_RECORD_LENGTH \
+ (sizeof(nfv9_nat64_add_session_record_t) - \
+ CNAT_NFV9_NAT64_ADD_SESSION_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_NAT64_DEL_SESSION_RECORD_LENGTH \
+ (sizeof(nfv9_nat64_del_session_record_t) - \
+ CNAT_NFV9_NAT64_DEL_SESSION_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_MAX_SINGLE_RECORD_LENGTH \
+ (sizeof(nfv9_ds_lite_add_session_record_t) - \
+ CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_NAT44_ADD_SESSION_RECORD_LENGTH \
+ (sizeof(nfv9_add_session_record_t) -\
+ CNAT_NFV9_NAT44_ADD_SESSION_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_NAT44_DEL_SESSION_RECORD_LENGTH \
+ (sizeof(nfv9_del_session_record_t) -\
+ CNAT_NFV9_NAT44_DEL_SESSION_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_LENGTH \
+ (sizeof(nfv9_ds_lite_add_session_record_t) -\
+ CNAT_NFV9_DS_LITE_ADD_SESSION_RECORD_PAD_BYTES)
+
+#define CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_LENGTH \
+ (sizeof(nfv9_ds_lite_del_session_record_t) -\
+ CNAT_NFV9_DS_LITE_DEL_SESSION_RECORD_PAD_BYTES)
+
+/*
+ * Minimum acceptable path MTU value
+ */
+#define CNAT_NFV9_MIN_RECORD_SIZE (60 + \
+ CNAT_NFV9_DATAFLOW_RECORD_HEADER_LENGTH + \
+ CNAT_NFV9_TEMPLATE_LENGTH + \
+ CNAT_NFV9_MAX_SINGLE_RECORD_LENGTH)
+
+/*
+ * Cap the maximum length of the netflow data at 1400 bytes
+ */
+#define CNAT_NFV9_MAX_PKT_LENGTH 1400
+
+/*
+ * Data structures and defines to store NFV9 specific info
+ */
+#define CNAT_NFV9_INVALID_LOGGING_INDEX 0xffffffff
+
+/*
+ * Padding value between ADD and DELETE records. This can be at most 3 bytes
+ */
+#define NFV9_PAD_VALUE (3)
+
+typedef struct {
+ /* NFV9 server specific info
+ * For now, it maintains only the packet sequence count.
+ * Later it will maintain server address, port, etc.
+ * Though it currently has the server address and port, they are only
+ * for cross reference
+ */
+ u32 ipv4_address; /* Destination IP address of the collector */
+ u16 port; /* Destination port number of the collector */
+ u16 refresh_rate; /* Refresh rate in packets after which template is sent */
+ u16 timeout_rate; /* Timeout rate in seconds after which template is sent */
+ u16 ref_count; /* Num of instances using this data */
+ u32 sequence_num; /* Sequence number of the logging packet */
+ /*
+ * Keep track of the time and packets since last template send
+ */
+ u32 last_template_sent_time;
+ u32 pkts_since_last_template;
+ u8 template_sent; /* used while sending vrfid-name mapping */
+
+} nfv9_server_info_t;
+
+/*
+ * This structure stores the Netflow Logging information on a per NFv9
+ * collector basis. It is allocated from a pool, and the index
+ * to it is stored in the VRF MAP structures
+ */
+typedef struct {
+ /*
+ * nat64_id will be 0 for a nat44 config, and i_vrf_id, i_vrf will be 0
+ * for a nat64 config. nat64_id is used while searching for a nat64
+ * collector and i_vrf* for a nat44 collector
+ */
+ /* Similarly for ds_lite, ds_lite_id is used and nat64_id,
+ * i_vrf_id shall be set to 0
+ */
+ u32 i_vrf_id; /* Inside VRF ID corresponding to this collector */
+ u16 i_vrf; /* Inside VRF (uidb_index) corresponding to this collector */
+ u16 nat64_id; /* NAT64 instance corresponding to this collector */
+ u16 ds_lite_id; /* DS Lite instance for this collector */
+
+ /*
+ * This field determines the maximum size of the Netflow V9 information
+ * that can be stored in a logging packet
+ */
+ u16 max_length_minus_max_record_size;
+
+ /*
+ * Indicates if the entry is already deleted
+ */
+ u16 deleted;
+
+ u16 pkt_length; /* Length of the current NFv9 information */
+ u16 record_length[MAX_RECORDS]; /* Length of each record type */
+ u16 total_record_count; /* Total number of records including templates */
+
+ u8 logging_policy;
+
+ /*
+ * Keep track of the time and packets since last template send
+ */
+ u32 last_template_sent_time;
+ u32 pkts_since_last_template;
+
+ /* Server info */
+ u32 server_index;
+
+ /*
+ * current logging context
+ */
+ vlib_buffer_t *current_logging_context;
+
+ /*
+ * Timestamp in UNIX seconds corresponding to when the current
+ * logging packet was created
+ */
+ u32 current_logging_context_timestamp;
+
+ /*
+ * Queued logging context waiting to be sent to the l3 infra node
+ */
+ vlib_buffer_t *queued_logging_context;
+
+ /*
+ * Headers corresponding to various records in this
+ * current nfv9 logging context
+ */
+ nfv9_header_t *nfv9_header;
+ cnat_nfv9_template_t *nfv9_template_header;
+ nfv9_dataflow_record_header_t *dataflow_header;
+ u8 *record[MAX_RECORDS];
+ u8 *next_data_ptr;
+ u8 last_record;
+ u32 nfv9_logging_next_index;
+ u32 ip4_input_node_index;
+ vlib_frame_t *f;
+ u32 *to_next;
+} cnat_nfv9_logging_info_t;
+
+
+/*
+ * Global structure for CGN APP configuration
+ */
+typedef struct {
+ /*
+ * Global NFv9 Logging Collector Index
+ */
+ u32 cnat_nfv9_global_collector_index;
+
+ /*
+ * Node index corresponding to the infra L3 output node
+ * to which the nfv9 logging node will send the packet
+ */
+ u16 cnat_nfv9_disp_node_index;
+
+ /*
+ * Whether we have initialized the NFv9 information
+ */
+ u8 cnat_nfv9_init_done;
+} cnat_nfv9_global_info_t;
+
+typedef enum {
+ cnat_nfv9_template_add_default,
+ cnat_nfv9_template_add_always
+} cnat_nfv9_template_add_flag_t;
+
+extern cnat_nfv9_template_t cnat_nfv9_template_info;
+
+extern cnat_nfv9_logging_info_t cnat_default_nfv9_logging_info;
+extern cnat_nfv9_logging_info_t *cnat_nfv9_logging_info_pool;
+
+extern cnat_nfv9_global_info_t cnat_nfv9_global_info;
+extern nfv9_server_info_t *nfv9_server_info_pool;
+
+/* #define DEBUG_NF_SERVER_CONFIG 1 */
+static inline void nfv9_delete_server_info(cnat_nfv9_logging_info_t *nfv9_info)
+{
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_info->server_index;
+ if(nfv9_info->server_index == EMPTY) {
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ PLATFORM_DEBUG_PRINT("Deleting empty server info\n");
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ return;
+ }
+
+ /* Check whether this server is still used by anyone; if not, delete it */
+ /* The caller of this function no longer needs it, so decrement the ref count */
+ server->ref_count--;
+ if(!(server->ref_count)) {
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ PLATFORM_DEBUG_PRINT("Deleting nfv9 server %x, %d at %d\n",
+ server->ipv4_address,
+ server->port,
+ nfv9_info->server_index);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ pool_put(nfv9_server_info_pool, server);
+ nfv9_info->server_index = EMPTY;
+ }
+#ifdef DEBUG_NF_SERVER_CONFIG
+ else {
+ if(my_instance_number == 1) {
+ PLATFORM_DEBUG_PRINT("Not Deleting nfv9 server %x, %d rc %d\n",
+ server->ipv4_address,
+ server->port,
+ server->ref_count);
+ }
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ return;
+}
+
+void handle_pending_nfv9_pkts();
+#endif /* __CNAT_LOGGING_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_pcp_server.h b/plugins/vcgn-plugin/vcgn/cnat_pcp_server.h
new file mode 100644
index 00000000000..c77c6a875f8
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_pcp_server.h
@@ -0,0 +1,398 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_pcp_server.h
+ *
+ * Copyright (c) 2009-2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_PCP_SERVER_H__
+#define __CNAT_PCP_SERVER_H__
+
+#include "dslite_defs.h"
+
+/* Debug utils of PCP */
+#define PCP_DBG(debug, ...) \
+ if(PREDICT_FALSE(cnat_pcp_debug_flag >= debug)) { \
+ printf("%s:%s:%d - ", \
+ __FILE__, __FUNCTION__, __LINE__);\
+ printf(__VA_ARGS__);\
+ printf("\n"); \
+ }
+
+#define PCP_DUMP_PDATA \
+ if(PREDICT_FALSE(cnat_pcp_debug_flag >= 100)) { \
+ printf("%s:%s:%d - \n", \
+ __FILE__, __FUNCTION__, __LINE__);\
+ printf("src - ip = %X, proto = %d, port = %d i_vrf = %d, o_vrf = %d\n", \
+ pcp_data.src_ip[3], pcp_data.proto, pcp_data.src_port, pcp_data.i_vrf, pcp_data.o_vrf); \
+ printf(" third party ip = %X\n", pcp_data.third_party_ip[3]); \
+ printf("map - ip = %X, port = %d \n", \
+ pcp_data.ext_ip[3], pcp_data.ext_port);\
+ printf("remote - ip = %X, port = %d \n", \
+ pcp_data.peer_ip[3], pcp_data.peer_port); \
+ printf("req life time = %d \n", pcp_data.req_lifetime); \
+ printf("drop = %d \n", pcp_data.drop);\
+ printf("udp_len = %d \n", pcp_data.udp_len); \
+ printf("pm = %p \n", pcp_data.pm); \
+ printf("cnat_proto = %X \n", pcp_data.cnat_proto); \
+ printf("inst_id = %X \n", pcp_data.inst_id); \
+ printf("======================================================\n"); \
+ }
+
+#define PCP_DUMP_PACKET(ip, len) pcp_hex_dump(ip, len)
+
+#ifdef TOBE_PORTED
+#define PCP_INCR(counter) pcp_counters.pcp_##counter++ ;
+#else
+#define PCP_INCR(counter)
+#endif
+
+typedef struct pcp_debug_counters {
+ u64 pcp_input;
+ u64 pcp_output;
+ u64 pcp_service_nat44;
+ u64 pcp_service_dslite;
+ /* below all are drops */
+ u64 pcp_drops;
+ u64 pcp_i2o_key_inuse;
+ u64 pcp_throttle_drops;
+ u64 pcp_udp_len;
+ u64 pcp_nrequest;
+ u64 pcp_min_udp_len;
+ u64 pcp_max_udp_len;
+ u64 pcp_mod4_len;
+ u64 pcp_invalid_3rd_len;
+ u64 pcp_invalid_option;
+ u64 pcp_version;
+ u64 pcp_invalid_opcode;
+ u64 pcp_invalid_client_ip;
+ u64 pcp_invalid_proto;
+ u64 pcp_invalid_port;
+ u64 pcp_invalid_vrfmap;
+ u64 pcp_invalid_ext_addr;
+ u64 pcp_out_addr_inuse;
+ u64 pcp_exact_match;
+ u64 pcp_exact_entry_created;
+ u64 pcp_exact_db_alloc_failed;
+ u64 pcp_udb_mismatch;
+ u64 pcp_noexact_db_allocated;
+ u64 pcp_static_entry_present;
+ u64 pcp_entry_deleted;
+ u64 pcp_3rd_party_option;
+
+ /* map counters */
+ u64 pcp_map_input;
+ u64 pcp_map_min_len;
+ u64 pcp_map_max_len;
+ u64 pcp_map_invalid_option;
+ u64 pcp_map_invalid_option_len;
+ u64 pcp_map_pref_fail_option;
+ u64 pcp_map_invalid_delete_req;
+ u64 pcp_map_delete_req;
+ u64 pcp_map_create_req;
+ u64 pcp_map_refresh;
+
+ /* peer counters */
+ u64 pcp_peer_input;
+ u64 pcp_peer_invalid_len;
+ u64 pcp_peer_delete_req;
+ u64 pcp_peer_create_req;
+ u64 pcp_peer_addr_mistmatch;
+ u64 pcp_peer_refresh;
+
+} pcp_debug_counters_t;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad[5];
+
+ /* better to have a grouped structure rather than individual
+ variables; any change in the counters will automatically
+ be reflected here */
+ pcp_debug_counters_t counters;
+} pcp_show_counters_resp_t ;
+
+
+
+/* PCP opcodes */
+typedef enum pcp_opcode {
+ PCP_OPCODE_MAP = 1,
+ PCP_OPCODE_PEER = 2
+}pcp_opcode_t;
+
+
+/* PCP options */
+typedef enum pcp_options {
+ PCP_OPTION_3RD_PARTY = 1,
+ PCP_OPTION_PREF_FAIL = 2,
+ PCP_OPTION_FILTER = 3
+} pcp_options_t;
+
+/* PCP Result codes */
+typedef enum pcp_result_codes {
+ PCP_SUCCESS = 0,
+ PCP_ERR_UNSUPP_VERSION = 1,
+ PCP_ERR_NOT_AUTHORIZED = 2,
+ PCP_ERR_MALFORMED_REQUEST = 3,
+ PCP_ERR_UNSUPP_OPCODE = 4,
+ PCP_ERR_UNSUPP_OPTION = 5,
+ PCP_ERR_MALFORMED_OPTION = 6,
+ PCP_ERR_NETWORK_FAILURE = 7,
+ PCP_ERR_NO_RESOURCES = 8,
+ PCP_ERR_UNSUPP_PROTOCOL = 9,
+ PCP_ERR_USER_EX_QUOTA = 10,
+ PCP_ERR_CANNOT_PROVIDE_EXTERNAL = 11,
+ PCP_ERR_ADDRESS_MISMATCH = 12,
+ PCP_ERR_EXCESSIVE_REMOTE_PEERS = 13
+} pcp_result_codes_t;
+
+#define PCP_DISABLED 0
+#define PCP_ENABLED 1
+
+#define PCP_DROP 1
+
+#define PCP_STATIC_LIFETIME 0xFFFFFFFF
+#define PCP_MAX_LIFETIME 0x00015180 /* 24 hours = 86400 seconds*/
+
+#define PCP_VERSION_SUPPORTED 1
+
+#define PCP_NO_PREF_FAIL_OPTION 0
+#define PCP_PREF_FAIL_OPTION 1
+
+#define CNAT_DEF_PCP_PORT 5351
+
+#define PCP_REQ_RESP_BIT 0x80
+#define PCP_RESPONSE(r_opcode) (r_opcode & PCP_REQ_RESP_BIT)
+#define PCP_REQUEST(r_opcode) !(PCP_RESPONSE(r_opcode))
+
+#define PCP_REQ_OPCODE(r_opcode) (r_opcode & 0x7F)
+
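+/*
+ * Illustrative example: a client sends r_opcode 0x01 (MAP request);
+ * the server sets the top bit in its reply, so r_opcode 0x81 means
+ * "response to MAP":
+ *   PCP_RESPONSE(0x81)   -> 0x80 (non-zero, i.e. a response)
+ *   PCP_REQUEST(0x81)    -> 0    (not a request)
+ *   PCP_REQ_OPCODE(0x81) -> 0x01 (PCP_OPCODE_MAP)
+ */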
+/* 24 bytes */
+#define PCP_COMMON_HDR_LEN sizeof(pcp_request_t)
+
+/* 8 bytes */
+#define UDP_HDR_LEN sizeof(udp_hdr_type_t)
+
+#define PCP_PREF_FAIL_OPTION_SIZE \
+ sizeof(pcp_prefer_fail_option_t)
+
+#define PCP_3RD_PARTY_OPTION_SIZE \
+ sizeof(pcp_3rd_party_option_t)
+
+#define PCP_MIN_LEN PCP_COMMON_HDR_LEN
+
+/* 24+8=32 bytes */
+#define PCP_MIN_UDP_LEN (PCP_MIN_LEN + UDP_HDR_LEN)
+
+#define PCP_MAX_LEN 1024
+
+/* 1024+8 = 1032 bytes */
+#define PCP_MAX_UDP_LEN (PCP_MAX_LEN + UDP_HDR_LEN)
+
+/* 24+ 24 = 48 bytes */
+#define PCP_MAP_OPCODE_MIN_LEN (PCP_COMMON_HDR_LEN + \
+ sizeof( pcp_map_option_specific_data_t))
+
+/* 24 + 44 = 68 bytes */
+#define PCP_PEER_OPCODE_MIN_LEN (PCP_COMMON_HDR_LEN + \
+ sizeof( pcp_peer_option_specific_data_t))
+
+/* 48 + 8 = 56 bytes */
+#define PCP_MAP_OPCODE_MIN_UDP_LEN (PCP_MAP_OPCODE_MIN_LEN + \
+ UDP_HDR_LEN )
+
+#define PCP_GET_MAP_OPTION_OFFSET(req) \
+ ((u8*)req + PCP_MAP_OPCODE_MIN_LEN)
+
+#define PCP_GET_PEER_OPTION_OFFSET(req) \
+ ((u8*)req + PCP_PEER_OPCODE_MIN_LEN)
+
+
+#define PCP_REQ_TOTAL_LEN(udp) (udp->udp_length - \
+ UDP_HDR_LEN)
+/* 56 + 4 = 60 bytes */
+#define PCP_MAP_OPCODE_PREF_FAIL_OPTION_LEN \
+ (PCP_MAP_OPCODE_MIN_UDP_LEN + \
+ sizeof(pcp_prefer_fail_option_t))
+
+
+/* 68 + 8 = 76 bytes */
+#define PCP_PEER_OPCODE_MIN_UDP_LEN (PCP_PEER_OPCODE_MIN_LEN + \
+ UDP_HDR_LEN)
+
+#define PCP_MUST_OPTION(option_code) (option_code & 0x80)
+
+
+
+/* 56 + 20 = 76*/
+#define PCP_DSLITE_MAP_OPCODE_MIN_UDP_LEN \
+ ( PCP_MAP_OPCODE_MIN_UDP_LEN + \
+ PCP_3RD_PARTY_OPTION_SIZE)
+
+/* 60 + 20 = 80 */
+#define PCP_DSLITE_MAP_OPCODE_MAX_UDP_LEN \
+ ( PCP_MAP_OPCODE_PREF_FAIL_OPTION_LEN + \
+ PCP_3RD_PARTY_OPTION_SIZE)
+
+/* 76 + 20 = 96 */
+#define PCP_DSLITE_PEER_OPCODE_MIN_UDP_LEN \
+ ( PCP_PEER_OPCODE_MIN_UDP_LEN + \
+ PCP_3RD_PARTY_OPTION_SIZE)
+
+
+#define PCP_SET_CNAT_PROTO(proto) \
+ pcp_data.cnat_proto = (proto == TCP_PROT) ? CNAT_TCP: \
+ (proto == UDP_PROT)? CNAT_UDP : CNAT_ICMP;
+
+#define PCP_SET_REQ_LIFETIME() \
+ if(pcp_data.db->flags & CNAT_DB_FLAG_STATIC_PORT) { \
+ pcp_data.db->proto_data.seq_pcp.pcp_lifetime = \
+ PCP_STATIC_LIFETIME; \
+ pcp_data.req_lifetime = PCP_STATIC_LIFETIME; \
+ } else { \
+ pcp_data.db->proto_data.seq_pcp.pcp_lifetime = \
+ pcp_data.req_lifetime + cnat_current_time ; \
+ }
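+/*
+ * In other words: a static-port mapping is pinned to PCP_STATIC_LIFETIME
+ * (0xFFFFFFFF), while a dynamic mapping stores its absolute expiry time,
+ * i.e. the requested lifetime added to cnat_current_time.
+ */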
+
+
+/* No more than PCP_THROTTLE_LIMIT delete requests
+ * will be handled per second.
+ * This excludes specific entries, in which
+ * only one entry needs to be deleted
+ */
+#define PCP_THROTTLE_LIMIT 2
+
+typedef struct pcp_request {
+ u8 ver;
+ u8 r_opcode;
+ u16 reserved;
+ u32 req_lifetime;
+ u32 ip[4]; /* ipv4 will be represented
+ by the ipv4 mapped ipv6 */
+} pcp_request_t;
+
+typedef struct pcp_response {
+ u8 ver;
+ u8 r_opcode;
+ u8 reserved;
+ u8 result_code;
+ u32 lifetime;
+ u32 epochtime;
+ u32 reserved1[3];
+} pcp_response_t;
+
+
+typedef struct pcp_options_hdr {
+ u8 code;
+ u8 reserved;
+ u16 len;
+ u8 data[0];
+} pcp_options_hdr_t;
+
+
+/* same for both request and response */
+typedef struct pcp_map_option_specific_data {
+ u8 protocol;
+ u8 reserved[3];
+ u16 int_port;
+ u16 ext_port;
+ u32 ext_ip[4]; /* ipv4 will be represented
+ by the ipv4 mapped ipv6 */
+} pcp_map_option_specific_data_t;
+
+/* same for both request and response */
+typedef struct pcp_peer_option_specific_data {
+ u8 protocol;
+ u8 reserved[3];
+ u16 int_port;
+ u16 ext_port;
+ u32 ext_ip[4]; /* ipv4 will be represented
+ by the ipv4 mapped ipv6 */
+ u16 peer_port;
+ u16 reserved1;
+ u32 peer_ip[4];
+} pcp_peer_option_specific_data_t;
+
+typedef struct pcp_prefer_fail_option {
+ u8 option;
+ u8 reserved;
+ u16 len;
+} pcp_prefer_fail_option_t;
+
+
+typedef struct pcp_3rd_party_option{
+ u8 option;
+ u8 reserved;
+ u16 len;
+ u32 ip[4];
+} pcp_3rd_party_option_t;
+
+/* structure used as pipeline data */
+
+typedef struct pcp_pipeline_data {
+
+ union {
+
+ u8 *p;
+ ipv4_header *ip ;
+ ipv6_header_t *ipv6 ;
+
+ } l3addr;
+
+ udp_hdr_type_t *udp;
+ pcp_request_t *req;
+ pcp_response_t *resp;
+ pcp_opcode_t opcode;
+ u32 src_ip[4];
+ u16 src_port;
+ u8 proto;
+ u16 i_vrf;
+ u16 o_vrf;
+ u32 ext_ip[4];
+ u16 ext_port;
+ u32 third_party_ip[4];
+
+ /* valid for peer opcode */
+ u32 peer_ip[4];
+ u32 peer_port;
+ u32 req_lifetime;
+ u32 udp_len;
+ pcp_options_t pref_fail;
+ pcp_options_t third_party;
+ u8 *option_spec;
+ pcp_result_codes_t ret_code;
+ cnat_portmap_v2_t *pm;
+ cnat_main_db_entry_t *db;
+ cnat_vrfmap_t *vrfmap;
+ dslite_table_entry_t *inst_ptr;
+ u16 inst_id;
+ u32 flags;
+ u16 cnat_proto;
+
+ /* does the packet need to be dropped? */
+ u8 drop;
+ /* nat44, dslite, nat64 */
+#define PCP_SERVICE_NAT44 1
+#define PCP_SERVICE_DSLITE 2
+#define PCP_SERVICE_NAT64 3
+ u8 service_type;
+
+#define PCP_REQ_ENTRY_PRESENT 1
+#define PCP_REQ_EXT_MAP_PRESENT 1
+ u8 state;
+} pcp_pipeline_data_t;
+
+#endif /* __CNAT_PCP_SERVER_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ports.c b/plugins/vcgn-plugin/vcgn/cnat_ports.c
new file mode 100644
index 00000000000..943fb3ed38c
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ports.c
@@ -0,0 +1,1113 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_ports.c - port allocator
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/bitmap.h>
+
+#include "cnat_db.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_logging.h"
+#include "spp_timers.h"
+#include "platform_common.h"
+#include "cgn_bitmap.h"
+#include "spp_platform_trace_log.h"
+#include "cnat_ports.h"
+
+#if 1 /* TOBE_PORTED */
+/* Following is defined elsewhere. */
+#define msg_spp_err(s) \
+do { \
+ fprintf(stderr,(i8 *)s); \
+ fputs("\n", stderr); \
+} while(0);
+#endif
+
+
+#define PM_90_PERCENT_USE 58980
+/*
+ * instance number provisioned from HW
+ */
+u8 my_instance_number = 0;
+
+typedef struct {
+ u32 cached_next_index;
+ /* $$$$ add data here */
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+} cnat_ports_main_t;
+
+cnat_ports_main_t cnat_ports_main;
+
+static u32 rseed_port; /* random number generator seed */
+
+void
+cnat_db_dump_portmap_for_vrf (u32 vrfmap_index)
+{
+ u32 i, pm_len;
+ cnat_vrfmap_t *my_vrfmap = cnat_map_by_vrf + vrfmap_index;
+ cnat_portmap_v2_t *pm, *my_pm __attribute__((unused));
+
+ pm = my_vrfmap->portmap_list;
+ pm_len = vec_len(pm);
+
+ for (i = 0; i < pm_len; i++) {
+ my_pm = pm + i;
+
+ PLATFORM_DEBUG_PRINT("pm %d: IPv4 Addr 0x%x - in use %d "
+ "private_ip_users_count %d\n",
+ i, my_pm->ipv4_address, my_pm->inuse,
+ my_pm->private_ip_users_count);
+ }
+}
+
+void
+cnat_db_dump_portmaps ()
+{
+ u32 i, vrfmap_index;
+
+ for (i = 0; i < CNAT_MAX_VRFMAP_ENTRIES; i++) {
+ vrfmap_index = vrf_map_array[i];
+
+ if (vrfmap_index == VRF_MAP_ENTRY_EMPTY) {
+ continue;
+ }
+
+ PLATFORM_DEBUG_PRINT("\n\nDumping the port map for uidb_index %d\n", i);
+ cnat_db_dump_portmap_for_vrf(vrfmap_index);
+ }
+}
+
+#ifndef NO_BULK_LOGGING
+static int check_if_stat_alloc_ok_for_bulk(cnat_portmap_v2_t *pm,
+ u16 i_port, bulk_alloc_size_t bulk_size,
+ u16 static_port_range)
+{
+ uword bit_test_result;
+ if(BULK_ALLOC_SIZE_NONE == bulk_size) return 1; /* No issues */
+
+ if(i_port < static_port_range) return 1; /* we don't want bulk */
+
+ i_port = (i_port/bulk_size) * bulk_size;
+ bit_test_result = cgn_clib_bitmap_check_if_all(pm->bm, i_port, bulk_size);
+ return(bit_test_result);
+}
+#else /* dummy */
+inline static int check_if_stat_alloc_ok_for_bulk(cnat_portmap_v2_t *pm,
+ u16 i_port, bulk_alloc_size_t bulk_size,
+ u16 static_port_range)
+{
+ return 1;
+}
+#endif /* NO_BULK_LOGGING */
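+/*
+ * Illustrative example for check_if_stat_alloc_ok_for_bulk() above:
+ * with bulk_size 16 and i_port 1000 (outside the static range), the
+ * check rounds down to the bulk boundary 992 and requires the 16
+ * bitmap bits 992..1007 (the whole bulk block) to still be free.
+ */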
+/*
+ * cnat_static_port_alloc_v2
+ * public ipv4 address/port allocator for Static Port commands;
+ * tries to allocate the same outside port as the inside port
+ */
+cnat_errno_t
+cnat_static_port_alloc_v2 (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 i_ipv4_address,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif
+ , u16 ip_n_to_1
+ )
+{
+ u32 i, hash_value, my_index, found, max_attempts;
+ u16 start_bit, new_port;
+ cnat_portmap_v2_t *my_pm = 0;
+ u32 pm_len = vec_len(pm);
+ uword bit_test_result;
+
+#ifndef NO_BULK_LOGGING
+ *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+#endif
+
+ if (PREDICT_FALSE(pm_len == 0)) {
+ return (CNAT_NO_POOL_ANY);
+ }
+
+ switch (atype) {
+
+ case PORT_ALLOC_ANY:
+
+ found = 0;
+
+ /*
+ * Try to hash the IPv4 address to get an index value to select the pm
+ */
+ hash_value = (i_ipv4_address & 0xffff) ^
+ ((i_ipv4_address >> 16) & 0xffff);
+
+ /*
+ * If pm_len <= 256, compact the hash to 8 bits
+ */
+ if (PREDICT_TRUE(pm_len <= 256)) {
+ hash_value = (hash_value & 0xff) ^ ((hash_value >> 8) & 0xff);
+ }
+
+ /*
+ * Ensure that the hash value is in the range 0 .. (pm_len-1)
+ */
+ my_index = hash_value % pm_len;
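+ /*
+ * Illustrative example: for i_ipv4_address 10.1.2.3 (0x0a010203),
+ * 0x0203 ^ 0x0a01 = 0x0802; with pm_len <= 256 this is further
+ * folded to 0x02 ^ 0x08 = 0x0a, and my_index = 0x0a % pm_len.
+ */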
+
+ for (i = 0; i < PORT_PROBE_LIMIT; i++) {
+ my_pm = pm + my_index;
+ if(PREDICT_TRUE(ip_n_to_1)) {
+ if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
+ /*
+ * Try to find a PM with at least 33% free and my_port free
+ */
+ if (PREDICT_TRUE((my_pm->inuse < ((BITS_PER_INST*2)/3)) &&
+ clib_bitmap_get_no_check(my_pm->bm,
+ i_port) == 1)
+#ifndef NO_BULK_LOGGING
+ && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
+ bulk_size,
+ static_port_range)
+#endif
+ ) {
+ found = 1;
+ break;
+ }
+ }
+
+ } else {
+ /*
+ * Try to find a PM with at least 33% free and my_port free
+ */
+ if (PREDICT_TRUE((my_pm->inuse < ((BITS_PER_INST*2)/3)) &&
+ clib_bitmap_get_no_check(my_pm->bm,
+ i_port) == 1)
+#ifndef NO_BULK_LOGGING
+ && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
+ bulk_size,
+ static_port_range)
+#endif
+ ) {
+ found = 1;
+ break;
+ }
+ }
+ my_index = (my_index + 1) % pm_len;
+ }
+
+ /*
+ * If not found, do it the "hard" way: best-fit.
+ */
+ if (!found) {
+ u32 min_inuse_any, min_inuse_myport;
+ u32 min_index_any, min_index_myport;
+
+ min_inuse_any = min_inuse_myport = PORTS_PER_ADDR + 1;
+ min_index_any = min_index_myport = ~0;
+ for (i = 0; i < pm_len; i++) {
+ my_pm = pm + i;
+ if(PREDICT_TRUE(ip_n_to_1)) {
+ if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
+ if (PREDICT_FALSE(my_pm->inuse < min_inuse_any)) {
+ min_inuse_any = my_pm->inuse;
+ min_index_any = my_pm - pm;
+ }
+ if (PREDICT_FALSE(my_pm->inuse < min_inuse_myport)) {
+ if (PREDICT_TRUE(clib_bitmap_get_no_check(
+ my_pm->bm,i_port) == 1)
+#ifndef NO_BULK_LOGGING
+ && check_if_stat_alloc_ok_for_bulk(my_pm,
+ i_port,bulk_size,static_port_range)
+#endif
+ ) {
+ min_inuse_myport = my_pm->inuse;
+ min_index_myport = my_pm - pm;
+ }
+ }
+
+ }
+
+ } else {
+ if (PREDICT_FALSE(my_pm->inuse < min_inuse_any)) {
+ min_inuse_any = my_pm->inuse;
+ min_index_any = my_pm - pm;
+ }
+ if (PREDICT_FALSE(my_pm->inuse < min_inuse_myport)) {
+ if (PREDICT_TRUE(clib_bitmap_get_no_check(
+ my_pm->bm, i_port) == 1)
+#ifndef NO_BULK_LOGGING
+ && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
+ bulk_size, static_port_range)
+#endif
+ ) {
+ min_inuse_myport = my_pm->inuse;
+ min_index_myport = my_pm - pm;
+ }
+ }
+ }
+ }
+
+ /*
+ * Check if we have an exactly matching PM that has
+ * myport free. If so use it. If no such PM is
+ * available, use any PM
+ */
+ if (PREDICT_TRUE(min_inuse_myport < PORTS_PER_ADDR)) {
+ my_pm = pm + min_index_myport;
+ my_index = min_index_myport;
+ found = 1;
+ } else if (PREDICT_TRUE(min_inuse_any < PORTS_PER_ADDR)) {
+ my_pm = pm + min_index_any;
+ my_index = min_index_any;
+ found = 1;
+ }
+ }
+
+ if (!found) {
+ return (CNAT_NO_PORT_ANY);
+ }
+ break;
+
+ case PORT_ALLOC_DIRECTED:
+ my_index = *index;
+ if (PREDICT_FALSE(my_index >= pm_len)) {
+ return (CNAT_INV_PORT_DIRECT);
+ }
+ my_pm = pm + my_index;
+ break;
+
+ default:
+ return (CNAT_ERR_PARSER);
+ }
+
+ /* Allocate a matching port if possible */
+ start_bit = i_port;
+ found = 0;
+ max_attempts = BITS_PER_INST;
+#ifndef NO_BULK_LOGGING
+ if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
+ (i_port >= static_port_range)) {
+ start_bit = (start_bit/bulk_size) * bulk_size;
+ max_attempts = BITS_PER_INST/bulk_size;
+ }
+#endif /* NO_BULK_LOGGING */
+
+ for (i = 0; i < max_attempts; i++) {
+#ifndef NO_BULK_LOGGING
+ if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
+ (i_port >= static_port_range)) {
+ bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
+ start_bit, bulk_size);
+ }
+ else
+#endif /* #ifndef NO_BULK_LOGGING */
+ bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);
+
+ if (PREDICT_TRUE(bit_test_result)) {
+#ifndef NO_BULK_LOGGING
+ if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
+ (i_port >= static_port_range)) {
+ *nfv9_log_req = start_bit;
+ if(i==0) new_port = i_port; /* First go */
+ else {
+ new_port = bit2port(start_bit);
+ if (pair_type == PORT_S_ODD && (new_port & 0x1) == 0)
+ new_port++;
+ }
+ found = 1;
+ break;
+ }
+ else {
+#endif /* NO_BULK_LOGGING */
+ new_port = bit2port(start_bit);
+ if (pair_type == PORT_S_ODD) {
+ if ((new_port & 0x1) == 1) {
+ found = 1;
+ break;
+ }
+ } else if (pair_type == PORT_S_EVEN) {
+ if ((new_port & 0x1) == 0) {
+ found = 1;
+ break;
+ }
+ } else {
+ found = 1;
+ break;
+ }
+#ifndef NO_BULK_LOGGING
+ }
+#endif
+ }
+#ifndef NO_BULK_LOGGING
+ if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
+ (i_port >= static_port_range))
+ start_bit = (start_bit + bulk_size) % BITS_PER_INST;
+ else {
+#endif /* NO_BULK_LOGGING */
+ start_bit = (start_bit + 1) % BITS_PER_INST;
+ if(PREDICT_FALSE(start_bit == 0)) {
+ start_bit = 1; /* Port 0 is invalid, so start from 1 */
+ }
+#ifndef NO_BULK_LOGGING
+ }
+#endif
+ } /* End of for loop */
+
+ if (!found) {
+ /* Port allocation failure */
+ if (atype == PORT_ALLOC_DIRECTED) {
+ return (CNAT_NOT_FOUND_DIRECT);
+ } else {
+ return (CNAT_NOT_FOUND_ANY);
+ }
+ }
+
+ /* Accounting */
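+ /* A set bit in the portmap bitmap means the port is free; clearing it
+ * here marks new_port as allocated, and inuse tracks the count.
+ */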
+ cgn_clib_bitmap_clear_no_check(my_pm->bm, new_port);
+ (my_pm->inuse)++;
+
+ *index = my_pm - pm;
+ *o_ipv4_address = my_pm->ipv4_address;
+
+ *o_port = new_port;
+
+ return (CNAT_SUCCESS);
+}
+
+/*
+ * Try to allocate a portmap structure based on atype field
+ */
+cnat_portmap_v2_t *
+cnat_dynamic_addr_alloc_from_pm (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ u32 *index,
+ cnat_errno_t *err,
+ u16 ip_n_to_1,
+ u32 *rseed_ip)
+{
+ u32 i, pm_len;
+ int my_index;
+ int min_inuse, min_index;
+
+ cnat_portmap_v2_t *my_pm = 0;
+ *err = CNAT_NO_POOL_ANY;
+
+ pm_len = vec_len(pm);
+
+ switch(atype) {
+ case PORT_ALLOC_ANY:
+ if (PREDICT_FALSE(pm_len == 0)) {
+ my_pm = 0;
+ *err = CNAT_NO_POOL_ANY;
+ goto done;
+ }
+
+ /* "Easy" way, first address with at least 200 free ports */
+ for (i = 0; i < PORT_PROBE_LIMIT; i++) {
+ *rseed_ip = randq1(*rseed_ip);
+ my_index = (*rseed_ip) % pm_len;
+ my_pm = pm + my_index;
+ if (PREDICT_FALSE(ip_n_to_1)) {
+ if(PREDICT_TRUE(ip_n_to_1 == 1)) {
+ if (PREDICT_FALSE(0 == my_pm->inuse)) {
+ goto done;
+ }
+ } else {
+ if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
+ if (PREDICT_FALSE(my_pm->inuse < ((BITS_PER_INST*2)/3))) {
+ goto done;
+ }
+ }
+ }
+ } else {
+ if (PREDICT_FALSE(my_pm->inuse < ((BITS_PER_INST*2)/3))) {
+ goto done;
+ }
+ }
+ }
+
+ /* "hard" way, best-fit. $$$$ Throttle complaint */
+ min_inuse = PORTS_PER_ADDR + 1;
+ min_index = ~0;
+ for (i = 0; i < pm_len; i++) {
+ my_pm = pm + i;
+ if (PREDICT_FALSE(ip_n_to_1)) {
+ if(PREDICT_TRUE(ip_n_to_1 == 1)) {
+ if (PREDICT_FALSE(!my_pm->inuse)) {
+ min_inuse = my_pm->inuse;
+ min_index = my_pm - pm;
+ }
+ } else {
+ if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
+ if (PREDICT_TRUE(my_pm->inuse < min_inuse)) {
+ min_inuse = my_pm->inuse;
+ min_index = my_pm - pm;
+ }
+
+ }
+ }
+
+ } else {
+ if (PREDICT_TRUE(my_pm->inuse < min_inuse)) {
+ min_inuse = my_pm->inuse;
+ min_index = my_pm - pm;
+ }
+ }
+ }
+
+ if (PREDICT_TRUE(min_inuse < PORTS_PER_ADDR)) {
+ my_pm = pm + min_index;
+ my_index = min_index;
+ goto done;
+ }
+
+ /* Completely out of ports */
+#ifdef DEBUG_PRINTF_ENABLED
+ PLATFORM_DEBUG_PRINT("%s out of ports\n", __FUNCTION__);
+#endif
+
+ my_pm = 0;
+ *err = CNAT_NO_PORT_ANY;
+ break;
+
+
+ case PORT_ALLOC_DIRECTED:
+ //ASSERT(*index < pm_len);
+ if (PREDICT_FALSE(*index >= pm_len)) {
+ my_pm = 0;
+ *err = CNAT_INV_PORT_DIRECT;
+ goto done;
+ }
+ my_pm = pm + *index;
+ my_index = *index;
+ break;
+
+ default:
+ msg_spp_err("bad allocation type in cnat_port_alloc");
+ my_pm = 0;
+ *err = CNAT_ERR_PARSER;
+ break;
+ }
+
+ done:
+ if (PREDICT_FALSE(my_pm == NULL)) {
+ return (my_pm);
+ }
+
+ if (PREDICT_FALSE(my_pm->inuse >= BITS_PER_INST)) {
+ my_pm = 0;
+ if (atype == PORT_ALLOC_DIRECTED) {
+ *err = CNAT_BAD_INUSE_DIRECT;
+ } else {
+ *err = CNAT_BAD_INUSE_ANY;
+ }
+ }
+
+ return (my_pm);
+}
+
+
+/*
+ * cnat_dynamic_port_alloc_v2
+ * public ipv4 address/port allocator for dynamic ports
+ *
+ * 200K users / 20M translations means vec_len(cnat_portmap) will be
+ * around 300.
+ *
+ */
+cnat_errno_t
+cnat_dynamic_port_alloc_v2 (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif
+ , u16 ip_n_to_1,
+ u32 *rseed_ip
+ )
+{
+ int i;
+ cnat_errno_t my_err = CNAT_NO_POOL_ANY;
+ cnat_portmap_v2_t *my_pm = 0;
+ u16 start_bit;
+ u16 new_port;
+ uword bit_test_result;
+ uword max_trys_to_find_port;
+
+ ASSERT(index);
+ ASSERT(o_ipv4_address);
+ ASSERT(o_port);
+
+ my_pm = cnat_dynamic_addr_alloc_from_pm(pm, atype, index, &my_err, ip_n_to_1,
+ rseed_ip);
+
+ if (PREDICT_FALSE(my_pm == NULL)) {
+ return (my_err);
+ }
+ if(PREDICT_FALSE(my_pm->dyn_full == 1)) {
+ if (atype == PORT_ALLOC_DIRECTED) {
+ return (CNAT_NOT_FOUND_DIRECT);
+ } else {
+ return (CNAT_NOT_FOUND_ANY);
+ }
+ }
+
+#if DEBUG > 1
+ PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
+ my_instance_number, my_pm->ipv4_address, my_pm->inuse);
+#endif
+
+ rseed_port = randq1(rseed_port);
+
+ /*
+ * Exclude the static port range for allocating dynamic ports
+ */
+ start_bit = (rseed_port) % (BITS_PER_INST - static_port_range);
+ start_bit = start_bit + static_port_range;
+
+#ifndef NO_BULK_LOGGING
+ *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+ if(BULK_ALLOC_SIZE_NONE != bulk_size)
+ {
+ /* We need the start port of the range to be aligned on an integer
+ * multiple of bulk_size */
+ max_trys_to_find_port = BITS_PER_INST/bulk_size;
+ start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
+ }
+ else
+#endif /* #ifndef NO_BULK_LOGGING */
+ max_trys_to_find_port = BITS_PER_INST;
+
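+ /*
+ * Illustrative example of the bulk alignment above: with bulk_size 16
+ * and a random start_bit of 10001, ((10001 + 15) / 16) * 16 = 10016,
+ * i.e. the scan starts at the next multiple of the bulk size.
+ */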
+ /* Allocate a random port / port-pair */
+ for (i = 0; i < max_trys_to_find_port; i++) {
+
+ /* start_bit is only a u16, so it can roll over and become zero */
+ if (PREDICT_FALSE( /* (start_bit >= BITS_PER_INST) || FIXME u16 cannot be >= 65536 */
+ (start_bit < static_port_range))) {
+ start_bit = static_port_range;
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+ }
+ /* Scan forward from random position */
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
+ start_bit, bulk_size);
+ }
+ else
+#endif /* #ifndef NO_BULK_LOGGING */
+ bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);
+
+ if (PREDICT_TRUE(bit_test_result)) {
+ new_port = bit2port(start_bit);
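+            /*
+             * For a bulk allocation, record the first port of the block
+             * in *nfv9_log_req, presumably so the caller can emit a
+             * single NetFlow v9 record covering the whole bulk range.
+             */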
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size)
+ *nfv9_log_req = new_port;
+#endif
+ if ((pair_type == PORT_S_ODD) &&
+ (!(new_port & 0x1))) {
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ start_bit++; /* Just use the next one in the bulk range */
+ new_port++;
+ goto found2;
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+ goto notfound;
+ } else if ((pair_type == PORT_S_EVEN) &&
+ (new_port & 0x1)) {
+ goto notfound;
+ }
+
+ /* OK we got one or two suitable ports */
+ goto found2;
+ }
+
+ notfound:
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size)
+ start_bit += bulk_size;
+ else
+#endif /* #ifndef NO_BULK_LOGGING */
+ start_bit++;
+
+ } /* end of for loop */
+
+ /* Completely out of ports */
+
+ /* Port allocation failure */
+    /* Set the dyn_full flag; it is checked for subsequent dynamic
+     * sessions before searching for a port again
+     */
+ if (atype == PORT_ALLOC_DIRECTED) {
+ my_pm->dyn_full = 1;
+ return (CNAT_NOT_FOUND_DIRECT);
+ } else {
+ my_pm->dyn_full = 1;
+ return (CNAT_NOT_FOUND_ANY);
+ }
+
+
+ found2:
+
+ /* Accounting */
+ cgn_clib_bitmap_clear_no_check (my_pm->bm, start_bit);
+ (my_pm->inuse)++;
+
+ *index = my_pm - pm;
+ *o_ipv4_address = my_pm->ipv4_address;
+
+ *o_port = new_port;
+ return (CNAT_SUCCESS);
+}
+
+#ifdef TOBE_PORTED
+/*
+ * cnat_alloc_port_from_pm
+ * Given a portmap structure find port/port_pair that are free
+ *
+ * The assumption in this function is that bit in bm corresponds
+ * to a port number. This is TRUE and hence there is no call
+ * to the function bit2port here, though it is done in other
+ * places in this file.
+ *
+ */
+static u32
+cnat_alloc_port_from_pm (
+ u32 start_port,
+ u32 end_port,
+ cnat_portmap_v2_t *my_pm,
+ port_pair_t pair_type
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif /* #ifndef NO_BULK_LOGGING */
+ )
+{
+ u32 i;
+ u32 start_bit;
+ u32 total_ports = end_port - start_port + 1;
+ uword bit_test_result;
+ uword max_trys_to_find_port;
+
+ rseed_port = randq1(rseed_port);
+
+ start_bit = rseed_port % total_ports;
+ start_bit = start_bit + start_port;
+#ifndef NO_BULK_LOGGING
+ *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
+ if(BULK_ALLOC_SIZE_NONE != bulk_size)
+ {
+        /* We need the start port of the range to be aligned on an integer
+         * multiple of bulk_size */
+ max_trys_to_find_port = total_ports/bulk_size;
+ start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
+ }
+ else
+#endif /* #ifndef NO_BULK_LOGGING */
+ max_trys_to_find_port = total_ports;
+
+ /* Allocate a random port / port-pair */
+ for (i = 0; i < max_trys_to_find_port; i++) {
+        /* Wrap start_bit back into [start_port, end_port] if it has run past the range */
+ if (PREDICT_FALSE((start_bit >= end_port) ||
+ (start_bit < start_port))) {
+ start_bit = start_port;
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+ }
+
+ /* Scan forward from random position */
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
+ start_bit, bulk_size);
+ }
+ else
+#endif /* #ifndef NO_BULK_LOGGING */
+ bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);
+
+ if (PREDICT_TRUE(bit_test_result)) {
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ /* Got the entire bulk range */
+ *nfv9_log_req = bit2port(start_bit);
+ return start_bit;
+ } else {
+#endif /* #ifndef NO_BULK_LOGGING */
+ /*
+ * For PORT_PAIR, first port has to be Even
+ * subsequent port <= end_port
+ * subsequent port should be unallocated
+ */
+ if ((start_bit & 0x1) ||
+ ((start_bit + 1) > end_port) ||
+ (clib_bitmap_get_no_check(my_pm->bm,
+ (start_bit + 1)) == 0)) {
+ goto notfound;
+ }
+ return (start_bit);
+#ifndef NO_BULK_LOGGING
+ }
+#endif /* #ifndef NO_BULK_LOGGING */
+ } /* if( free port found ) */
+
+notfound:
+#ifndef NO_BULK_LOGGING
+ if(BULK_ALLOC_SIZE_NONE != bulk_size) {
+ start_bit += bulk_size;
+ } else
+#endif /* #ifndef NO_BULK_LOGGING */
+ start_bit++;
+
+ }
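+    /*
+     * No free port (or port pair) was found in [start_port, end_port];
+     * BITS_PER_INST acts as the failure sentinel that callers test with
+     * "alloc_bit < BITS_PER_INST".
+     */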
+ return (BITS_PER_INST);
+}
+
+/*
+ * cnat_dynamic_port_alloc_rtsp
+ * public ipv4 address/port allocator for dynamic ports
+ *
+ * 200K users / 20M translations means vec_len(cnat_portmap) will be
+ * around 300.
+ *
+ */
+
+cnat_errno_t
+cnat_dynamic_port_alloc_rtsp (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u16 start_range,
+ u16 end_range,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif
+ , u32 *rseed_ip
+ )
+{
+
+ u32 current_timestamp;
+ cnat_errno_t my_err = CNAT_NO_POOL_ANY;
+ cnat_portmap_v2_t *my_pm = 0;
+ u32 alloc_bit;
+
+ ASSERT(index);
+ ASSERT(o_ipv4_address);
+ ASSERT(o_port);
+
+ my_pm = cnat_dynamic_addr_alloc_from_pm(pm, atype, index, &my_err, 0,rseed_ip);
+
+ if (PREDICT_FALSE(my_pm == NULL)) {
+ return (my_err);
+ }
+
+#if DEBUG > 1
+ PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
+ my_instance_number, my_pm->ipv4_address, my_pm->inuse);
+#endif
+
+ alloc_bit =
+ cnat_alloc_port_from_pm(start_range, end_range, my_pm, pair_type
+#ifndef NO_BULK_LOGGING
+ , bulk_size, nfv9_log_req
+#endif /* #ifndef NO_BULK_LOGGING */
+ );
+
+ if (alloc_bit < BITS_PER_INST) {
+ if (pair_type == PORT_PAIR) {
+ /* Accounting */
+ cgn_clib_bitmap_clear_no_check (my_pm->bm, alloc_bit);
+ cgn_clib_bitmap_clear_no_check (my_pm->bm, alloc_bit+1);
+ (my_pm->inuse) += 2;
+ } else {
+ /* Accounting */
+ cgn_clib_bitmap_clear_no_check (my_pm->bm, alloc_bit);
+ (my_pm->inuse)++;
+ }
+
+ *index = my_pm - pm;
+ *o_ipv4_address = my_pm->ipv4_address;
+
+        *o_port = bit2port(alloc_bit);
+
+ return (CNAT_SUCCESS);
+ }
+
+ /* Completely out of ports */
+ current_timestamp = spp_trace_log_get_unix_time_in_seconds();
+ if (PREDICT_FALSE((current_timestamp - my_pm->last_sent_timestamp) >
+ 1000)) {
+ spp_printf(CNAT_NO_EXT_PORT_AVAILABLE, 0, NULL);
+ my_pm->last_sent_timestamp = current_timestamp;
+ }
+
+
+ /* Port allocation failure */
+ if (atype == PORT_ALLOC_DIRECTED) {
+ return (CNAT_NOT_FOUND_DIRECT);
+ } else {
+ return (CNAT_NOT_FOUND_ANY);
+ }
+}
+#else
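+/*
+ * RTSP dynamic port allocation is compiled only when TOBE_PORTED is
+ * defined; this stub keeps the symbol available and always reports
+ * failure.
+ */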
+cnat_errno_t
+cnat_dynamic_port_alloc_rtsp (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u16 start_range,
+ u16 end_range,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif
+ , u32 *rseed_ip
+ )
+{
+ return (CNAT_NOT_FOUND_ANY);
+}
+#endif
+
+
+/*
+ * cnat_mapped_static_port_alloc_v2
+ * Mark the given port on the specified public ipv4 address as in use
+ */
+cnat_errno_t
+cnat_mapped_static_port_alloc_v2 (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ u32 *index,
+ u32 ipv4_address,
+ u16 port
+#ifndef NO_BULK_LOGGING
+ , int *nfv9_log_req,
+ bulk_alloc_size_t bulk_size
+#endif
+ , u16 ip_n_to_1
+ )
+{
+ int i;
+ u32 pm_len;
+ u16 bm_bit;
+ cnat_portmap_v2_t *my_pm = 0;
+ u32 my_index;
+
+ ASSERT(index);
+
+ /*
+ * Map the port to the bit in the pm bitmap structure.
+     * With the current 1:1 port2bit mapping, port number x
+     * corresponds to bit position x in the bitmap.
+ */
+ bm_bit = port2bit(port);
+
+ pm_len = vec_len(pm);
+
+ switch(atype) {
+ case PORT_ALLOC_ANY:
+ if (PREDICT_FALSE(pm_len == 0)) {
+ return (CNAT_NO_POOL_ANY);
+ }
+
+ /*
+ * Find the pm that is allocated for this translated IP address
+ */
+ my_index = pm_len;
+
+ for (i = 0; i < pm_len; i++) {
+ my_pm = pm + i;
+ if (PREDICT_FALSE(my_pm->ipv4_address == ipv4_address)) {
+ my_index = i;
+ break;
+ }
+ }
+
+ if ((PREDICT_FALSE(my_index >= pm_len)) ||
+ ((PREDICT_FALSE(ip_n_to_1)) && (PREDICT_TRUE(my_pm->private_ip_users_count >= ip_n_to_1)))) {
+ return (CNAT_NO_POOL_ANY);
+ }
+
+ break;
+
+ case PORT_ALLOC_DIRECTED:
+        if (PREDICT_FALSE(*index >= pm_len)) {
+ return (CNAT_INV_PORT_DIRECT);
+ }
+
+ my_index = *index;
+ my_pm = pm + my_index;
+ if (PREDICT_FALSE(my_pm->ipv4_address != ipv4_address)) {
+ if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) {
+                PLATFORM_DEBUG_PRINT("Delete all main db entries for that particular inside ipv4 address\n");
+ }
+ return (CNAT_INV_PORT_DIRECT);
+ }
+
+ break;
+
+ default:
+ msg_spp_err("bad allocation type in cnat_port_alloc");
+ return (CNAT_ERR_PARSER);
+ }
+
+
+ if (PREDICT_FALSE(my_pm == NULL)) {
+ return (CNAT_NO_POOL_ANY);
+ }
+
+ /*
+ * Check if the port is already allocated to some other mapping
+ */
+ if (PREDICT_FALSE(clib_bitmap_get_no_check (my_pm->bm, bm_bit) == 0)) {
+ return (CNAT_NO_POOL_ANY);
+ }
+
+#if DEBUG > 1
+ PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
+ my_instance_number, my_pm->ipv4_address, my_pm->inuse);
+#endif
+
+ /*
+ * Indicate that the port is already allocated
+ */
+ cgn_clib_bitmap_clear_no_check (my_pm->bm, bm_bit);
+ (my_pm->inuse)++;
+
+ *index = my_index;
+
+ return (CNAT_SUCCESS);
+}
+
+void cnat_port_free_v2 (
+ cnat_portmap_v2_t *pm,
+ int index,
+ port_pair_t pair_type,
+ u16 base_port,
+ u16 static_port_range)
+{
+ cnat_portmap_v2_t *my_pm;
+ uword bit;
+
+ /* check for valid portmap */
+    if (PREDICT_FALSE(index >= vec_len(pm))) {
+ spp_printf(CNAT_INVALID_INDEX_TO_FREE_PORT, 0, 0);
+ return;
+ }
+
+ my_pm = pm + index;
+ bit = port2bit(base_port);
+
+#if DEBUG > 0
+ if(clib_bitmap_get_no_check(my_pm->bm, bit))
+ ASSERT(clib_bitmap_get_no_check(my_pm->bm, bit) == 0);
+#endif
+
+ cgn_clib_bitmap_set_no_check(my_pm->bm, bit);
+
+ my_pm->inuse -= 1;
+ if(base_port >= static_port_range) {
+ /* Clear the full flag. we can have a new dynamic session now */
+ my_pm->dyn_full = 0;
+ }
+
+ return;
+}
+
+void cnat_portmap_dump_v2 (cnat_portmap_v2_t *pm, u16 print_limit)
+{
+ int i;
+ u32 inuse =0;
+
+ ASSERT(pm);
+
+ for (i = 0; i < BITS_PER_INST; i++) {
+ if (PREDICT_FALSE(clib_bitmap_get_no_check (pm->bm, i) == 0)) {
+ if (PREDICT_TRUE(inuse++ < print_limit))
+ PLATFORM_DEBUG_PRINT(" %d", bit2port(i));
+ }
+ }
+ if (PREDICT_FALSE(inuse >= print_limit)) {
+ PLATFORM_DEBUG_PRINT("%d printed, print limit is %d\n",
+ inuse, print_limit);
+ }
+ PLATFORM_DEBUG_PRINT("\n");
+}
+
+
+/*
+ * cnat_ports_init
+ */
+clib_error_t *cnat_ports_init(vlib_main_t *vm)
+{
+ cnat_ports_main_t *mp = &cnat_ports_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+
+ /* suppress crypto-random port numbering */
+#ifdef SOON
+ if (spp_get_int_prop("no_crypto_random_ports") == 0)
+ crypto_random32(&seed);
+#endif
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION(cnat_ports_init);
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_ports.h b/plugins/vcgn-plugin/vcgn/cnat_ports.h
new file mode 100644
index 00000000000..bc1fb0d24a8
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_ports.h
@@ -0,0 +1,208 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_ports.h - port database definitions
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_PORTS_H__
+#define __CNAT_PORTS_H__
+
+#include "platform_common.h"
+#include "cnat_bulk_port_defs.h"
+
+#define PORTS_PER_ADDR 65536
+
+#define CNAT_INSTS PLATFORM_CNAT_INSTS
+
+#define BITS_PER_INST (PORTS_PER_ADDR)
+
+/*
+ * Ensure that at least a few 4 digit ports are available for RTSP
+ * in case we want to map 4 digit inside ports to 4 digit outside ports
+ */
+#define MIN_STATIC_PORT_RANGE_FOR_RTSP (9900)
+
+extern u8 my_instance_number;
+
+/*
+ * Now it is a 1-to-1 mapping between bit and port values
+ */
+static inline u16 bit2port (u32 bit)
+{
+ return bit;
+}
+
+static inline uword port2bit (u16 port)
+{
+ return port;
+}
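+
+/*
+ * With this 1:1 mapping, bit 8080 corresponds to port 8080, and the
+ * whole 16-bit port space fits in the BITS_PER_INST (= PORTS_PER_ADDR
+ * = 65536) bits kept per public address.
+ */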
+
+/*
+ * Port bitmap structure
+ * THIS structure is not used and is to be REMOVED.
+ */
+
+
+typedef struct {
+ u32 ipv4_address; /* native bit order */
+ u16 vrf;
+ u16 pad;
+ u32 threshold_crossed;
+ uword bm[(BITS_PER_INST + BITS(uword)-1)/BITS(uword)];
+} cnat_portmap_t;
+
+//cnat_portmap_t *cnat_portmap;
+
+
+typedef struct {
+ u32 inuse;
+ u32 delete_time;
+ u32 ipv4_address; /* native bit order */
+ u32 last_sent_timestamp;
+ uword bm[(BITS_PER_INST + BITS(uword)-1)/BITS(uword)];
+ u32 dyn_full;
+ u32 private_ip_users_count; /* number of private ip's(subscribers) to this
+ public ip */
+} cnat_portmap_v2_t;
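+
+/*
+ * Sizing note: with BITS_PER_INST = 65536, the bm[] array above is
+ * 65536/BITS(uword) machine words, i.e. 1024 words (8 KB) per public
+ * address on a typical 64-bit build.
+ */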
+
+
+typedef enum {
+ PORT_SINGLE=0,
+ PORT_PAIR=1,
+ PORT_S_EVEN=2,
+ PORT_S_ODD=3,
+} port_pair_t;
+
+typedef enum {
+ PORT_TYPE_DYNAMIC=0,
+ PORT_TYPE_STATIC=1,
+ PORT_TYPE_RTSP=2,
+} port_type_t;
+
+
+typedef enum {
+ PORT_ALLOC_ANY=1,
+ PORT_ALLOC_DIRECTED=2,
+} port_alloc_t;
+
+#define PORT_PROBE_LIMIT 20
+
+
+/*
+ * randq1
+ * Linear congruential random number generator with
+ * extensively studied properties. See Numerical Recipes in C
+ * 2nd Ed. page 284. Known to behave according to the test vector
+ * supplied in the text, on X86 and Octeon.
+ */
+static inline u32 randq1 (u32 prev)
+{
+ return (1664525L*prev + 1013904223L);
+}
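+
+/*
+ * Example values (the u32 return type reduces the result modulo 2^32):
+ *   randq1(0) == 1013904223
+ *   randq1(1) == 2678429748
+ */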
+
+cnat_errno_t
+cnat_static_port_alloc_v2(
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 i_ipv4_address,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif /* NO_BULK_LOGGING */
+ , u16 ip_n_to_1
+ );
+
+cnat_errno_t
+cnat_mapped_static_port_alloc_v2 (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ u32 *index,
+ u32 ipv4_address,
+ u16 port
+#ifndef NO_BULK_LOGGING
+ , int *nfv9_log_req,
+ bulk_alloc_size_t bulk_size
+#endif
+ , u16 ip_n_to_1
+ );
+
+cnat_errno_t
+cnat_dynamic_port_alloc_v2(
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port,
+ u16 static_port_range
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif
+ , u16 ip_n_to_1,
+ u32 *rseed_ip
+ );
+
+
+cnat_errno_t
+cnat_dynamic_port_alloc_rtsp (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u16 start_range,
+ u16 end_range,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port
+#ifndef NO_BULK_LOGGING
+ , bulk_alloc_size_t bulk_size,
+ int *nfv9_log_req
+#endif
+ , u32 *rseed_ip
+ );
+
+void cnat_port_free_v2(
+ cnat_portmap_v2_t *pm,
+ int index,
+ port_pair_t ptype,
+ u16 base_port,
+ u16 static_port_range);
+
+void cnat_portmap_dump_v2(cnat_portmap_v2_t *pm,
+ u16 print_limit);
+
+
+
+cnat_errno_t
+nat64_static_port_alloc (
+ cnat_portmap_v2_t *pm,
+ port_alloc_t atype,
+ port_pair_t pair_type,
+ u32 *i_ipv6_address,
+ u16 i_port,
+ u32 *index,
+ u32 *o_ipv4_address,
+ u16 *o_port);
+
+
+
+#endif /* __CNAT_PORTS_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_show.c b/plugins/vcgn-plugin/vcgn/cnat_show.c
new file mode 100644
index 00000000000..68c52756d2a
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_show.c
@@ -0,0 +1,810 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_show.c - translation database show/dump routines
+ *
+ * Copyright (c) 2007-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+
+#include "cnat_db.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_logging.h"
+#include "spp_ctx.h"
+#include "spp_timers.h"
+#include "platform_common.h"
+#include "cnat_syslog.h"
+#include "cnat_v4_pptp_alg.h"
+#include "platform_common.h"
+
+#ifndef TOBE_PORTED
+/* The following variable is in cnat_config_msg_handler.c which
+ * is to be ported later.. if required
+ */
+u32 total_address_pool_allocated = 0;
+#endif
+
+#ifndef NO_BULK_LOGGING
+#define CNAT_MY_VRFMAP_PRINT \
+PLATFORM_DEBUG_PRINT("i-uidx 0x%x o-uidx 0x%x i-vrfid 0x%x o-vrfid 0x%x\n" \
+ "status %d del time 0x%x tcp mss 0x%x pm list 0x%x\n" \
+ "bulk size %d\n" \
+ "ip n:1 %d\n" \
+ "NFV9 template index 0x%x\n" \
+ "SYSLOG template index 0x%x\n" \
+ "Netflow Session Logging %d \n" \
+ "Syslog Session Logging %d \n" \
+ "PCP Server 0x%x, Port %u \n", \
+ my_vrfmap->i_vrf, my_vrfmap->o_vrf, my_vrfmap->i_vrf_id, \
+ my_vrfmap->o_vrf_id, my_vrfmap->status, my_vrfmap->delete_time, \
+ my_vrfmap->tcp_mss, my_vrfmap->portmap_list, \
+ BULKSIZE_FROM_VRFMAP(my_vrfmap), \
+ my_vrfmap->ip_n_to_1, \
+ my_vrfmap->nfv9_logging_index, \
+ my_vrfmap->syslog_logging_index,\
+ my_vrfmap->nf_logging_policy, \
+ my_vrfmap->syslog_logging_policy, \
+ my_vrfmap->pcp_server_addr, \
+ my_vrfmap->pcp_server_port);
+#else
+#define CNAT_MY_VRFMAP_PRINT \
+PLATFORM_DEBUG_PRINT("i-uidx 0x%x o-uidx 0x%x i-vrfid 0x%x o-vrfid 0x%x\n" \
+ "status %d del time 0x%x tcp mss 0x%x pm list 0x%x\n" \
+ "NFV9 template index 0x%x\n ip n:1 %d\n", \
+ my_vrfmap->i_vrf, my_vrfmap->o_vrf, my_vrfmap->i_vrf_id, \
+ my_vrfmap->o_vrf_id, my_vrfmap->status, my_vrfmap->delete_time, \
+ my_vrfmap->tcp_mss, my_vrfmap->portmap_list, \
+ my_vrfmap->nfv9_logging_index, my_vrfmap->ip_n_to_1);
+#endif /* NO_BULK_LOGGING */
+
+#define CNAT_MY_LOGGING_INFO_PRINT \
+do { \
+ cnat_syslog_logging_info_t *my_syslog_info = 0; \
+ PLATFORM_DEBUG_PRINT("SYSLOG config: \n"); \
+ pool_foreach (my_syslog_info, cnat_syslog_logging_info_pool, ({ \
+ if (my_syslog_info->i_vrf == my_vrfmap->i_vrf) { \
+ PLATFORM_DEBUG_PRINT(" \
+ ipv4[0x%x], port[%u], hostname[%s]\n", \
+ my_syslog_info->ipv4_address, my_syslog_info->port, \
+ my_syslog_info->header_hostname); \
+ break; \
+ } \
+ })); \
+}while (0) \
+;
+
+
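+/*
+ * Expects the address in host byte order, e.g. printf_ipv4(0x0A000001)
+ * prints "10.0.0.1".
+ */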
+void printf_ipv4(u32 ad)
+{
+ u8 a __attribute__((unused)), b __attribute__((unused)),
+ c __attribute__((unused)), d __attribute__((unused));
+
+ a = ad>>24;
+ b = (ad>>16) & 0xFF;
+ c = (ad>>8) & 0xFF;
+ d = (ad>>0) & 0xFF;
+
+ PLATFORM_DEBUG_PRINT("%d.%d.%d.%d", a, b, c, d);
+}
+void cnat_main_db_entry_dump (cnat_main_db_entry_t *db)
+{
+ PLATFORM_DEBUG_PRINT("Main DB entry at %p, index %ld dst_ip %x\n",
+ db, db - cnat_main_db, db->dst_ipv4);
+ /* only dump hash next index if it's non EMPTY */
+ if (db->out2in_hash.next != EMPTY || db->in2out_hash.next != EMPTY)
+ PLATFORM_DEBUG_PRINT("out2in hash %u, in2out hash %u\n",
+ db->out2in_hash.next,
+ db->in2out_hash.next);
+ PLATFORM_DEBUG_PRINT("out2in key ipv4 0x%08X, port 0x%04X (%5d), vrf %d, protocol %s\n",
+ db->out2in_key.k.ipv4,
+ db->out2in_key.k.port,
+ db->out2in_key.k.port,
+ db->out2in_key.k.vrf & CNAT_VRF_MASK,
+ (db->out2in_key.k.vrf & CNAT_PRO_MASK) == CNAT_UDP ? "UDP" :
+ ((db->in2out_key.k.vrf & CNAT_PRO_MASK) == CNAT_TCP ? "TCP" :
+ ((db->in2out_key.k.vrf & CNAT_PRO_MASK) == CNAT_ICMP ? "ICMP" : "PPTP ALG")));
+
+ PLATFORM_DEBUG_PRINT("in2out key ipv4 0x%08X, port 0x%04X (%5d), vrf %d, protocol %s\n",
+ db->in2out_key.k.ipv4,
+ db->in2out_key.k.port,
+ db->in2out_key.k.port,
+ db->in2out_key.k.vrf & CNAT_VRF_MASK,
+ (db->in2out_key.k.vrf & CNAT_PRO_MASK) == CNAT_UDP ? "UDP" :
+ ((db->in2out_key.k.vrf & CNAT_PRO_MASK) == CNAT_TCP ? "TCP" :
+ ((db->in2out_key.k.vrf & CNAT_PRO_MASK) == CNAT_ICMP ? "ICMP" : "UNKNOWN")));
+
+ PLATFORM_DEBUG_PRINT("user %d, user ports (nxt) %d (prev) %d, vrfmap_index 0x%x\n",
+ db->user_index, db->user_ports.next, db->user_ports.prev,
+ db->vrfmap_index);
+ PLATFORM_DEBUG_PRINT("timeout %d \n", db->timeout);
+ PLATFORM_DEBUG_PRINT("flags 0x%x ", db->flags);
+
+ if (db->flags & CNAT_DB_FLAG_TCP_ACTIVE) {
+ PLATFORM_DEBUG_PRINT(" TCP_ACTIVE ");
+ } else if (db->flags & CNAT_DB_FLAG_UDP_ACTIVE) {
+ PLATFORM_DEBUG_PRINT(" UDP_ACTIVE ");
+ } else if (db->flags & CNAT_DB_FLAG_STATIC_PORT) {
+ PLATFORM_DEBUG_PRINT(" STATIC_PORT ");
+ }
+
+ PLATFORM_DEBUG_PRINT(" ALG dlt0 0x%02X dlt1 0x%02X\n", db->alg.alg_dlt[0], db->alg.alg_dlt[1]);
+ PLATFORM_DEBUG_PRINT("\n");
+
+ PLATFORM_DEBUG_PRINT("out2in_pkts: %u\n", db->out2in_pkts);
+ PLATFORM_DEBUG_PRINT("in2out_pkts: %u\n", db->in2out_pkts);
+ PLATFORM_DEBUG_PRINT("entry_expires: %u current time: %u\n", db->entry_expires, cnat_current_time);
+ PLATFORM_DEBUG_PRINT("-------------------------\n");
+}
+
+void cnat_user_db_entry_dump (cnat_user_db_entry_t *up)
+{
+ u32 db_entry_index, first_db_entry_index;
+ cnat_main_db_entry_t *ep;
+
+ PLATFORM_DEBUG_PRINT("User DB entry at %p, index %ld\n",
+ up, up - cnat_user_db);
+    PLATFORM_DEBUG_PRINT("translation list head index %u, %u translations portmap index 0x%x\n",
+ up->translation_list_head_index,
+ up->ntranslations, up->portmap_index);
+ PLATFORM_DEBUG_PRINT("source ipv4 0x%x, source port 0x%x, vrf %d\n",
+ up->key.k.ipv4,
+ up->key.k.port,
+ up->key.k.vrf);
+ first_db_entry_index = db_entry_index = up->translation_list_head_index;
+ if (first_db_entry_index != EMPTY) {
+ PLATFORM_DEBUG_PRINT("Port translation list:\n");
+ do {
+ PLATFORM_DEBUG_PRINT(" [%d]\n", db_entry_index);
+ ep = cnat_main_db + db_entry_index;
+ db_entry_index = ep->user_ports.next;
+ } while (first_db_entry_index != db_entry_index);
+ } else {
+ PLATFORM_DEBUG_PRINT("WARNING: empty translation list!\n");
+ }
+ PLATFORM_DEBUG_PRINT("-------------------------\n");
+}
+
+void cnat_user_db_entry_dump_summary (cnat_user_db_entry_t *up)
+{
+ u32 db_entry_index, first_db_entry_index;
+ u32 total_entries = 0;
+
+ PLATFORM_DEBUG_PRINT("User DB entry at %p, index %ld\n",
+ up, up - cnat_user_db);
+    PLATFORM_DEBUG_PRINT("translation list head index %u, %u translations portmap index 0x%x\n",
+ up->translation_list_head_index,
+ up->ntranslations, up->portmap_index);
+ PLATFORM_DEBUG_PRINT("source ipv4 0x%x, source port 0x%x, vrf %d\n",
+ up->key.k.ipv4,
+ up->key.k.port,
+ up->key.k.vrf);
+ first_db_entry_index = db_entry_index = up->translation_list_head_index;
+ if (first_db_entry_index != EMPTY) {
+ PLATFORM_DEBUG_PRINT("Port translation list:\n");
+        do {
+            total_entries++;
+            db_entry_index = (cnat_main_db + db_entry_index)->user_ports.next;
+        } while (first_db_entry_index != db_entry_index);
+ PLATFORM_DEBUG_PRINT("TOTAL_ENTRIES: %d\n", total_entries);
+ } else {
+ PLATFORM_DEBUG_PRINT("WARNING: empty translation list!\n");
+ }
+ PLATFORM_DEBUG_PRINT("-------------------------\n");
+}
+
+/* for internal development and UT only */
+void cnat_db_dump_main_by_index (int argc, unsigned long *argv)
+{
+ u32 index, i, len;
+ u32 active_count, scan_count;
+
+ if (argc != 1) {
+ PLATFORM_DEBUG_PRINT("invalid input %d\n", argc);
+ return;
+ }
+
+ index = argv[0];
+
+ len = vec_len(cnat_main_db);
+
+ active_count = pool_elts(cnat_main_db);
+
+ if (index >= active_count) {
+ PLATFORM_DEBUG_PRINT("Index %u >= total active entries %u\n", index, active_count);
+ return;
+ }
+
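+    /*
+     * The supplied index counts active (allocated) entries only, so walk
+     * the pool skipping free slots until the index'th live entry is found.
+     */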
+ scan_count = 0;
+ for (i=0; i< len; i++) {
+ if(pool_is_free_index(cnat_main_db, i)) continue;
+
+ if (index == scan_count) {
+ cnat_main_db_entry_dump(cnat_main_db + i);
+ break;
+ }
+ scan_count++;
+ }
+}
+
+void cnat_db_dump_main (int argc, unsigned long *argv)
+{
+ cnat_main_db_entry_t *db;
+
+ pool_foreach(db, cnat_main_db, ({
+ cnat_main_db_entry_dump(db);
+ }));
+}
+
+void cnat_db_dump_main_summary (int argc, unsigned long *argv)
+{
+ cnat_main_db_entry_t *db;
+ u32 num_entries = 0;
+
+ pool_foreach(db, cnat_main_db, ({
+ num_entries++;
+ }));
+
+ PLATFORM_DEBUG_PRINT("\nNum main entries %d\n", num_entries);
+}
+
+void cnat_db_dump_user (int argc, unsigned long *argv)
+{
+ cnat_user_db_entry_t *up;
+
+ pool_foreach(up, cnat_user_db, ({
+ cnat_user_db_entry_dump(up);
+ }));
+}
+
+void cnat_db_dump_user_summary (int argc, unsigned long *argv)
+{
+ cnat_user_db_entry_t *up;
+
+ pool_foreach(up, cnat_user_db, ({
+ cnat_user_db_entry_dump_summary(up);
+ }));
+}
+
+void cnat_db_dump_hashes (int argc, unsigned long *argv)
+{
+ int i;
+
+ PLATFORM_DEBUG_PRINT("Main DB out2in hash:\n");
+ for (i = 0; i < vec_len(cnat_out2in_hash); i++) {
+ if (cnat_out2in_hash[i].next != EMPTY) {
+ PLATFORM_DEBUG_PRINT("[%d]: %u\n", i, cnat_out2in_hash[i].next);
+ }
+ }
+ PLATFORM_DEBUG_PRINT("Main DB in2out hash:\n");
+ for (i = 0; i < vec_len(cnat_in2out_hash); i++) {
+ if (cnat_in2out_hash[i].next != EMPTY) {
+ PLATFORM_DEBUG_PRINT("[%d]: %u\n", i, cnat_in2out_hash[i].next);
+ }
+ }
+
+ PLATFORM_DEBUG_PRINT("User hash:\n");
+ for (i = 0; i < vec_len(cnat_user_hash); i++) {
+ if (cnat_user_hash[i].next != EMPTY) {
+ PLATFORM_DEBUG_PRINT("[%d]: %u\n", i, cnat_user_hash[i].next);
+ }
+ }
+ PLATFORM_DEBUG_PRINT("-------------------------\n");
+}
+
+
+#ifdef OLD_VRFMAP
+
+void cnat_db_dump_cdb (int argc, unsigned long *argv)
+{
+ int k;
+ int verbose=0;
+ int all = 0;
+
+ if (argc > 0) {
+ verbose = 1;
+ }
+
+ if (argc > 1) {
+ all = 1;
+ }
+
+ PLATFORM_DEBUG_PRINT ("%d vrfmap vectors \n", vec_len(cnat_portmap_by_vrf));
+
+ for (k = 0; k < vec_len(cnat_portmap_by_vrf); k++) {
+ PLATFORM_DEBUG_PRINT("index%d: status %d i_vrf 0x%x o_vrf 0x%x\n", k,
+ cnat_portmap_by_vrf[k].status,
+ cnat_portmap_by_vrf[k].i_vrf,
+ cnat_portmap_by_vrf[k].o_vrf);
+ cnat_db_dump_address_portmap(verbose, all,
+ cnat_portmaps[k],
+ cnat_portmaps_inuse[k]);
+ }
+}
+
+void cnat_db_dump_i_vrf (int argc, unsigned long *argv)
+{
+ u32 k;
+ u32 vrf =0;
+ int verbose=0;
+ int all = 0;
+
+ if (!argc) {
+        PLATFORM_DEBUG_PRINT("need vrf input, returning\n");
+ return;
+ }
+
+ if (argc > 0) {
+ vrf = argv[0];
+ }
+
+ if (argc > 1) {
+ verbose = 1;
+ }
+
+ if (argc > 2) {
+ all = 1;
+ }
+
+ PLATFORM_DEBUG_PRINT ("%d vrfmap vectors \n", vec_len(cnat_portmap_by_vrf));
+
+ for (k = 0; k < vec_len(cnat_portmap_by_vrf); k++) {
+ if (cnat_portmap_by_vrf[k].i_vrf == vrf) {
+ PLATFORM_DEBUG_PRINT("%d: i_vrf 0x%x o_vrf 0x%x\n", k,
+ cnat_portmap_by_vrf[k].i_vrf,
+ cnat_portmap_by_vrf[k].o_vrf);
+ cnat_db_dump_address_portmap(verbose, all,
+ cnat_portmaps[k],
+ cnat_portmaps_inuse[k]);
+ return;
+ }
+ }
+ PLATFORM_DEBUG_PRINT("not found\n");
+}
+
+void cnat_db_dump_o_vrf (int argc, unsigned long *argv)
+{
+ u32 k;
+ int verbose=0;
+ int all = 0;
+ u32 vrf =0;
+
+ if (!argc) {
+        PLATFORM_DEBUG_PRINT("need vrf input, returning\n");
+ return;
+ }
+
+ if (argc > 0) {
+ vrf = argv[0];
+ }
+
+ if (argc > 1) {
+ verbose = 1;
+ }
+
+ if (argc > 2) {
+ all = 1;
+ }
+
+ PLATFORM_DEBUG_PRINT ("%d vrfmap vectors \n", vec_len(cnat_portmap_by_vrf));
+
+ for (k = 0; k < vec_len(cnat_portmap_by_vrf); k++) {
+ if (cnat_portmap_by_vrf[k].o_vrf == vrf) {
+ PLATFORM_DEBUG_PRINT("index%d: status %d i_vrf 0x%x o_vrf 0x%x\n", k,
+ cnat_portmap_by_vrf[k].status,
+ cnat_portmap_by_vrf[k].i_vrf,
+ cnat_portmap_by_vrf[k].o_vrf);
+ cnat_db_dump_address_portmap(verbose, all,
+ cnat_portmaps[k],
+ cnat_portmaps_inuse[k]);
+ return;
+ }
+ }
+ PLATFORM_DEBUG_PRINT("not found\n");
+}
+#endif
+
+#ifdef TOBE_PORTED
+/* This does not seem to be used */
+void cnat_db_mem_usage_cmd (int argc, unsigned long *argv)
+{
+ pool_header_t * p;
+ _VEC *_v;
+ u32 bitmap_bytes=0, free_indices_bytes=0, vec_bytes=0, total_bytes=0;
+
+ if (cnat_main_db) {
+ p = pool_header(cnat_main_db);
+ if (p->free_bitmap) {
+ _v = _vec_find(p->free_bitmap);
+ bitmap_bytes = _v->alen;
+ } else {
+ bitmap_bytes = 0;
+ }
+ if (p->free_indices) {
+ _v = _vec_find(p->free_indices);
+ free_indices_bytes = _v->alen;
+ } else {
+ free_indices_bytes = 0;
+ }
+ _v = _vec_find(cnat_main_db);
+ vec_bytes = _v->alen;
+ } else {
+ vec_bytes = 0;
+ }
+
+ total_bytes = bitmap_bytes + free_indices_bytes + vec_bytes;
+
+ PLATFORM_DEBUG_PRINT ("Main DB: %d total bytes, %d bitmap, %d indices, %d vec\n",
+ total_bytes, bitmap_bytes, free_indices_bytes, vec_bytes);
+ PLATFORM_DEBUG_PRINT (" vector length %d\n", vec_len(cnat_main_db));
+
+ if (cnat_user_db) {
+ p = pool_header(cnat_user_db);
+ if (p->free_bitmap) {
+ _v = _vec_find(p->free_bitmap);
+ bitmap_bytes = _v->alen;
+ } else {
+ bitmap_bytes = 0;
+ }
+ if (p->free_indices) {
+ _v = _vec_find(p->free_indices);
+ free_indices_bytes = _v->alen;
+ } else {
+ free_indices_bytes = 0;
+ }
+ _v = _vec_find(cnat_user_db);
+ vec_bytes = _v->alen;
+ } else {
+ vec_bytes = 0;
+ }
+
+ total_bytes = bitmap_bytes + free_indices_bytes + vec_bytes;
+
+ PLATFORM_DEBUG_PRINT ("User DB: %d total bytes, %d bitmap, %d indices, %d vec\n",
+ total_bytes, bitmap_bytes, free_indices_bytes, vec_bytes);
+ PLATFORM_DEBUG_PRINT (" vector length %d\n", vec_len(cnat_user_db));
+
+ _v = _vec_find(cnat_out2in_hash);
+ PLATFORM_DEBUG_PRINT("out2in hash: %d total bytes\n", _v->alen);
+
+ _v = _vec_find(cnat_in2out_hash);
+ PLATFORM_DEBUG_PRINT("in2out hash: %d total bytes\n", _v->alen);
+}
+#endif
+
+static void print_server_ip_address (vlib_main_t *vm, u32 ip)
+{
+ unsigned char bytes[4];
+ bytes[0] = ip & 0xFF;
+ bytes[1] = (ip >> 8) & 0xFF;
+ bytes[2] = (ip >> 16) & 0xFF;
+ bytes[3] = (ip >> 24) & 0xFF;
+ vlib_cli_output(vm, "\tIP Address : %d.%d.%d.%d\n", bytes[0], bytes[1], bytes[2], bytes[3]);
+}
+
+void cnat_nfv9_show_collector (vlib_main_t *vm, cnat_nfv9_logging_info_t *my_nfv9_logging_info)
+{
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ my_nfv9_logging_info->server_index;
+#if 0
+ vlib_cli_output(vm,"\tVRF - 0x%x - %s\n", my_nfv9_logging_info->i_vrf,
+ my_nfv9_logging_info->deleted?"DELETED":"ACTIVE");
+#endif
+ print_server_ip_address(vm, clib_net_to_host_u32(server->ipv4_address));
+ vlib_cli_output(vm,"\tPort : %d\n", server->port);
+ vlib_cli_output(vm,"\tTimeout : %d\n", server->timeout_rate);
+ vlib_cli_output(vm,"\tRefresh Rate : %d\n", server->refresh_rate);
+ vlib_cli_output(vm,"\tMax Pkt Size : %d\n", my_nfv9_logging_info->max_length_minus_max_record_size);
+
+ return;
+}
+
+void cnat_db_dump_policy (int argc, unsigned long *argv)
+{
+
+ PLATFORM_CNAT_DB_DUMP_POLICY_PRINT();
+
+ if (cnat_nfv9_global_info.cnat_nfv9_init_done) {
+ if (cnat_nfv9_global_info.cnat_nfv9_global_collector_index != EMPTY) {
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info;
+ nfv9_server_info_t *server __attribute__((unused));
+
+ my_nfv9_logging_info = cnat_nfv9_logging_info_pool +
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index;
+ server = nfv9_server_info_pool +
+ my_nfv9_logging_info->server_index;
+
+ PLATFORM_DEBUG_PRINT("NFv9 logging ip 0x%x port 0x%x refresh-rate %d timeout %d\n",
+ server->ipv4_address,
+ server->port,
+ server->refresh_rate,
+ server->timeout_rate);
+ PLATFORM_DEBUG_PRINT("NFv9 path_mtu = %d\n",
+ my_nfv9_logging_info->max_length_minus_max_record_size);
+ } else {
+ PLATFORM_DEBUG_PRINT("NFv9 global logging is not configured\n");
+ }
+ } else {
+ PLATFORM_DEBUG_PRINT("NFv9 LOGGING is not configured\n");
+ }
+
+}
+
+#ifdef OLD_VRFMAP
+void cnat_show_cdb (int verbose)
+{
+ int k, l, i;
+ for (i = 0; i < vec_len(cnat_portmap_by_vrf); i++) {
+ PLATFORM_DEBUG_PRINT("i_vrf %d : o_vrf %d\n",
+ cnat_portmap_by_vrf[i].i_vrf,
+ cnat_portmap_by_vrf[i].o_vrf);
+ }
+
+ PLATFORM_DEBUG_PRINT("port limit %d\n", cnat_main_db_max_ports_per_user);
+
+ PLATFORM_DEBUG_PRINT ("%d portmap vectors\n", vec_len(cnat_portmaps));
+
+ for (k = 0; k < vec_len(cnat_portmaps); k++) {
+ cnat_portmap_t *pm;
+ u16 *inuse;
+ pm = cnat_portmaps[k];
+ inuse = cnat_portmaps_inuse[k];
+ for (l = 0; l < vec_len(pm); l++) {
+ if (inuse[l] || verbose ) {
+ u32 net_address;
+ net_address =
+ spp_host_to_net_byte_order_32((pm+l)->ipv4_address);
+ printf_ipv4(net_address);
+ PLATFORM_DEBUG_PRINT (": %d inuse\n", inuse[l]);
+ if (verbose && inuse[l]) {
+ cnat_portmap_dump (pm+l, inuse+l);
+ }
+ }
+ }
+ }
+}
+#endif
+
+
+
+/* v2 show command */
+void cnat_show_address_portmap_sumary (cnat_portmap_v2_t *pm)
+{
+ cnat_portmap_v2_t *my_pm =0;
+ u32 first_address = 0;
+ u32 second_address = 0;
+ u32 last_address = 0;
+ u32 i, pm_len;
+
+ if ((pm_len = vec_len(pm))) {
+ PLATFORM_DEBUG_PRINT("%d portmap in this list 0x%lx\n",
+ pm_len, (u32)pm);
+ for (i = 0; i < pm_len; i++) {
+ my_pm = pm + i;
+ if (!first_address) {
+ first_address = my_pm->ipv4_address;
+ } else if (!second_address) {
+ second_address = my_pm->ipv4_address;
+ }
+ last_address = my_pm->ipv4_address;
+ }
+
+ if (first_address) {
+ PLATFORM_DEBUG_PRINT("1. 0x%08x", first_address);
+ }
+ if (second_address) {
+ PLATFORM_DEBUG_PRINT(", 2. 0x%08x", second_address);
+ }
+
+ if ((last_address != first_address) &&
+ (last_address != second_address)) {
+ PLATFORM_DEBUG_PRINT(", ....., %d. 0x%08x", pm_len, last_address);
+ }
+ PLATFORM_DEBUG_PRINT("\n");
+ } else {
+ PLATFORM_DEBUG_PRINT("ZERO POOL ADDRESSES in this list 0x%x \n", (u32)pm);
+ }
+}
+
+
+void cnat_show_address_portmap (int verbose, int all,
+ cnat_portmap_v2_t *pm, u16 port_limit)
+{
+ cnat_portmap_v2_t *my_pm =0;
+ u32 i, pm_len;
+
+ pm_len = vec_len(pm);
+ if (!all) {
+ cnat_show_address_portmap_sumary(pm);
+ } else {
+ PLATFORM_DEBUG_PRINT("%d portmap in this list 0x%x \n", pm_len, (u32)pm);
+ }
+
+ for (i = 0; i < pm_len; i++) {
+
+ my_pm = pm + i;
+ if (all) {
+ PLATFORM_DEBUG_PRINT("pm:0x%x ip address:0x%x del_time 0x%x inuse:%d\n",
+ (u32)my_pm, my_pm->ipv4_address, my_pm->delete_time, my_pm->inuse);
+ } else if (my_pm->inuse) {
+ PLATFORM_DEBUG_PRINT("pm:0x%x ip address:0x%x inuse:%d\n",
+ (u32)my_pm, my_pm->ipv4_address, my_pm->inuse);
+ }
+
+ if (verbose && (my_pm->inuse)) {
+ if(PREDICT_FALSE(!port_limit)) {
+ cnat_portmap_dump_v2 (my_pm, cnat_main_db_max_ports_per_user);
+ }
+ else {
+ cnat_portmap_dump_v2 (my_pm, port_limit);
+ }
+ }
+ }
+
+ PLATFORM_DEBUG_PRINT("\n");
+}
+
+
+void cnat_show_cdb_v2 (int verbose, int all)
+{
+ cnat_vrfmap_t *my_vrfmap = 0;
+ cnat_portmap_v2_t *pm =0;
+ PLATFORM_DEBUG_PRINT("port limit %d\n", cnat_main_db_max_ports_per_user);
+ PLATFORM_DEBUG_PRINT("total address pool allocated %d\n", total_address_pool_allocated);
+ PLATFORM_DEBUG_PRINT("icmp rate limit %d (per core %d)\n",
+ cnat_main_db_icmp_rate_limit, cnat_main_db_icmp_rate_limit_core);
+ PLATFORM_DEBUG_PRINT("dynamic port range start %d\n", cnat_static_port_range);
+ if (pptp_cfg.enable == PPTP_DISABLED) {
+ PLATFORM_DEBUG_PRINT("PPTP alg disabled \n");
+ } else {
+ PLATFORM_DEBUG_PRINT("PPTP alg enabled \n");
+ }
+
+ if (ftp_alg_enabled) {
+ PLATFORM_DEBUG_PRINT("FTP alg enabled\n");
+ } else {
+ PLATFORM_DEBUG_PRINT("FTP alg disabled\n");
+ }
+
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ CNAT_MY_VRFMAP_PRINT
+ CNAT_MY_LOGGING_INFO_PRINT
+ PLATFORM_DEBUG_PRINT("per vrf port limit %d\n", my_vrfmap->port_limit);
+ pm = my_vrfmap->portmap_list;
+ cnat_show_address_portmap(verbose, all, pm, my_vrfmap->port_limit);
+
+ }));
+}
+
+
+void cnat_show_cdb_command_v2(int argc, unsigned long *argv)
+{
+ int verbose=0;
+ int all = 0;
+
+ if (argc > 0) {
+ verbose = 1;
+ }
+
+ if (argc > 1) {
+ all = 1;
+ }
+
+ cnat_show_cdb_v2(verbose, all);
+}
+
+void cnat_show_ivrf_command_v2 (int argc, unsigned long *argv)
+{
+ u32 vrf =0;
+ int verbose=0;
+ int all = 0;
+ cnat_vrfmap_t *my_vrfmap = 0;
+ cnat_portmap_v2_t *pm =0;
+
+ if (!argc) {
+        PLATFORM_DEBUG_PRINT("need vrf input, returning\n");
+ return;
+ }
+ if (argc > 0) {
+ vrf = argv[0];
+ }
+ if (argc > 1) {
+ verbose = 1;
+ }
+ if (argc > 2) {
+ all = 1;
+ }
+ PLATFORM_DEBUG_PRINT ("%lld vrfmap vectors \n", pool_elts(cnat_map_by_vrf));
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ if (my_vrfmap->i_vrf == vrf) {
+ CNAT_MY_VRFMAP_PRINT
+ pm = my_vrfmap->portmap_list;
+ cnat_show_address_portmap(verbose, all, pm,my_vrfmap->port_limit);
+ return;
+ }
+ }));
+ PLATFORM_DEBUG_PRINT("not found\n");
+}
+
+void cnat_show_ovrf_command_v2 (int argc, unsigned long *argv)
+{
+ u32 not_found =1;
+ u32 vrf =0;
+ int verbose=0;
+ int all = 0;
+ cnat_vrfmap_t *my_vrfmap = 0;
+ cnat_portmap_v2_t *pm =0;
+
+ if (!argc) {
+        PLATFORM_DEBUG_PRINT("need vrf input, returning\n");
+ return;
+ }
+ if (argc > 0) {
+ vrf = argv[0];
+ }
+ if (argc > 1) {
+ verbose = 1;
+ }
+ if (argc > 2) {
+ all = 1;
+ }
+ PLATFORM_DEBUG_PRINT("%d vrfmap vectors \n", pool_elts(cnat_map_by_vrf));
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ if (my_vrfmap->o_vrf == vrf) {
+ CNAT_MY_VRFMAP_PRINT
+ pm = my_vrfmap->portmap_list;
+ cnat_show_address_portmap(verbose, all, pm,my_vrfmap->port_limit);
+ not_found = 0;
+ }
+ }));
+ if (not_found) {
+ PLATFORM_DEBUG_PRINT("not found\n");
+ }
+}
+
+void cnat_timeout_db_entry_dump (cnat_timeout_db_entry_t *up)
+{
+ u32 db_entry_index __attribute__((unused)),
+ first_db_entry_index __attribute__((unused));
+
+ PLATFORM_DEBUG_PRINT("Timeout DB entry at index %ld\n", up - cnat_timeout_db);
+    PLATFORM_DEBUG_PRINT("Dest key 0x%16llx\n", up->t_key.timeout_key.key64);
+ PLATFORM_DEBUG_PRINT("Timeout value %d\n", up->t_key.timeout_value);
+ PLATFORM_DEBUG_PRINT("Hash Next 0x%x\n", up->t_hash.next);
+
+}
+
+void cnat_db_dump_timeout ()
+{
+ cnat_timeout_db_entry_t *up;
+ pool_header_t *h;
+ u32 used __attribute__((unused)), free __attribute__((unused));
+
+ h = pool_header(cnat_timeout_db);
+ free = vec_len(h->free_indices);
+ used = (vec_len(cnat_timeout_db) - free);
+
+ PLATFORM_DEBUG_PRINT("Timeout DB Free %d, Used %d\n",free, used);
+
+ pool_foreach(up, cnat_timeout_db, ({
+ cnat_timeout_db_entry_dump(up);
+ }));
+}
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_show_api.h b/plugins/vcgn-plugin/vcgn/cnat_show_api.h
new file mode 100644
index 00000000000..5904c7e2dd6
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_show_api.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __CNAT_SHOW_API_H__
+#define __CNAT_SHOW_API_H__
+
+typedef struct _spp_api_cnat_v4_show_inside_entry_req {
+ u16 _spp_msg_id;
+ u16 vrf_id;
+ u32 ipv4_addr;
+ u16 start_port;
+ u16 end_port;
+ u8 flags;
+ u8 all_entries;
+ u8 protocol;
+} spp_api_cnat_v4_show_inside_entry_req_t;
+
+typedef struct _spp_api_cnat_v4_show_outside_entry_req {
+ u16 _spp_msg_id;
+ u16 vrf_id;
+ u32 ipv4_addr;
+ u16 start_port;
+ u16 end_port;
+ u8 flags;
+ u8 protocol;
+} spp_api_cnat_v4_show_outside_entry_req_t;
+
+
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/cnat_show_response.h b/plugins/vcgn-plugin/vcgn/cnat_show_response.h
new file mode 100644
index 00000000000..bec1bd97245
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_show_response.h
@@ -0,0 +1,580 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_show_response.h show command response structs
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_SHOW_RESPONSE_H__
+#define __CNAT_SHOW_RESPONSE_H__
+
+/*
+ * Flags indicating the type of translation entry
+ */
+#define CNAT_TRANSLATION_ENTRY_ALL 0x0
+#define CNAT_TRANSLATION_ENTRY_STATIC 0x1
+#define CNAT_TRANSLATION_ENTRY_ALG 0x2
+#define CNAT_TRANSLATION_ENTRY_DYNAMIC 0x4
+
+/* for PCP support */
+#define CNAT_TRANSLATION_ENTRY_PCPI_DYNAMIC 0x08
+#define CNAT_TRANSLATION_ENTRY_PCPE_DYNAMIC 0x10
+
+#define MAX_NODE_NAME_LEN 18
+#define MAX_CTR_NAME_LEN 10
+
+/*
+ * show translation entry response structures
+ */
+typedef struct {
+ u16 call_id;
+ u16 cnat_call_id; /* mapped call Id */
+ u16 dst_call_id; /* dest call id */
+} cnat_v4_show_gre_entry;
+
+typedef struct {
+ u16 msg_id;
+ u16 rc; /* o/p parameter. */
+ u16 num_entries; /* Number of entries sent as output */
+ u16 vrf_id; /* vrf id */
+ u32 pns_ip;
+ cnat_v4_show_gre_entry entries[0];
+} cnat_v4_show_gre_entry_resp;
+
+/*
+ * show translation entry response structures
+ */
+typedef struct {
+ u32 ipv4_addr;
+ u16 src_port;
+ u16 cnat_port; /* port which replaced the src port */
+ u8 protocol;
+ u8 pad;
+ u16 flags;
+ u16 nsessions;
+ u32 in2out_packets;
+ u32 out2in_packets;
+} cnat_v4_show_translation_entry;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc; /* o/p parameter. */
+ u8 num_entries; /* Number of entries sent as output */
+ u16 vrf_id; /* vrf id */
+ u16 pad;
+ cnat_v4_show_translation_entry entries[0];
+} cnat_v4_show_entry_resp;
+
+/*
+ * show free/used ipv4 address/port response structure
+ */
+typedef struct {
+ u32 ip_addr;
+ u32 free_used_ports;
+} cnat_v4_show_free_used_entry;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 count;
+ u32 max_ports;
+ cnat_v4_show_free_used_entry entry_list[0];
+} cnat_v4_show_free_used_entry_resp;
+
+/*
+ * Node name to id mapping
+ */
+typedef struct {
+ u8 node_id;
+ u8 pad;
+ char node_name[MAX_NODE_NAME_LEN];
+} cnat_statistics_node_name;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 num_nodes;
+ cnat_statistics_node_name node_name_array[0];
+} cnat_statistics_node_name_mapping_resp;
+
+/*
+ * Counter name to id mapping
+ */
+typedef struct {
+ u8 node_id;
+ u8 counter_id;
+ char counter_name[MAX_CTR_NAME_LEN];
+} cnat_statistics_counter_name;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 num_counters;
+ cnat_statistics_counter_name counter_name_array[0];
+} cnat_statistics_counter_name_mapping_resp;
+
+
+/*
+ * Node name to id mapping
+ */
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 num_nodes;
+ u32 pad;
+ u64 counters [0];
+} cnat_statistics_counter_values;
+
+/*
+ * Summary Stats
+ */
+typedef struct {
+ u32 eaddr;
+ u32 ports_used;
+} pool_address_usage_t;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad;
+ u16 max_pkt_size;
+ u16 pool_address_copied;
+ u32 active_translations;
+ u32 translation_create_rate;
+ u32 translation_delete_rate;
+ u32 in2out_forwarding_rate;
+ u32 out2in_forwarding_rate;
+ u32 dummy;
+ u64 in2out_drops_port_limit_exceeded;
+ u64 in2out_drops_system_limit_reached;
+ u64 in2out_drops_resource_depletion;
+ u64 no_translation_entry_drops;
+ u64 pptp_active_tunnels;
+ u64 pptp_active_channels;
+ u64 pptp_ctrlmsg_drops;
+ u32 no_sessions;
+
+ u32 pool_address_totally_free;
+    u32 pool_address_used; /* The size of the following array will be the
+                              lesser of pool_address_used and 200 */
+ u32 num_subscribers;
+ u64 drops_sessiondb_limit_exceeded;
+ u64 in2out_drops_src_ip_no_config; // for deterministic nat on brahmos
+ pool_address_usage_t pool_address_usage[0];
+} cnat_show_statistics_summary_resp;
+
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad;
+ u64 o2i_tcp_seq_mismatch_drop;
+ u64 o2i_tcp_seq_mismatch;
+ u64 o2i_sessions_created;
+ u64 o2i_end_point_filter_drop;
+} cnat_show_counters_summary_resp;
+
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad;
+
+ /*
+ * XLAT statistics
+ */
+ u64 v6_to_v4_tcp_input_count;
+ u64 v6_to_v4_tcp_non_translatable_drop_count;
+ u64 v6_to_v4_tcp_invalid_next_hdr_drop_count;
+ u64 v6_to_v4_tcp_no_db_drop_count;
+ u64 v6_to_v4_tcp_output_count;
+
+ u64 v4_to_v6_tcp_input_count;
+ u64 v4_to_v6_tcp_no_db_drop_count;
+ u64 v4_to_v6_tcp_output_count;
+
+ u64 v6_to_v4_udp_input_count;
+ u64 v6_to_v4_udp_non_translatable_drop_count;
+ u64 v6_to_v4_udp_invalid_next_hdr_drop_count;
+ u64 v6_to_v4_udp_no_db_drop_count;
+ u64 v6_to_v4_udp_output_count;
+
+ u64 v4_to_v6_udp_input_count;
+ u64 v4_to_v6_udp_no_db_drop_count;
+ u64 v4_to_v6_udp_output_count;
+ u64 v4_to_v6_udp_frag_crc_zero_drop_count;
+ u64 v4_to_v6_udp_crc_zero_recycle_sent_count;
+ u64 v4_to_v6_udp_crc_zero_recycle_drop_count;
+
+ u64 v6_to_v4_icmp_qry_input_count;
+ u64 v6_to_v4_icmp_no_db_drop_count;
+ u64 v6_to_v4_icmp_frag_drop_count;
+ u64 v6_to_v4_icmp_invalid_next_hdr_drop_count;
+ u64 v6_to_v4_icmp_non_translatable_drop_count;
+ u64 v6_to_v4_icmp_non_translatable_fwd_count;
+ u64 v6_to_v4_icmp_unsupported_type_drop_count;
+ u64 v6_to_v4_icmp_err_output_count;
+ u64 v6_to_v4_icmp_qry_output_count;
+
+ u64 v4_to_v6_icmp_qry_input_count;
+ u64 v4_to_v6_icmp_no_db_drop_count;
+ u64 v4_to_v6_icmp_frag_drop_count;
+ u64 v4_to_v6_icmp_unsupported_type_drop_count;
+ u64 v4_to_v6_icmp_err_output_count;
+ u64 v4_to_v6_icmp_qry_output_count;
+
+ u64 v6_to_v4_subsequent_frag_input_count;
+ u64 v6_to_v4_subsequent_frag_non_translatable_drop_count;
+ u64 v6_to_v4_subsequent_frag_invalid_next_hdr_drop_count;
+ u64 v6_to_v4_subsequent_frag_no_db_drop_count;
+ u64 v6_to_v4_subsequent_frag_output_count;
+
+ u64 v4_to_v6_subsequent_frag_input_count;
+ u64 v4_to_v6_subsequent_frag_no_db_drop_count;
+ u64 v4_to_v6_subsequent_frag_output_count;
+
+ u64 v4_to_v6_subsequent_frag_drop_count;
+ u64 v4_to_v6_subsequent_frag_throttled_count;
+ u64 v4_to_v6_subsequent_frag_timeout_drop_count;
+ u64 v4_to_v6_subsequent_frag_tcp_input_count;
+ u64 v4_to_v6_subsequent_frag_udp_input_count;
+ u64 v4_to_v6_subsequent_frag_icmp_input_count;
+
+ u64 v6_to_v4_options_input_count;
+ u64 v6_to_v4_options_drop_count;
+ u64 v6_to_v4_options_forward_count;
+ u64 v6_to_v4_options_no_db_drop_count;
+ u64 v6_to_v4_unsupp_proto_count;
+
+ u64 v4_to_v6_options_input_count;
+ u64 v4_to_v6_options_drop_count;
+ u64 v4_to_v6_options_forward_count;
+ u64 v4_to_v6_options_no_db_drop_count;
+ u64 v4_to_v6_unsupp_proto_count;
+
+ u64 v4_icmp_gen_count;
+ u64 v6_icmp_gen_count;
+} xlat_show_statistics_summary_resp;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad;
+ /* Total v4 packets to BR */
+ u64 v4_to_v6_input_total_count;
+ /* Total v4 tunneled packets to BR */
+ u64 v4_to_v6_41_input_total_count;
+    /* proto 41 packets without the minimum v6 payload of 40 bytes */
+ u64 v4_to_v6_41_insuff_v6payld_count;
+ /* total proto 41 packets being considered for decap */
+ u64 v4_to_v6_41_valid_count;
+ /* proto 41 packets that failed security check*/
+ u64 v4_to_v6_41_sec_check_fail_count;
+ /* packets with no active db entry */
+ u64 v4_to_v6_no_db_drop_count;
+ /* proto 41 packets actually getting decapped */
+ u64 v4_to_v6_41_decap_count;
+ /* total v4 packets which are neither icmp nor 41 */
+ u64 v4_to_v6_unsupported_protocol_count;
+ /* v4 tunneled packets with invalid v6 source address */
+ u64 v4_to_v6_41_invalid_v6_source;
+ /* total icmpv4 packets destined to BR */
+ u64 v4_forus_icmp_input_count;
+ /* total icmpv4 echo replies by BR */
+ u64 v4_icmp_reply_count;
+ /* total icmpv4 error messages translated to icmpv6 by BR */
+ u64 v4_to_v6_icmp_translation_count;
+ /* total packets with icmpv4 type/code which are not supported by BR */
+ u64 v4_icmp_unsupported_count;
+ /* total icmpv4 packets which are rate-limited by BR */
+ u64 v4_icmp_throttled_count;
+ /* total ICMPv4 error messages which could not be translated */
+ u64 v4_icmp_non_translatable_drop_count;
+
+ /* ipv4 defrag stats */
+ u64 v4_to_v6_frag_input_count;
+ u64 v4_to_v6_frag_sec_check_fail_count;
+ u64 v4_to_v6_frag_reassem_count;
+ u64 v4_to_v6_frag_timeout_drop_count;
+ u64 v4_to_v6_frag_icmp_input_count;
+ u64 v4_to_v6_frag_41_insuff_v6payld_count;
+ u64 v4_to_v6_frag_no_db_drop_count;
+ u64 v4_to_v6_frag_unsupported_protocol_count;
+ u64 v4_to_v6_frag_41_invalid_v6_source;
+ u64 v4_to_v6_frag_throttled_count;
+ u64 v4_to_v6_frag_dup_count;
+ u64 v4_to_v6_frag_reassem_frag_count;
+ u64 v4_to_v6_frag_disable_count;
+ u64 v4_to_v6_frag_drop_count;
+
+ /* total v6 packets input to BR */
+ u64 v6_to_v4_total_input_count;
+ /* v6 packets with no active db entry */
+ u64 v6_to_v4_no_db_drop_count;
+ /* forus v6 packets with next header other than icmpv6 */
+ u64 v6_to_v4_forus_unsupp_proto_count;
+ /* total v6 packets that got tunneled */
+ u64 v6_to_v4_encap_count;
+ /* total icmpv6 packets destined to BR */
+ u64 v6_forus_icmp_input_count;
+ /* total icmpv6 echo replies by BR */
+ u64 v6_icmp_reply_count;
+ /* total icmpv6 PTB messages generated by BR */
+ u64 v6_ptb_generated_count;
+ /* total ipv6 packets for which PTBv6 was NOT generated by BR */
+ u64 v6_ptb_not_generated_drop_count;
+ /* total icmpv6 Neighbor Advertisements generated by BR */
+ u64 v6_na_generated_count;
+ /* total icmpv6 TTL expiry messages generated by BR */
+ u64 v6_ttl_expiry_generated_count;
+ /* total ICMPv6 fragments, which are dropped by BR */
+ u64 v6_to_v4_frag_icmp_input_count;
+ /* total packets with icmpv6 type/code which are not supported by BR */
+ u64 v6_icmp_unsupported_count;
+ /* total icmpv6 packets which are rate-limited by BR */
+ u64 v6_icmp_throttled_count;
+} v6rd_show_statistics_summary_resp;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad;
+
+ /* Total Incoming Count */
+ u64 v4_input_count;
+ /* Total Drop Count */
+ u64 v4_drop_count;
+ /* Total Output Count */
+ u64 v4_to_v6_output_count;
+ /* TCP Incoming Count */
+ u64 v4_tcp_input_count;
+ /* TCP Output Count */
+ u64 v4_tcp_output_count;
+ /* UDP Incoming Count */
+ u64 v4_udp_input_count;
+ /* UDP Output Count */
+ u64 v4_udp_output_count;
+ /* ICMPv4 Incoming Count */
+ u64 v4_icmp_input_count;
+ /* ICMPv4 Output Count */
+ u64 v4_to_v6_icmp_output_count;
+ /* Invalid UIDB Drop Count */
+ u64 v4_invalid_uidb_drop_count;
+ /* NoDb Drop Count */
+ u64 v4_no_db_drop_count;
+ /* TTL Expire Drop Count */
+ u64 v4_ttl_expire_drop_count;
+ /* Invalid IP Destination Drop Count */
+ u64 v4_invalid_destination_prefix_drop_count;
+    /* Packets Exceeding Path MTU Drop Count */
+ u64 v4_path_mtu_exceed_count;
+ /* Unsupported Protocol Drop Count */
+ u64 v4_invalid_proto_type_drop_count;
+ /* ICMPv4 Generated for TTL Expire Count */
+ u64 v4_ttl_expiry_generated_count;
+ /* ICMPv4 Generated for Error Count */
+ u64 v4_icmp_error_gen_count;
+ /* ICMPv4 Packets Rate-Limited Count */
+ u64 v4_icmp_throttled_drop_count;
+ /* TCP MSS Changed Count */
+ u64 v4_tcp_mss_changed_count;
+
+ /* Total Incoming Count */
+ u64 v6_input_count;
+ /* Total Drop Count */
+ u64 v6_drop_count;
+ /* Total Output Count */
+ u64 v6_to_v4_output_count;
+ /* TCP Incoming Count */
+ u64 v6_tcp_input_count;
+ /* TCP Output Count */
+ u64 v6_tcp_output_count;
+ /* UDP Incoming Count */
+ u64 v6_udp_input_count;
+ /* UDP Output Count */
+ u64 v6_udp_output_count;
+ /* ICMPv4 Incoming Count */
+ u64 v6_icmpv4_input_count;
+ /* ICMPv4 Output Count */
+ u64 v6_icmpv4_output_count;
+ /* Invalid UIDB Drop Count */
+ u64 v6_invalid_uidb_drop_count;
+ /* NoDb Drop Count */
+ u64 v6_no_db_drop_count;
+ /* TTL Expire Drop Count */
+ u64 v6_ttl_expire_drop_count;
+ /* Invalid IPv6 Destination Drop Count */
+ u64 v6_invalid_destination_drop_count;
+ /* Invalid Source Prefix Drop Count */
+ u64 v6_invalid_source_prefix_drop_count;
+ /* Unsupported Protocol Drop Count */
+ u64 v6_invalid_proto_type_drop_count;
+ /* ICMPv6 Input Count */
+ u64 v6_icmp_input_count;
+ /* ICMPv6 Invalid UIDB Drop Count */
+ u64 v6_icmp_invalid_uidb_drop_count;
+ /* ICMPv6 NoDb Drop Count */
+ u64 v6_icmp_no_db_drop_count;
+ /* ICMPv6 TTL Expire Drop Count */
+ u64 v6_icmp_ttl_expire_drop_count;
+ /* ICMPv6 Invalid IPv6 Destination Drop Count */
+ u64 v6_icmp_invalid_destination_drop_count;
+ /* ICMPv6 Unsupported Type Drop Count */
+ u64 v6_icmp_unsupported_type_drop_count;
+ /* ICMPv6 Invalid NxtHdr Drop Count*/
+ u64 v6_icmp_unsupported_nxthdr_drop_count;
+ /* ICMPv6 Frag Drop Count */
+ u64 v6_icmp_frag_drop_count;
+ /* ICMPv6 Forus Count */
+ u64 v6_forus_icmp_input_count;
+ /* ICMPv6 Echo Response Received Count */
+ u64 v6_received_echo_response_count;
+ /* ICMPv6 Echo Replies Count */
+ u64 v6_echo_reply_count;
+ /* ICMPv6 Translated to ICMPV4 Output Count*/
+ u64 v6_to_v4_icmp_output_count;
+ /* ICMPv6 Generated for TTL Expire Count */
+ u64 v6_ttl_expiry_generated_count;
+ /* ICMPv6 Generated for Error Count */
+ u64 v6_icmp_error_gen_count;
+ /* ICMPv6 Packets Rate-Limited Count */
+ u64 v6_icmp_throttled_drop_count;
+ /* TCP MSS Changed Count */
+ u64 v6_tcp_mss_changed_count;
+
+ /*Total Input Count*/
+ u64 v4_to_v6_frag_input_count;
+ /*Total Drop Count*/
+ u64 v4_to_v6_frag_drop_count;
+ /*Reassembled Output Count*/
+ u64 v4_to_v6_frag_reassem_count;
+
+ /*TCP Input Count*/
+ u64 v4_to_v6_frag_tcp_input_count;
+ /*UDP Input Count*/
+ u64 v4_to_v6_frag_udp_input_count;
+ /*ICMPv4 Input Count*/
+ u64 v4_to_v6_frag_icmp_input_count;
+
+ /*Invalid UIDB Drop Count */
+ u64 v4_to_v6_frag_invalid_uidb_drop_count;
+ /*NoDb Drop Count*/
+ u64 v4_to_v6_frag_no_db_drop_count;
+ /*Unsupported Protocol Drop Count*/
+ u64 v4_to_v6_frag_invalid_proto_type_drop_count;
+ /*Throttled Count*/
+ u64 v4_to_v6_frag_throttled_count;
+ /*Timeout Drop Count*/
+ u64 v4_to_v6_frag_timeout_drop_count;
+ /*Duplicates Drop Count*/
+ u64 v4_to_v6_frag_dup_count;
+
+ /*Total Input Count*/
+ u64 v6_to_v4_inner_frag_input_count;
+ /*Total Drop Count*/
+ u64 v6_to_v4_inner_frag_drop_count;
+ /*Total Output Count*/
+ u64 v6_to_v4_inner_frag_output_count;
+
+ /*TCP Input Count*/
+ u64 v6_to_v4_inner_frag_tcp_input_count;
+ /*UDP Input Count*/
+ u64 v6_to_v4_inner_frag_udp_input_count;
+ /*ICMPv4 Input Count*/
+ u64 v6_to_v4_inner_frag_icmp_input_count;
+
+ /*Invalid Source Prefix Drop Count*/
+ u64 v6_to_v4_inner_frag_invalid_source_prefix_drop_count;
+ /*Unsupported Protocol Drop Count*/
+ u64 v6_to_v4_inner_frag_invalid_proto_type_drop_count;
+ /*Throttled Count*/
+ u64 v6_to_v4_inner_frag_throttled_count;
+ /*Timeout Drop Count*/
+ u64 v6_to_v4_inner_frag_timeout_drop_count;
+ /*Duplicates Drop Count*/
+ u64 v6_to_v4_inner_frag_dup_count;
+
+ /*ICMPv6 Generated for Error Count */
+ u64 v6_to_v4_inner_frag_icmp_error_gen_count;
+ /*ICMPv6 Packets Rate-Limited Count */
+ u64 v6_to_v4_inner_frag_icmp_throttled_drop_count;
+
+ /*TCP MSS Changed Count */
+ u64 v6_to_v4_inner_frag_tcp_mss_changed_count;
+
+} mape_show_statistics_summary_resp;
+
+/*
+ * The following are the command types for Generic Command cases
+ */
+#define CNAT_DEBUG_GENERIC_COMMAND_READ_MEM 1
+#define CNAT_DEBUG_GENERIC_COMMAND_WRITE_MEM 2
+#define CNAT_DEBUG_GENERIC_COMMAND_DB_SUMMARY 3
+#define CNAT_DEBUG_GENERIC_COMMAND_USER_DB_PM 4
+#define CNAT_DEBUG_GET_CGN_DB_SUMMARY 5
+
+typedef enum {
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_POLICY,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_MAIN_DB,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_USER_DB,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_HASHES_DB,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_VRF_MAP,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_SUMMARY_DB,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_STATS,
+ CNAT_DEBUG_GENERIC_COMMAND_CLEAR_STATS,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_NODE_COUNTER,
+ CNAT_DEBUG_GENERIC_COMMAND_CLEAR_NODE_COUNTER,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_CNAT_COUNTER,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_VA,
+ CNAT_DEBUG_GENERIC_COMMAND_SHOW_CONFIG,
+ CNAT_DEBUG_GENERIC_COMMAND_SHOW_NFV9,
+ CNAT_DEBUG_GENERIC_COMMAND_SHOW_IVRF,
+ CNAT_DEBUG_GENERIC_COMMAND_SHOW_OVRF,
+ CNAT_DEBUG_SPP_LOG,
+ CNAT_DEBUG_GENERIC_COMMAND_DEBUG_OPTIONS,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_DEBUG_LEVELS,
+ CNAT_DEBUG_GENERIC_COMMAND_DEBUG_FLAGS,
+ CNAT_READ_TEMP_SENSORS,
+ CNAT_BLOCK_OCTEON_SENSOR_READ,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_MAIN_DB_SUMMARY,
+ CNAT_DEBUG_GENERIC_COMMAND_DUMP_USER_DB_SUMMARY,
+ CNAT_DEBUG_DUMP_6RD_STATS,
+ CNAT_DEBUG_TIMEOUT_DB_SUMMARY,
+ CNAT_NAT64_STFUL_DEBUG_COMMAND,
+ CNAT_DEBUG_SET_BULK_SIZE,
+ CNAT_DEBUG_SHOW_BULK_STAT,
+ CNAT_DEBUG_CLEAR_BULK_STAT,
+ CNAT_DEBUG_SHOW_BULK_ALLOC,
+ CNAT_DEBUG_NAT64,
+ CNAT_DEBUG_NAT44_IN2OUT_FRAG_STATS,
+} cnat_debug_dump_type_t;
+
+typedef enum {
+ CNAT_DEBUG_FLAG_UDP_INSIDE_CHECKSUM_MODIFY,
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_CHECKSUM_MODIFY,
+ CNAT_DEBUG_FLAG_UDP_INSIDE_PACKET_DUMP,
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_PACKET_DUMP,
+} cnat_debug_flag_type_t;
+
+typedef struct {
+ u16 spp_msg_id;
+ u8 rc;
+ u8 core;
+ u32 num_bytes;
+ u8 raw_data[0];
+} cnat_generic_command_resp;
+
+extern u32 db_free_entry (void * p);
+#endif /*__CNAT_SHOW_RESPONSE_H__*/
diff --git a/plugins/vcgn-plugin/vcgn/cnat_syslog.c b/plugins/vcgn-plugin/vcgn/cnat_syslog.c
new file mode 100644
index 00000000000..91758f141a9
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_syslog.c
@@ -0,0 +1,1787 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_syslog.c
+ *
+ * Copyright (c) 2011-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <arpa/inet.h>
+#include "cnat_syslog.h"
+#include "platform_common.h"
+#include "cnat_db.h"
+#include "cnat_log_common.h"
+#include <vppinfra/pool.h>
+
+#define SYSLOG_DELIMITER ' '
+#define SYSLOG_FIELD_ABSENT '-'
+/* #define SHOW_SYSLOG_TIMESTAMP 1 TO DO. Remove this later */
+/*
+ * Defining the macro below here for now. The assumption is that syslog
+ * packets are sent out via the same channel as NFV9 packets.
+ * This has to be overridden if that assumption does not hold.
+ */
+#define PLATFORM_SYSLOG_DISP_NODE_IDX PLATFORM_NFV9_DISP_NODE_IDX
+
+cnat_syslog_global_info_t cnat_syslog_global_info;
+cnat_syslog_logging_info_t *cnat_syslog_logging_info_pool;
+cnat_syslog_global_counters_t cnat_syslog_global_counter;
+extern u32 syslog_debug_flag;
+
+#define CNAT_SYSLOG_DEBUG_CODE 2
+
+#if CNAT_SYSLOG_DEBUG_CODE > 3
+#define SYSLOG_COND if(my_instance_number == 0)
+
+#define SYSLOG_DEBUG_PRINTF1(a) SYSLOG_COND printf(a);
+#define SYSLOG_DEBUG_PRINTF2(a, b) SYSLOG_COND printf(a, b);
+#define SYSLOG_DEBUG_PRINTF3(a, b, c) SYSLOG_COND printf(a, b, c);
+#define SYSLOG_DEBUG_PRINTF4(a, b, c, d) SYSLOG_COND printf(a, b, c, d);
+
+#else
+
+#define SYSLOG_DEBUG_PRINTF1(a)
+#define SYSLOG_DEBUG_PRINTF2(a, b)
+#define SYSLOG_DEBUG_PRINTF3(a, b, c)
+#define SYSLOG_DEBUG_PRINTF4(a, b, c, d)
+
+#endif
+
+
+void syslog_params_show(u32 logging_index)
+{
+ cnat_syslog_logging_info_t *log_info __attribute__((unused));
+ if(logging_index == EMPTY) {
+ PLATFORM_DEBUG_PRINT("\nSyslog logging not configured\n");
+ return;
+ }
+
+ log_info = cnat_syslog_logging_info_pool + logging_index;
+
+ PLATFORM_DEBUG_PRINT("\nSyslog parameters --\n");
+ PLATFORM_DEBUG_PRINT("IPV4 address: %x, port %d, max log size %d\n",
+ log_info->ipv4_address,
+ log_info->port, log_info->max_length_minus_max_record_size);
+ PLATFORM_DEBUG_PRINT("Host name: %s, priority %d",
+ log_info->header_hostname, log_info->header_priority);
+
+}
+
+/* Utility function to copy a number as ASCII into a buffer
+ * quickly (should be faster than sprintf)
+ */
+
+const unsigned char ascii_numbers[][3] =
+ { {'0', '0', '0'},
+ {'1', '0', '0'},
+ {'2', '0', '0'},
+ {'3', '0', '0'},
+ {'4', '0', '0'},
+ {'5', '0', '0'},
+ {'6', '0', '0'},
+ {'7', '0', '0'},
+ {'8', '0', '0'},
+ {'9', '0', '0'},
+ {'1', '0', '0'},
+ {'1', '1', '0'},
+ {'1', '2', '0'},
+ {'1', '3', '0'},
+ {'1', '4', '0'},
+ {'1', '5', '0'},
+ {'1', '6', '0'},
+ {'1', '7', '0'},
+ {'1', '8', '0'},
+ {'1', '9', '0'},
+ {'2', '0', '0'},
+ {'2', '1', '0'},
+ {'2', '2', '0'},
+ {'2', '3', '0'},
+ {'2', '4', '0'},
+ {'2', '5', '0'},
+ {'2', '6', '0'},
+ {'2', '7', '0'},
+ {'2', '8', '0'},
+ {'2', '9', '0'},
+ {'3', '0', '0'},
+ {'3', '1', '0'},
+ {'3', '2', '0'},
+ {'3', '3', '0'},
+ {'3', '4', '0'},
+ {'3', '5', '0'},
+ {'3', '6', '0'},
+ {'3', '7', '0'},
+ {'3', '8', '0'},
+ {'3', '9', '0'},
+ {'4', '0', '0'},
+ {'4', '1', '0'},
+ {'4', '2', '0'},
+ {'4', '3', '0'},
+ {'4', '4', '0'},
+ {'4', '5', '0'},
+ {'4', '6', '0'},
+ {'4', '7', '0'},
+ {'4', '8', '0'},
+ {'4', '9', '0'},
+ {'5', '0', '0'},
+ {'5', '1', '0'},
+ {'5', '2', '0'},
+ {'5', '3', '0'},
+ {'5', '4', '0'},
+ {'5', '5', '0'},
+ {'5', '6', '0'},
+ {'5', '7', '0'},
+ {'5', '8', '0'},
+ {'5', '9', '0'},
+ {'6', '0', '0'},
+ {'6', '1', '0'},
+ {'6', '2', '0'},
+ {'6', '3', '0'},
+ {'6', '4', '0'},
+ {'6', '5', '0'},
+ {'6', '6', '0'},
+ {'6', '7', '0'},
+ {'6', '8', '0'},
+ {'6', '9', '0'},
+ {'7', '0', '0'},
+ {'7', '1', '0'},
+ {'7', '2', '0'},
+ {'7', '3', '0'},
+ {'7', '4', '0'},
+ {'7', '5', '0'},
+ {'7', '6', '0'},
+ {'7', '7', '0'},
+ {'7', '8', '0'},
+ {'7', '9', '0'},
+ {'8', '0', '0'},
+ {'8', '1', '0'},
+ {'8', '2', '0'},
+ {'8', '3', '0'},
+ {'8', '4', '0'},
+ {'8', '5', '0'},
+ {'8', '6', '0'},
+ {'8', '7', '0'},
+ {'8', '8', '0'},
+ {'8', '9', '0'},
+ {'9', '0', '0'},
+ {'9', '1', '0'},
+ {'9', '2', '0'},
+ {'9', '3', '0'},
+ {'9', '4', '0'},
+ {'9', '5', '0'},
+ {'9', '6', '0'},
+ {'9', '7', '0'},
+ {'9', '8', '0'},
+ {'9', '9', '0'},
+ {'1', '0', '0'},
+ {'1', '0', '1'},
+ {'1', '0', '2'},
+ {'1', '0', '3'},
+ {'1', '0', '4'},
+ {'1', '0', '5'},
+ {'1', '0', '6'},
+ {'1', '0', '7'},
+ {'1', '0', '8'},
+ {'1', '0', '9'},
+ {'1', '1', '0'},
+ {'1', '1', '1'},
+ {'1', '1', '2'},
+ {'1', '1', '3'},
+ {'1', '1', '4'},
+ {'1', '1', '5'},
+ {'1', '1', '6'},
+ {'1', '1', '7'},
+ {'1', '1', '8'},
+ {'1', '1', '9'},
+ {'1', '2', '0'},
+ {'1', '2', '1'},
+ {'1', '2', '2'},
+ {'1', '2', '3'},
+ {'1', '2', '4'},
+ {'1', '2', '5'},
+ {'1', '2', '6'},
+ {'1', '2', '7'},
+ {'1', '2', '8'},
+ {'1', '2', '9'},
+ {'1', '3', '0'},
+ {'1', '3', '1'},
+ {'1', '3', '2'},
+ {'1', '3', '3'},
+ {'1', '3', '4'},
+ {'1', '3', '5'},
+ {'1', '3', '6'},
+ {'1', '3', '7'},
+ {'1', '3', '8'},
+ {'1', '3', '9'},
+ {'1', '4', '0'},
+ {'1', '4', '1'},
+ {'1', '4', '2'},
+ {'1', '4', '3'},
+ {'1', '4', '4'},
+ {'1', '4', '5'},
+ {'1', '4', '6'},
+ {'1', '4', '7'},
+ {'1', '4', '8'},
+ {'1', '4', '9'},
+ {'1', '5', '0'},
+ {'1', '5', '1'},
+ {'1', '5', '2'},
+ {'1', '5', '3'},
+ {'1', '5', '4'},
+ {'1', '5', '5'},
+ {'1', '5', '6'},
+ {'1', '5', '7'},
+ {'1', '5', '8'},
+ {'1', '5', '9'},
+ {'1', '6', '0'},
+ {'1', '6', '1'},
+ {'1', '6', '2'},
+ {'1', '6', '3'},
+ {'1', '6', '4'},
+ {'1', '6', '5'},
+ {'1', '6', '6'},
+ {'1', '6', '7'},
+ {'1', '6', '8'},
+ {'1', '6', '9'},
+ {'1', '7', '0'},
+ {'1', '7', '1'},
+ {'1', '7', '2'},
+ {'1', '7', '3'},
+ {'1', '7', '4'},
+ {'1', '7', '5'},
+ {'1', '7', '6'},
+ {'1', '7', '7'},
+ {'1', '7', '8'},
+ {'1', '7', '9'},
+ {'1', '8', '0'},
+ {'1', '8', '1'},
+ {'1', '8', '2'},
+ {'1', '8', '3'},
+ {'1', '8', '4'},
+ {'1', '8', '5'},
+ {'1', '8', '6'},
+ {'1', '8', '7'},
+ {'1', '8', '8'},
+ {'1', '8', '9'},
+ {'1', '9', '0'},
+ {'1', '9', '1'},
+ {'1', '9', '2'},
+ {'1', '9', '3'},
+ {'1', '9', '4'},
+ {'1', '9', '5'},
+ {'1', '9', '6'},
+ {'1', '9', '7'},
+ {'1', '9', '8'},
+ {'1', '9', '9'},
+ {'2', '0', '0'},
+ {'2', '0', '1'},
+ {'2', '0', '2'},
+ {'2', '0', '3'},
+ {'2', '0', '4'},
+ {'2', '0', '5'},
+ {'2', '0', '6'},
+ {'2', '0', '7'},
+ {'2', '0', '8'},
+ {'2', '0', '9'},
+ {'2', '1', '0'},
+ {'2', '1', '1'},
+ {'2', '1', '2'},
+ {'2', '1', '3'},
+ {'2', '1', '4'},
+ {'2', '1', '5'},
+ {'2', '1', '6'},
+ {'2', '1', '7'},
+ {'2', '1', '8'},
+ {'2', '1', '9'},
+ {'2', '2', '0'},
+ {'2', '2', '1'},
+ {'2', '2', '2'},
+ {'2', '2', '3'},
+ {'2', '2', '4'},
+ {'2', '2', '5'},
+ {'2', '2', '6'},
+ {'2', '2', '7'},
+ {'2', '2', '8'},
+ {'2', '2', '9'},
+ {'2', '3', '0'},
+ {'2', '3', '1'},
+ {'2', '3', '2'},
+ {'2', '3', '3'},
+ {'2', '3', '4'},
+ {'2', '3', '5'},
+ {'2', '3', '6'},
+ {'2', '3', '7'},
+ {'2', '3', '8'},
+ {'2', '3', '9'},
+ {'2', '4', '0'},
+ {'2', '4', '1'},
+ {'2', '4', '2'},
+ {'2', '4', '3'},
+ {'2', '4', '4'},
+ {'2', '4', '5'},
+ {'2', '4', '6'},
+ {'2', '4', '7'},
+ {'2', '4', '8'},
+ {'2', '4', '9'},
+ {'2', '5', '0'},
+ {'2', '5', '1'},
+ {'2', '5', '2'},
+ {'2', '5', '3'},
+ {'2', '5', '4'},
+ {'2', '5', '5'}
+ };
+
+inline static int
+byte_to_ascii_decimal_unaligned(
+ unsigned char *ptr, unsigned char num)
+{
+ *ptr++ = ascii_numbers[num][0];
+ if(PREDICT_FALSE(num < 10)) {
+ return 1;
+ }
+ *ptr++ = ascii_numbers[num][1];
+ if(PREDICT_FALSE(num < 100)) {
+ return 2;
+ }
+ *ptr++ = ascii_numbers[num][2];
+ return 3;
+}
+
+/* Copies the dotted decimal format of ipv4
+ * in to the space provided and
+ * returns the number of bytes copied
+ */
+inline static int __attribute__((unused))
+copy_ipv4_addr(unsigned char *ptr, u32 ipv4)
+{
+ unsigned char *temp = ptr;
+ temp += byte_to_ascii_decimal_unaligned(temp, (ipv4 >> 24));
+ *temp++ = '.';
+ temp += byte_to_ascii_decimal_unaligned(temp, ((ipv4 >> 16) & 0xFF));
+ *temp++ = '.';
+ temp += byte_to_ascii_decimal_unaligned(temp, ((ipv4 >> 8) & 0xFF));
+ *temp++ = '.';
+ temp += byte_to_ascii_decimal_unaligned(temp, (ipv4 & 0xFF));
+
+ return (temp - ptr);
+}
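+
+/*
+ * Illustrative usage sketch (added for clarity; not part of the original
+ * patch -- the function name and buffer below are hypothetical). It shows
+ * how callers are expected to use copy_ipv4_addr(): the helper writes the
+ * dotted-decimal text without a trailing NUL and returns the byte count,
+ * so the caller advances its write pointer by the return value.
+ */
+#if 0 /* example only, never compiled */
+static void copy_ipv4_addr_example (void)
+{
+    unsigned char buf[INET_ADDRSTRLEN];
+    /* 0xC0A80101 is 192.168.1.1 in the host-order form expected here */
+    int len = copy_ipv4_addr(buf, 0xC0A80101);
+    buf[len] = 0;                           /* NUL-terminate for printing */
+    printf("%s (%d bytes)\n", buf, len);    /* prints "192.168.1.1 (11 bytes)" */
+}
+#endif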
+
+#ifdef TOBE_PORTED
+/*
+ * edt: * * cnat_syslog_fill_ip_header
+ *
+ * Tries to fill the fields of the IP header before it
+ * is sent to the L3 infra node.
+ *
+ * Argument: cnat_syslog_logging_info_t *logging_info
+ * structure that contains the packet context
+ */
+inline
+void cnat_syslog_fill_ip_header (cnat_syslog_logging_info_t *logging_info)
+{
+ spp_ctx_t *ctx;
+
+ /*
+ * Fill in the IP header and port number of the Netflow collector
+ * The L3 Infra node will fill in the rest of the fields
+ */
+ ctx = logging_info->current_logging_context;
+ fill_ip_n_udp_hdr(ctx, logging_info->ipv4_address,
+ logging_info->port, logging_info->pkt_length);
+
+}
+#else
+inline
+void cnat_syslog_fill_ip_header (cnat_syslog_logging_info_t *logging_info)
+{
+ return;
+}
+#endif
+
+#ifndef TOBE_PORTED
+void cnat_syslog_logging_init()
+{
+ return;
+}
+
+void cnat_syslog_log_mapping_create(cnat_main_db_entry_t * db,
+ cnat_vrfmap_t *vrfmap)
+{
+ return;
+}
+
+void cnat_syslog_log_mapping_delete(cnat_main_db_entry_t * db,
+ cnat_vrfmap_t *vrfmap)
+{
+ return;
+}
+
+void cnat_syslog_ds_lite_port_limit_exceeded(
+ dslite_key_t * key,
+ dslite_table_entry_t *dslite_entry)
+{
+ return;
+}
+
+void cnat_syslog_nat44_mapping_create(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap, cnat_session_entry_t * sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ return;
+}
+
+/* The following are in cnat_util.c, which is not yet ported */
+/* This function is defined in cnat_util.c, which needs to be ported */
+cnat_icmp_msg_t icmp_msg_gen_allowed ()
+{
+ return 1;
+}
+
+void cnat_syslog_nat44_mapping_delete(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap, cnat_session_entry_t *sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ return;
+}
+
+u32
+cnat_get_unix_time_in_seconds (void)
+{
+ return 0;
+}
+#else /* TOBE_PORTED */
+void
+cnat_syslog_dump_logging_context (u32 value1,
+ cnat_syslog_logging_info_t *logging_info,
+ u32 value2)
+{
+ u8 *pkt_ptr;
+ u32 i;
+
+ if (PREDICT_TRUE(syslog_debug_flag == 0)) {
+ return;
+ }
+ /*
+     * Reduce the logging to a few cores to make debugging easier
+ */
+ if ((my_instance_number & 0x7) != 0) {
+ return;
+ }
+ printf("\nDumping %s packet at locn %d: time 0x%x",
+ (value2 == 1) ? "CURRENT" : "QUEUED",
+ value1,
+ cnat_get_unix_time_in_seconds());
+
+ printf("\ni_vrf 0x%x, ip_address 0x%x, port %d, pkt len %d",
+ 0 /* TO DP Add vrf like nfv9_logging_info->i_vrf */,
+ logging_info->ipv4_address,
+ logging_info->port,
+ logging_info->pkt_length);
+ printf("\n");
+
+ if (value2 == 1) {
+ pkt_ptr = logging_info->current_logging_context->packet_data;
+ } else {
+ pkt_ptr = logging_info->queued_logging_context->packet_data;
+ }
+
+ /*
+ * Dump along with 8 bytes of SHIM header
+ */
+ for (i = 0; i <
+ (logging_info->pkt_length + CNAT_NFV9_IP_HDR_OFFSET);
+ i = i + 1) {
+ u8 c1, c2, c3;
+
+ if (i == 0) {
+ printf("\nL2_HEADER + SHIM_HEADER: \n");
+ } else if (i == CNAT_NFV9_IP_HDR_OFFSET) {
+ printf("\nIP_HEADER: \n");
+ } else if (i == CNAT_NFV9_UDP_HDR_OFFSET) {
+ printf("\nUDP_HEADER: \n");
+ } else if (i == CNAT_NFV9_HDR_OFFSET) {
+ printf("\nSyslog content..\n");
+ while(i <
+ (logging_info->pkt_length + CNAT_NFV9_HDR_OFFSET)) {
+ printf("%c", (u8)(*(pkt_ptr + i)));
+ i++;
+ if((u8)(*(pkt_ptr + i)) == '[') /* new record begins */
+ printf("\n");
+ }
+ return;
+ }
+
+ c3 = *(pkt_ptr + i);
+ c2 = c3 & 0xf;
+ c1 = (c3 >> 4) & 0xf;
+
+ printf("%c%c ",
+ ((c1 <= 9) ? (c1 + '0') : (c1 - 10 + 'a')),
+ ((c2 <= 9) ? (c2 + '0') : (c2 - 10 + 'a')));
+
+ }
+
+ printf("\n");
+}
+
+
+/*
+ * edt: * * cnat_syslog_send_pkt
+ *
+ * Tries to send a logging pkt. If the packet cannot be sent
+ * because the rewrite_output node cannot process it, queue
+ * it temporarily and try to send it later.
+ *
+ * Argument: cnat_syslog_logging_info_t *logging_info
+ * structure that contains the packet context
+ */
+inline
+void cnat_syslog_send_pkt (cnat_syslog_logging_info_t *logging_info)
+{
+ spp_node_t *output_node;
+
+ cnat_syslog_fill_ip_header(logging_info);
+
+ output_node = spp_get_nodes() +
+ cnat_syslog_global_info.cnat_syslog_disp_node_index;
+
+ cnat_syslog_dump_logging_context (2, logging_info, 1);
+
+ if (PREDICT_TRUE(output_node->sf.nused < SPP_MAXDISPATCH)) {
+ /*
+ * Move the logging context to output node
+ */
+ logging_info->current_logging_context->current_length =
+ logging_info->pkt_length;
+ PLATFORM_SET_CTX_RU_TX_FROM_NODE(logging_info->current_logging_context, \
+ NODE_LOGGING);
+ spp_dispatch_make_node_runnable(output_node);
+ output_node->sf.ctxs[output_node->sf.nused++] =
+ logging_info->current_logging_context;
+
+ if(PREDICT_FALSE(syslog_debug_flag > 10))
+ printf("\nSyslog: 2. Sending Current packet\n");
+ } else {
+ /*
+         * Queue the context in the logging_info structure;
+         * we will try to send it later. Currently we restrict
+         * ourselves to a single queued context.
+ */
+ cnat_syslog_global_counter.downstream_constipation_count++;
+ if(PREDICT_FALSE(syslog_debug_flag > 10))
+ printf("\nSyslog: 2. Downstream congestion \n");
+
+ /*
+ * Attach the current logging context which is full to the
+ * queued context list in logging_info structure
+ */
+ logging_info->queued_logging_context =
+ logging_info->current_logging_context;
+ }
+
+ /*
+     * Whether the context is queued or not, set the current context
+     * pointer to NULL, as the earlier context can no longer be used to
+     * send more logging records.
+ */
+ logging_info->current_logging_context = NULL;
+}
+
+
+/*
+ * edt: * * cnat_syslog_send_queued_pkt
+ *
+ * Tries to send a logging pkt that has been queued earlier
+ * because it could not be sent due to downstream constipation
+ *
+ * Argument: cnat_syslog_logging_info_t *logging_info
+ * structure that contains the packet context
+ */
+inline
+void cnat_syslog_send_queued_pkt (cnat_syslog_logging_info_t *logging_info)
+{
+ spp_node_t *output_node;
+
+ output_node = spp_get_nodes() +
+ cnat_syslog_global_info.cnat_syslog_disp_node_index;
+
+ cnat_syslog_dump_logging_context(1, logging_info, 2);
+
+ if(PREDICT_TRUE(output_node->sf.nused < SPP_MAXDISPATCH)) {
+ /*
+ * Move the logging context to output node
+ */
+ /** This looks like a bug to me .. need to confirm *****
+ logging_info->queued_logging_context->current_length =
+ nfv9_logging_info->pkt_length; ***/
+ PLATFORM_SET_CTX_RU_TX_FROM_NODE(logging_info->queued_logging_context,
+ NODE_LOGGING)
+ spp_dispatch_make_node_runnable(output_node);
+ output_node->sf.ctxs[output_node->sf.nused++] =
+ logging_info->queued_logging_context;
+
+ SYSLOG_DEBUG_PRINTF1("\nSYSLOG: 1. Sending Queued packet\n")
+
+ /*
+ * Context has been queued, it will be freed after the pkt
+ * is sent. Clear this from the logging_context_info structure
+ */
+ logging_info->queued_logging_context = NULL;
+
+ } else {
+ cnat_syslog_global_counter.downstream_constipation_count++;
+ }
+}
+
+/*
+ * edt: * * handle_pending_syslog_pkts
+ *
+ * Timer handler for sending any pending syslog record
+ *
+ */
+inline
+void handle_pending_syslog_pkts()
+{
+ spp_node_t *output_node;
+ cnat_syslog_logging_info_t *my_logging_info = 0;
+ u32 current_timestamp = cnat_get_sys_up_time_in_ms();
+ i16 sf_nused;
+
+ output_node = spp_get_nodes() +
+ cnat_syslog_global_info.cnat_syslog_disp_node_index;
+
+ sf_nused = output_node->sf.nused;
+
+ pool_foreach (my_logging_info, cnat_syslog_logging_info_pool, ({
+ /*
+ * Check if no more logging contexts can be queued
+ */
+ if (PREDICT_FALSE(sf_nused >= SPP_MAXDISPATCH)) {
+ break;
+ }
+ if (my_logging_info->queued_logging_context)
+ cnat_syslog_send_queued_pkt (my_logging_info);
+
+ if(my_logging_info->current_logging_context &&
+ ((current_timestamp -
+ my_logging_info->current_logging_context_timestamp)
+ > 1000)) {
+ /*
+ * If there is a current logging context and timestamp
+ * indicates it is pending for long, send it out
+ * Also if there is a queued context send it out as well
+ */
+ SYSLOG_DEBUG_PRINTF4("\nLOG_TIMER: queued %p, curr %p, sf_nused %d",
+ my_logging_info->queued_logging_context,
+ my_logging_info->current_logging_context,
+ sf_nused);
+ cnat_syslog_send_pkt(my_logging_info);
+ }
+ }));
+}
+
+const unsigned char hex_numbers_single_digit[] =
+ { '0', '1', '2', '3', '4', '5', '6', '7', '8',
+ '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+
+inline static int u16_to_ascii_decimal_aligned(
+ unsigned char *ptr, u16 num, u16 min_digits)
+{
+    /* The logic below is replicated in
+     * u16_to_ascii_decimal_unaligned, except for the use of min_digits.
+     * The replication is done to optimize run time;
+     * if you fix a bug here, check u16_to_ascii_decimal_unaligned
+     * as well (and vice versa).
+ */
+ unsigned char *temp = ptr;
+ int no_leading_zeros = 0;
+
+ if(num > 9999 || min_digits == 5) {
+ *temp++ = hex_numbers_single_digit[num/10000];
+ num = num%10000;
+ no_leading_zeros = 1;
+ }
+
+ if(no_leading_zeros || num > 999 || min_digits == 4) {
+ *temp++ = hex_numbers_single_digit[num/1000];
+ num = num%1000;
+ no_leading_zeros = 1;
+ }
+
+ if(no_leading_zeros || num > 99 || min_digits == 3) {
+ *temp++ = hex_numbers_single_digit[num/100];
+ num = num%100;
+ no_leading_zeros = 1;
+ }
+
+ if(no_leading_zeros || num > 9 || min_digits == 2) {
+ *temp++ = hex_numbers_single_digit[num/10];
+ num = num%10;
+ }
+
+ *temp++ = hex_numbers_single_digit[num];
+
+ return temp-ptr;
+}
+
+inline static int u16_to_ascii_decimal_unaligned(
+ unsigned char *ptr, u16 num)
+{
+ /*
+ * return u16_to_ascii_decimal_aligned(ptr, num, 0);
+     * should do the job; however, to optimize the run time,
+     * the code of u16_to_ascii_decimal_aligned is
+     * repeated here without the use of min_digits.
+     * If you fix a bug here, please check
+     * u16_to_ascii_decimal_aligned as well (and vice versa).
+ */
+ unsigned char *temp = ptr;
+ int no_leading_zeros = 0;
+
+ if(num > 9999) {
+ *temp++ = hex_numbers_single_digit[num/10000];
+ num = num%10000;
+ no_leading_zeros = 1;
+ }
+
+ if(no_leading_zeros || num > 999) {
+ *temp++ = hex_numbers_single_digit[num/1000];
+ num = num%1000;
+ no_leading_zeros = 1;
+ }
+
+ if(no_leading_zeros || num > 99) {
+ *temp++ = hex_numbers_single_digit[num/100];
+ num = num%100;
+ no_leading_zeros = 1;
+ }
+
+ if(no_leading_zeros || num > 9) {
+ *temp++ = hex_numbers_single_digit[num/10];
+ num = num%10;
+ }
+
+ *temp++ = hex_numbers_single_digit[num];
+
+ return temp-ptr;
+}
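+
+/*
+ * Illustrative sketch (added; not part of the original patch). The aligned
+ * variant zero-pads up to min_digits -- which is what syslog_get_timestamp()
+ * below relies on for HH:MM:SS -- while the unaligned variant never emits
+ * leading zeros.
+ */
+#if 0 /* example only, never compiled */
+static void u16_to_ascii_decimal_example (void)
+{
+    unsigned char buf[8];
+
+    (void) u16_to_ascii_decimal_aligned(buf, 7, 2);   /* writes "07",   returns 2 */
+    (void) u16_to_ascii_decimal_unaligned(buf, 7);    /* writes "7",    returns 1 */
+    (void) u16_to_ascii_decimal_unaligned(buf, 2016); /* writes "2016", returns 4 */
+}
+#endif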
+
+static int syslog_get_timestamp(unsigned char *ts)
+{
+ static const char *months[] = {"Jan ", "Feb ", "Mar ", "Apr ", "May ",
+ "Jun ", "Jul ", "Aug ", "Sep ", "Oct ", "Nov ", "Dec " };
+
+ unsigned char *temp = ts;
+    /* Inserts the time stamp in the syslog format and returns the length;
+     * assumes that ts has sufficient space
+     */
+    /* China Telecom requires the time stamp to be
+     * in the format '2011 Jun 7 12:34:08'
+ */
+ time_t time = (time_t)cnat_get_unix_time_in_seconds();
+ struct tm tm1;
+
+ gmtime_r(&time, &tm1);
+ /* Now put the pieces together */
+ /* Year */
+ ts += u16_to_ascii_decimal_unaligned(ts, (tm1.tm_year + 1900));
+ *ts++ = SYSLOG_DELIMITER;
+ /* Month */
+ clib_memcpy(ts, months[tm1.tm_mon], 4);
+ ts += 4; /* DELIMITER taken care */
+ /* day */
+ ts += u16_to_ascii_decimal_unaligned(ts, tm1.tm_mday);
+ *ts++ = SYSLOG_DELIMITER;
+ /* hours */
+ ts += u16_to_ascii_decimal_aligned(ts, tm1.tm_hour, 2);
+ *ts++ = ':';
+ /* minutes */
+ ts += u16_to_ascii_decimal_aligned(ts, tm1.tm_min, 2);
+ *ts++ = ':';
+ /* seconds */
+ ts += u16_to_ascii_decimal_aligned(ts, tm1.tm_sec, 2);
+ return ts - temp;
+}
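+
+/*
+ * Illustrative note (added, not part of the original patch): for
+ * cnat_get_unix_time_in_seconds() == 0 the routine above would emit
+ * "1970 Jan 1 00:00:00" (19 bytes, no trailing NUL), matching the
+ * '2011 Jun 7 12:34:08' style described in the comment.
+ */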
+
+/* Ensure that the order of the array below matches the
+ * syslog_service_type_t enum
+ */
+static char *syslog_service_string[] = { "NAT44", "DSLITE" };
+
+/* Ensure that the order of the array below matches the
+ * syslog_event_type_t enum
+ */
+typedef struct {
+ char *event_name;
+ int name_length;
+} syslog_event_description_type;
+
+const static syslog_event_description_type sys_log_event[] = {
+ { "UserbasedA", 10 }, /* yes, 10 is strlen of "UserbasedA" */
+ { "UserbasedW", 10 },
+ { "SessionbasedA", 13 },
+ { "SessionbasedW", 13 },
+ { "SessionbasedAD", 14 },
+ { "SessionbasedWD", 14 },
+ { "Portblockrunout", 15 },
+ { "TCPseqmismatch", 14},
+ { "Invalid", 7 }
+};
+
+inline static int syslog_fill_header(const cnat_syslog_logging_info_t *log_info,
+ syslog_service_type_t s_type)
+{
+    /* Forms the syslog header and returns the length.
+ * Assumes that header has sufficient space
+ */
+
+    /* Sample header (as agreed for the China Telecom requirements) --
+ * <134> 1 2011 May 31 10:30:45 192.168.2.3 - - NAT44 -
+ */
+
+ unsigned char *temp, *header;
+ int count;
+ temp = header = (unsigned char *)
+ &(log_info->current_logging_context->packet_data[CNAT_NFV9_HDR_OFFSET]);
+ *temp++ = '<';
+ temp += byte_to_ascii_decimal_unaligned(temp,
+ log_info->header_priority);
+ *temp++ = '>';
+ *temp++ = SYSLOG_DELIMITER;
+ *temp++ = '1'; /* Syslog version -- always set to 1 */
+ *temp++ = SYSLOG_DELIMITER;
+ temp += syslog_get_timestamp(temp);
+ *temp++ = SYSLOG_DELIMITER;
+ count = strlen(log_info->header_hostname);
+ clib_memcpy(temp, log_info->header_hostname, count);
+ temp += count;
+ *temp++ = SYSLOG_DELIMITER;
+ *temp++ = SYSLOG_FIELD_ABSENT; /* App name - nil value */
+ *temp++ = SYSLOG_DELIMITER;
+ *temp++ = SYSLOG_FIELD_ABSENT; /* Proc ID - nil value for now */
+ *temp++ = SYSLOG_DELIMITER;
+ /* Now the msg id */
+ count = strlen(syslog_service_string[s_type]);
+ clib_memcpy(temp, syslog_service_string[s_type], count);
+ temp += count;
+ *temp++ = SYSLOG_DELIMITER;
+ *temp++ = SYSLOG_FIELD_ABSENT; /* No structured elements */
+ *temp++ = SYSLOG_DELIMITER;
+#ifdef SHOW_SYSLOG_TIMESTAMP
+ printf("\nSysLog TS: %s : Length %d", header, temp - header);
+#endif /* SHOW_SYSLOG_TIMESTAMP */
+ return temp-header;
+}
+
+extern void cnat_logging_init();
+
+/* one time call at the beginning */
+void cnat_syslog_logging_init()
+{
+ if(PREDICT_TRUE(cnat_syslog_global_info.cnat_syslog_init_done))
+ return; /* Already done */
+
+ cnat_logging_init();
+ cnat_syslog_global_info.cnat_syslog_disp_node_index =
+ spp_lookup_node_index(PLATFORM_SYSLOG_DISP_NODE_IDX);
+ ASSERT(cnat_syslog_global_info.cnat_syslog_disp_node_index != (u16)~0);
+
+ cnat_syslog_global_info.cnat_syslog_init_done = 1;
+}
+
+/*
+ * edt: * * cnat_syslog_create_logging_context
+ *
+ * Tries to create a logging context with packet buffer
+ * to send a new logging packet
+ *
+ * Argument: cnat_syslog_logging_info_t *logging_info
+ * structure that contains the logging info and will store
+ * the packet context as well.
+ */
+inline
+void cnat_syslog_create_logging_context (
+ cnat_syslog_logging_info_t *logging_info,
+ syslog_service_type_t s_type)
+{
+ spp_ctx_t *ctx;
+
+ /*
+     * If queued_logging_context is non-NULL, we already have a logging
+     * packet queued to be sent. First try sending it before allocating
+     * a new context. We can have only one active packet context per
+     * logging_info structure.
+ */
+
+ if (PREDICT_FALSE(logging_info->queued_logging_context != NULL)) {
+ cnat_syslog_send_queued_pkt(logging_info);
+ /*
+         * If we still cannot send the queued pkt, just return;
+         * the downstream constipation count would have been incremented anyway.
+ */
+ if (logging_info->queued_logging_context != NULL) {
+ cnat_syslog_global_counter.logging_context_creation_deferred_count++;
+ return;
+ }
+ }
+
+ /*
+     * If no context can be allocated, return silently;
+     * the calling routine will handle updating the error counters.
+ */
+ if (spp_ctx_alloc(&ctx, 1) < 1) {
+ cnat_syslog_global_counter.logging_context_creation_fail_count++;
+ SYSLOG_DEBUG_PRINTF1("\nCould not allocate ctx for syslog");
+ return;
+ }
+
+ // Allocate packet buffer (used for AVSM currently)
+ PLATFORM_ALLOC_NFV9_PKT_BUFFER(ctx, 0);
+
+ logging_info->current_logging_context = ctx;
+
+ PLATFORM_SET_CTX_RU_TX_FROM_NODE(ctx, NODE_LOGGING);
+
+ ctx->flags = SPP_CTX_END_OF_PACKET;
+ ctx->next_ctx_this_packet = (spp_ctx_t*) SPP_CTX_NO_NEXT_CTX;
+ ctx->current_header = &ctx->packet_data[CNAT_NFV9_HDR_OFFSET];
+
+ logging_info->pkt_length = syslog_fill_header(logging_info, s_type);
+ logging_info->pkt_length += (CNAT_NFV9_HDR_OFFSET -
+ CNAT_NFV9_IP_HDR_OFFSET);
+ logging_info->current_logging_context_timestamp =
+ cnat_get_sys_up_time_in_ms();
+
+}
+
+inline static int u16_to_ascii_hex_unaligned(
+ unsigned char *ptr, u16 num)
+{
+ unsigned char nibble, *temp;
+ int no_leading_zeros = 0;
+ temp = ptr;
+ nibble = (num >> 12);
+ if(nibble) {
+ *temp++ = hex_numbers_single_digit[nibble];
+ no_leading_zeros = 1;
+ }
+
+ nibble = (num >> 8) & 0xF;
+ if(nibble || no_leading_zeros) {
+ *temp++ = hex_numbers_single_digit[nibble];
+ no_leading_zeros = 1;
+ }
+
+ nibble = (num >> 4) & 0xF;
+ if(nibble || no_leading_zeros) {
+ *temp++ = hex_numbers_single_digit[nibble];
+ }
+
+ *temp++ = hex_numbers_single_digit[num & 0xF];
+
+ return temp-ptr;
+}
+
+inline static int ipv6_int_2_str(u32 ipv6[], unsigned char *ipv6_str)
+{
+/* DC stands for Double Colon.
+ * Refer to http://tools.ietf.org/html/rfc5952 for
+ * more details on the text representation of
+ * IPv6 addresses.
+ */
+#define DC_NOT_USED_YET 0
+#define DC_IN_USE 1 /* Zeros are skipped */
+#define DC_ALREADY_USED 2 /* Cannot skip zeros anymore */
+ int i;
+ u16 *ipv6_temp = (u16 *)ipv6;
+ unsigned char *temp = ipv6_str;
+ int double_colon = DC_NOT_USED_YET;
+ for(i = 0; i < 7; i++) {
+ if(ipv6_temp[i]) {
+ ipv6_str += u16_to_ascii_hex_unaligned(ipv6_str, ipv6_temp[i]);
+ *ipv6_str++ = ':';
+ if(double_colon == DC_IN_USE) { /* Cannot use DC anymore */
+ double_colon = DC_ALREADY_USED;
+ }
+ } else {
+ if(double_colon == DC_IN_USE) {
+ /* Skip this zero as well */
+ continue;
+ } else if((ipv6_temp[i+1])
+ /* DC makes sense if there is more than one contiguous zero */
+ || (double_colon != DC_NOT_USED_YET)) {
+ ipv6_str += u16_to_ascii_hex_unaligned(ipv6_str,
+ ipv6_temp[i]);
+ *ipv6_str++ = ':';
+ } else { /* Start using DC */
+ *ipv6_str++ = ':'; /* The 2nd colon */
+ double_colon = DC_IN_USE;
+ }
+ }
+ }
+ if(ipv6_temp[7]) {
+ ipv6_str += u16_to_ascii_hex_unaligned(ipv6_str, ipv6_temp[7]);
+ } else if(double_colon != DC_IN_USE) {
+ *ipv6_str++ = '0';
+ }
+ *ipv6_str = 0;
+
+ return ipv6_str - temp;
+}
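+
+/*
+ * Illustrative sketch (added; not part of the original patch, and the test
+ * value is hypothetical). ipv6_int_2_str() compresses the first run of
+ * all-zero 16-bit groups with "::" in the RFC 5952 style, so 2001:db8::1
+ * is produced from eight groups of which the five middle ones are zero.
+ */
+#if 0 /* example only, never compiled */
+static void ipv6_int_2_str_example (void)
+{
+    /* The eight 16-bit groups of 2001:db8::1, laid out in the in-memory
+     * order the u16 cast inside ipv6_int_2_str() walks through. */
+    u16 groups[8] = { 0x2001, 0x0db8, 0, 0, 0, 0, 0, 0x0001 };
+    unsigned char str[INET6_ADDRSTRLEN];
+
+    ipv6_int_2_str((u32 *) groups, str);
+    printf("%s\n", str);                    /* expected: "2001:db8::1" */
+}
+#endif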
+
+/* insert syslog record for nat44 */
+
+void cnat_syslog_insert_nat44_record(
+ cnat_syslog_logging_info_t *log_info,
+ cnat_main_db_entry_t *db, cnat_vrfmap_t *vrfmap,
+ cnat_session_entry_t *sdb, int bulk_alloc, syslog_event_type_t e_type)
+{
+    /* This record should look like this -
+ * [EventName <L4> <Original Source IP> <Inside VRF Name>
+ * <Original Source IPv6> < Translated Source IP> <Original Port>
+ * <Translated First Source Port> <Translated Last Source Port>
+ * <Destination ip address> <destination port>]
+ */
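+    /* Illustrative example (added; all values are hypothetical): a
+     * session-based add with destination logging would be emitted roughly as
+     * [SessionbasedAD 6 10.1.1.100 ivrf-cgn - 100.1.1.1 51522 1024 - 8.8.8.8 53 ]
+     */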
+ u32 original_source = db->in2out_key.k.ipv4;
+ u32 translated_ip = db->out2in_key.k.ipv4;
+ cnat_user_db_entry_t *udb = cnat_user_db + db->user_index;
+ unsigned char *temp, *record;
+ u32 network_order_ipv6[4];
+
+ SYSLOG_CONFIG_DEBUG_PRINTF(4,"In Function %s\n", __func__);
+ temp = record = &(log_info->current_logging_context->packet_data[
+ CNAT_NFV9_IP_HDR_OFFSET + log_info->pkt_length]);
+
+ if (PREDICT_FALSE(!udb)) {
+ SYSLOG_DEBUG_PRINTF1("\nnull udb!");
+ return;
+ }
+
+ /* Now we point to the location where record needs to be inserted */
+ *record++ = '['; /* Open the record */
+
+ /* Copy the record type */
+ clib_memcpy(record, sys_log_event[e_type].event_name,
+ sys_log_event[e_type].name_length);
+ record += sys_log_event[e_type].name_length;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Protocol type */
+ if(PREDICT_FALSE(
+ e_type == sessionbased_assign || e_type == sessionbased_withdraw ||
+ e_type == sessionbased_assignD || e_type == sessionbased_withdrawD)) {
+ u16 my_proto_mask;
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+ if(PREDICT_TRUE(my_proto_mask == CNAT_TCP)) {
+ *record++ = '6';
+ } else if(PREDICT_TRUE(my_proto_mask == CNAT_UDP)) {
+ *record++ = '1';
+ *record++ = '7';
+ } else if(PREDICT_TRUE(my_proto_mask == CNAT_ICMP)) {
+ *record++ = '1';
+ } else { /* Default, assume GRE (for PPTP) */
+ *record++ = '4';
+ *record++ = '7';
+ }
+ } else {
+ *record++ = SYSLOG_FIELD_ABSENT;
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Original Source IP */
+ record += copy_ipv4_addr(record, original_source);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* copy configured VRF NAME */
+ clib_memcpy(record, log_info->vrf_name, log_info->vrf_name_len);
+ record += log_info->vrf_name_len;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* No IPV6 source address for nat44 */
+ *record++ = SYSLOG_FIELD_ABSENT;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the translated IP address */
+ record += copy_ipv4_addr(record, translated_ip);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Original port */
+ if(e_type == sessionbased_assign || e_type == sessionbased_withdraw ||
+ e_type == sessionbased_assignD || e_type == sessionbased_withdrawD) {
+ record += u16_to_ascii_decimal_unaligned(
+ record, db->in2out_key.k.port);
+ } else {
+ *record++ = SYSLOG_FIELD_ABSENT;
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the start outside port */
+ record += u16_to_ascii_decimal_unaligned(record, bulk_alloc);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the last outside port */
+ if(e_type == userbased_assign || e_type == userbased_withdraw) {
+ record += u16_to_ascii_decimal_unaligned(record,
+ (bulk_alloc + BULKSIZE_FROM_VRFMAP(vrfmap) - 1));
+ } else {
+ *record++ = SYSLOG_FIELD_ABSENT;
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy destination ip and port in case for DBL*/
+ if(PREDICT_FALSE(e_type == sessionbased_assignD || e_type == sessionbased_withdrawD)) {
+ if(PREDICT_TRUE(sdb == NULL)) {
+ record += copy_ipv4_addr(record,db->dst_ipv4);
+ *record++ = SYSLOG_DELIMITER;
+ record += u16_to_ascii_decimal_unaligned(record, db->dst_port);
+ } else {
+ record += copy_ipv4_addr(record, sdb->v4_dest_key.k.ipv4);
+ *record++ = SYSLOG_DELIMITER;
+ record += u16_to_ascii_decimal_unaligned(record, sdb->v4_dest_key.k.port);
+ }
+ } else {
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+ *record++ = '-';
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+    *record++ = ']'; /* End of the record */
+
+ log_info->pkt_length += record - temp;
+}
+
+void cnat_syslog_insert_record(
+ cnat_syslog_logging_info_t *log_info,
+ cnat_main_db_entry_t *db, dslite_table_entry_t *dslite_entry,
+ cnat_session_entry_t *sdb, int bulk_alloc, syslog_event_type_t e_type)
+{
+    /* This record should look like this -
+ * [EventName <L4> <Original Source IP> <Inside VRF Name>
+ * <Original Source IPv6> < Translated Source IP> <Original Port>
+ * <Translated First Source Port> <Translated Last Source Port>
+ * <Destination ip address> <destination port>]
+ */
+ u32 original_source = db->in2out_key.k.ipv4;
+ u32 translated_ip = db->out2in_key.k.ipv4;
+ cnat_user_db_entry_t *udb = cnat_user_db + db->user_index;
+ unsigned char *temp, *record;
+ u32 network_order_ipv6[4];
+
+ temp = record = &(log_info->current_logging_context->packet_data[
+ CNAT_NFV9_IP_HDR_OFFSET + log_info->pkt_length]);
+
+ if (PREDICT_FALSE(!udb)) {
+ SYSLOG_DEBUG_PRINTF1("\nnull udb!");
+ return;
+ }
+
+ /* Now we point to the location where record needs to be inserted */
+ *record++ = '['; /* Open the record */
+
+ /* Copy the record type */
+ clib_memcpy(record, sys_log_event[e_type].event_name,
+ sys_log_event[e_type].name_length);
+ record += sys_log_event[e_type].name_length;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Protocol type */
+ if(PREDICT_FALSE(
+ e_type == sessionbased_assign || e_type == sessionbased_withdraw ||
+ e_type == sessionbased_assignD || e_type == sessionbased_withdrawD)) {
+ u16 my_proto_mask;
+ my_proto_mask = db->in2out_key.k.vrf & CNAT_PRO_MASK;
+ if(PREDICT_TRUE(my_proto_mask == CNAT_TCP)) {
+ *record++ = '6';
+ } else if(PREDICT_TRUE(my_proto_mask == CNAT_UDP)) {
+ *record++ = '1';
+ *record++ = '7';
+ } else {
+ *record++ = '1';
+ }
+ } else {
+ *record++ = SYSLOG_FIELD_ABSENT;
+ }
+
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Original Source IP */
+#ifdef DSLITE_USER_IPV4
+ record += copy_ipv4_addr(record, original_source);
+#else
+ /*
+ * Do not include inside ipv4 address for B4 element level port limiting
+ */
+ *record++ = SYSLOG_FIELD_ABSENT;
+#endif
+ *record++ = SYSLOG_DELIMITER;
+
+ /* copy configured VRF NAME */
+ clib_memcpy(record, log_info->vrf_name, log_info->vrf_name_len);
+ record += log_info->vrf_name_len;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the IPV6 source address */
+ /* CSCtt16960 Fix. */
+ network_order_ipv6[0] = htonl(udb->ipv6[0]);
+ network_order_ipv6[1] = htonl(udb->ipv6[1]);
+ network_order_ipv6[2] = htonl(udb->ipv6[2]);
+ network_order_ipv6[3] = htonl(udb->ipv6[3]);
+
+ inet_ntop(AF_INET6,network_order_ipv6,record,INET6_ADDRSTRLEN);
+ record += strlen(record);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the translated IP address */
+ record += copy_ipv4_addr(record, translated_ip);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Original port */
+ if(e_type == sessionbased_assign || e_type == sessionbased_withdraw ||
+ e_type == sessionbased_assignD || e_type == sessionbased_withdrawD) {
+ record += u16_to_ascii_decimal_unaligned(
+ record, db->in2out_key.k.port);
+ } else {
+ *record++ = SYSLOG_FIELD_ABSENT;
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the start outside port */
+ record += u16_to_ascii_decimal_unaligned(record, bulk_alloc);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the last outside port */
+ if(e_type == userbased_assign || e_type == userbased_withdraw) {
+ record += u16_to_ascii_decimal_unaligned(record,
+ (bulk_alloc + BULKSIZE_FROM_VRFMAP(dslite_entry) - 1));
+ } else {
+ *record++ = SYSLOG_FIELD_ABSENT;
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+ if(PREDICT_FALSE(e_type == sessionbased_assignD || e_type == sessionbased_withdrawD)) {
+ if(sdb == NULL) {
+ record += copy_ipv4_addr(record, db->dst_ipv4);
+ *record++ = SYSLOG_DELIMITER;
+ record += u16_to_ascii_decimal_unaligned(record, db->dst_port);
+ } else {
+ record += copy_ipv4_addr(record, sdb->v4_dest_key.k.ipv4);
+ *record++ = SYSLOG_DELIMITER;
+ record += u16_to_ascii_decimal_unaligned(record, sdb->v4_dest_key.k.port);
+ }
+ } else {
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+ *record++ = '-';
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+    *record++ = ']'; /* End of the record */
+
+ log_info->pkt_length += record - temp;
+}
+
+#define SYSLOG_PRECHECK(entry, s_type) \
+ if(PREDICT_FALSE((entry)->syslog_logging_index == EMPTY)) { \
+ SYSLOG_DEBUG_PRINTF1("\n1. Log Mapping failed") \
+ return; \
+ } \
+ logging_info = \
+ cnat_syslog_logging_info_pool + (entry)->syslog_logging_index; \
+ if(PREDICT_FALSE(logging_info->current_logging_context == NULL)) { \
+ cnat_syslog_create_logging_context(logging_info, s_type); \
+ if(PREDICT_FALSE(logging_info->current_logging_context == NULL)) { \
+ SYSLOG_DEBUG_PRINTF1("\n2. Log Mapping failed") \
+ return; \
+ } \
+ }
+
+void cnat_syslog_nat44_mapping_create(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap, cnat_session_entry_t * sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ cnat_syslog_logging_info_t *logging_info = 0;
+ syslog_event_type_t e_type;
+ int start_port;
+
+ SYSLOG_CONFIG_DEBUG_PRINTF(4,"In Function %s\n", __func__);
+ SYSLOG_PRECHECK(vrfmap, NAT44)
+
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ e_type = userbased_assign;
+ start_port = bulk_alloc;
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED) {
+ return; /* No logging required.. bulk port usage */
+ }
+ else { /* Individual logging .. fall back to old method */
+#endif
+ if(vrfmap->syslog_logging_policy == SESSION_LOG_ENABLE) {
+ e_type = sessionbased_assignD;
+ } else {
+ e_type = sessionbased_assign;
+ }
+ start_port = db->out2in_key.k.port;
+#ifndef NO_BULK_LOGGING
+ }
+#endif
+
+ cnat_syslog_insert_nat44_record(logging_info, db, vrfmap, sdb,
+ start_port, e_type);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+
+ if (PREDICT_FALSE(logging_info->pkt_length >
+ logging_info->max_length_minus_max_record_size)) {
+ cnat_syslog_send_pkt(logging_info);
+ }
+}
+
+void cnat_syslog_ds_lite_mapping_create(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry, cnat_session_entry_t *sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ cnat_syslog_logging_info_t *logging_info = 0;
+ syslog_event_type_t e_type;
+ int start_port;
+
+ SYSLOG_PRECHECK(dslite_entry, DSLite)
+
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ e_type = userbased_assign;
+ start_port = bulk_alloc;
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED) {
+ return; /* No logging required.. bulk port usage */
+ }
+ else { /* Individual logging .. fall back to old method */
+#endif
+ if(PREDICT_FALSE(dslite_entry->syslog_logging_policy == SESSION_LOG_ENABLE)) {
+ e_type = sessionbased_assignD;
+ } else {
+ e_type = sessionbased_assign;
+ }
+ start_port = db->out2in_key.k.port;
+#ifndef NO_BULK_LOGGING
+ }
+#endif
+
+ cnat_syslog_insert_record(logging_info, db, dslite_entry, sdb,
+ start_port, e_type);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+
+ if (PREDICT_FALSE(logging_info->pkt_length >
+ logging_info->max_length_minus_max_record_size)) {
+ cnat_syslog_send_pkt(logging_info);
+ }
+}
+
+void cnat_syslog_nat44_mapping_delete(cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap, cnat_session_entry_t *sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ cnat_syslog_logging_info_t *logging_info = 0;
+ syslog_event_type_t e_type;
+ int start_port;
+
+ SYSLOG_CONFIG_DEBUG_PRINTF(4,"In Function %s\n", __func__);
+ SYSLOG_PRECHECK(vrfmap, NAT44)
+
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ e_type = userbased_withdraw;
+ start_port = bulk_alloc;
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED) {
+ return; /* No logging required.. bulk port usage */
+ }
+ else { /* Individual logging .. fall back to old method */
+#endif
+ if(vrfmap->syslog_logging_policy == SESSION_LOG_ENABLE) {
+ e_type = sessionbased_withdrawD;
+ } else {
+ e_type = sessionbased_withdraw;
+ }
+ start_port = db->out2in_key.k.port;
+#ifndef NO_BULK_LOGGING
+ }
+#endif
+ cnat_syslog_insert_nat44_record(logging_info, db, vrfmap, sdb,
+ start_port, e_type);
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (PREDICT_FALSE(logging_info->pkt_length >
+ logging_info->max_length_minus_max_record_size)) {
+ cnat_syslog_send_pkt(logging_info);
+ }
+}
+
+void cnat_syslog_ds_lite_mapping_delete(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry, cnat_session_entry_t *sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ )
+{
+ cnat_syslog_logging_info_t *logging_info = 0;
+ syslog_event_type_t e_type;
+ int start_port;
+
+ SYSLOG_PRECHECK(dslite_entry, DSLite)
+
+#ifndef NO_BULK_LOGGING
+ if(bulk_alloc > 0) { /* new bulk alloc - use bulk add template */
+ e_type = userbased_withdraw;
+ start_port = bulk_alloc;
+ } else if(bulk_alloc == CACHE_ALLOC_NO_LOG_REQUIRED) {
+ return; /* No logging required.. bulk port usage */
+ }
+ else { /* Individual logging .. fall back to old method */
+#endif
+ if(PREDICT_FALSE(dslite_entry->syslog_logging_policy == SESSION_LOG_ENABLE)) {
+ e_type = sessionbased_withdrawD;
+ } else {
+ e_type = sessionbased_withdraw;
+ }
+ start_port = db->out2in_key.k.port;
+#ifndef NO_BULK_LOGGING
+ }
+#endif
+ cnat_syslog_insert_record(logging_info, db, dslite_entry, sdb,
+ start_port, e_type);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+
+ if (PREDICT_FALSE(logging_info->pkt_length >
+ logging_info->max_length_minus_max_record_size)) {
+ cnat_syslog_send_pkt(logging_info);
+ }
+}
+
+void cnat_syslog_dslite_insert_port_exceeded(
+ cnat_syslog_logging_info_t *log_info,
+ dslite_key_t * key)
+{
+    /* This record should look like this -
+ * [Portblockrunout <L4> <Original Source IP> <Inside VRF Name>
+ * <Original Source IPv6> - <Original Port> - - - -]
+ */
+ u32 network_order_ipv6[4];
+ unsigned char *temp, *record;
+
+ temp = record = &(log_info->current_logging_context->packet_data[
+ CNAT_NFV9_IP_HDR_OFFSET + log_info->pkt_length]);
+
+ /* Now we point to the location where record needs to be inserted */
+ *record++ = '['; /* Open the record */
+
+ /* Copy the record type */
+ clib_memcpy(record, sys_log_event[port_block_runout].event_name,
+ sys_log_event[port_block_runout].name_length);
+ record += sys_log_event[port_block_runout].name_length;
+ *record++ = SYSLOG_DELIMITER;
+
+ u16 my_proto_mask;
+ my_proto_mask = key->ipv4_key.k.vrf & CNAT_PRO_MASK;
+ if(PREDICT_TRUE(my_proto_mask == CNAT_TCP)) {
+ *record++ = '6';
+ } else if(PREDICT_TRUE(my_proto_mask == CNAT_UDP)) {
+ *record++ = '1';
+ *record++ = '7';
+ } else {
+ *record++ = '1';
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Original Source IP */
+ record += copy_ipv4_addr(record, key->ipv4_key.k.ipv4);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* copy configured VRF NAME */
+ clib_memcpy(record, log_info->vrf_name, log_info->vrf_name_len);
+ record += log_info->vrf_name_len;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the IPV6 source address */
+ network_order_ipv6[0] = htonl(key->ipv6[0]);
+ network_order_ipv6[1] = htonl(key->ipv6[1]);
+ network_order_ipv6[2] = htonl(key->ipv6[2]);
+ network_order_ipv6[3] = htonl(key->ipv6[3]);
+
+ inet_ntop(AF_INET6,network_order_ipv6,record,INET6_ADDRSTRLEN);
+ record += strlen(record);
+ *record++ = SYSLOG_DELIMITER;
+
+ *record++ = SYSLOG_FIELD_ABSENT; /* No translated source ip */
+ *record++ = SYSLOG_DELIMITER;
+
+ record += u16_to_ascii_decimal_unaligned(
+ record, key->ipv4_key.k.port);
+ *record++ = SYSLOG_DELIMITER;
+
+ *record++ = SYSLOG_FIELD_ABSENT; /* No translated start port */
+ *record++ = SYSLOG_DELIMITER;
+
+ *record++ = SYSLOG_FIELD_ABSENT; /* No translated end port */
+ *record++ = SYSLOG_DELIMITER;
+
+ /*No Destination Info*/
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+
+    *record++ = ']'; /* End of the record */
+
+ log_info->pkt_length += record - temp;
+}
+
+void cnat_syslog_ds_lite_port_limit_exceeded(
+ dslite_key_t * key,
+ dslite_table_entry_t *dslite_entry)
+{
+ cnat_syslog_logging_info_t *logging_info = 0;
+
+ SYSLOG_PRECHECK(dslite_entry, DSLite)
+
+ cnat_syslog_dslite_insert_port_exceeded(logging_info, key);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+
+ if (PREDICT_FALSE(logging_info->pkt_length >
+ logging_info->max_length_minus_max_record_size)) {
+ cnat_syslog_send_pkt(logging_info);
+ }
+}
+
+void cnat_syslog_nat44_insert_port_exceeded(
+ cnat_syslog_logging_info_t *log_info,
+ cnat_key_t * key)
+{
+    /* This record should look like this -
+ * [Portblockrunout <L4> <Original Source IP> <Inside VRF Name>
+ * - - <Original Port> - - - -]
+ */
+ unsigned char *temp, *record;
+
+ temp = record = &(log_info->current_logging_context->packet_data[
+ CNAT_NFV9_IP_HDR_OFFSET + log_info->pkt_length]);
+
+ /* Now we point to the location where record needs to be inserted */
+ *record++ = '['; /* Open the record */
+
+ /* Copy the record type */
+ clib_memcpy(record, sys_log_event[port_block_runout].event_name,
+ sys_log_event[port_block_runout].name_length);
+ record += sys_log_event[port_block_runout].name_length;
+ *record++ = SYSLOG_DELIMITER;
+
+ u16 my_proto_mask;
+ my_proto_mask = key->k.vrf & CNAT_PRO_MASK;
+ if(PREDICT_TRUE(my_proto_mask == CNAT_TCP)) {
+ *record++ = '6';
+ } else if(PREDICT_TRUE(my_proto_mask == CNAT_UDP)) {
+ *record++ = '1';
+ *record++ = '7';
+ } else {
+ *record++ = '1';
+ }
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Original Source IP */
+ record += copy_ipv4_addr(record, key->k.ipv4);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* copy configured VRF NAME */
+ clib_memcpy(record, log_info->vrf_name, log_info->vrf_name_len);
+ record += log_info->vrf_name_len;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* No IPV6 source address for nat44 */
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+
+ *record++ = '-'; /* No translated source ip */
+ *record++ = SYSLOG_DELIMITER;
+
+ record += u16_to_ascii_decimal_unaligned(
+ record, key->k.port);
+ *record++ = SYSLOG_DELIMITER;
+
+ *record++ = '-'; /* No translated start port */
+ *record++ = SYSLOG_DELIMITER;
+
+ *record++ = '-'; /* No translated end port */
+ *record++ = SYSLOG_DELIMITER;
+
+ /*No Destination Info*/
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+
+    *record++ = ']'; /* End of the record */
+
+ log_info->pkt_length += record - temp;
+}
+
+void cnat_syslog_nat44_port_limit_exceeded(
+ cnat_key_t * key,
+ cnat_vrfmap_t *vrfmap)
+{
+ cnat_syslog_logging_info_t *logging_info = 0;
+
+ SYSLOG_PRECHECK(vrfmap, NAT44)
+
+ cnat_syslog_nat44_insert_port_exceeded(logging_info, key);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+
+ if (PREDICT_FALSE(logging_info->pkt_length >
+ logging_info->max_length_minus_max_record_size)) {
+ cnat_syslog_send_pkt(logging_info);
+ }
+}
+
+void cnat_syslog_nat44_insert_tcp_seq_mismatch(
+ cnat_syslog_logging_info_t *log_info,
+ cnat_main_db_entry_t *db)
+{
+    /* This record should look like this -
+ * [TCPseqmismatch <L4> <Original Source IP> <Inside VRF Name>
+ * - <Translated Source IP> <Original Port> <Translated Source Port> - - -]
+ */
+ unsigned char *temp, *record;
+
+ temp = record = &(log_info->current_logging_context->packet_data[
+ CNAT_NFV9_IP_HDR_OFFSET + log_info->pkt_length]);
+
+ /* Now we point to the location where record needs to be inserted */
+ *record++ = '['; /* Open the record */
+
+ /* Copy the record type */
+ clib_memcpy(record, sys_log_event[tcp_seq_mismatch].event_name,
+ sys_log_event[tcp_seq_mismatch].name_length);
+ record += sys_log_event[tcp_seq_mismatch].name_length;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Next field is TCP */
+ *record++ = '6';
+ *record++ = SYSLOG_DELIMITER;
+
+ /* Copy the Original Source IP */
+ record += copy_ipv4_addr(record, db->in2out_key.k.ipv4);
+ *record++ = SYSLOG_DELIMITER;
+
+ /* copy configured VRF NAME */
+ clib_memcpy(record, log_info->vrf_name, log_info->vrf_name_len);
+ record += log_info->vrf_name_len;
+ *record++ = SYSLOG_DELIMITER;
+
+ /* No IPV6 source address for nat44 */
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+
+ record += copy_ipv4_addr(record, db->out2in_key.k.ipv4);
+ *record++ = SYSLOG_DELIMITER;
+
+ record += u16_to_ascii_decimal_unaligned(
+ record, db->in2out_key.k.port);
+ *record++ = SYSLOG_DELIMITER;
+
+ record += u16_to_ascii_decimal_unaligned(
+ record, db->out2in_key.k.port);
+ *record++ = SYSLOG_DELIMITER;
+
+ *record++ = '-'; /* No translated end port */
+ *record++ = SYSLOG_DELIMITER;
+
+ /*No Destination Info*/
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+ *record++ = '-';
+ *record++ = SYSLOG_DELIMITER;
+
+    *record++ = ']'; /* End of the record */
+
+ log_info->pkt_length += record - temp;
+}
+
+void cnat_syslog_nat44_tcp_seq_mismatch(
+ cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap)
+{
+ cnat_syslog_logging_info_t *logging_info = 0;
+
+ SYSLOG_PRECHECK(vrfmap, NAT44)
+
+ cnat_syslog_nat44_insert_tcp_seq_mismatch(logging_info, db);
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+
+ if (PREDICT_FALSE(logging_info->pkt_length >
+ logging_info->max_length_minus_max_record_size)) {
+ cnat_syslog_send_pkt(logging_info);
+ }
+}
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/cnat_syslog.h b/plugins/vcgn-plugin/vcgn/cnat_syslog.h
new file mode 100644
index 00000000000..931f4b9cd22
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_syslog.h
@@ -0,0 +1,190 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_syslog.h
+ *
+ * Copyright (c) 2011-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_SYSLOG_H__
+#define __CNAT_SYSLOG_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+
+#include "cnat_db.h"
+#include "nat64_db.h"
+#include "cnat_log_common.h"
+#include "dslite_defs.h"
+
+#define SYSLOG_CONFIG_DEBUG_PRINTF(level, ...) \
+ if (config_debug_level > level) PLATFORM_DEBUG_PRINT(__VA_ARGS__);
+
+
+/* one time call at the beginning */
+void cnat_syslog_logging_init();
+
+/*
+ * unconditional call
+ * will check logging config inside
+ */
+void cnat_syslog_log_mapping_create(cnat_main_db_entry_t * db,
+ cnat_vrfmap_t *vrfmap);
+
+/*
+ * unconditional call
+ * will check logging config inside
+ */
+void cnat_syslog_log_mapping_delete(cnat_main_db_entry_t * db,
+ cnat_vrfmap_t *vrfmap);
+
+void cnat_syslog_ds_lite_mapping_create(cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry, cnat_session_entry_t *sdb
+#ifndef NO_BULK_LOGGING
+ , int bulk_alloc
+#endif
+ );
+
+void cnat_syslog_ds_lite_port_limit_exceeded(
+ dslite_key_t * key,
+ dslite_table_entry_t *dslite_entry);
+
+#define SYSLOG_TIMESTAMP_LENGTH 20
+
+#define CNAT_SYSLOG_VERSION_NUMBER 1
+#define CNAT_SYSLOG_PRIORITY 16*8+6
+/* facility = local0 + severity = info */
+
+#define MAX_SYSLOG_HOSTNAME_LEN 32
+
+/* 6 for priority + space
+ * 2 for version + space
+ * 21 YYYY MMM DD HH:MM:SS + space
+ * 33 for hostname + space
+ * 4 for App Name (-) + space + Proc ID (-) + space
+ * 7 for Msg ID (DSLite is the longest Msg ID so far) + space
+ * 2 for Structured data (-) + space
+ */
+#define MAX_SYSLOG_HEADER_LEN 75
+
+/* 18 for Event Name (Portblockrunout is the longest as of now)
+ * 3 for L4 (including space)
+ * 16 for original source IP + space
+ * 33 for inside vrf name + space
+ * 40 for original source IPV6 + space
+ * 16 for translated source IP + space
+ * 6 for original port + space
+ * 6 for translated first source port + space
+ * 5 for translated last source port
+ * 2 for [] enclosure
+ */
+#define MAX_SYSLOG_RECORD_LEN 145
+
+typedef enum {
+ NAT44,
+ DSLite
+} syslog_service_type_t;
+
+typedef enum {
+ userbased_assign,
+ userbased_withdraw,
+ sessionbased_assign,
+ sessionbased_withdraw,
+ sessionbased_assignD,
+ sessionbased_withdrawD,
+ port_block_runout,
+ tcp_seq_mismatch,
+ max_syslog_event_type
+} syslog_event_type_t;
+
+/*
+ * This structure stores the Syslog logging information on a
+ * per-collector basis. It is allocated from a pool, and the index
+ * to this structure is stored in the VRF MAP structures.
+ */
+typedef struct {
+ /*
+     * nat64_id will be 0 for a nat44 config, and i_vrf_id/i_vrf will be 0
+     * for a nat64 config. nat64_id is used while the nat64 collector is
+     * searched, and i_vrf* for the nat44 collector.
+     */
+    /* Similarly, for ds_lite, ds_lite_id is used and nat64_id and
+     * i_vrf_id shall be set to 0.
+ */
+ u32 i_vrf_id; /* Inside VRF ID corresponding to this collector */
+ u16 i_vrf; /* Inside VRF (uidb_index) corresponding to this collector */
+ u16 ds_lite_id; /* DS Lite instance for this collector */
+ u16 port; /* Destination port number of the collector */
+
+ /*
+ * This field determines the maximum size of the Syslog information
+ * that can be stored in a logging packet
+ */
+ u16 max_length_minus_max_record_size;
+ u32 ipv4_address; /* Destination IP address of the collector */
+ /*
+ * Timestamp in UNIX seconds corresponding to when the current
+ * logging packet was created
+ */
+ u32 current_logging_context_timestamp;
+
+ /*
+ * Indicates if the entry is already deleted
+ */
+ u8 deleted;
+
+ u8 header_priority;
+ u16 pkt_length;
+
+ char header_hostname[MAX_SYSLOG_HOSTNAME_LEN];
+ char vrf_name[VRF_NAME_LEN_STORED];
+ u16 vrf_name_len;
+ u8 logging_policy;
+ /*
+ * current logging context
+ */
+ spp_ctx_t *current_logging_context;
+ spp_ctx_t *queued_logging_context;
+
+} cnat_syslog_logging_info_t;
+
+
+/*
+ * Global structure for CGN APP configuration
+ */
+typedef struct {
+
+ u16 cnat_syslog_disp_node_index;
+
+ /*
+ * Whether we have initialized the Syslog information
+ */
+ u8 cnat_syslog_init_done;
+
+} cnat_syslog_global_info_t;
+
+typedef struct {
+ u64 logging_context_creation_fail_count;
+ u64 downstream_constipation_count;
+ u64 logging_context_creation_deferred_count;
+} cnat_syslog_global_counters_t;
+
+extern cnat_syslog_logging_info_t *cnat_syslog_logging_info_pool;
+extern cnat_syslog_global_info_t cnat_syslog_global_info;
+
+#define SYSLOG_DEF_PATH_MTU 1500
+
+#endif /* __CNAT_SYSLOG_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_util.c b/plugins/vcgn-plugin/vcgn/cnat_util.c
new file mode 100644
index 00000000000..c3697b6be2f
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_util.c
@@ -0,0 +1,2256 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_util.c - cnat helpers
+ *
+ * Copyright (c) 2009-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/error.h>
+
+#include "tcp_header_definitions.h"
+
+#if 0
+void spp_api_cnat_v4_config_dummy_t_handler
+(spp_api_cnat_v4_config_dummy_t *mp);
+
+void spp_api_cnat_v4_config_dummy_max_t_handler
+(spp_api_cnat_v4_config_dummy_max_t *mp);
+
+void spp_api_cnat_v4_config_icmp_timeout_t_handler
+(spp_api_cnat_v4_config_icmp_timeout_t *mp);
+
+void spp_api_cnat_clear_db_request_t_handler
+(spp_api_cnat_clear_db_request_t *mp);
+
+void spp_api_cnat_v4_debug_global_t_handler
+(spp_api_cnat_v4_debug_global_t *mp);
+
+void spp_api_cnat_v4_show_outside_entry_req_t_handler
+(spp_api_cnat_v4_show_outside_entry_req_t *mp);
+
+void spp_api_cnat_v4_show_inside_entry_req_t_handler
+(spp_api_cnat_v4_show_inside_entry_req_t *mp);
+
+void spp_api_cnat_show_statistics_summary_req_t_handler
+(spp_api_cnat_show_statistics_summary_req_t *mp);
+
+void cnat_db_create_db_entries_cmd (int argc, unsigned long *argv)
+{
+ int i, j ;
+ int nusers = 3000;
+
+ cnat_db_key_bucket_t key_info;
+ cnat_main_db_entry_t *db;
+ cnat_gen_icmp_info info;
+ cnat_key_t dest_info_dummy;
+
+ memset(&dest_info_dummy, 0, sizeof(cnat_key_t));
+ printf ("Create %d users, 100 translations each...\n", nusers);
+
+ for (i = 0; i < nusers; i++) {
+ for (j = 0; j < 100; j++) {
+
+ key_info.k.k.vrf = CNAT_TCP | (1 & CNAT_VRF_MASK);
+ key_info.k.k.ipv4 = 0x0c000001+i;
+ key_info.k.k.port = 1024+j;
+
+ db = cnat_get_main_db_entry_v2(&key_info, PORT_SINGLE,
+ PORT_TYPE_DYNAMIC, &info, &dest_info_dummy);
+
+ if (db == 0) {
+ printf ("OOPS: cnat_main_db_create failed users %d trans %d\n", i, j);
+ return; /*jli*/
+ }
+
+ db->entry_expires = cnat_current_time;
+
+ }
+ }
+}
+
+void db_test_clear (int argc, unsigned long *argv)
+{
+ spp_api_cnat_clear_db_request_t mp;
+
+ mp.wildcard = argv[0];
+ mp.protocol = argv[1];
+ mp.port_num = argv[2];
+ mp.inside_vrf = argv[3];
+ mp.ip_addr = argv[4];
+ spp_api_cnat_clear_db_request_t_handler(&mp);
+}
+
+/* test code*/
+void cnat_db_test_show (int argc, unsigned long *argv)
+{
+
+ spp_api_cnat_v4_show_inside_entry_req_t mp1[2000];
+ spp_api_cnat_v4_show_outside_entry_req_t mp2[30000];
+
+ if (argc > 1) {
+ if (argc != 7) {
+ printf("Usage: db test show dec((which)) dec((vrf)) dec((proto)) dec((ip)) dec((start_port)) dec((end_port)) dec((flags))\n");
+ return;
+ }
+
+ mp1[0].vrf_id = argv[1];
+ mp1[0].protocol = argv[2];
+ mp1[0].ipv4_addr = argv[3];
+ mp1[0].start_port = argv[4];
+ mp1[0].end_port = argv[5];
+ mp1[0].flags = argv[6];
+ mp1[0].all_entries = 0;
+
+ if (argv[0] == 1) {
+ spp_api_cnat_v4_show_inside_entry_req_t_handler (&(mp1[0]));
+ } else {
+ spp_api_cnat_v4_show_outside_entry_req_t_handler (&(mp2[0]));
+ }
+
+ return;
+ } else {
+ printf("inside entries \n");
+ mp1[0].ipv4_addr = 0x016994CA;
+ mp1[0].vrf_id = 1;
+ mp1[0].all_entries = 0;
+ mp1[0].start_port = 32765;
+ mp1[0].end_port = 65535;
+ mp1[0].protocol = 2;
+ mp1[0].flags = 3;
+
+ spp_api_cnat_v4_show_inside_entry_req_t_handler (&(mp1[0]));
+
+ mp2[0].ipv4_addr = 0x640200c1;
+ mp2[0].vrf_id = 0;
+ mp2[0].start_port = 1025;
+ mp2[0].end_port = 62235;
+ mp2[0].protocol = 2;
+ mp2[0].flags = 3;
+
+ spp_api_cnat_v4_show_outside_entry_req_t_handler (&(mp2[0]));
+ }
+
+#if 1
+ {
+ spp_api_cnat_stats_node_mapping_t mp3[20000];
+ spp_api_cnat_stats_counter_mapping_t mp4[20000];
+ spp_api_cnat_stats_counter_values_t mp5[23000];
+
+ mp3[0].rc = 0;
+ spp_api_cnat_stats_node_mapping_t_handler (&mp3);
+ mp4[0].rc = 0;
+ spp_api_cnat_stats_counter_mapping_t_handler (&mp4);
+
+ mp5[0].flag = 1;
+ spp_api_cnat_stats_counter_values_t_handler(&mp5);
+ }
+#endif
+
+#if 0
+ mp1.ipv4_addr = 0x0A010102;
+ mp1.vrf_id = 1;
+ mp1.all_entries = 1;
+ mp1.protocol = 1;
+
+ spp_api_cnat_v4_show_inside_entry_req_t_handler (&mp1);
+
+
+ mp1.ipv4_addr = 0x0A010103;
+ mp1.vrf_id = 1;
+ mp1.all_entries = 1;
+ mp1.protocol = 2;
+
+ spp_api_cnat_v4_show_inside_entry_req_t_handler (&mp1);
+
+ mp6[0].inside_vrf_id = 1;
+ mp6[0].start_ipv4_address = 0x64020001;
+ mp6[0].end_ipv4_address = 0x64020101;
+ mp6[0].free_addr = 0;
+ mp6[0].flags = CNAT_TRANSLATION_ENTRY_STATIC;
+
+ spp_api_cnat_v4_show_freeUsed_entry_req_t_handler(&mp6);
+
+#endif
+ printf("returned here");
+
+ return;
+}
+
+
+
+void cnat_db_clear_all_entries (int argc, unsigned long *argv)
+{
+ cnat_main_db_entry_t * db;
+ u32 index;
+
+ pool_header_t * p = vec_header(cnat_main_db, sizeof(pool_header_t) );
+
+ for(index = 0; index < vec_len(cnat_main_db); index++) {
+
+ if ( !clib_bitmap_get(p->free_bitmap, index)) {
+
+ db = cnat_main_db + index;
+ cnat_delete_main_db_entry_v2(db);
+
+ }
+ }
+
+}
+
+
+void spp_log_cmd (int argc, unsigned long *argv)
+{
+ u16 num_traces;
+ u16 error_code;
+ u16 num_args;
+ u32 arg[7];
+ u8 i;
+
+ num_traces = argv[0];
+
+ for (i = 0; i < num_traces; i++) {
+ error_code = argv[1 + 4*i];
+ num_args = argv[2 + 4*i];
+ arg[0] = argv[3 + 4*i];
+ arg[1] = argv[4 + 4*i];
+
+ spp_printf(error_code, num_args, arg);
+ }
+}
+
+
+void cnat_db_create_random_entries (int argc, unsigned long *argv)
+{
+
+ platform_cnat_db_create_random_entries();
+}
+
+#define MAX_DEPTH 10
+
+void show_user_db_hash_chain_len() {
+
+ u32 max_len, len, n, i, max_idx, index, used;
+ cnat_user_db_entry_t * udb;
+ u32 hash_depth[MAX_DEPTH];
+
+ memset(hash_depth, 0, sizeof(u32)*MAX_DEPTH);
+
+ n = vec_len(cnat_user_hash);
+
+ used = 0;
+ max_len = 0;
+ for(i=0;i<n;i++) {
+
+ index = cnat_user_hash[i].next;
+
+ len = 0;
+ if (index != EMPTY) used++;
+
+ while (index != EMPTY) {
+ len++ ;
+ udb = cnat_user_db + index;
+ index = udb->user_hash.next;
+ }
+
+ if(len < (MAX_DEPTH-1) ) {
+ hash_depth[len]++;
+ } else {
+ hash_depth[MAX_DEPTH-1]++;
+ }
+
+ if (max_len < len) {
+ max_len = len;
+ max_idx = cnat_user_hash[i].next;
+ }
+ }
+
+ printf("Max user db hash length %u, total buckets %u used %u\n",
+ max_len, n, used);
+
+ for( i=1; i<(MAX_DEPTH - 1); i++) {
+ printf("Hash chain len %02d, entries count %d\n", i, hash_depth[i]);
+ }
+
+ printf("Hash chain len >%02d, entries count %d\n",
+ MAX_DEPTH-1, hash_depth[MAX_DEPTH-1]);
+
+}
+
+void show_main_db_hash_chain_len() {
+
+ u32 max_len, len, n, i, max_idx, index, used;
+ cnat_main_db_entry_t * db;
+ u32 hash_depth[MAX_DEPTH];
+
+ memset(hash_depth, 0, sizeof(u32)*MAX_DEPTH);
+
+ n = vec_len(cnat_in2out_hash);
+
+ used = 0;
+ max_len = 0;
+ for(i=0;i<n;i++) {
+
+ index = cnat_in2out_hash[i].next;
+
+ len = 0;
+ if (index != EMPTY) used++;
+
+ while (index != EMPTY) {
+ len++ ;
+ db = cnat_main_db + index;
+ index = db->in2out_hash.next;
+ }
+
+ if(len < (MAX_DEPTH-1) ) {
+ hash_depth[len]++;
+ } else {
+ hash_depth[MAX_DEPTH-1]++;
+ }
+
+ if (max_len < len) {
+ max_len = len;
+ max_idx = cnat_in2out_hash[i].next;
+ }
+ }
+
+ printf("Max main db I2O hash length %u, total buckets %u used %u\n",
+ max_len, n, used);
+
+ for( i=1; i<(MAX_DEPTH - 1); i++) {
+ printf("Hash chain len %02d, entries count %d\n", i, hash_depth[i]);
+ }
+
+ printf("Hash chain len >%02d, entries count %d\n",
+ MAX_DEPTH-1, hash_depth[MAX_DEPTH-1]);
+
+
+ memset(hash_depth, 0, sizeof(u32)*MAX_DEPTH);
+
+ n = vec_len(cnat_out2in_hash);
+ used = 0;
+ max_len = 0;
+
+ for(i=0;i<n;i++) {
+
+ index = cnat_out2in_hash[i].next;
+ len = 0;
+
+ if (index != EMPTY) used++;
+
+ while (index != EMPTY) {
+ len++ ;
+ db = cnat_main_db + index;
+ index = db->out2in_hash.next;
+ }
+
+ if(len < (MAX_DEPTH-1) ) {
+ hash_depth[len]++;
+ } else {
+ hash_depth[MAX_DEPTH-1]++;
+ }
+
+ if (max_len < len) {
+ max_len = len;
+ max_idx = cnat_in2out_hash[i].next;
+ }
+ }
+
+ printf("Max main db O2I hash length %u, total buckets %u used %u\n",
+ max_len, n, used);
+
+ for( i=1; i<(MAX_DEPTH - 1); i++) {
+ printf("Hash chain len %02d, entries count %d\n", i, hash_depth[i]);
+ }
+
+ printf("Hash chain len >%02d, entries count %d\n",
+ MAX_DEPTH-1, hash_depth[MAX_DEPTH-1]);
+
+
+}
+
+u32 db_free_entry (void * p) {
+
+ pool_header_t * h;
+ u32 free;
+
+ h = pool_header(p);
+
+ free = p == 0 ? 0: vec_len(h->free_indices);
+
+ return free;
+}
+
+void cnat_db_summary (int argc, unsigned long *argv) {
+
+ PLATFORM_DEBUG_PRINT("\n-----------------------------------------");
+ PLATFORM_DEBUG_PRINT("\nSummary DB");
+ PLATFORM_DEBUG_PRINT("\n-----------------------------------------\n");
+ u32 count1, count2, i;
+#ifndef NO_NAT64_DEF
+ extern void nat64_session_db_summary();
+#endif
+ /* main db active entry count*/
+ count1 = vec_len(cnat_main_db);
+ count2 = db_free_entry(cnat_main_db);
+
+ PLATFORM_DEBUG_PRINT("main db entries: total %u, active %u, free %u\n", count1, count1 - count2, count2);
+
+ /* user db active entry count */
+ count1 = vec_len(cnat_user_db);
+ count2 = db_free_entry(cnat_user_db);
+
+ PLATFORM_DEBUG_PRINT("user db entries: total %u, active %u, free %u\n", count1, count1 - count2, count2);
+
+
+ /* user db active entry count */
+#ifndef NO_NAT64_DEF
+ nat64_session_db_summary();
+#endif
+
+ /* main db hash i2o o2i usage count */
+ count1 = 0;
+ count2 = 0;
+
+ for (i=0; i< CNAT_MAIN_HASH_SIZE; i++) {
+
+ if(cnat_in2out_hash[i].next != ~0) count1++;
+ if(cnat_out2in_hash[i].next != ~0) count2++;
+
+ }
+
+ PLATFORM_DEBUG_PRINT("main hash in2out: total %6u, used %u (%.2f%%)\n",
+ CNAT_MAIN_HASH_SIZE, count1,
+ (100.0*count1)/CNAT_MAIN_HASH_SIZE);
+
+ PLATFORM_DEBUG_PRINT("main hash out2in: total %6u, used %u (%.2f%%)\n",
+ CNAT_MAIN_HASH_SIZE, count2,
+ (100.0 * count2)/CNAT_MAIN_HASH_SIZE);
+
+ /* use db hashing usage count */
+
+ count1 = 0;
+
+ for (i=0; i< CNAT_USER_HASH_SIZE; i++) {
+ if(cnat_user_hash[i].next != ~0) count1++;
+ }
+
+ PLATFORM_DEBUG_PRINT("user db hash: total %6u, used %u (%.2f%%)\n",
+ CNAT_USER_HASH_SIZE, count1,
+ (100.0*count1)/CNAT_USER_HASH_SIZE);
+
+ PLATFORM_DEBUG_PRINT("\nNull pointer exceptions:\n");
+ PLATFORM_DEBUG_PRINT("packet_pool: null enq : %10u, null deq : %10u\n",null_enq_pkt, null_deq_pkt);
+ PLATFORM_DEBUG_PRINT("ctx_pool : null enq : %10u, null deq : %10u\n",null_enq_ctx, null_deq_ctx);
+ PLATFORM_DEBUG_PRINT("wqe_pool : null enq : %10u, null deq : %10u\n",null_enq_wqe, null_deq_wqe);
+
+ PLATFORM_DEBUG_PRINT("\nReceived Packet Errors on SPI:\n");
+ PLATFORM_DEBUG_PRINT("rcv_pkt_errs: %10u\n",rcv_pkt_errs);
+
+ PLATFORM_DEBUG_PRINT("\nctx/sf allocation failure errors: \n");
+#ifndef CGN_PERF_SCALE_DEBUG
+ PLATFORM_DEBUG_PRINT("Warning: collection of error counts <with timestamp> is disabled.\n");
+ PLATFORM_DEBUG_PRINT("sf alloc errors: %10u, ctx alloc errors: %10u\n",sf_alloc_errs,ctx_alloc_errs);
+#else
+ for(i=0;i<COUNTER_BUFFER_SIZE;i++)
+ PLATFORM_DEBUG_PRINT("<%2d>Timestamp <sec>: %10u, sf errors: %10u, ctx errors: %10u\n",\
+ i,err_cnt_arr[i].timestamp,\
+ err_cnt_arr[i].sf_error_counter, \
+ err_cnt_arr[i].ctx_error_counter);
+#endif
+}
+
+void cnat_db_hash_summary (int argc, unsigned long *argv) {
+
+ show_main_db_hash_chain_len();
+
+ show_user_db_hash_chain_len();
+}
+
+/*
+ * cnat_port_alloc
+ * This function is now deprecated...
+ *
+ */
+#ifdef LB_PORT
+int cnat_port_alloc (cnat_portmap_t *cnat_portmap, u16 *portmap_inuse,
+ int cnat_instance,
+ port_alloc_t atype, port_pair_t ptype,
+ int *index, u32 *ipv4_address, u16 *base_port)
+#else
+int cnat_port_alloc (cnat_portmap_t *cnat_portmap, u16 *portmap_inuse,
+ port_alloc_t atype, port_pair_t ptype,
+ int *index, u32 *ipv4_address, u16 *base_port)
+#endif
+{
+
+ return (0);
+}
+
+/*
+ * cnat_port_free
+ * This function is now deprecated...
+ *
+ */
+#ifdef LB_PORT
+void cnat_port_free (cnat_portmap_t *cnat_portmap, u16 *portmap_inuse,
+ int instance, int index, port_pair_t ptype, u16 base_port)
+#else
+void cnat_port_free (cnat_portmap_t *cnat_portmap, u16 *portmap_inuse,
+ int index, port_pair_t ptype, u16 base_port)
+#endif
+{
+}
+
+void spp_api_cnat_port_allocate_t_handler(spp_api_cnat_port_allocate_t *mp)
+{
+ int i, j, k1, k2;
+ int pm_index;
+ u32 ipv4_address;
+ u16 aport;
+ int rv;
+ char *out1, *out2, *out_f;
+ port_alloc_t pt1, pt2;
+ cnat_portmap_t *pm = 0;
+ u16 *pm_inuse = 0;
+ u32 *firstp =0;
+ u32 nr_ports =0;
+ u32 nodd_ports = 0;
+ u32 neven_ports = 0;
+#ifdef LB_PORT
+ u32 my_instance = 1;
+#endif
+ char out_r[12] = "allocated-r";
+ char out_o[12] = "allocated-o";
+ char out_e[12] = "allocated-e";
+
+
+ /*
+ * this command is run after db create portmap
+ * vrf is hardcode to 1
+ */
+
+ /* Already have a portmap vector for this VRF? */
+ for (i = 0; i < vec_len(cnat_portmap_indices_by_vrf); i++) {
+ if (cnat_portmap_indices_by_vrf[i] == mp->vrf) {
+ pm = cnat_portmaps[i];
+ pm_inuse = cnat_portmaps_inuse[i];
+ goto found_portmaps;
+ }
+ }
+
+ printf("need to run db create portmaps first 0x%d\n",
+ vec_len(cnat_portmap_indices_by_vrf));
+ return;
+
+found_portmaps:
+ nr_ports = mp->nr_ports;
+ nodd_ports = mp->nodd_ports;
+ neven_ports = mp->neven_ports;
+
+ if ((nr_ports + nodd_ports + neven_ports ) > (PORTS_PER_ADDR)) {
+ printf("invalid port# nr_ports %d + odd %d + even %d "
+ "should be less than 200 \n", nr_ports, nodd_ports, neven_ports);
+ return;
+ }
+
+ /*
+ * first port
+ */
+ firstp = nr_ports ? (&nr_ports) : (nodd_ports ? (&nodd_ports) : (&neven_ports));
+ if (!(*firstp)) {
+ printf("invalid port# nr_ports %d odd %d even %d ",
+ nr_ports, nodd_ports, neven_ports);
+ }
+ out_f = nr_ports ? out_r : (nodd_ports ? out_o : out_e);
+
+#ifdef LB_PORT
+ rv = cnat_port_alloc (pm, pm_inuse, my_instance,
+ PORT_ALLOC_ANY, PORT_S_ODD,
+ &pm_index, &ipv4_address, &aport);
+#else
+ rv = cnat_port_alloc (pm, pm_inuse,
+ PORT_ALLOC_ANY, PORT_S_ODD,
+ &pm_index, &ipv4_address, &aport);
+#endif
+
+ if (!rv) {
+ printf("failed-o\n");
+ return;
+ }
+ printf("%s %8d %10x %8d\n", out_f,
+ pm_index, ipv4_address, aport);
+
+ (*firstp)--;
+
+ for (i=0; i < nr_ports; i++) {
+#ifdef LB_PORT
+ rv = cnat_port_alloc (pm, pm_inuse, my_instance,
+ PORT_ALLOC_DIRECTED, PORT_SINGLE,
+ &pm_index, &ipv4_address, &aport);
+#else
+ rv = cnat_port_alloc (pm, pm_inuse,
+ PORT_ALLOC_DIRECTED, PORT_SINGLE,
+ &pm_index, &ipv4_address, &aport);
+#endif
+ if (rv) {
+ printf("%s %8d %10x %8d\n", out_r,
+ pm_index, ipv4_address, aport);
+ } else {
+ printf("%s failed\n", out_r);
+ return;
+ }
+ }
+
+ if (nodd_ports > neven_ports) {
+ k1 = nodd_ports;
+ k2 = neven_ports;
+ pt1 = PORT_S_ODD;
+ pt2 = PORT_S_EVEN;
+ out1 = out_o;
+ out2 = out_e;
+ } else {
+ k1= neven_ports;
+ pt1 = PORT_S_EVEN;
+ k2 = nodd_ports;
+ pt2 = PORT_S_ODD;
+ out1 = out_e;
+ out2 = out_o;
+ }
+
+ j = 0;
+ for (i=0; i < k1; i++) {
+#ifdef LB_PORT
+ rv = cnat_port_alloc (pm, pm_inuse, my_instance,
+ PORT_ALLOC_DIRECTED, pt1,
+ &pm_index, &ipv4_address, &aport);
+#else
+ rv = cnat_port_alloc (pm, pm_inuse,
+ PORT_ALLOC_DIRECTED, pt1,
+ &pm_index, &ipv4_address, &aport);
+#endif
+ if (rv) {
+ printf("%s %8d %10x %8d\n", out1,
+ pm_index, ipv4_address, aport);
+ } else {
+ printf("%s failed\n", out1);
+ return;
+ }
+
+ if (j < k2) {
+#ifdef LB_PORT
+ rv = cnat_port_alloc (pm, pm_inuse, my_instance,
+ PORT_ALLOC_DIRECTED, pt2,
+ &pm_index, &ipv4_address, &aport);
+#else
+ rv = cnat_port_alloc (pm, pm_inuse,
+ PORT_ALLOC_DIRECTED, pt2,
+ &pm_index, &ipv4_address, &aport);
+#endif
+
+ if (rv) {
+ printf("%s %8d %10x %8d\n", out2,
+ pm_index, ipv4_address, aport);
+ j++;
+ } else {
+ printf("%s failed\n", __FUNCTION__);
+ return;
+ }
+ }
+ }
+}
+
+void cnat_db_summary_stats (int argc, unsigned long *argv)
+{
+ spp_api_cnat_show_statistics_summary_req_t mp[50000];
+
+ spp_api_cnat_show_statistics_summary_req_t_handler(&(mp[0]));
+}
+
+void cnat_debug_global_test (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_debug_global_t *mp;
+ spp_api_cnat_v4_config_dummy_t mp1;
+ spp_api_cnat_v4_config_icmp_timeout_t mp2[10];
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_DEBUG_GLOBAL;
+ mp->debug_flag = argv[0];
+
+ platform_send_msg(mp);
+
+ mp2[0].default_value = 3;
+
+ spp_api_cnat_v4_config_dummy_t_handler(&mp1);
+ spp_api_cnat_v4_config_icmp_timeout_t_handler(&(mp2[0]));
+}
+
+void cnat_debug_inside_test (int argc, unsigned long *argv)
+{
+
+ spp_api_cnat_v4_debug_in2out_private_addr_t *mp;
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_DEBUG_IN2OUT_PRIVATE_ADDR;
+
+ mp->start_addr = spp_host_to_net_byte_order_32(argv[0]);
+ mp->end_addr = spp_host_to_net_byte_order_32(argv[1]);
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[2]);
+ mp->debug_flag = spp_host_to_net_byte_order_32(argv[3]);
+
+ platform_send_msg(mp);
+}
+
+void cnat_config_ports_user (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_config_port_limit_t *mp;
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_CONFIG_PORT_LIMIT;
+
+ mp->port_limit = spp_host_to_net_byte_order_16(argv[0]);
+
+ platform_send_msg(mp);
+
+}
+
+void cnat_debug_outside_test (int argc, unsigned long *argv)
+{
+
+ spp_api_cnat_v4_debug_out2in_public_addr_t *mp;
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_DEBUG_OUT2IN_PUBLIC_ADDR;
+
+ mp->start_addr = spp_host_to_net_byte_order_32(argv[0]);
+ mp->end_addr = spp_host_to_net_byte_order_32(argv[1]);
+ mp->o_vrf = spp_host_to_net_byte_order_16(argv[2]);
+ mp->debug_flag = spp_host_to_net_byte_order_32(argv[3]);
+
+ platform_send_msg(mp);
+}
+
+void cnat_debug_udp_dump (int argc, unsigned long *argv)
+{
+
+ spp_api_cnat_p2mp_debug_request_t *mp;
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_P2MP_DEBUG_REQUEST;
+ mp->dump_type =
+ spp_host_to_net_byte_order_16(CNAT_DEBUG_GENERIC_COMMAND_DEBUG_FLAGS);
+
+ if (spp_host_to_net_byte_order_32(argv[0]) == 1) {
+ mp->param[0] = spp_host_to_net_byte_order_32(
+ CNAT_DEBUG_FLAG_UDP_INSIDE_PACKET_DUMP);
+ } else {
+ mp->param[0] = spp_host_to_net_byte_order_32(
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_PACKET_DUMP);
+ }
+ mp->param[1] = spp_host_to_net_byte_order_32(argv[1]);
+
+ platform_send_msg(mp);
+
+
+
+}
+
+void cnat_debug_udp_crc (int argc, unsigned long *argv)
+{
+ spp_api_cnat_p2mp_debug_request_t *mp;
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_P2MP_DEBUG_REQUEST;
+ mp->dump_type =
+ spp_host_to_net_byte_order_16(CNAT_DEBUG_GENERIC_COMMAND_DEBUG_FLAGS);
+
+ if (spp_host_to_net_byte_order_32(argv[0]) == 1) {
+ mp->param[0] = spp_host_to_net_byte_order_32(
+ CNAT_DEBUG_FLAG_UDP_INSIDE_CHECKSUM_MODIFY);
+ } else {
+ mp->param[0] = spp_host_to_net_byte_order_32(
+ CNAT_DEBUG_FLAG_UDP_OUTSIDE_CHECKSUM_MODIFY);
+ }
+ mp->param[1] = spp_host_to_net_byte_order_32(argv[1]);
+
+ platform_send_msg(mp);
+
+}
+
+void cnat_db_allocate_port_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_port_allocate_t *mp;
+
+
+ if (!argc) {
+ printf("no port# defined\n");
+ return;
+ }
+
+ if ( argc < 3) {
+ printf("no port# defined\n");
+ return;
+ }
+
+ if ((argc == 3) && (argv[0] == 0) && (argv[1] == 0) && (argv[2] == 0)) {
+ printf("no port# defined\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_PORT_ALLOCATE;
+ mp->nr_ports = argv[0];
+ mp->nodd_ports = argv[1];
+ mp->neven_ports = argv[2];
+ mp->vrf = 1;
+
+ platform_send_msg(mp);
+}
+
+
+void spp_api_cnat_port_clear_t_handler(spp_api_cnat_port_clear_t *mp)
+{
+ u32 i;
+ cnat_portmap_t *pm = 0;
+ u16 *pm_inuse = 0;
+#ifdef LB_PORT
+ u32 my_instance = 1;
+#endif
+
+
+ /*
+ * this command is run after db create port
+ * vrf is hardcode to 1
+ */
+
+ /* Already have a portmap vector for this VRF? */
+ for (i = 0; i < vec_len(cnat_portmap_indices_by_vrf); i++) {
+ if (cnat_portmap_indices_by_vrf[i] == mp->vrf) {
+ pm = cnat_portmaps[i];
+ pm_inuse = cnat_portmaps_inuse[i];
+ goto found_portmaps;
+ }
+ }
+
+ printf("portmap is not created 0x%d\n",
+ vec_len(cnat_portmap_indices_by_vrf));
+ return;
+
+found_portmaps:
+ if (mp->pm_index >= vec_len(pm)) {
+ printf("invalid port_index 0x%d >= 0x%d\n",
+ mp->pm_index, vec_len(pm));
+ return;
+ }
+
+#ifdef LB_PORT
+ cnat_port_free(pm, pm_inuse, my_instance,
+ mp->pm_index, PORT_SINGLE, mp->port);
+#else
+ cnat_port_free(pm, pm_inuse,
+ mp->pm_index, PORT_SINGLE, mp->port);
+#endif
+ printf("\n pm_index %d port %d is deleted\n", mp->pm_index, mp->port);
+}
+
+
+
+void cnat_db_clear_port_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_port_clear_t *mp;
+
+ if (!argc) {
+ printf("no port# defined\n");
+ return;
+ }
+
+ if ( argc < 2 ) {
+ printf("no port# defined\n");
+ return;
+ }
+
+ if (argc > 2) {
+ printf("too many port# defined\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_PORT_CLEAR;
+ mp->pm_index = argv[0];
+ mp->port = argv[1];
+ mp->vrf = 1;
+
+ platform_send_msg(mp);
+}
+
+
+void spp_api_cnat_v4_add_vrf_map_t_handler
+(spp_api_cnat_v4_add_vrf_map_t *mp);
+
+void spp_api_cnat_v4_del_vrf_map_t_handler
+(spp_api_cnat_v4_del_vrf_map_t *mp);
+
+void spp_api_cnat_v4_add_static_port_t_handler
+(spp_api_cnat_v4_add_static_port_t *mp);
+
+void spp_api_cnat_v4_del_static_port_t_handler
+(spp_api_cnat_v4_del_static_port_t *mp);
+
+
+void cnat_db_create_vrfmap_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_add_vrf_map_t *mp;
+
+ if ((argc != 4)) {
+ printf("need right input\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_V4_ADD_VRF_MAP;
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[0]);
+ mp->o_vrf = spp_host_to_net_byte_order_16(argv[1]);
+ mp->start_addr[0] = spp_host_to_net_byte_order_32(argv[2]);
+ mp->end_addr[0] = spp_host_to_net_byte_order_32(argv[3]);
+
+ /*
+ * Some hardcoded values for the vrf ids
+ */
+ mp->i_vrf_id = spp_host_to_net_byte_order_32(0x00000100 | mp->i_vrf);
+ mp->o_vrf_id = spp_host_to_net_byte_order_32(0x00000200 | mp->o_vrf);
+
+ platform_send_msg(mp);
+}
+
+
+void cnat_db_delete_vrfmap_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_del_vrf_map_t *mp;
+
+ if (argc != 4) {
+ printf("need right input\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_V4_DEL_VRF_MAP;
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[0]);
+ mp->start_addr[0] = spp_host_to_net_byte_order_32(argv[2]);
+ mp->end_addr[0] = spp_host_to_net_byte_order_32(argv[3]);
+
+ platform_send_msg(mp);
+}
+
+void cnat_db_add_svi_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_config_svi_params_t *mp;
+
+ if (argc != 3) {
+ printf("need right input\n");
+ return;
+ }
+
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_CONFIG_SVI_PARAMS;
+ mp->uidb_index = spp_host_to_net_byte_order_16(argv[1]);
+ mp->svi_ipv4_addr = spp_host_to_net_byte_order_32(argv[2]);
+ platform_send_msg(mp);
+ return;
+}
+
+
+
+void spp_api_cnat_port_create_t_handler(spp_api_cnat_port_create_t *mp)
+{
+ int i, j, k1, k2;
+ int my_index;
+ u32 ipv4_address;
+ u16 aport;
+ u32 pm_len =0;
+ cnat_errno_t rv;
+ u16 i_vrf;
+ char *out1, *out2, *out_f;
+ port_alloc_t pt1, pt2;
+ cnat_vrfmap_t *my_vrfmap;
+ cnat_portmap_v2_t *pm = 0;
+ u32 *firstp =0;
+ u32 nr_ports =0;
+ u32 nodd_ports = 0;
+ u32 neven_ports = 0;
+#ifdef LB_PORT
+ u32 my_instance = 1;
+#endif
+ char out_r[12] = "allocated-r";
+ char out_o[12] = "allocated-o";
+ char out_e[12] = "allocated-e";
+#ifndef NO_BULK_LOGGING
+ int nfv9_log_req;
+#endif
+
+ nr_ports = mp->nr_ports;
+ nodd_ports = mp->nodd_ports;
+ neven_ports = mp->neven_ports;
+ i_vrf = mp->vrf;
+
+ /*
+ * this command is run after db create vrfmap
+ * or using vrf id in init function
+ */
+ /* Already have a portmap vector for this VRF? */
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ if ((my_vrfmap->status == S_RUN) &&
+ (my_vrfmap->i_vrf == i_vrf)) {
+ pm = my_vrfmap->portmap_list;
+ pm_len = vec_len(pm);
+ if (pm_len) {
+ goto found_portmaps;
+ }
+ }
+ }));
+
+ printf("need to run db create vrfmaps first for this vrf0x%d\n", pm_len);
+ return;
+
+found_portmaps:
+
+ if ((nr_ports + nodd_ports + neven_ports ) > (PORTS_PER_ADDR)) {
+ printf("invalid port# nr_ports %d + odd %d + even %d "
+ "should be less than 200 \n", nr_ports, nodd_ports, neven_ports);
+ return;
+ }
+
+ /*
+ * first port
+ */
+ firstp = nr_ports ? (&nr_ports) : (nodd_ports ? (&nodd_ports) : (&neven_ports));
+ if (!(*firstp)) {
+ printf("invalid port# nr_ports %d odd %d even %d ",
+ nr_ports, nodd_ports, neven_ports);
+ }
+ out_f = nr_ports ? out_r : (nodd_ports ? out_o : out_e);
+
+ rv = cnat_dynamic_port_alloc_v2 (pm, PORT_ALLOC_ANY, PORT_S_ODD,
+ &my_index, &ipv4_address, &aport,
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ , BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , 0,
+ &(my_vrfmap->rseed_ip)
+ );
+
+ if (rv != CNAT_SUCCESS) {
+ printf("failed-o\n");
+ return;
+ }
+ printf("%s %8d %10x %8d\n", out_f,
+ my_index, ipv4_address, aport);
+
+ (*firstp)--;
+
+ for (i=0; i < nr_ports; i++) {
+ rv = cnat_dynamic_port_alloc_v2 (pm, PORT_ALLOC_DIRECTED, PORT_SINGLE,
+ &my_index, &ipv4_address, &aport,
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ , BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , 0,
+ &(my_vrfmap->rseed_ip)
+ );
+
+ if (rv == CNAT_SUCCESS) {
+ printf("%s %8d %10x %8d\n", out_r,
+ my_index, ipv4_address, aport);
+ } else {
+ printf("%s failed\n", __FUNCTION__);
+ return;
+ }
+ }
+
+ if (nodd_ports > neven_ports) {
+ k1 = nodd_ports;
+ k2 = neven_ports;
+ pt1 = PORT_S_ODD;
+ pt2 = PORT_S_EVEN;
+ out1 = out_o;
+ out2 = out_e;
+ } else {
+ k1= neven_ports;
+ pt1 = PORT_S_EVEN;
+ k2 = nodd_ports;
+ pt2 = PORT_S_ODD;
+ out1 = out_e;
+ out2 = out_o;
+ }
+
+ j = 0;
+ for (i=0; i < k1; i++) {
+ rv = cnat_dynamic_port_alloc_v2 (pm, PORT_ALLOC_DIRECTED, pt1,
+ &my_index, &ipv4_address, &aport,
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ , BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , 0,
+ &(my_vrfmap->rseed_ip)
+ );
+
+ if (rv == CNAT_SUCCESS) {
+ printf("%s %8d %10x %8d\n", out1,
+ my_index, ipv4_address, aport);
+ } else {
+ printf("%s failed\n", __FUNCTION__);
+ return;
+ }
+
+ if (j < k2) {
+ rv = cnat_dynamic_port_alloc_v2 (pm, PORT_ALLOC_DIRECTED, pt2,
+ &my_index, &ipv4_address, &aport,
+ cnat_static_port_range
+#ifndef NO_BULK_LOGGING
+ , BULKSIZE_FROM_VRFMAP(my_vrfmap),
+ &nfv9_log_req
+#endif
+ , 0,
+ &(my_vrfmap->rseed_ip)
+ );
+
+ if (rv == CNAT_SUCCESS) {
+ printf("%s %8d %10x %8d\n", out2,
+ my_index, ipv4_address, aport);
+ j++;
+ } else {
+ printf("%s failed\n", __FUNCTION__);
+ return;
+ }
+ }
+ }
+}
+
+
+void cnat_db_create_port_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_port_create_t *mp;
+
+ if (argc != 4) {
+ printf("no proper input defined\n");
+ return;
+ }
+
+ if ((argv[0] == 0) && (argv[1] == 0) && (argv[2] == 0)) {
+ printf("no port# defined\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_PORT_CREATE;
+ mp->nr_ports = argv[0];
+ mp->nodd_ports = argv[1];
+ mp->neven_ports = argv[2];
+ mp->vrf = argv[3];
+
+ platform_send_msg(mp);
+}
+
+void spp_api_cnat_port_delete_t_handler(spp_api_cnat_port_delete_t *mp)
+{
+ u32 pm_len;
+ cnat_vrfmap_t *my_vrfmap;
+ cnat_portmap_v2_t *pm = 0;
+
+ u32 my_index, my_port;
+ u16 i_vrf;
+#ifdef LB_PORT
+ u32 my_instance = 1;
+#endif
+
+ my_index = mp->pm_index;
+ my_port = mp->port;
+ i_vrf = mp->vrf;
+
+ /*
+ * this command is run after db create port
+ */
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ if (my_vrfmap->i_vrf == i_vrf) {
+ pm = my_vrfmap->portmap_list;
+ pm_len = vec_len(pm);
+ if (pm_len) {
+ goto found_portmaps;
+ }
+ }
+ }));
+
+ printf("portmap is not created 0x%d\n",
+ vec_len(cnat_portmap_indices_by_vrf));
+ return;
+
+found_portmaps:
+ if (my_index >= pm_len) {
+ printf("invalid port_index 0x%d >= 0x%d\n",
+ my_index, pm_len);
+ return;
+ }
+
+#ifdef LB_PORT
+ cnat_port_free_v2(pm, my_instance,
+ my_index, PORT_SINGLE, mp->port,cnat_static_port_range);
+#else
+ cnat_port_free_v2(pm, my_index, PORT_SINGLE, mp->port,cnat_static_port_range);
+#endif
+ printf("\n pm_index %d port %d is deleted\n", mp->pm_index, mp->port);
+}
+
+void cnat_db_delete_port_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_port_clear_t *mp;
+
+ if (argc != 3) {
+ printf("no proper input defined\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+
+ mp->_spp_msg_id = SPP_API_CNAT_PORT_DELETE;
+ mp->pm_index = argv[0];
+ mp->port = argv[1];
+ mp->vrf = argv[2];
+ platform_send_msg(mp);
+}
+
+void cnat_db_create_static_fwd_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_add_static_port_t *mp;
+
+ if (argc != 4) {
+ printf("need right input\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_V4_ADD_STATIC_PORT;
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[0]);
+ mp->i_ip = spp_host_to_net_byte_order_32(argv[1]);
+ mp->i_port = spp_host_to_net_byte_order_16(argv[2]);
+ mp->proto = argv[3];
+
+ platform_send_msg(mp);
+ return;
+}
+
+void cnat_db_create_static_fwd_stby_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_add_static_port_t *mp;
+
+ if (argc != 7) {
+ printf("need right input\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_V4_ADD_STATIC_PORT;
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[0]);
+ mp->i_ip = spp_host_to_net_byte_order_32(argv[1]);
+ mp->i_port = spp_host_to_net_byte_order_16(argv[2]);
+ mp->proto = argv[3];
+ mp->o_vrf_id = spp_host_to_net_byte_order_32(argv[4]);
+ mp->o_ip = spp_host_to_net_byte_order_32(argv[5]);
+ mp->o_port = spp_host_to_net_byte_order_16(argv[6]);
+
+printf("\ni_vrf %d, ip 0x%x, port %d, o_ip, port %d", mp->i_vrf, mp->i_ip, mp->i_port, mp->o_ip, mp->o_port);
+
+ platform_send_msg(mp);
+ return;
+}
+
+void cnat_db_delete_static_fwd_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_del_static_port_t *mp;
+
+ if (argc != 3) {
+ printf("need right input\n");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_V4_DEL_STATIC_PORT;
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[0]);
+ mp->i_ip = spp_host_to_net_byte_order_32(argv[1]);
+ mp->i_port = spp_host_to_net_byte_order_16(argv[2]);
+
+ platform_send_msg(mp);
+ return;
+}
+
+void cnat_nfv9_create_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_config_nfv9_logging_t *mp;
+
+ if (argc < 3) {
+ printf("nfv9 create i_vrf ip_addr port [refresh_rate] [timeout] [mtu]");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_V4_CONFIG_NFV9_LOGGING;
+ mp->enable = 1;
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[0]);
+
+ mp->ipv4_address = spp_host_to_net_byte_order_32(argv[1]);
+ mp->port = spp_host_to_net_byte_order_16(argv[2]);
+
+ if (argc > 3) {
+ mp->refresh_rate = spp_host_to_net_byte_order_16(argv[3]);
+ mp->timeout_rate = spp_host_to_net_byte_order_16(argv[4]);
+ mp->path_mtu = spp_host_to_net_byte_order_16(argv[5]);
+ } else {
+ mp->refresh_rate = spp_host_to_net_byte_order_16(1000);
+ mp->timeout_rate = spp_host_to_net_byte_order_16(30);
+ mp->path_mtu = spp_host_to_net_byte_order_16(1500);
+ }
+ platform_send_msg(mp);
+}
+
+void cnat_delete_cgn (int argc, unsigned long *argv)
+{
+ void *mp_send;
+ spp_api_cnat_del_cgn_t *mp;
+ u32 mp_size;
+
+ mp_size = sizeof(spp_api_cnat_del_cgn_t);
+
+ mp = spp_msg_api_alloc(mp_size);
+ memset(mp, 0, mp_size);
+
+ mp->_spp_msg_id = SPP_API_CNAT_DEL_CGN;
+
+ mp_send = mp;
+
+ platform_send_msg(mp);
+}
+
+void cnat_debug_global_all (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_debug_global_t *mp;
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_DEBUG_GLOBAL;
+ mp->debug_flag = CNAT_DEBUG_GLOBAL_ALL;
+
+ platform_send_msg(mp);
+}
+
+void cnat_debug_global_none (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_debug_global_t *mp;
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_DEBUG_GLOBAL;
+ mp->debug_flag = CNAT_DEBUG_NONE;
+
+ platform_send_msg(mp);
+}
+
+
+void cnat_bulk_cmd (int argc, unsigned long *argv)
+{
+ void *mp_send;
+
+ if (argc < 1) {
+ printf("\nargc = %d", argc);
+ printf("\n1. bulk cmd [0=static-port, 1=bulk_vrf, 2=policy_knob]");
+ return;
+ }
+
+
+ switch (argv[0]) {
+ case 0:
+ {
+ spp_api_cnat_v4_bulk_add_delete_static_port_t *mp;
+ spp_api_cnat_v4_add_static_port_t *mp_sp;
+ u32 mp_size =
+ sizeof(spp_api_cnat_v4_bulk_add_delete_static_port_t) +
+ (sizeof(spp_api_cnat_v4_add_static_port_t))*2;
+
+ mp = spp_msg_api_alloc(mp_size);
+ memset(mp, 0, mp_size);
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_BULK_ADD_DELETE_STATIC_PORT;
+
+ mp->num_static_port_entries = spp_host_to_net_byte_order_32(3);
+
+ mp_sp = (spp_api_cnat_v4_add_static_port_t *) &(mp->pad2);
+
+ mp_sp->_spp_msg_id = spp_host_to_net_byte_order_16(
+ SPP_API_CNAT_V4_ADD_STATIC_PORT);
+ mp_sp->proto = 2;
+ mp_sp->i_vrf = spp_host_to_net_byte_order_16(0x1);
+ mp_sp->i_ip = spp_host_to_net_byte_order_32(0x11111111);
+ mp_sp->i_port = spp_host_to_net_byte_order_16(0x7777);
+
+ mp_sp++;
+
+
+ mp_sp->_spp_msg_id = spp_host_to_net_byte_order_16(
+ SPP_API_CNAT_V4_ADD_STATIC_PORT);
+ mp_sp->proto = 1;
+ mp_sp->i_vrf = spp_host_to_net_byte_order_16(0x1);
+ mp_sp->i_ip = spp_host_to_net_byte_order_32(0x22222222);
+ mp_sp->i_port = spp_host_to_net_byte_order_16(0x6666);
+
+ mp_sp++;
+
+
+ mp_sp->_spp_msg_id = spp_host_to_net_byte_order_16(
+ SPP_API_CNAT_V4_ADD_STATIC_PORT);
+ mp_sp->proto = 1;
+ mp_sp->i_vrf = spp_host_to_net_byte_order_16(0x1);
+ mp_sp->i_ip = spp_host_to_net_byte_order_32(0x33333333);
+ mp_sp->i_port = spp_host_to_net_byte_order_16(0x5555);
+
+ mp_send = mp;
+
+ }
+ break;
+
+ case 1:
+ {
+ spp_api_cnat_v4_bulk_vrf_map_t *mp;
+ spp_api_cnat_v4_single_vrf_map_req *mp_sp;
+
+ u32 mp_size = sizeof(spp_api_cnat_v4_bulk_vrf_map_t) +
+ (sizeof(spp_api_cnat_v4_single_vrf_map_req))*2;
+
+ mp = spp_msg_api_alloc(mp_size);
+ memset(mp, 0, mp_size);
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_BULK_VRF_MAP;
+
+ mp->num_vrfmap_entries = spp_host_to_net_byte_order_32(3);
+
+ mp_sp = (spp_api_cnat_v4_single_vrf_map_req *)
+ &(mp->vrf_policy_enable);
+
+ mp_sp->i_vrf_id = spp_host_to_net_byte_order_32(0xe0000001);
+ mp_sp->o_vrf_id = spp_host_to_net_byte_order_32(0xe0000000);
+ mp_sp->i_vrf = spp_host_to_net_byte_order_16(0x1);
+ mp_sp->o_vrf = spp_host_to_net_byte_order_16(0x0);
+ mp_sp->start_addr = spp_host_to_net_byte_order_32(0x11111100);
+ mp_sp->end_addr = spp_host_to_net_byte_order_32(0x111111ff);
+ mp_sp->vrf_policy_enable = spp_host_to_net_byte_order_16(0x3);
+ mp_sp->tcp_mss_value = spp_host_to_net_byte_order_16(0x111);
+ mp_sp->vrf_nfv9_logging_ipv4_address = spp_host_to_net_byte_order_32(0x11000001);
+ mp_sp->vrf_nfv9_logging_udp_port = spp_host_to_net_byte_order_16(0x1001);
+ mp_sp->vrf_nfv9_refresh_rate = spp_host_to_net_byte_order_16(0x100);
+ mp_sp->vrf_nfv9_timeout_rate = spp_host_to_net_byte_order_16(0x10);
+ mp_sp->vrf_nfv9_path_mtu = spp_host_to_net_byte_order_16(0x100);
+
+ mp_sp++;
+
+ mp_sp->i_vrf_id = spp_host_to_net_byte_order_32(0xe0000002);
+ mp_sp->o_vrf_id = spp_host_to_net_byte_order_32(0xe0000000);
+ mp_sp->i_vrf = spp_host_to_net_byte_order_16(0x2);
+ mp_sp->o_vrf = spp_host_to_net_byte_order_16(0x0);
+ mp_sp->start_addr = spp_host_to_net_byte_order_32(0x22220000);
+ mp_sp->end_addr = spp_host_to_net_byte_order_32(0x2222ffff);
+ mp_sp->vrf_policy_enable = spp_host_to_net_byte_order_16(0x1);
+ mp_sp->tcp_mss_value = spp_host_to_net_byte_order_16(0x222);
+ mp_sp->vrf_nfv9_logging_ipv4_address = spp_host_to_net_byte_order_32(0x22000002);
+ mp_sp->vrf_nfv9_logging_udp_port = spp_host_to_net_byte_order_16(0x2002);
+ mp_sp->vrf_nfv9_refresh_rate = spp_host_to_net_byte_order_16(0x200);
+ mp_sp->vrf_nfv9_timeout_rate = spp_host_to_net_byte_order_16(0x20);
+ mp_sp->vrf_nfv9_path_mtu = spp_host_to_net_byte_order_16(0x200);
+
+ mp_sp++;
+
+ mp_sp->i_vrf_id = spp_host_to_net_byte_order_32(0xe0000003);
+ mp_sp->o_vrf_id = spp_host_to_net_byte_order_32(0xe0000007);
+ mp_sp->i_vrf = spp_host_to_net_byte_order_16(0x3);
+ mp_sp->o_vrf = spp_host_to_net_byte_order_16(0x7);
+ mp_sp->start_addr = spp_host_to_net_byte_order_32(0x33333000);
+ mp_sp->end_addr = spp_host_to_net_byte_order_32(0x33333fff);
+ mp_sp->vrf_policy_enable = spp_host_to_net_byte_order_16(0x1);
+ mp_sp->tcp_mss_value = spp_host_to_net_byte_order_16(0x333);
+ mp_sp->vrf_nfv9_logging_ipv4_address = spp_host_to_net_byte_order_32(0x33000003);
+ mp_sp->vrf_nfv9_logging_udp_port = spp_host_to_net_byte_order_16(0x3003);
+ mp_sp->vrf_nfv9_refresh_rate = spp_host_to_net_byte_order_16(0x300);
+ mp_sp->vrf_nfv9_timeout_rate = spp_host_to_net_byte_order_16(0x30);
+ mp_sp->vrf_nfv9_path_mtu = spp_host_to_net_byte_order_16(0x300);
+
+ mp_send = mp;
+ }
+ break;
+
+ case 2:
+ {
+ spp_api_cnat_v4_bulk_policy_knob_t *mp;
+
+ u32 mp_size =
+ sizeof(spp_api_cnat_v4_bulk_policy_knob_t) +
+ (sizeof(spp_api_cnat_v4_single_vrf_map_req))*2;
+
+ mp = spp_msg_api_alloc(mp_size);
+ memset(mp, 0, mp_size);
+
+ mp->_spp_msg_id = SPP_API_CNAT_V4_BULK_POLICY_KNOB;
+
+ mp->port_limit = spp_host_to_net_byte_order_16(345);
+ mp->icmp_timeout = spp_host_to_net_byte_order_16(300);
+ mp->udp_init_timeout = spp_host_to_net_byte_order_16(175);
+ mp->udp_act_timeout = spp_host_to_net_byte_order_16(133);
+ mp->tcp_init_timeout = spp_host_to_net_byte_order_16(222);
+ mp->tcp_act_timeout = spp_host_to_net_byte_order_16(2345);
+
+ mp->nat_policy_enable = spp_host_to_net_byte_order_32(0x7);
+
+ mp->global_nfv9_logging_ipv4_address = spp_host_to_net_byte_order_32(0x77777777);
+ mp->global_nfv9_logging_udp_port = spp_host_to_net_byte_order_16(0x7007);
+ mp->global_nfv9_refresh_rate = spp_host_to_net_byte_order_16(0x700);
+ mp->global_nfv9_timeout_rate = spp_host_to_net_byte_order_16(0x70);
+ mp->global_nfv9_path_mtu = spp_host_to_net_byte_order_16(0x700);
+
+ mp_send = mp;
+ }
+ break;
+
+
+ default:
+ printf("\nargv[2] = %d", argv[2]);
+ printf("\n2. bulk cmd [0=static-port, 1=bulk_vrf, 2=policy_knob+bulk_vrf]");
+ return;
+
+ }
+ platform_send_msg(mp_send);
+}
+
+void cnat_nfv9_delete_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_v4_config_nfv9_logging_t *mp;
+
+ if (argc != 1) {
+ printf("nfv9 delete i_vrf ");
+ return;
+ }
+
+ mp = spp_msg_api_alloc (sizeof (*mp));
+ memset(mp, 0, sizeof (*mp));
+ mp->_spp_msg_id = SPP_API_CNAT_V4_CONFIG_NFV9_LOGGING;
+ mp->enable = 0;
+ mp->i_vrf = spp_host_to_net_byte_order_16(argv[0]);
+ platform_send_msg(mp);
+}
+
+void cnat_generic_cmd (int argc, unsigned long *argv)
+{
+ spp_api_cnat_generic_command_request_t *mp;
+
+ if (argc != 9) {
+ printf("generic command core type p1 p2 p3 p4 p5 p6 p7 ");
+ return;
+ }
+
+ /*
+ * Allocate a large buffer for message req and resp structure
+ */
+ mp = spp_msg_api_alloc (MAX_DEBUG_BUFFER_SIZE);
+ memset(mp, 0, MAX_DEBUG_BUFFER_SIZE);
+ mp->_spp_msg_id = SPP_API_CNAT_GENERIC_COMMAND_REQUEST;
+ mp->core_num = argv[0];
+ mp->params[0] = spp_host_to_net_byte_order_32(argv[1]);
+ mp->params[1] = spp_host_to_net_byte_order_32(argv[2]);
+ mp->params[2] = spp_host_to_net_byte_order_32(argv[3]);
+ mp->params[3] = spp_host_to_net_byte_order_32(argv[4]);
+ mp->params[4] = spp_host_to_net_byte_order_32(argv[5]);
+ mp->params[5] = spp_host_to_net_byte_order_32(argv[6]);
+ mp->params[6] = spp_host_to_net_byte_order_32(argv[7]);
+ mp->params[7] = spp_host_to_net_byte_order_32(argv[8]);
+ platform_send_msg(mp);
+}
+
+u32 icmp_sent_timestamps; /* 32 KB array per core */
+u8 v4_pkt_count = 0;
+
+cnat_icmp_msg_t icmp_msg_gen_allowed ()
+{
+#ifdef DISABLE_ICMP_THROTTLE_FOR_DEBUG_PURPOSE
+ return CNAT_ICMP_MSG;
+#else
+ u32 current_timestamp;
+ spp_node_main_vector_t *nmv;
+ u32 updated_timestamp;
+
+ v4_pkt_count ++;
+
+ nmv = spp_get_node_main_vectorized_inline();
+
+ current_timestamp = nmv->ticks / nmv->ticks_per_second;
+
+ PLATFORM_UPDATE_TIMESTAMP
+ if (PREDICT_FALSE(icmp_sent_timestamps != updated_timestamp)) {
+ v4_pkt_count = 1;
+ /* update timestamp */
+ icmp_sent_timestamps = updated_timestamp;
+ }
+ if (PREDICT_TRUE(v4_pkt_count <= cnat_main_db_icmp_rate_limit_core)) {
+ return CNAT_ICMP_MSG;
+ } else {
+ return CNAT_NO_ICMP_MSG;
+ }
+#endif
+}
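+
+/*
+ * The function above is a simple per-interval ICMP throttle: v4_pkt_count is
+ * reset whenever the platform-updated timestamp changes, and ICMP generation
+ * is allowed only while the count stays within
+ * cnat_main_db_icmp_rate_limit_core. v6_icmp_msg_gen_allowed() below applies
+ * the same scheme to ICMPv6.
+ */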
+
+u32 v6_icmp_sent_timestamps; /* 32 KB array per core */
+u8 v6_pkt_count = 0;
+
+cnat_icmp_msg_t v6_icmp_msg_gen_allowed ()
+{
+#ifdef DISABLE_ICMP_THROTTLE_FOR_DEBUG_PURPOSE
+ return CNAT_ICMP_MSG;
+#else
+ u32 current_timestamp;
+ spp_node_main_vector_t *nmv;
+ u32 updated_timestamp;
+
+ nmv = spp_get_node_main_vectorized_inline();
+
+ current_timestamp = nmv->ticks / nmv->ticks_per_second;
+ PLATFORM_UPDATE_TIMESTAMP
+ v6_pkt_count ++;
+
+ if (PREDICT_FALSE(v6_icmp_sent_timestamps != updated_timestamp)) {
+ v6_pkt_count = 1;
+ /* update timestamp */
+ v6_icmp_sent_timestamps = updated_timestamp;
+ }
+ if (PREDICT_TRUE(v6_pkt_count <= cnat_main_db_icmp_rate_limit_core)) {
+ return CNAT_ICMP_MSG;
+ } else {
+ return CNAT_NO_ICMP_MSG;
+ }
+#endif
+}
+
+u32 v4_udp_crc_zero_timestamps;
+u32 v4_udp_crc_zero_pkt_count = 0;
+int v4_crc_zero_udp_allowed ()
+{
+ PLATFORM_V4_CRC_ZERO_UDP_ALLOWED
+ /* Currently not supported for Brahmos. We need to take care of this. */
+ spp_node_main_vector_t *nmv;
+ u32 hash_value, current_timestamp;
+
+ nmv = spp_get_node_main_vectorized_inline();
+
+ current_timestamp = nmv->ticks / nmv->ticks_per_second;
+ v4_udp_crc_zero_pkt_count++;
+ if (PREDICT_FALSE(v4_udp_crc_zero_timestamps != current_timestamp)) {
+ v4_udp_crc_zero_pkt_count = 1;
+ v4_udp_crc_zero_timestamps = current_timestamp;
+ }
+ if (PREDICT_TRUE(v4_udp_crc_zero_pkt_count <=
+ crc_zero_udp_rate_limit_core)) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * ipv4_decr_ttl_n_calc_csum()
+ * - It decrements the TTL and calculates the incremental IPv4 checksum
+ */
+
+ALWAYS_INLINE(
+void ipv4_decr_ttl_n_calc_csum(ipv4_header *ipv4))
+{
+ u32 checksum;
+ u16 old;
+
+ old = ntohs(*(u16 *)&ipv4->ttl);
+
+ /* Decrement TTL */
+ ipv4->ttl--;
+
+ /* Calculate incremental checksum */
+ checksum = old + (~ntohs(*(u16 *)&ipv4->ttl) & 0xFFFF);
+ checksum += ntohs(ipv4->checksum);
+ checksum = (checksum & 0xFFFF) + (checksum >> 16);
+ ipv4->checksum = htons(checksum + (checksum >> 16));
+}
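+
+/*
+ * A minimal standalone sketch (not part of the original code) of the same
+ * incremental-update rule used above, written in the RFC 1624 form
+ * HC' = ~(~HC + ~m + m'). old_word/new_word are the 16-bit header words
+ * (here the TTL/protocol word) before and after the change, all values in
+ * host byte order; the function and parameter names are assumptions.
+ */
+#if 0
+static inline u16 incr_csum_update16 (u16 old_checksum, u16 old_word, u16 new_word)
+{
+    u32 sum = (u16)(~old_checksum);
+    sum += (u16)(~old_word);
+    sum += new_word;
+    /* fold the end-around carries back into 16 bits */
+    sum = (sum & 0xFFFF) + (sum >> 16);
+    sum = (sum & 0xFFFF) + (sum >> 16);
+    return (u16)(~sum);
+}
+#endif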
+
+ALWAYS_INLINE(
+void calc_ipv4_checksum (ipv4_header *ipv4))
+{
+ u16 *data = (u16 *) ipv4;
+ u32 checksum = 0;
+
+ checksum = crc_calc(ipv4);
+
+ /* Now produce the 1's complement */
+ ipv4->checksum = spp_host_to_net_byte_order_16(((u16) (~(u16)checksum)));
+}
+
+ALWAYS_INLINE(
+void calc_v4_icmp_checksum (icmp_v4_t *icmp, int ipv4_payload_size))
+{
+ u16 *data = (u16 *) icmp;
+ int num_hwords = (ipv4_payload_size)/2;
+ u32 checksum = 0;
+
+ icmp->checksum = 0;
+ if (PREDICT_FALSE((ipv4_payload_size%2) != 0)) {
+ num_hwords += 1;
+ /* Append 0's in the last octet */
+ *((u8 *)data + ipv4_payload_size) = 0;
+ }
+ while (num_hwords) {
+ checksum += (u32)spp_net_to_host_byte_order_16(data++);
+ num_hwords--;
+ }
+
+ /* Add in the carry of the original sum */
+ checksum = (checksum & 0xFFFF) + (checksum >> 16);
+ /* Add in the carry of the final sum */
+ checksum = (checksum & 0xFFFF) + (checksum >> 16);
+ /* Now produce the 1's complement */
+ icmp->checksum = spp_host_to_net_byte_order_16(((u16) (~(u16)checksum)));
+}
+
+ALWAYS_INLINE(
+void calc_v6_icmp_checksum (ipv6_header_t *ipv6, u16 ip_payload_size))
+{
+ u16 *data;
+ u16 *data1;
+ int i;
+ icmp_v6_t *icmp;
+ int num_hwords = (ip_payload_size)/2;
+ u32 checksum = 0;
+ pseudo_v6_header_t pseudo_header;
+
+ icmp = (icmp_v6_t *) ((u8 *)ipv6 + IPV6_HDR_LEN);
+ data = (u16 *) icmp;
+ icmp->checksum = 0;
+
+#if 1
+ if (PREDICT_FALSE((ip_payload_size%2) != 0)) {
+ num_hwords += 1;
+ /* Append 0's in the last octet */
+ *((u8 *)data + ip_payload_size) = 0;
+ }
+#endif
+
+ /* construct the pseudo header */
+
+ pseudo_header.src_addr[0] = ipv6->src_addr[0];
+ pseudo_header.src_addr[1] = ipv6->src_addr[1];
+ pseudo_header.src_addr[2] = ipv6->src_addr[2];
+ pseudo_header.src_addr[3] = ipv6->src_addr[3];
+ pseudo_header.dst_addr[0] = ipv6->dst_addr[0];
+ pseudo_header.dst_addr[1] = ipv6->dst_addr[1];
+ pseudo_header.dst_addr[2] = ipv6->dst_addr[2];
+ pseudo_header.dst_addr[3] = ipv6->dst_addr[3];
+ pseudo_header.payload_length = spp_host_to_net_byte_order_16(ip_payload_size);
+ pseudo_header.next_header = spp_host_to_net_byte_order_16(ipv6->next_header);
+
+ data1 = (u16 *) &pseudo_header;
+
+ /* sizeof(pseudo_v6_header_t) = 36 */
+ for (i = 0; i < 18; i++) {
+ checksum += (u32)spp_net_to_host_byte_order_16(data1++);
+ }
+
+checksum_calc:
+
+ if (PREDICT_TRUE(num_hwords)) {
+ checksum += (u32)spp_net_to_host_byte_order_16(data);
+ num_hwords--;
+ data++;
+ goto checksum_calc;
+ }
+
+ /* Add in the carry of the original sum */
+ checksum = (checksum & 0xFFFF) + (checksum >> 16);
+ /* Add in the carry of the final sum */
+ checksum = (checksum & 0xFFFF) + (checksum >> 16);
+ /* Now produce the 1's complement */
+ icmp->checksum = spp_host_to_net_byte_order_16(((u16) (~(u16)checksum)));
+}
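+
+/*
+ * Note on the checksum above: pseudo_v6_header_t is a compact 36-byte form of
+ * the RFC 2460 pseudo-header (128-bit src, 128-bit dst, 16-bit payload length,
+ * 16-bit next header). The fields omitted from the full 40-byte pseudo-header
+ * are the upper 16 zero bits of the 32-bit length and the 3 zero pad bytes,
+ * none of which change the one's-complement sum, so the resulting ICMPv6
+ * checksum is the same.
+ */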
+
+void icmp_error_generate_v6 (spp_ctx_t *ctx, u8 icmp_type,
+ u8 icmp_code, u16 uidb_index) {
+
+ u16 ip_hdr_len, ip_payload_size;
+ u32 *src_p, * dst_p;
+ icmp_v6_t *icmp;
+ int i;
+ ipv6_header_t *ip_old, *ip_new;
+ u16 icmp_payload_len;
+
+ /*
+ * As per RFC 2463, we limit the maximum size of a generated ICMPv6 message
+ * to 1280. Hence, if the packet is bigger than 1280, then it needs to be
+ * truncated. Also, if the packet had multiple chained buffers, we need to
+ * free all chained buffers, except the first one.
+ */
+ free_all_but_first_chained_buffers(ctx);
+
+ ip_hdr_len = IPV6_HDR_LEN;
+ /* offset to ip payload */
+
+ ip_old = (ipv6_header_t *)PLATFORM_CTX_CURRENT_HDR;
+ ip_new = (ipv6_header_t *) ((u8 *) PLATFORM_CTX_CURRENT_HDR - ICMPV6_ERR_SIZE);
+ icmp = (icmp_v6_t*) ( (u8*)ip_new + ip_hdr_len);
+
+ icmp_payload_len = ip_hdr_len +
+ spp_net_to_host_byte_order_16(&(ip_old->payload_length)) ;
+
+ ip_payload_size = ICMPV6_HDR_SIZE + icmp_payload_len;
+ /*
+ * There is no easy way to predict this case as the probability that the IPv6
+ * pkt is big depends on the type of traffic. Let us optimize the big
+ * pkt case as it involves more processing.
+ *
+ * If the pkt size exceeds IPV6_MIN_PATH_MTU truncate it to IPV6_MIN_PATH_MTU
+ */
+ if (PREDICT_TRUE((ip_payload_size + ip_hdr_len) > IPV6_MIN_PATH_MTU)) {
+ ip_payload_size = IPV6_MIN_PATH_MTU - ip_hdr_len;
+ }
+
+ /* Following ICMP op has to be after ip header being copied */
+ icmp->type = icmp_type;
+ icmp->code = icmp_code;
+
+ ip_new->version_trafficclass_flowlabel = spp_host_to_net_byte_order_32(
+ VERSION_TRAFFICCLASS_FLOWLABEL);
+ ip_new->payload_length = spp_host_to_net_byte_order_16(ip_payload_size);
+ ip_new->next_header = IPV6_PROTO_ICMPV6;
+ ip_new->hop_limit = 64;
+ ip_new->dst_addr[0] = ip_old->src_addr[0];
+ ip_new->dst_addr[1] = ip_old->src_addr[1];
+ ip_new->dst_addr[2] = ip_old->src_addr[2];
+ ip_new->dst_addr[3] = ip_old->src_addr[3];
+
+ ip_new->src_addr[0] =
+ spp_host_to_net_byte_order_32(svi_params_array[uidb_index].ipv6_addr[0]);
+ ip_new->src_addr[1] =
+ spp_host_to_net_byte_order_32(svi_params_array[uidb_index].ipv6_addr[1]);
+ ip_new->src_addr[2] =
+ spp_host_to_net_byte_order_32(svi_params_array[uidb_index].ipv6_addr[2]);
+ ip_new->src_addr[3] =
+ spp_host_to_net_byte_order_32(svi_params_array[uidb_index].ipv6_addr[3]);
+ /* calc checksum for icmp */
+
+ calc_v6_icmp_checksum(ip_new, ip_payload_size);
+#if 0
+ printf("Flow = 0x%x\n", ip_new->version_trafficclass_flowlabel);
+ printf("Hoplimit = 0x%x\n", ip_new->hop_limit);
+ printf("Length= 0x%x\n", ip_new->payload_length);
+ printf("Next header = 0x%x\n", ip_new->next_header);
+ printf("Src add0 = 0x%x\n", ip_new->src_addr[0]);
+ printf("Src add1 = 0x%x\n", ip_new->src_addr[1]);
+ printf("Src add2 = 0x%x\n", ip_new->src_addr[2]);
+ printf("Src add3 = 0x%x\n", ip_new->src_addr[3]);
+ printf("Dst add0 = 0x%x\n", ip_new->dst_addr[0]);
+ printf("Dst add1 = 0x%x\n", ip_new->dst_addr[1]);
+ printf("Dst add2 = 0x%x\n", ip_new->dst_addr[2]);
+ printf("Dst add3 = 0x%x\n", ip_new->dst_addr[3]);
+ printf("Icmp type = 0x%x\n", icmp->type);
+ printf("Icmp code = 0x%x\n", icmp->code);
+
+ printf("\n\nICMP packet:\n");
+ for (i = 0; i < 10; i ++) {
+ printf("0x%x " , *((u8 *)icmp + i));
+ if ((i%16) == 15) {
+ printf("\n");
+ }
+ }
+#endif
+
+ ctx->current_header -= ICMPV6_ERR_SIZE;
+ ctx->current_length = ip_payload_size + ip_hdr_len;
+ PLATFORM_CNAT_SET_TX_VRF(ctx,uidb_index);
+}
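+
+/*
+ * IPV6_MIN_PATH_MTU used above is presumably the IPv6 minimum link MTU of
+ * 1280 bytes; truncating the generated packet to it keeps the ICMPv6 error
+ * within the RFC 2463 size limit mentioned at the top of this function.
+ */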
+
+void icmp_error_generate_v2 (ipv4_header *ip, u8 icmp_type,
+ u8 icmp_code, u16 mtu, u32 src_ip)
+{
+
+ u16 ip_hdr_len, ip_payload_size;
+ u32 *src_p, * dst_p;
+ icmp_v4_t *icmp;
+
+ ip_hdr_len = (ip->version_hdr_len_words & 0xf) << 2; /* offset to ip payload */
+ icmp = (icmp_v4_t*) ( (u8*)ip + ip_hdr_len);
+ ip_payload_size = sizeof(icmp_v4_t) + ip_hdr_len +
+ ICMP_UNREACHABLE_IP_PAYLOAD_SIZE;
+
+ src_p = (u32*)
+ ((u8*)ip + ip_hdr_len + ICMP_UNREACHABLE_IP_PAYLOAD_SIZE - 4);
+ dst_p = (u32*) ((u8*)src_p + sizeof(ipv4_header) +
+ sizeof(icmp_v4_t));
+
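+ /*
+ * Shift the original IPv4 header plus the first
+ * ICMP_UNREACHABLE_IP_PAYLOAD_SIZE payload bytes forward by the size of the
+ * new outer IPv4 + ICMP headers, so that they become the ICMP error payload;
+ * the new headers are then built in place at the front of the buffer.
+ */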
+ while(src_p >= (u32*)ip) *dst_p-- = *src_p--;
+
+ /* Following ICMP op has to be after ip header being copied */
+ icmp->type = icmp_type;
+ icmp->code = icmp_code;
+ icmp->identifier = 0;
+ icmp->sequence = 0;
+ if(PREDICT_FALSE(mtu != 0)) {
+ icmp->sequence = spp_host_to_net_byte_order_16(mtu);
+ }
+
+
+ /* build icmp header, keep original tos, identification values */
+ ip->version_hdr_len_words = 0x45;
+ ip->total_len_bytes = sizeof(ipv4_header) + ip_payload_size;
+ ip->total_len_bytes = spp_host_to_net_byte_order_16(ip->total_len_bytes);
+ ip->frag_flags_offset = 0;
+ ip->ttl = 64;
+ ip->protocol = ICMP_PROT;
+ ip->checksum = 0;
+ ip->dest_addr = ip->src_addr;
+ ip->src_addr = spp_host_to_net_byte_order_32(src_ip);
+
+ /* calc checksum for ip and icmp */
+
+ calc_ipv4_checksum(ip);
+ calc_v4_icmp_checksum( (icmp_v4_t *) ((u8*) ip + sizeof(ipv4_header)),
+ ip_payload_size);
+}
+
+void icmp_error_generate (ipv4_header *ip, u8 icmp_type,
+ u8 icmp_code, u16 uidb_index) {
+
+ u16 ip_hdr_len, ip_payload_size;
+ u32 *src_p, * dst_p;
+ icmp_v4_t *icmp;
+
+ ip_hdr_len = (ip->version_hdr_len_words & 0xf) << 2; /* offset to ip payload */
+ icmp = (icmp_v4_t*) ( (u8*)ip + ip_hdr_len);
+ ip_payload_size = sizeof(icmp_v4_t) + ip_hdr_len +
+ ICMP_UNREACHABLE_IP_PAYLOAD_SIZE;
+
+ src_p = (u32*)
+ ((u8*)ip + ip_hdr_len + ICMP_UNREACHABLE_IP_PAYLOAD_SIZE - 4);
+ dst_p = (u32*) ((u8*)src_p + sizeof(ipv4_header) +
+ sizeof(icmp_v4_t));
+
+ while(src_p >= (u32*)ip) *dst_p-- = *src_p--;
+
+ /* Following ICMP op has to be after ip header being copied */
+ icmp->type = icmp_type;
+ icmp->code = icmp_code;
+ icmp->identifier = 0;
+ icmp->sequence = 0;
+
+
+ /* build icmp header, keep original tos, identification values */
+ ip->version_hdr_len_words = 0x45;
+ ip->total_len_bytes = sizeof(ipv4_header) + ip_payload_size;
+ ip->total_len_bytes = spp_host_to_net_byte_order_16(ip->total_len_bytes);
+ ip->frag_flags_offset = 0;
+ ip->ttl = 64;
+ ip->protocol = ICMP_PROT;
+ ip->checksum = 0;
+ ip->dest_addr = ip->src_addr;
+
+ ip->src_addr = spp_host_to_net_byte_order_32(svi_params_array[uidb_index].ipv4_addr);
+
+ /* calc checksum for ip and icmp */
+
+ calc_ipv4_checksum(ip);
+ calc_v4_icmp_checksum( (icmp_v4_t *) ((u8*) ip + sizeof(ipv4_header)),
+ ip_payload_size);
+#if 0
+ printf("version_hdr_len_words = 0x%x\n", ip->version_hdr_len_words);
+ printf("total_len_bytes = 0x%x\n", ip->total_len_bytes);
+ printf("Frag = 0x%x\n", ip->frag_flags_offset);
+ printf("ttl = 0x%x\n", ip->ttl);
+ printf("Protocol = 0x%x\n", ip->protocol);
+ printf("checksum = 0x%x\n", ip->checksum);
+ printf("Dest addr = 0x%x\n", ip->dest_addr);
+ printf("Src addr = 0x%x\n", ip->src_addr);
+ printf("Icmp type = 0x%x\n", icmp->type);
+ printf("Icmp code = 0x%x\n", icmp->code);
+#endif
+
+}
+
+int icmpv4_generate_with_throttling_v2 (spp_ctx_t *ctx, ipv4_header *ipv4,
+ int icmp_type, int icmp_code,
+ u16 mtu, u32 src_ip)
+{
+ u16 ip_hdr_len;
+ icmp_v4_t *icmp;
+ u16 rx_uidb_index = ctx->ru.rx.uidb_index;
+ if (icmp_msg_gen_allowed()) {
+ free_all_but_first_chained_buffers(ctx);
+ icmp_error_generate_v2(ipv4, icmp_type, icmp_code, mtu, src_ip);
+ ctx->current_length = (u16)
+ ((u8*)ctx->current_header - ctx->packet_data) +
+ spp_net_to_host_byte_order_16(&ipv4->total_len_bytes);
+ PLATFORM_CNAT_SET_TX_VRF(ctx,rx_uidb_index);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int icmpv4_generate_with_throttling (spp_ctx_t *ctx, ipv4_header *ipv4,
+ u16 rx_uidb_index)
+{
+ int icmp_type;
+ int icmp_code;
+
+ if (icmp_msg_gen_allowed()) {
+ /* ICMP error would be small, so one buffer is enough. Clear the other */
+ free_all_but_first_chained_buffers(ctx);
+
+ icmp_type = ICMPV4_TIMEEXCEEDED;
+ icmp_code = ICMPV4_TIMTTL;
+ icmp_error_generate(ipv4, icmp_type, icmp_code, rx_uidb_index);
+ ctx->current_length = (u16)
+ ((u8*)ctx->current_header - ctx->packet_data) +
+ spp_net_to_host_byte_order_16(&ipv4->total_len_bytes);
+ PLATFORM_CNAT_SET_TX_VRF(ctx,rx_uidb_index);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int icmpv4_generate_with_throttling_v1 (spp_ctx_t *ctx, ipv4_header *ipv4,
+ u16 rx_uidb_index, u32 type, u32 code)
+{
+ if (icmp_msg_gen_allowed()) {
+ /* ICMP error would be small, so one buffer is enough. Clear the other */
+ free_all_but_first_chained_buffers(ctx);
+
+ icmp_error_generate(ipv4, type, code, rx_uidb_index);
+ ctx->current_length = (u16)
+ ((u8*)ctx->current_header - ctx->packet_data) +
+ spp_net_to_host_byte_order_16(&ipv4->total_len_bytes);
+ PLATFORM_CNAT_SET_TX_VRF(ctx,rx_uidb_index);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+
+int icmpv6_generate_with_throttling (spp_ctx_t *ctx, ipv6_header_t *ipv6,
+ u16 rx_uidb_index)
+{
+ int icmp_type;
+ int icmp_code;
+
+ if (v6_icmp_msg_gen_allowed()) {
+ icmp_type = ICMPV6_TIMEEXCEEDED;
+ icmp_code = ICMPV6_TIMTTL;
+ icmp_error_generate_v6(ctx, icmp_type, icmp_code, rx_uidb_index);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int icmpv6_generate_with_throttling_v1 (spp_ctx_t *ctx, ipv6_header_t *ipv6,
+ u16 rx_uidb_index, u32 type, u32 code)
+{
+
+ if (v6_icmp_msg_gen_allowed()) {
+ icmp_error_generate_v6(ctx, type, code, rx_uidb_index);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+#endif
+
+void calculate_window_scale(tcp_hdr_type *tcp_header, u8 *scale) {
+
+ u8 check_options = 0;
+
+ *scale = 0;
+ check_options = ((tcp_header->flags & TCP_FLAG_SYN) &&
+ (((tcp_header->hdr_len>>4) << 2) > sizeof(tcp_hdr_type)));
+
+ if (PREDICT_FALSE(check_options)) {
+ u8 *options_ptr = tcp_findoption(tcp_header, TCP_OPTION_WINDOW_SCALE);
+
+ /*
+ * TCP option field: | kind 1B | len 1B | value (len - 2)B |
+ * where kind != [0, 1]; the window scale option carries a 1-byte shift count
+ */
+ if (PREDICT_TRUE(options_ptr &&
+ (options_ptr[1] == TCP_OPTION_WINDOW_SCALE))) {
+ u8 *ptr = (u8*)(options_ptr + 2);
+ *scale = *ptr;
+
+ if(PREDICT_FALSE(*scale >= 14)) {
+ *scale = 14;
+ }
+
+ return;
+ }
+ }
+}
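+
+/*
+ * Illustrative bytes (not part of the original code): per RFC 1323/7323 the
+ * window scale option parsed above is 3 bytes on the wire -- kind = 3,
+ * length = 3, then a 1-byte shift count which this code caps at 14. A SYN
+ * advertising "scale by 128" would carry the option bytes shown below.
+ */
+#if 0
+static const u8 example_window_scale_option[3] = { 0x03, 0x03, 0x07 }; /* shift 7 => x128 */
+#endif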
+
+#if 0
+ALWAYS_INLINE(
+void cnat_log_nat44_tcp_seq_mismatch(
+ cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap))
+{
+ /* As of now, Netflow does not require this to be logged
+ * So only syslog
+ */
+ if(PREDICT_TRUE(db->flags & CNAT_TAC_SEQ_MISMATCH)) {
+ /* Already logged ..*/
+ return;
+ }
+ /* else, set the flag and call the log API */
+
+ db->flags = db->flags | CNAT_TAC_SEQ_MISMATCH;
+
+ cnat_syslog_nat44_tcp_seq_mismatch(db, vrfmap);
+}
+
+
+static int cnat_util_init (void *notused)
+{
+ /* run SPP_API_CNAT_PORTMAP_CREATE first*/
+ spp_msg_api_set_handler(SPP_API_CNAT_PORT_ALLOCATE,
+ spp_api_cnat_port_allocate_t_handler);
+
+
+ spp_msg_api_set_handler(SPP_API_CNAT_PORT_CLEAR,
+ spp_api_cnat_port_clear_t_handler);
+
+ /* run vrfmap config first */
+ spp_msg_api_set_handler(SPP_API_CNAT_PORT_CREATE,
+ spp_api_cnat_port_create_t_handler);
+
+ spp_msg_api_set_handler(SPP_API_CNAT_PORT_DELETE,
+ spp_api_cnat_port_delete_t_handler);
+ return 0;
+}
+
+void
+print_ipv6_pkt (ipv6_header_t *ip)
+{
+ u32 i, total_len, l4_len=0;
+
+ u8 *pkt = (u8 *) ip;
+
+ total_len = spp_net_to_host_byte_order_16(&ip->payload_length);
+
+ /* we rarely need to debug > 200 bytes of packet */
+ if(total_len > 200) {
+ total_len = 200;
+ }
+
+ printf("\n======== PRINTING PKT START======\n");
+ printf("======== IPv6 PAYLOAD LEN %d ===========\n", total_len);
+ for (i=0; i < 40; i++) {
+ printf(" %02X ", *(pkt + i));
+ if(i%16==15)
+ printf("\n");
+ }
+
+ if (ip->next_header == IPV6_PROTO_TCP) {
+ printf("\n======== TCP HEADER =================\n");
+ l4_len = 20;
+ }
+ else if (ip->next_header == IPV6_PROTO_UDP) {
+ printf("\n======== UDP HEADER =================\n");
+ l4_len = 8;
+ }
+ else if (ip->next_header == IPV6_PROTO_ICMPV6) {
+ printf("\n======== ICMP HEADER =================\n");
+ l4_len = 8;
+ }
+
+ for (i=40; i < (l4_len + 40); i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+
+ printf("\n======== LAYER4 PAYLOAD ===================\n");
+ for (i=(l4_len + 40); i < total_len; i++) {
+ printf(" %02X ", *(pkt + i));
+ if(i%16==15)
+ printf("\n");
+ }
+
+ printf("\n======== PRINTING PKT END =======\n");
+}
+
+
+
+PLATFORM_SPP_INIT_FUNCTION(cnat_util_init);
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/cnat_v4_ftp_alg.h b/plugins/vcgn-plugin/vcgn/cnat_v4_ftp_alg.h
new file mode 100644
index 00000000000..df3dfcb0797
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_v4_ftp_alg.h
@@ -0,0 +1,133 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_v4_ftp_alg.h
+ *
+ * Copyright (c) 2012-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_V4_FTP_ALG_H__
+#define __CNAT_V4_FTP_ALG_H__
+
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "tcp_header_definitions.h"
+#include "dslite_defs.h"
+#include "dslite_db.h"
+
+/* shorter form of byte order functions */
+
+#define net2host16(x) clib_net_to_host_u16( x)
+#define net2host32(x) clib_net_to_host_u32( x)
+#define net2host64(x) clib_net_to_host_u64( x)
+#define host2net16(x) clib_host_to_net_u16(x)
+#define host2net32(x) clib_host_to_net_u32(x)
+#define host2net64(x) clib_host_to_net_u64(x)
+
+//#define BIGENDIAN
+
+typedef struct iphdrtype_ {
+ u8 v_ihl; /* version and IP header length */
+ u8 tos; /* type of service */
+ u16 tl; /* total length */
+ u16 id; /* identifier */
+ u16 ipreserved: 1;
+ u16 dontfragment: 1;
+ u16 morefragments: 1;
+ u16 fo: 13; /* fragment offset */
+ u8 ttl; /* time to live */
+ u8 prot; /* protocol type */
+ u16 checksum; /* checksum */
+ u32 srcadr; /* IP source address */
+ u32 dstadr; /* IP destination address */
+} iphdrtype;
+
+
+typedef struct tcptype_ {
+ u16 sourceport;
+ u16 destinationport;
+ u32 sequencenumber;
+ u32 acknowledgementnumber;
+ u8 dataoffset;
+ u8 flags;
+#if 0
+/* bypass the ENDIAN part */
+#ifdef BIGENDIAN
+ u8 reserved: 2;
+ u8 urg: 1;
+ u8 ack: 1;
+ u8 psh: 1;
+ u8 rst: 1;
+ u8 syn: 1;
+ u8 fin: 1;
+#else
+ u8 fin: 1;
+ u8 syn: 1;
+ u8 rst: 1;
+ u8 psh: 1;
+ u8 ack: 1;
+ u8 urg: 1;
+ u8 reserved2: 2;
+#endif
+#endif
+
+ u16 window;
+ u16 checksum;
+ u16 urgentpointer;
+ u8 data[0];
+} tcptype ;
+
+
+int watch_ftp_port_cmd (iphdrtype *ip,
+ tcptype *tcp,
+ u32 * ip_addr,
+ u16 * port);
+
+
+u8 * ftp_test_pkt_gen (u32 ip_addr, u16 port);
+
+int update_ftp_port(u8 * pkt, u32 new_ip, u16 new_port, i8 * delta,
+ cnat_main_db_entry_t *db_tcp_control,
+ dslite_table_entry_t *dslite_entry_ptr,
+ ipv6_header_t *ipv6_hdr);
+/*
+ * The caller must first verify that this is an FTP packet.
+ * This function returns 1 if the packet was updated for a
+ * PORT command, otherwise it returns 0.
+ * It assumes the IP header does NOT have option fields.
+ */
+
+int cnat_ftp_alg ( u8* pkt, i8 * delta, cnat_main_db_entry_t *db,
+ dslite_table_entry_t *dslite_entry_ptr,
+ ipv6_header_t *ipv6_hdr);
+
+#define FTP_ALG_DEBUG_PRINTF_ENABLED 1
+
+#ifdef FTP_ALG_DEBUG_PRINTF_ENABLED
+
+#define FTP_ALG_DEBUG_PRINTF(...) { \
+ if (global_debug_flag & CNAT_DEBUG_FTP_ALG) { \
+ printf(__VA_ARGS__); \
+ } }
+
+#else
+
+#define FTP_ALG_DEBUG_PRINTF(...)
+
+#endif
+
+#endif /* __CNAT_V4_FTP_ALG_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_v4_functions.c b/plugins/vcgn-plugin/vcgn/cnat_v4_functions.c
new file mode 100644
index 00000000000..d3051fba5a7
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_v4_functions.c
@@ -0,0 +1,364 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_v4_functions.c
+ *
+ * Copyright (c) 2008-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+
+
+#include "tcp_header_definitions.h"
+#include "cnat_db.h"
+#include "cnat_config.h"
+#include "cnat_v4_functions.h"
+#include "dslite_defs.h"
+#include "dslite_db.h"
+
+static u32 tcp_logging_count;
+static u32 tcp_logging_overflow;
+
+static tcp_logging_struct_t tcp_logging_array[MAX_TCP_LOGGING_COUNT];
+
+/*
+ * Function to log TCP pkts checksum changes..
+ */
+void
+tcp_debug_logging (
+ u32 seq_num,
+ u32 ack_num,
+ u32 old_ip,
+ u32 new_ip,
+ u16 old_port,
+ u16 new_port,
+ u16 old_ip_crc,
+ u16 new_ip_crc,
+ u16 old_tcp_crc,
+ u16 new_tcp_crc)
+{
+ tcp_logging_array[tcp_logging_count].seq_num = seq_num;
+ tcp_logging_array[tcp_logging_count].ack_num = ack_num;
+ tcp_logging_array[tcp_logging_count].old_ip = old_ip;
+ tcp_logging_array[tcp_logging_count].new_ip = new_ip;
+ tcp_logging_array[tcp_logging_count].old_port = old_port;
+ tcp_logging_array[tcp_logging_count].new_port = new_port;
+ tcp_logging_array[tcp_logging_count].old_ip_crc = old_ip_crc;
+ tcp_logging_array[tcp_logging_count].new_ip_crc = new_ip_crc;
+ tcp_logging_array[tcp_logging_count].old_tcp_crc = old_tcp_crc;
+ tcp_logging_array[tcp_logging_count].new_tcp_crc = new_tcp_crc;
+
+ tcp_logging_count++;
+
+ if (tcp_logging_count >= MAX_TCP_LOGGING_COUNT) {
+ tcp_logging_overflow = 1;
+ tcp_logging_count = 0;
+ }
+}
+
+/*
+ * Function to dump the logged TCP pkts.
+ */
+void
+tcp_debug_logging_dump (void)
+{
+ u32 i, total_count, start_entry;
+
+ if (tcp_logging_overflow) {
+ total_count = MAX_TCP_LOGGING_COUNT;
+ start_entry = tcp_logging_count;
+ printf("Logging Entries Wrapped Around, displaying %d entries\n",
+ total_count);
+ } else {
+ total_count = tcp_logging_count;
+ start_entry = 0;
+ printf("Displaying %d entries\n", total_count);
+ }
+
+ printf("SEQ ACK IP_O IP_N PORT_O PORT_N L3_CRC_O L3_CRC_N L4_CRC_O L4_CRC_N\n");
+
+ for (i = 0; i < total_count; i++) {
+ u32 entry = (i + start_entry) % MAX_TCP_LOGGING_COUNT;
+
+ printf("%04d: 0x%08x 0x%08x 0x%08x 0x%08x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ entry,
+ tcp_logging_array[entry].seq_num,
+ tcp_logging_array[entry].ack_num,
+ tcp_logging_array[entry].old_ip,
+ tcp_logging_array[entry].new_ip,
+ tcp_logging_array[entry].old_port,
+ tcp_logging_array[entry].new_port,
+ tcp_logging_array[entry].old_ip_crc,
+ tcp_logging_array[entry].new_ip_crc,
+ tcp_logging_array[entry].old_tcp_crc,
+ tcp_logging_array[entry].new_tcp_crc);
+ }
+}
+
+/*
+ * Function to enable TCP logging
+ */
+void
+tcp_debug_logging_enable_disable (u32 enable_flag)
+{
+ switch (enable_flag) {
+
+ case TCP_LOGGING_DISABLE:
+ if (tcp_logging_enable_flag == TCP_LOGGING_DISABLE) {
+ printf("\nTCP Logging ALREADY DISABLED\n");
+ } else {
+ printf("\nTCP Logging DISABLED\n");
+ }
+ tcp_logging_enable_flag = 0;
+ break;
+
+ case TCP_LOGGING_ENABLE:
+ if (tcp_logging_enable_flag == TCP_LOGGING_ENABLE) {
+ printf("\nTCP Logging ALREADY ENABLED\n");
+ } else {
+ tcp_logging_enable_flag = 1;
+ tcp_logging_count = 0;
+ tcp_logging_overflow = 0;
+
+ printf("\nTCP Logging ENABLED\n");
+ }
+ break;
+
+ case TCP_LOGGING_PACKET_DUMP:
+ tcp_debug_logging_dump();
+ break;
+
+ case TCP_LOGGING_SUMMARY_DUMP:
+ default:
+ printf("\ntcp_logging_enable_flag %d, tcp_log_count %d\n",
+ tcp_logging_enable_flag, tcp_logging_count);
+ printf("To Enable TCP LOGGING provide a flag value of %d\n",
+ TCP_LOGGING_ENABLE);
+ break;
+ }
+}
+
+void hex_dump (u8 * p, int len) {
+ int i;
+ for (i=0;i<len;i++) {
+ if(i && (i & 0x3 ) == 0) printf(" ");
+ if(i && (i & 0xf ) == 0) printf("\n");
+ PLATFORM_DEBUG_PRINT("%02X ", p[i]);
+ }
+ PLATFORM_DEBUG_PRINT("\n");
+}
+
+void
+print_icmp_pkt (ipv4_header *ip)
+{
+ u32 i, total_len;
+
+ u8 *pkt = (u8 *) ip;
+
+ total_len = clib_net_to_host_u16(ip->total_len_bytes);
+
+ printf("\n======== PRINTING PKT START======\n");
+ printf("======== IP PACKET LEN %d ===========\n", total_len);
+ for (i=0; i < 20; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+
+ printf("\n======== ICMP HEADER =================\n");
+ for (i=20; i < 28; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+
+ printf("\n======== ICMP BODY ===================\n");
+ for (i=28; i < total_len; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+
+ printf("\n======== PRINTING PKT END =======\n");
+}
+
+void
+print_udp_pkt (ipv4_header *ip)
+{
+ u32 i, total_len, udp_len;
+
+ u8 *pkt = (u8 *) ip;
+
+ total_len = clib_net_to_host_u16(ip->total_len_bytes);
+ udp_len = total_len - 20;
+
+ printf("\n======== PRINTING PKT START======\n");
+ printf("======== IP PACKET LEN %d ===========\n", total_len);
+ for (i=0; i < 20; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+ printf("\n======== UDP PSEUDO HEADER ==========\n");
+ for (i=12; i < 20; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+ printf(" 00 11 %02X %02X ", udp_len >> 8, udp_len & 0xff);
+
+ printf("\n======== UDP HEADER =================\n");
+ for (i=20; i < 28; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+ printf("\n======== UDP BODY ===================\n");
+ for (i=28; i < total_len; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+
+ printf("\n======== PRINTING PKT END =======\n");
+}
+
+void
+print_tcp_pkt (ipv4_header *ip)
+{
+ u32 i, total_len, tcp_len;
+
+ u8 *pkt = (u8 *) ip;
+
+ total_len = clib_net_to_host_u16(ip->total_len_bytes);
+ tcp_len = total_len - 20;
+
+ printf("\n======== PRINTING PKT START======\n");
+ printf("======== IP PACKET LEN %d ===========\n", total_len);
+ for (i=0; i < 20; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+ printf("\n======== TCP PSEUDO HEADER ==========\n");
+ for (i=12; i < 20; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+ printf(" 00 06 %02X %02X ", tcp_len >> 8, tcp_len & 0xff);
+
+ printf("\n======== TCP HEADER =================\n");
+ for (i=20; i < 40; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+ printf("\n======== TCP BODY ===================\n");
+ for (i=40; i < total_len; i++) {
+ printf(" %02X ", *(pkt + i));
+ }
+
+ printf("\n======== PRINTING PKT END =======\n");
+}
+
+/* IN: ipv4 and tcp header pointer,
+ * new ipv4 addr and port value
+ * main db index for accessing per vrf mss value
+ * DO:
+ * NAT
+ * mss adjust if needed
+ * ip & tcp checksum update (incremental)
+ */
+
+inline void tcp_in2out_nat_mss_n_checksum (ipv4_header * ip,
+ tcp_hdr_type * tcp,
+ u32 ipv4_addr,
+ u16 port,
+ cnat_main_db_entry_t * db)
+{
+ u8 *mss_ptr;
+ u8 check_mss = 0;
+ u16 mss_old, mss_new;
+ cnat_vrfmap_t * vrf_map_p;
+
+ cnat_v4_recalculate_tcp_checksum(ip,
+ tcp,
+ &(ip->src_addr),
+ &(tcp->src_port),
+ ipv4_addr,
+ port);
+ u16 frag_offset =
+ clib_net_to_host_u16(ip->frag_flags_offset);
+
+ if(PREDICT_FALSE(frag_offset & IP_FRAG_OFFSET_MASK)) {
+ return; /* No TCP Header at all */
+ }
+
+ /*
+ * check SYN bit and if options field is present
+ * If yes, proceed to extract the options and get TCP MSS value
+ */
+ check_mss = ((tcp->flags & TCP_FLAG_SYN) &&
+ (((tcp->hdr_len>>4) << 2) > sizeof(tcp_hdr_type)));
+
+ if (PREDICT_FALSE(check_mss)) {
+
+ /* get per VRF mss config */
+ if(PREDICT_FALSE(db->flags & (CNAT_DB_DSLITE_FLAG))) {
+ mss_new = dslite_table_db_ptr[db->dslite_nat44_inst_id].tcp_mss;
+ } else {
+ vrf_map_p = cnat_map_by_vrf + db->vrfmap_index;
+ mss_new = vrf_map_p->tcp_mss;
+ }
+ DSLITE_PRINTF(1, "Check MSS true..%u\n", mss_new);
+ /*
+ * If TCP MSS is not configured, skip the MSS checks
+ */
+ if (PREDICT_FALSE(mss_new != V4_TCP_MSS_NOT_CONFIGURED_VALUE)) {
+
+ /* if mss_ptr != NULL, then it points to MSS option */
+ mss_ptr = tcp_findoption(tcp, TCP_OPTION_MSS);
+
+ /*
+ * TCP option field: | kind 1B | len 1B | value 2B|
+ * where kind != [0,1]
+ */
+ if (PREDICT_TRUE(mss_ptr && (mss_ptr[1] == 4))) {
+
+ u16 *ptr = (u16*)(mss_ptr + 2);
+
+ mss_old = clib_net_to_host_u16(*ptr);
+
+ if (PREDICT_FALSE(mss_old > mss_new)) {
+ u32 sum32;
+ u16 mss_old_r, old_tcp_checksum_r;
+
+ *ptr = clib_host_to_net_u16(mss_new);
+
+ mss_old_r = ~mss_old;
+
+ old_tcp_checksum_r =
+ ~clib_net_to_host_u16(tcp->tcp_checksum);
+
+ /*
+ * Revise the TCP checksum
+ */
+ sum32 = old_tcp_checksum_r + mss_old_r + mss_new;
+ FILL_CHECKSUM(tcp->tcp_checksum, sum32)
+
+ if (PREDICT_FALSE(tcp_logging_enable_flag)) {
+ tcp_debug_logging(
+ clib_net_to_host_u32(tcp->seq_num),
+ clib_net_to_host_u32(tcp->ack_num),
+ 0,
+ 0,
+ mss_old,
+ mss_new,
+ 0,
+ 0,
+ ~old_tcp_checksum_r,
+ clib_net_to_host_u16(tcp->tcp_checksum));
+ }
+ }
+ }
+ }
+ }
+}
+
+u32 get_my_svi_intf_ip_addr() {
+ return 0x01010101;
+}
diff --git a/plugins/vcgn-plugin/vcgn/cnat_v4_functions.h b/plugins/vcgn-plugin/vcgn/cnat_v4_functions.h
new file mode 100644
index 00000000000..2429e5e1437
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_v4_functions.h
@@ -0,0 +1,342 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_v4_functions.h
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_V4_FUNCTIONS__
+#define __CNAT_V4_FUNCTIONS__
+
+#include "tcp_header_definitions.h"
+#include "cnat_db.h"
+#include "spp_ctx.h"
+
+#include "platform_common.h"
+
+/*
+ * Defines and structures to enable TCP packet logging
+ */
+#define TCP_LOGGING_DISABLE 0
+#define TCP_LOGGING_ENABLE 1
+#define TCP_LOGGING_PACKET_DUMP 2
+#define TCP_LOGGING_SUMMARY_DUMP 3
+
+#define MAX_TCP_LOGGING_COUNT 1024
+
+typedef struct tcp_logging_struct {
+ u32 seq_num;
+ u32 ack_num;
+ u32 old_ip;
+ u32 new_ip;
+ u16 old_port;
+ u16 new_port;
+ u16 old_ip_crc;
+ u16 new_ip_crc;
+ u16 old_tcp_crc;
+ u16 new_tcp_crc;
+} tcp_logging_struct_t;
+
+void tcp_debug_logging_dump (void);
+void tcp_debug_logging_enable_disable (u32 enable_flag);
+
+void
+tcp_debug_logging (
+ u32 seq_num,
+ u32 ack_num,
+ u32 old_ip,
+ u32 new_ip,
+ u16 old_port,
+ u16 new_port,
+ u16 old_ip_crc,
+ u16 new_ip_crc,
+ u16 old_tcp_crc,
+ u16 new_tcp_crc);
+
+#define JLI printf("%s %s %d\n", __FILE__, __FUNCTION__, __LINE__); fflush(stdout);
+
+#define CNAT_ICMP_DEST_UNREACHABLE 100
+#define INCREMENT_NODE_COUNTER(c) \
+ em->counters[node_counter_base_index + c] += 1;
+
+#define V4_TCP_UPDATE_SESSION_FLAG(db, tcp) \
+if ((tcp->flags & TCP_FLAG_ACK) && (tcp->flags & TCP_FLAG_SYN)) { \
+ db->flags |= CNAT_DB_FLAG_TCP_ACTIVE; \
+} \
+if ((tcp->flags & TCP_FLAG_RST) || (tcp->flags & TCP_FLAG_FIN)) { \
+ db->flags &= ~CNAT_DB_FLAG_TCP_ACTIVE; \
+ db->flags |= CNAT_DB_FLAG_TCP_CLOSING; \
+}
+
+#define V4_TCP_UPDATE_SESSION_DB_FLAG(sdb, tcp) \
+if ((tcp->flags & TCP_FLAG_ACK) && (tcp->flags & TCP_FLAG_SYN)) { \
+ sdb->flags |= CNAT_DB_FLAG_TCP_ACTIVE; \
+} \
+if ((tcp->flags & TCP_FLAG_RST) || (tcp->flags & TCP_FLAG_FIN)) { \
+ sdb->flags &= ~CNAT_DB_FLAG_TCP_ACTIVE; \
+ sdb->flags |= CNAT_DB_FLAG_TCP_CLOSING; \
+}
+
+/*
+ * Code to recalculate checksum after ACK/SEQ number changes
+ * This macro assumes, we have pointer to tcp structure
+ * referenced by the name "tcp"
+ */
+#define CNAT_UPDATE_TCP_SEQ_ACK_CHECKSUM(old_val32, new_val32) \
+{ \
+ u16 old_val_lower, old_val_upper, old_tcp_cr; \
+ u16 new_val_lower, new_val_upper, new_tcp_cr; \
+ u32 sum32; \
+ \
+ old_val_lower = ~((u16) old_val32); \
+ old_val_upper = ~((u16) (old_val32 >> 16)); \
+ old_tcp_cr = ~net2host16(&tcp->tcp_checksum); \
+ new_val_lower = (u16) new_val32; \
+ new_val_upper = (u16) (new_val32 >> 16); \
+ \
+ sum32 = old_val_lower + old_val_upper + old_tcp_cr + \
+ new_val_lower + new_val_upper; \
+ \
+ sum32 = (sum32 & 0xffff) + ((sum32 >> 16) & 0xffff); \
+ sum32 = (sum32 & 0xffff) + ((sum32 >> 16) & 0xffff); \
+ new_tcp_cr = ~((u16)sum32); \
+ \
+ tcp->tcp_checksum = host2net16(new_tcp_cr); \
+}
+
+/*
+ * newchecksum = ~(~oldchecksum + ~old + new)
+ * old/new for l3 checksum: ip address
+ */
+#define CNAT_UPDATE_L3_CHECKSUM_DECLARE \
+u16 old_l3_1r, old_l3_2r; \
+u16 old_l3_cr, new_l3_c; \
+u32 new32;
+
+#define CNAT_UPDATE_L3_CHECKSUM(old_l3_1, old_l3_2, old_l3_c, \
+ new_l3_1, new_l3_2) \
+old_l3_1r = ~(old_l3_1); \
+old_l3_2r = ~(old_l3_2); \
+old_l3_cr = ~(old_l3_c); \
+new32 = old_l3_cr + old_l3_1r + old_l3_2r + new_l3_1 + new_l3_2; \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new_l3_c = ~((u16)new32);
+
+
+/*
+ * newchecksum = ~(~oldchecksum + ~old + new)
+ * old/new for l3 checksum: ip address
+ * old/new for l4 checksum: ip address and port
+ */
+#define CNAT_UPDATE_L3_L4_CHECKSUM_DECLARE \
+u16 old_l3_1r, old_l3_2r, old_l4r; \
+u16 old_l3_cr, old_l4_cr; \
+u16 new_l3_c, new_l4_c; \
+u32 sum32, new32;
+
+#define CNAT_UPDATE_L3_L4_CHECKSUM(old_l3_1, old_l3_2, old_l4, \
+ old_l3_c, old_l4_c, \
+ new_l3_1, new_l3_2, new_l4) \
+old_l3_1r = ~(old_l3_1); \
+old_l3_2r = ~(old_l3_2); \
+old_l3_cr = ~(old_l3_c); \
+sum32 = old_l3_1r + old_l3_2r + new_l3_1 + new_l3_2; \
+new32 = old_l3_cr + sum32; \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new_l3_c = ~((u16)new32); \
+old_l4r = ~(old_l4); \
+old_l4_cr = ~(old_l4_c); \
+sum32 += old_l4r + new_l4; \
+new32 = old_l4_cr + sum32; \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new_l4_c = ~((u16)new32);
+
+/*
+ * For ICMP checksums, we don't use the top IP header for checksum calculation
+ */
+#define CNAT_UPDATE_L3_ICMP_CHECKSUM(old_l3_1, old_l3_2, old_l4, \
+ old_l3_c, old_l4_c, \
+ new_l3_1, new_l3_2, new_l4) \
+old_l3_1r = ~(old_l3_1); \
+old_l3_2r = ~(old_l3_2); \
+old_l3_cr = ~(old_l3_c); \
+sum32 = old_l3_1r + old_l3_2r + new_l3_1 + new_l3_2; \
+new32 = old_l3_cr + sum32; \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new_l3_c = ~((u16)new32); \
+old_l4r = ~(old_l4); \
+old_l4_cr = ~(old_l4_c); \
+sum32 = old_l4r + new_l4; \
+new32 = old_l4_cr + sum32; \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new_l4_c = ~((u16)new32);
+
+
+/*
+ * icmp error type message:
+ * newchecksum = ~(~oldchecksum + ~old + new)
+ * old/new for outlayer ip checksum: ip address
+ * old/new for outlayer icmp checksum:
+ * out-layer: ip address
+ * inner-layer: ip addr, port, l3 checksum, l4 checksum
+ */
+#define CNAT_UPDATE_ICMP_ERR_CHECKSUM_DECLARE \
+u16 old_ip_1r, old_ip_2r, old_ip_port_r, old_ip_cr, old_icmp_cr; \
+u16 new_icmp_c; \
+u32 sum32;
+
+
+#define CNAT_UPDATE_ICMP_ERR_CHECKSUM(old_ip_1, old_ip_2, old_ip_port, old_ip_c, old_icmp_c, \
+ new_ip_1, new_ip_2, new_ip_port, new_ip_c) \
+old_ip_1r = ~(old_ip_1); \
+old_ip_2r = ~(old_ip_2); \
+old_ip_port_r = ~(old_ip_port); \
+old_ip_cr = ~(old_ip_c); \
+old_icmp_cr = ~(old_icmp_c); \
+sum32 = old_ip_1r + old_ip_2r + new_ip_1 + new_ip_2 + \
+ old_ip_port_r + new_ip_port + old_ip_cr + new_ip_c; \
+new32 = old_icmp_cr + sum32; \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new32 = (new32 & 0xffff) + ((new32 >> 16) & 0xffff); \
+new_icmp_c = ~((u16)new32); \
+
+/*
+ * Add the two 16 bit parts of the 32 bit field
+ * Repeat it one more time to take care of any overflow
+ * Complement the u16 value and store it in network format
+ */
+#define FILL_CHECKSUM(checksum_field, sum32) { \
+ sum32 = (sum32 & 0xffff) + ((sum32>>16) & 0xffff); \
+ sum32 = (sum32 & 0xffff) + ((sum32>>16) & 0xffff); \
+ checksum_field = clib_host_to_net_u16(~((u16) sum32)); \
+}
+
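As a cross-check on the incremental-update identity used by FILL_CHECKSUM and the CNAT_UPDATE_* macros above (newchecksum = ~(~oldchecksum + ~old + new), folded with end-around carry), here is a small standalone program. It is an editor's sketch, not part of the patch; csum16() and the sample header words are made up for the demonstration.

/* Editor's sketch: verify the incremental checksum identity against a
 * full RFC 1071 style recompute over a toy 16-bit word buffer. */
#include <stdio.h>
#include <stdint.h>

/* Ones-complement sum over 16-bit words, folded and complemented */
static uint16_t csum16(const uint16_t *p, int n)
{
    uint32_t sum = 0;
    while (n--)
        sum += *p++;
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint16_t words[4] = { 0x4500, 0x0034, 0x1c46, 0x4000 };
    uint16_t old_csum = csum16(words, 4);

    /* Change one word and update the checksum incrementally */
    uint16_t old_val = words[2], new_val = 0x2222;
    words[2] = new_val;

    uint32_t sum32 = (uint16_t)~old_csum + (uint16_t)~old_val + new_val;
    sum32 = (sum32 & 0xffff) + (sum32 >> 16);
    sum32 = (sum32 & 0xffff) + (sum32 >> 16);
    uint16_t inc_csum = (uint16_t)~sum32;

    printf("full recompute: 0x%04x  incremental: 0x%04x\n",
           (unsigned)csum16(words, 4), (unsigned)inc_csum);
    return 0;
}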
+static inline void
+cnat_v4_recalculate_tcp_checksum (ipv4_header *ip,
+ tcp_hdr_type *tcp,
+ u32 *ip_addr_ptr,
+ u16 *tcp_port_addr_ptr,
+ u32 new_ip,
+ u16 new_port)
+{
+ u32 old_ip_addr, old_ip32_r, new_ip32, sum32;
+ u16 old_port_r, old_ip_checksum_r, old_tcp_checksum_r;
+
+ u16 *p16;
+
+ p16 = (u16*) ip_addr_ptr;
+
+ old_ip_addr = *ip_addr_ptr;
+ old_ip32_r = (((u16) ~clib_net_to_host_u16(*p16)) +
+ ((u16) ~clib_net_to_host_u16(*(p16+1))));
+
+ old_port_r = ~clib_net_to_host_u16(*tcp_port_addr_ptr);
+
+ *ip_addr_ptr = clib_host_to_net_u32(new_ip);
+
+ new_ip32 = (new_ip & 0xffff) + ((new_ip >> 16) & 0xffff);
+
+ old_ip_checksum_r = ~clib_net_to_host_u16(ip->checksum);
+
+ /*
+ * Recalculate the new IP checksum
+ */
+ sum32 = old_ip32_r + new_ip32 + old_ip_checksum_r;
+
+ FILL_CHECKSUM(ip->checksum, sum32);
+
+ u16 frag_offset =
+ clib_net_to_host_u16((ip->frag_flags_offset));
+
+ if(PREDICT_FALSE(frag_offset & IP_FRAG_OFFSET_MASK)) {
+ return; /* No need to update TCP fields */
+ }
+
+ *tcp_port_addr_ptr = clib_host_to_net_u16(new_port);
+ old_tcp_checksum_r = ~clib_net_to_host_u16(tcp->tcp_checksum);
+
+ /*
+ * Recalculate the new TCP checksum
+ */
+ sum32 = old_ip32_r + new_ip32 +
+ old_port_r + new_port + old_tcp_checksum_r;
+
+ FILL_CHECKSUM(tcp->tcp_checksum, sum32);
+
+ if (PREDICT_FALSE(tcp_logging_enable_flag)) {
+ tcp_debug_logging(
+ clib_net_to_host_u32(tcp->seq_num),
+ clib_net_to_host_u32(tcp->ack_num),
+ clib_net_to_host_u32(old_ip_addr),
+ clib_net_to_host_u32(*ip_addr_ptr),
+ ~old_port_r,
+ clib_net_to_host_u16(*tcp_port_addr_ptr),
+ ~old_ip_checksum_r,
+ clib_net_to_host_u16(ip->checksum),
+ ~old_tcp_checksum_r,
+ clib_net_to_host_u16(tcp->tcp_checksum));
+ }
+}
+
+
+extern void tcp_in2out_nat_mss_n_checksum (ipv4_header *ip,
+ tcp_hdr_type *tcp,
+ u32 ipv4_addr,
+ u16 port,
+ cnat_main_db_entry_t * db);
+
+void hex_dump(u8 * p, int len);
+
+u32 get_my_svi_intf_ip_addr();
+
+/*
+ * Defined in cnat_v4_icmp_gen.c:
+ * returns 1 if ICMP msg generation is allowed for this user
+ */
+
+u32 icmp_msg_gen_allowed ();
+
+cnat_icmp_msg_t v6_icmp_msg_gen_allowed();
+
+int v4_crc_zero_udp_allowed();
+void ipv4_decr_ttl_n_calc_csum(ipv4_header *ipv4);
+int icmpv4_generate_with_throttling (spp_ctx_t *ctx, ipv4_header *ipv4,
+ u16 rx_uidb_index);
+
+int icmpv6_generate_with_throttling (spp_ctx_t *ctx, ipv6_header_t *ipv6,
+ u16 rx_uidb_index);
+
+void icmp_error_generate_v6(spp_ctx_t *ctx, u8 icmp_type,
+ u8 icmp_code, u16 uidb_index);
+
+void calculate_window_scale(tcp_hdr_type *tcp_header, u8 *scale);
+
+void cnat_log_nat44_tcp_seq_mismatch(
+ cnat_main_db_entry_t *db,
+ cnat_vrfmap_t *vrfmap);
+void print_icmp_pkt (ipv4_header *ip);
+void print_udp_pkt (ipv4_header *ip);
+void print_tcp_pkt (ipv4_header *ip);
+void print_ipv6_pkt (ipv6_header_t *ip);
+
+
+#endif
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_v4_pptp_alg.h b/plugins/vcgn-plugin/vcgn/cnat_v4_pptp_alg.h
new file mode 100644
index 00000000000..5a6d4243165
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_v4_pptp_alg.h
@@ -0,0 +1,150 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_v4_pptp_alg.h
+ *
+ * Copyright (c) 2009-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_V4_PPTP_ALG_H__
+#define __CNAT_V4_PPTP_ALG_H__
+
+/* Debug utils of PPTP */
+#define PPTP_DBG(debug, ...) \
+ if(PREDICT_FALSE(cnat_pptp_debug_flag >= debug)) { \
+ PLATFORM_DEBUG_PRINT("%s:%s:%d - ", \
+ __FILE__, __FUNCTION__, __LINE__);\
+ PLATFORM_DEBUG_PRINT(__VA_ARGS__);\
+ PLATFORM_DEBUG_PRINT("\n"); \
+ }
+
+#define PPTP_DUMP_PACKET(ip, len) pptp_hex_dump(ip, len)
+
+
+#define PPTP_DISABLED 0
+#define PPTP_ENABLED 1
+
+#define PPTP_GRE_TIMEOUT 60 /*sec */
+
+#define TCP_PPTP_PORT 1723
+
+#define PPTP_PAC 0
+#define PPTP_PNS 1
+
+/* PPTP MSG TYPE */
+
+#define PPTP_MSG_TYPE_CONTROL 1
+#define PPTP_MSG_TYPE_MGMT 2
+
+/* PPTP control messages */
+
+/* control connection mgmt */
+#define PPTP_START_CC_RQ 1
+#define PPTP_START_CC_RP 2
+#define PPTP_STOP_CC_RQ 3
+#define PPTP_STOP_CC_RP 4
+#define PPTP_ECHO_RQ 5
+#define PPTP_ECHO_RP 6
+
+/* call mgmt */
+#define PPTP_OBOUND_CALL_RQ 7
+#define PPTP_OBOUND_CALL_RP 8
+#define PPTP_IBOUND_CALL_RQ 9
+#define PPTP_IBOUND_CALL_RP 10
+#define PPTP_IBOUND_CALL_CN 11
+#define PPTP_CALL_CLEAR_RQ 12
+#define PPTP_CALL_DISCON_NT 13
+
+/* other */
+
+#define PPTP_WAN_ERR_NT 14
+#define PPTP_SET_LINK_INF 15
+
+#define PPTP_MIN_HDR_LEN 8
+
+/* Byte offsets from start of TCP Data(PPTP header) */
+
+#define PPTP_CTRL_MGMT_TYPE_OFFSET 0x02
+#define PPTP_CC_TYPE_OFFSET 0x08
+#define PPTP_HDR_CALL_ID_OFFSET 0x0c
+#define PPTP_HDR_PEER_CALL_ID_OFFSET 0x0e
+
+#define PPTP_HDR_RESULT_CODE_OFFSET_STCCRP 0x0e
+#define PPTP_HDR_RESULT_CODE_OFFSET 0x10
+
+
+/* Offset of control/mgmt msg types
+ from start of TCP header */
+
+#define TCP_HEADER_SIZE(tcp) \
+ ((tcp->hdr_len>>4) << 2)
+
+
+#define PPTP_MSG_START_OFFSET(tcp) \
+ ((u8*)tcp + TCP_HEADER_SIZE(tcp))
+
+
+#define PPTP_CC_MSG_TYPE_OFFSET(tcp) \
+ (PPTP_MSG_START_OFFSET(tcp) + \
+ PPTP_CC_TYPE_OFFSET )
+
+#define PPTP_MGMT_MSG_TYPE_OFFSET(tcp) \
+ ( PPTP_MSG_START_OFFSET(tcp) + \
+ PPTP_CTRL_MGMT_TYPE_OFFSET )
+
+#define PPTP_CALL_ID_OFFSET(tcp) \
+ ( PPTP_MSG_START_OFFSET(tcp) + \
+ PPTP_HDR_CALL_ID_OFFSET )
+
+#define PPTP_PEER_CALL_ID_OFFSET(tcp) \
+ ( PPTP_MSG_START_OFFSET(tcp) + \
+ PPTP_HDR_PEER_CALL_ID_OFFSET )
+
+#define PPTP_RESULT_CODE_OFFSET(tcp) \
+ ( PPTP_MSG_START_OFFSET(tcp) + \
+ PPTP_HDR_RESULT_CODE_OFFSET )
+
+#define PPTP_RESULT_CODE_OFFSET_STCCRP(tcp) \
+ ( PPTP_MSG_START_OFFSET(tcp) + \
+ PPTP_HDR_RESULT_CODE_OFFSET_STCCRP)
+
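The offset macros above locate PPTP fields relative to the start of the TCP payload. The standalone fragment below is an editor's sketch over a hypothetical byte buffer, not part of the patch; it only shows the same arithmetic: skip the TCP header using the data-offset nibble, then read the call ID at offset 0x0c of the PPTP header.

/* Editor's sketch: mimic TCP_HEADER_SIZE() and PPTP_CALL_ID_OFFSET()
 * on a raw buffer; the packet contents are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical 20-byte TCP header followed by a PPTP control message */
    uint8_t pkt[20 + 16] = {0};
    pkt[12] = 5 << 4;               /* TCP data offset = 5 words = 20 bytes */
    pkt[20 + 0x0c] = 0x12;          /* call ID, network byte order */
    pkt[20 + 0x0d] = 0x34;

    uint32_t tcp_hdr_len = (pkt[12] >> 4) * 4;
    uint8_t *pptp = pkt + tcp_hdr_len;
    uint16_t call_id = (uint16_t)((pptp[0x0c] << 8) | pptp[0x0d]);

    printf("tcp hdr len %u, call id 0x%04x\n", tcp_hdr_len, (unsigned)call_id);
    return 0;
}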
+/* values */
+#define PPTP_CC_MSG_TYPE(tcp) \
+ (u16*)PPTP_CC_MSG_TYPE_OFFSET(tcp)
+
+#define PPTP_MGMT_MSG_TYPE(tcp) \
+ (u16*)PPTP_MGMT_MSG_TYPE_OFFSET(tcp)
+
+#define PPTP_CALL_ID(tcp) \
+ (u16*)PPTP_CALL_ID_OFFSET(tcp)
+
+#define PPTP_PEER_CALL_ID(tcp) \
+ (u16*)PPTP_PEER_CALL_ID_OFFSET(tcp)
+
+#define PPTP_RESULT_CODE(tcp) \
+ *(u8*)PPTP_RESULT_CODE_OFFSET(tcp);
+
+#define PPTP_RESULT_CODE_STCCRP(tcp) \
+ *(u8*)PPTP_RESULT_CODE_OFFSET_STCCRP(tcp);
+
+
+/* other code */
+#define PPTP_CHAN_SUCCESS 1
+
+
+/* Data structures */
+
+extern u32 cnat_pptp_debug_flag;
+
+#endif /* __CNAT_V4_PPTP_ALG_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/cnat_v4_tcp_in2out_stages.c b/plugins/vcgn-plugin/vcgn/cnat_v4_tcp_in2out_stages.c
new file mode 100644
index 00000000000..220ced461aa
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_v4_tcp_in2out_stages.c
@@ -0,0 +1,679 @@
+/*
+ *---------------------------------------------------------------------------
+ * cnat_v4_tcp_in2out_stages.c - cnat_v4_tcp_in2out node pipeline stage functions
+ *
+ *
+ * Copyright (c) 2008-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/buffer.h>
+
+#include "cnat_db.h"
+/* #include <cnat_feature_data.h> */
+#include "ipv4_packet.h"
+#include "tcp_header_definitions.h"
+#include "cnat_config.h"
+#include "cnat_global.h"
+#include "cnat_v4_functions.h"
+#include "cnat_v4_ftp_alg.h"
+#include "cnat_v4_pptp_alg.h"
+
+#define foreach_cnat_ipv4_tcp_inside_input_error \
+_(TCP_NAT_IN, "packets received") \
+_(TCP_NAT, "packets NATed") \
+_(TCP_EXCEPTION, "packets to exception") \
+_(TCP_TTL_GEN, "Generated TTL Expiry ICMP packet") \
+_(TCP_TTL_DROP, "Could not generate TTL Expiry ICMP packet") \
+_(TCP_SESSION_DROP, "Could not generate session") \
+_(TCP_FRAG_DROP, "Non-first Fragment received")
+
+typedef enum {
+#define _(sym,str) sym,
+ foreach_cnat_ipv4_tcp_inside_input_error
+#undef _
+ CNAT_IPV4_TCP_INSIDE_INPUT_N_ERROR,
+} cnat_ipv4_tcp_inside_input_t;
+
+static char * cnat_ipv4_tcp_inside_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_cnat_ipv4_tcp_inside_input_error
+#undef _
+};
+
+
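The error counters above use the foreach_/X-macro idiom common in this code base: one list expands into both the enum of counter symbols and the parallel string table, keeping the two in sync. The minimal standalone sketch below, with made-up names, is an editor's illustration and not part of the patch.

/* Editor's sketch of the X-macro expansion pattern */
#include <stdio.h>

#define foreach_demo_error \
_(DEMO_OK,   "packets accepted") \
_(DEMO_DROP, "packets dropped")

typedef enum {
#define _(sym, str) sym,
    foreach_demo_error
#undef _
    DEMO_N_ERROR,
} demo_error_t;

static char *demo_error_strings[] = {
#define _(sym, str) str,
    foreach_demo_error
#undef _
};

int main(void)
{
    int i;
    for (i = 0; i < DEMO_N_ERROR; i++)
        printf("%d: %s\n", i, demo_error_strings[i]);
    return 0;
}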
+typedef struct cnat_v4_tcp_in2out_pipeline_data_ {
+ spp_node_main_vector_t *nmv;
+ /* Add additional pipeline stage data here... */
+ u32 bucket;
+ u16 src_port; /* Added for handling fragments */
+ u16 dst_port; /* Added for handling fragments */
+} cnat_v4_tcp_in2out_pipeline_data_t;
+
+static cnat_v4_tcp_in2out_pipeline_data_t pctx_data[SPP_MAXDISPATCH];
+
+#define EXTRA_PIPELINE_ARGS_PROTO , cnat_v4_tcp_in2out_pipeline_data_t *pctx
+#define EXTRA_PIPELINE_ARGS , pctx
+
+ALWAYS_INLINE(
+static inline void
+stage0(spp_ctx_t **ctxs, int index, spp_node_t *np,
+ u8 *disp_used EXTRA_PIPELINE_ARGS_PROTO))
+{
+ spp_ctx_t *ctx = ctxs[index];
+ /*
+ * Prefetch the context header. This is almost always
+ * the right thing to do
+ */
+ SPP_PREFETCH_CTX(ctx);
+}
+
+ALWAYS_INLINE(
+static inline void
+stage1(spp_ctx_t **ctxs, int index, spp_node_t *np,
+ u8 *disp_used EXTRA_PIPELINE_ARGS_PROTO))
+{
+ spp_ctx_t *ctx = ctxs[index];
+ /* got ctx, prefetch packet data separately */
+ SPP_PREFETCH_CTX_DATA(ctx, 1*CACHE_DATA_QUANTUM);
+}
+
+ALWAYS_INLINE(
+static inline void
+stage2(spp_ctx_t **ctxs, int index, spp_node_t *np,
+ u8 *disp_used EXTRA_PIPELINE_ARGS_PROTO))
+{
+ spp_ctx_t *ctx = ctxs[index];
+ u64 a, b, c;
+ u32 bucket;
+ cnat_feature_data_t *fd = (cnat_feature_data_t *)ctx->feature_data;
+ ipv4_header *ip;
+ tcp_hdr_type * tcp;
+ u8 *prefetch_target;
+
+ INCREMENT_NODE_COUNTER(np, TCP_NAT_IN);
+
+ /* extract the key from ctx and save it to feature_data */
+
+ ip = (ipv4_header *)(ctx->current_header);
+ ctx->application_start = (ip->version_hdr_len_words & 0xf) << 2;
+ tcp = (tcp_hdr_type*) ((u8 *)ip + ctx->application_start);
+
+ PLATFORM_CNAT_SET_RX_VRF(ctx,fd->dbl.k.k.vrf, CNAT_TCP, 1);
+ fd->dbl.k.k.ipv4 = spp_net_to_host_byte_order_32(&ip->src_addr);
+
+ if(PREDICT_FALSE(ctx->ru.rx.frag)) {
+ /* Must have routed through cnat_v4_frag_in2out node
+ * Since feature data of the ctx is being used for other
+ * purposes here, copy them to extra stage argument
+ */
+ u16 *feature_data_ports = (u16 *)&ctx->feature_data[2];
+ pctx[index].src_port = fd->dbl.k.k.port = *feature_data_ports;
+ feature_data_ports++;
+ pctx[index].dst_port = *feature_data_ports;
+ } else {
+ fd->dbl.k.k.port = spp_net_to_host_byte_order_16(&tcp->src_port);
+ pctx[index].dst_port =
+ spp_net_to_host_byte_order_16(&tcp->dest_port);
+ }
+
+#if 0
+ /* extra info for evil mode, or default value for dst_ipv4 field in good mode */
+ fd->dbl.dst_ipv4 = address_dependent_filtering ?
+ spp_net_to_host_byte_order_32(&ip->dest_addr) : 0;
+#endif
+
+ CNAT_V4_GET_HASH(fd->dbl.k.key64,
+ bucket, CNAT_MAIN_HASH_MASK)
+
+ prefetch_target = (u8 *)(&cnat_in2out_hash[bucket]);
+ pctx[index].bucket = bucket;
+
+ /* Prefetch the hash bucket */
+ SPP_PREFETCH(prefetch_target, 0, LOAD);
+
+}
+
+ALWAYS_INLINE(
+static inline void
+stage3(spp_ctx_t **ctxs, int index, spp_node_t *np,
+ u8 *disp_used EXTRA_PIPELINE_ARGS_PROTO))
+{
+ u32 db_index;
+ u32 bucket;
+ uword prefetch_target0, prefetch_target1;
+
+ bucket = pctx[index].bucket;
+
+ /* read the hash bucket */
+ db_index = pctx[index].bucket = cnat_in2out_hash[bucket].next;
+ if (PREDICT_TRUE(db_index != EMPTY)) {
+
+ /*
+ * Prefetch database keys. We save space by not cache-line
+ * aligning the DB entries. We don't want to waste LSU
+ * bandwidth prefetching stuff we won't need.
+ */
+
+ prefetch_target0 = (uword)(cnat_main_db + db_index);
+
+ SPP_PREFETCH(prefetch_target0, 0, LOAD);
+
+ /* Just beyond DB key #2 */
+
+ prefetch_target1 = prefetch_target0 +
+ STRUCT_OFFSET_OF(cnat_main_db_entry_t, user_ports);
+
+ /* If the targets are in different lines, do the second prefetch */
+
+ if (PREDICT_FALSE((prefetch_target0 & ~(SPP_CACHE_LINE_BYTES-1)) !=
+ (prefetch_target1 & ~(SPP_CACHE_LINE_BYTES-1)))) {
+
+ SPP_PREFETCH(prefetch_target1, 0, LOAD);
+
+ }
+ }
+}
+
+static inline void
+stage4(spp_ctx_t **ctxs, int index, spp_node_t *np,
+ u8 *disp_used EXTRA_PIPELINE_ARGS_PROTO)
+{
+ spp_ctx_t *ctx = ctxs[index];
+ u32 db_index = pctx[index].bucket;
+ cnat_main_db_entry_t *db;
+ cnat_feature_data_t *fd;
+
+ /*
+ * Note: if the search already failed (empty bucket),
+ * the answer is already in the pipeline context structure
+ */
+ if (PREDICT_FALSE(db_index == EMPTY)) {
+ return;
+ }
+
+ fd = (cnat_feature_data_t *)ctx->feature_data;
+
+ /*
+ * Note: hash collisions suck. We can't easily prefetch around them.
+ * The first trip around the track will be fast. After that, maybe
+ * not so much...
+ */
+ do {
+
+ db = cnat_main_db + db_index;
+ if (PREDICT_TRUE(db->in2out_key.key64 == fd->dbl.k.key64))
+ break;
+ db_index = db->in2out_hash.next;
+
+ } while (db_index != EMPTY);
+
+    /* even in evil mode, for in2out, we NAT all packets regardless of mode and dst_ip */
+
+ /* Stick the answer back into the pipeline context structure */
+ pctx[index].bucket = db_index;
+}
+
+ALWAYS_INLINE(
+static inline void
+stage5(spp_ctx_t **ctxs, int index, spp_node_t *np,
+ u8 *disp_used EXTRA_PIPELINE_ARGS_PROTO))
+{
+ spp_ctx_t *ctx = ctxs[index];
+ u32 db_index = pctx[index].bucket;
+ cnat_feature_data_t *fd = (cnat_feature_data_t *)ctx->feature_data;
+ int disposition;
+ cnat_main_db_entry_t *db;
+ /* Below two pointers are just to keep the cnat_ftp_alg call happy*/
+ dslite_table_entry_t *dslite_entry_ptr = NULL;
+ ipv6_header_t *ipv6_hdr = NULL;
+ tcp_hdr_type *tcp;
+ ipv4_header *ip;
+ i8 delta;
+ u32 seq, seq1;
+ u32 window;
+ u8 scale;
+ int rc;
+
+ ip = (ipv4_header *) ctx->current_header;
+
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ if (PREDICT_FALSE(ip->ttl <= 1)) {
+ /* Try to generate ICMP error msg, as TTL is <= 1 */
+
+ if (icmpv4_generate_with_throttling
+ (ctx, ip, ctx->ru.rx.uidb_index)) {
+ /* Generated ICMP */
+ disposition = CNAT_REWRITE_OUTPUT;
+ INCREMENT_NODE_COUNTER(np, TCP_TTL_GEN);
+ } else {
+                /* Could not generate ICMP - drop the packet */
+ disposition = CNAT_DROP;
+ INCREMENT_NODE_COUNTER(np, TCP_TTL_DROP);
+ }
+ goto drop_pkt;
+ }
+ }
+
+ if (PREDICT_FALSE(db_index == EMPTY)) {
+ if(PREDICT_FALSE(ctx->ru.rx.frag)) {
+ /* Must have routed through cnat_v4_frag_in2out node */
+ u16 frag_offset =
+ spp_net_to_host_byte_order_16(&(ip->frag_flags_offset));
+ if(PREDICT_FALSE(frag_offset & IP_FRAG_OFFSET_MASK)) {
+ INCREMENT_NODE_COUNTER(np, TCP_FRAG_DROP);
+ disposition = CNAT_DROP;
+ goto drop_pkt;
+ } else {
+ INCREMENT_NODE_COUNTER(np, TCP_EXCEPTION);
+ disposition = CNAT_V4_TCP_IE;
+ }
+ } else {
+ INCREMENT_NODE_COUNTER(np, TCP_EXCEPTION);
+ disposition = CNAT_V4_TCP_IE;
+ }
+ } else {
+ cnat_key_t dest_info;
+ cnat_session_entry_t *session_db = NULL;
+ db = cnat_main_db + db_index;
+ /* Handle destination sessions */
+ tcp = (tcp_hdr_type*) ((u8*)ip + ctx->application_start);
+ dest_info.k.port = pctx[index].dst_port;
+ dest_info.k.ipv4 = spp_net_to_host_byte_order_32(&(ip->dest_addr));
+
+ if(PREDICT_TRUE(!PLATFORM_DBL_SUPPORT)) {
+
+            /* No DBL support, so just update the destination and proceed */
+ db->dst_ipv4 = dest_info.k.ipv4;
+ db->dst_port = dest_info.k.port;
+ goto update_pkt;
+ }
+
+ if(PREDICT_FALSE(db->dst_ipv4 != dest_info.k.ipv4 ||
+ db->dst_port != dest_info.k.port)) {
+ if(PREDICT_TRUE(db->nsessions == 0)) {
+ /* Should be a static entry
+ * Note this session as the first session and log
+ */
+ cnat_add_dest_n_log(db, &dest_info);
+ } else if(PREDICT_FALSE(db->nsessions == 1)) {
+ /* Destn is not same as in main db. Multiple session
+ * scenario
+ */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ session_db = cnat_handle_1to2_session(db, &dest_info);
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_DROP;
+ INCREMENT_NODE_COUNTER(np, TCP_SESSION_DROP);
+ goto drop_pkt;
+ }
+ } else { /* There are already multiple destinations */
+ dest_info.k.vrf = db->in2out_key.k.vrf;
+ /* If session already exists,
+ * cnat_create_session_db_entry will return the existing db
+ * else create a new db
+ * If could not create, return NULL
+ */
+ session_db = cnat_create_session_db_entry(&dest_info,
+ db, TRUE);
+ if(PREDICT_FALSE(session_db == NULL)) {
+ disposition = CNAT_DROP;
+ INCREMENT_NODE_COUNTER(np, TCP_SESSION_DROP);
+ goto drop_pkt;
+ }
+ }
+ if(PREDICT_TRUE(session_db)) {
+ /* Have to repeat the window size check for new destinations */
+ window = (u32)spp_net_to_host_byte_order_16(
+ &tcp->window_size);
+ window = window << session_db->scale;
+ if(PREDICT_TRUE(!session_db->window)) {
+ calculate_window_scale(tcp, &scale);
+ session_db->scale = scale;
+ session_db->window = window;
+ } else if (PREDICT_FALSE(session_db->window <
+ window)) {
+ /* Update the db entry with window option from packet */
+ session_db->window = window;
+ } else {
+ /* Do nothing */
+ }
+ session_db->tcp_seq_num = spp_net_to_host_byte_order_32(
+ &tcp->seq_num);
+ session_db->ack_no = spp_net_to_host_byte_order_32(
+ &tcp->ack_num);
+ if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) {
+ PLATFORM_DEBUG_PRINT("\n In2out SDB stages seq no = %u,"
+ " ack no = %u, window = %u\n",
+ session_db->tcp_seq_num,
+ session_db->ack_no,
+ session_db->window);
+ }
+ }
+ } else {
+ //Update the seq no and ack no for subsequent communication
+ //after connection establishment
+ //No need to update window here. Window is already updated
+ //during connection establishment
+ window = (u32)spp_net_to_host_byte_order_16(
+ &tcp->window_size);
+ window = window << db->scale;
+ if(PREDICT_FALSE(!ALG_ENABLED_DB(db))) {
+ //This check is done since proto_data is part of union in main
+ //db entry
+ db->proto_data.tcp_seq_chk.seq_no =
+ spp_net_to_host_byte_order_32(
+ &tcp->seq_num);
+ db->proto_data.tcp_seq_chk.ack_no =
+ spp_net_to_host_byte_order_32(
+ &tcp->ack_num);
+ }
+ if (PREDICT_FALSE(db->diff_window < window)) {
+ /* Update the db entry with window option from packet */
+ db->diff_window = window;
+ }
+ if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) {
+ PLATFORM_DEBUG_PRINT("\n In2out MainDB seq no = %u,"
+ "\n ack no = %u\n",
+ db->proto_data.tcp_seq_chk.seq_no,
+ db->proto_data.tcp_seq_chk.ack_no);
+ PLATFORM_DEBUG_PRINT("\n In2out MAINDB window = %u\n",
+ db->diff_window);
+ }
+ }
+update_pkt:
+
+ INCREMENT_NODE_COUNTER(np, TCP_NAT);
+
+ disposition = CNAT_REWRITE_OUTPUT;
+
+        /* NAT the packet and update checksum (incremental) */
+
+        /* If it is a non-first fragment, we need not worry about
+         * ALGs as the packet does not have a TCP header.
+         * However, in the very rare case where such a non-first
+         * fragment contains an FTP PORT command or an RTSP command,
+         * we cannot handle it and the ALG will fail.
+         * We do not want to add a lot of complexity to handle such a
+         * one-in-a-million ALG case.
+         */
+ u16 frag_offset =
+ spp_net_to_host_byte_order_16(&(ip->frag_flags_offset));
+
+ if(PREDICT_FALSE(frag_offset & IP_FRAG_OFFSET_MASK)) {
+ /* Non first fragment.. no TCP header */
+ FTP_ALG_DEBUG_PRINTF("Non first frag.. cannot handle ALG");
+ goto handle_ttl_n_checksum;
+ }
+
+ FTP_ALG_DEBUG_PRINTF("src port 0x%x, dst_port 0x%x",
+ spp_net_to_host_byte_order_16(&tcp->src_port),
+ spp_net_to_host_byte_order_16(&tcp->dest_port))
+
+ /* handle FTP ALG */
+ if (PREDICT_FALSE(ftp_alg_enabled &&
+ (spp_net_to_host_byte_order_16(&tcp->src_port) == 21 ||
+ spp_net_to_host_byte_order_16(&tcp->dest_port) == 21))) {
+
+ if(PREDICT_FALSE((db->flags & CNAT_DB_FLAG_PPTP_TUNNEL_ACTIVE) ||
+ (db->flags & CNAT_DB_FLAG_PPTP_TUNNEL_INIT)))
+ {
+ /* FTP on a PPTP Control session? Ignore FTP */
+ goto handle_ttl_n_checksum;
+ }
+
+ if (PREDICT_FALSE(tcp->flags & (TCP_FLAG_SYN | TCP_FLAG_RST |
+ TCP_FLAG_FIN))) {
+
+ FTP_ALG_DEBUG_PRINTF("SYN Case setting delta = 0")
+
+ /* reset the delta */
+ if(PREDICT_FALSE(session_db != NULL)) {
+ session_db->alg.delta = 0;
+ } else {
+ db->alg.delta = 0;
+ }
+
+ } else {
+
+ /* need to adjust seq # for in2out pkt if delta is not 0 */
+ if (PREDICT_TRUE((session_db && (session_db->alg.delta != 0))
+ || ((!session_db) && (db->alg.delta != 0)))) {
+ seq = net2host32(&tcp->seq_num);
+
+ FTP_ALG_DEBUG_PRINTF("Orig Seq Num 0x%x", seq)
+ /*
+                 * For FTP packets, PORT command translation may change
+                 * the TCP payload length, so we need to adjust the
+                 * packet's sequence numbers to match. The delta between
+                 * the original and new packet length is kept in
+                 * alg_dlt[1], together with the sequence number that
+                 * caused the delta. When there are multiple length
+                 * changes, we keep the previous delta in alg_dlt[0] for
+                 * cases like packet retransmission. Depending on the
+                 * packet's sequence number, we use either the latest
+                 * delta ([1]) or the previous one ([0]).
+                 * We won't be here if both delta values are 0.
+ */
+ if(PREDICT_FALSE(session_db != NULL)) {
+ seq1 = seq > session_db->tcp_seq_num ?
+ (seq + session_db->alg.alg_dlt[1]):
+ (seq + session_db->alg.alg_dlt[0]);
+ } else {
+ seq1 = seq > db->proto_data.seq_pcp.tcp_seq_num ?
+ (seq + db->alg.alg_dlt[1]):
+ (seq + db->alg.alg_dlt[0]);
+ }
+
+ FTP_ALG_DEBUG_PRINTF("Old_seq_num 0x%x New Seq Num 0x%x",
+ seq, seq1)
+
+ if (PREDICT_TRUE(seq1 != seq)) {
+
+ tcp->seq_num = host2net32(seq1);
+
+ FTP_ALG_DEBUG_PRINTF("Old TCP Checksum 0x%x",
+ net2host16(&tcp->tcp_checksum))
+
+ /*
+ * fix checksum incremental for seq # changes
+ * newchecksum = ~(~oldchecksum + ~old + new)
+ */
+ CNAT_UPDATE_TCP_SEQ_ACK_CHECKSUM(seq, seq1)
+ } /* There is a diff in seq */
+
+ } /* ALG Delta is non zero */
+
+ rc = cnat_ftp_alg((u8*) ip, &delta, db, dslite_entry_ptr, ipv6_hdr);
+
+ FTP_ALG_DEBUG_PRINTF("cnat_ftp_alg rc 0x%x", rc)
+
+            /* If a PORT cmd was located and the packet was updated, record the delta and seq # */
+ if (PREDICT_FALSE(rc)) {
+
+ /* set alg flag for this ftp control connection */
+ if(PREDICT_FALSE(session_db != NULL)) {
+ session_db->flags |= CNAT_DB_FLAG_ALG_CTRL_FLOW;
+ } else {
+ db->flags |= CNAT_DB_FLAG_ALG_CTRL_FLOW;
+ }
+
+ /*
+ * rc != 0 indicates this packet has triggered a new pkt len delta
+ * we need to update db entry's seq# with seq# of this packet.
+ *
+ * Move alg_dlt[1] to [0], (current delta -> previous delta)
+ * then apply latest delta to alg_dlt[1] (keep [1] as latest delta)
+ */
+ if(PREDICT_FALSE(session_db != NULL)) {
+ session_db->tcp_seq_num = net2host32(&tcp->seq_num);
+ session_db->alg.alg_dlt[0] = session_db->alg.alg_dlt[1];
+
+ /* accumulate the delta ! */
+ session_db->alg.alg_dlt[1] += delta;
+ FTP_ALG_DEBUG_PRINTF(
+ "cnat_ftp_alg seq_num 0x%x, dlt0 0x%x, dlt1 0x%x",
+ session_db->tcp_seq_num,
+ session_db->alg.alg_dlt[0],
+ session_db->alg.alg_dlt[1])
+
+ } else {
+ db->proto_data.seq_pcp.tcp_seq_num = net2host32(&tcp->seq_num);
+ db->alg.alg_dlt[0] = db->alg.alg_dlt[1];
+
+ /* accumulate the delta ! */
+ db->alg.alg_dlt[1] += delta;
+
+ FTP_ALG_DEBUG_PRINTF(
+ "cnat_ftp_alg seq_num 0x%x, dlt0 0x%x, dlt1 0x%x",
+ db->proto_data.seq_pcp.tcp_seq_num,
+ db->alg.alg_dlt[0],
+ db->alg.alg_dlt[1])
+ }
+ ctx->current_length += delta;
+ }/* cnat_ftp_alg returned non zero */
+ } /* It is not a SYN, RST or FIN */
+ } else if (PREDICT_FALSE(rtsp_alg_port_num &&
+ ((spp_net_to_host_byte_order_16(&tcp->dest_port) == rtsp_alg_port_num) ||
+ (spp_net_to_host_byte_order_16(&tcp->src_port) == rtsp_alg_port_num))) ) {
+
+ if (PREDICT_FALSE(tcp->flags & (TCP_FLAG_SYN | TCP_FLAG_RST |
+ TCP_FLAG_FIN))) {
+
+ FTP_ALG_DEBUG_PRINTF("SYN Case setting delta = 0")
+
+ /* reset the delta */
+ if(PREDICT_FALSE(session_db != NULL)) {
+ session_db->alg.delta = 0;
+ } else {
+ db->alg.delta = 0;
+ }
+
+ } else {
+#define RTSP_ALG_DELTA_MASK 0xFF
+ /* need to adjust seq # for in2out pkt if delta is not 0 */
+ if (PREDICT_FALSE((session_db &&
+ (session_db->alg.delta & RTSP_ALG_DELTA_MASK) != 0) ||
+ ((!session_db) &&
+ (db->alg.delta & RTSP_ALG_DELTA_MASK) != 0))) {
+ seq = net2host32(&tcp->seq_num);
+
+ if(PREDICT_FALSE(session_db != NULL)) {
+ seq1 = seq > session_db->tcp_seq_num ?
+ (seq + db->alg.alg_dlt[1]):
+ (seq + db->alg.alg_dlt[0]);
+ } else {
+ seq1 = seq > db->proto_data.seq_pcp.tcp_seq_num ?
+ (seq + db->alg.alg_dlt[1]):
+ (seq + db->alg.alg_dlt[0]);
+ }
+
+ FTP_ALG_DEBUG_PRINTF("Old_seq_num 0x%x New Seq Num 0x%x",
+ seq, seq1)
+
+ if (PREDICT_TRUE(seq1 != seq)) {
+
+ tcp->seq_num = host2net32(seq1);
+
+ FTP_ALG_DEBUG_PRINTF("Old TCP Checksum 0x%x",
+ net2host16(&tcp->tcp_checksum))
+
+ /*
+ * fix checksum incremental for seq # changes
+ * newchecksum = ~(~oldchecksum + ~old + new)
+ */
+ CNAT_UPDATE_TCP_SEQ_ACK_CHECKSUM(seq, seq1)
+ }
+
+ }
+ }
+ if ((session_db && (!session_db->alg.il)) ||
+ ((!session_db) && (!db->alg.il))) {
+ cnat_rtsp_alg((u8*) ip,
+ &delta,
+ db,
+ ctx->current_length,
+ NULL,
+ NULL);
+ }
+ }
+handle_ttl_n_checksum:
+ if (PLATFORM_HANDLE_TTL_DECREMENT) {
+ /*
+ * Decrement TTL and update IPv4 checksum
+ */
+ ipv4_decr_ttl_n_calc_csum(ip);
+ }
+
+ tcp_in2out_nat_mss_n_checksum(ip,
+ tcp,
+ db->out2in_key.k.ipv4,
+ db->out2in_key.k.port,
+ db);
+/* CNAT_PPTP_ALG_SUPPORT */
+ /* code to handle pptp control msgs */
+ if(PREDICT_FALSE(
+ (spp_net_to_host_byte_order_16(&tcp->dest_port) ==
+ TCP_PPTP_PORT))) {
+
+ u32 ret;
+
+        PPTP_DBG(3, "PPTP mgmt/ctrl msg received");
+
+ ret = cnat_handle_pptp_msg(ctx, db , tcp, PPTP_PNS );
+
+ if( PREDICT_FALSE( ret != CNAT_SUCCESS) ) {
+ PPTP_DBG(3, "PPTP mgmt/ctrl msg drop");
+ disposition = CNAT_DROP;
+ PPTP_INCR(ctrl_msg_drops);
+ goto drop_pkt;
+ }
+ }
+
+/* CNAT_PPTP_ALG_SUPPORT */
+
+        /* update translation counters */
+ db->in2out_pkts++;
+
+ in2out_forwarding_count++;
+
+ PLATFORM_CNAT_SET_TX_VRF(ctx,db->out2in_key.k.vrf);
+
+ /* update the timer for good mode, or evil mode dst_ip match */
+
+// if (!address_dependent_filtering || fd->dbl.dst_ipv4 == db->dst_ipv4) {
+ if(PREDICT_FALSE(session_db != NULL)) {
+ V4_TCP_UPDATE_SESSION_DB_FLAG(session_db, tcp);
+ CNAT_DB_TIMEOUT_RST(session_db);
+ } else {
+ V4_TCP_UPDATE_SESSION_FLAG(db, tcp);
+ CNAT_DB_TIMEOUT_RST(db);
+ }
+
+// }
+
+ }
+
+ /* Pick up the answer and put it into the context */
+ fd->dbl.db_index = db_index;
+
+drop_pkt:
+
+ DISP_PUSH_CTX(np, ctx, disposition, disp_used, last_disposition, last_contexts_ptr, last_nused_ptr);
+
+}
+
diff --git a/plugins/vcgn-plugin/vcgn/cnat_va_db.c b/plugins/vcgn-plugin/vcgn/cnat_va_db.c
new file mode 100644
index 00000000000..7423bdf2de2
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_va_db.c
@@ -0,0 +1,286 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_va_db.c - virtual assembly database
+ *
+ * Copyright (c) 2009, 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cnat_va_db.h>
+#include <format.h>
+#include <spp_node.h>
+#include <spp_alloc.h>
+#include <spp_byteorder.h>
+#include <spp_main.h>
+#include <spp_cache.h>
+#include <spp_interface.h>
+#include <spp_api.h>
+#include <spp_client_api.h>
+#include <spp_timers.h>
+#include <cnat_db.h>
+#include <spp_plugin.h>
+#include <cnat_v4_functions.h>
+
+
+va_bucket_t va_bucket[VA_BUCKETS];
+
+void va_bucket_init () {
+
+ u32 i;
+
+ /*
+     * mark each bucket as empty: its next-available
+     * pointer points to nowhere (~0)
+ */
+ for (i=0; i<VA_BUCKETS; i++) {
+ va_bucket[i].next_available_entry = ~0;
+ }
+
+}
+
+inline void va_db_add_new_entry (u32 bucket_index,
+ va_lookup_key * key )
+{
+
+ va_entry_t * entry_p;
+ u32 head, next;
+
+ entry_p = va_db_lookup(bucket_index, key);
+
+ if (PREDICT_FALSE(entry_p)) {
+ FRAG_DEBUG_PRINTF6(
+            "\nVA_ADD_NEW: Bucket %d found existing entry [%d, %d] -> [%d, %d]\n",
+ bucket_index, entry_p->src_port,
+ entry_p->dst_port, key->e.src_port, key->e.dst_port)
+
+ /* found match entry, update it */
+ entry_p->src_port = key->e.src_port;
+ entry_p->dst_port = key->e.dst_port;
+
+ FRAG_DEBUG_PRINTF3("VA_ADD_NEW: Existing bucket %d, counter %d\n",
+ bucket_index,
+ va_bucket[bucket_index].new_entry_counter)
+
+ } else {
+
+ /* no match, add a new one */
+ head = va_bucket[bucket_index].head_entry;
+ next = va_bucket[bucket_index].next_available_entry;
+
+ FRAG_DEBUG_PRINTF5(
+ "\nVA_ADD_NEW: Filling bucket %d, index %d with key 0x%llx %x\n",
+ bucket_index, next, key->k.key64, key->k.key32)
+
+ va_bucket[bucket_index].va_entry[next] = key->e;
+
+ /* increase next pointer */
+ va_bucket[bucket_index].next_available_entry = (next+1) & VA_BUCKET_MASK;
+
+ if (PREDICT_FALSE(head == va_bucket[bucket_index].next_available_entry)) {
+ /* adjust head circular pointer */
+ va_bucket[bucket_index].head_entry = (head+1) & VA_BUCKET_MASK;
+ }
+
+ va_bucket[bucket_index].new_entry_counter++;
+
+ FRAG_DEBUG_PRINTF4(
+ "VA_ADD_NEW: NEW bucket %d, entry %d counter %d\n",
+ bucket_index, next, va_bucket[bucket_index].new_entry_counter)
+ }
+}
+
+
+/*
+ * use the key,
+ * return pointer to the entry if found,
+ * NULL if not
+ */
+
+inline
+va_entry_t * va_db_lookup (u32 bucket_index, va_lookup_key * key)
+{
+
+ u32 index, next;
+ va_entry_t * entry_p;
+ va_bucket_t * bucket;
+
+ bucket = &va_bucket[bucket_index];
+ index = bucket->head_entry;
+ next = bucket->next_available_entry;
+ entry_p = NULL;
+
+ FRAG_DEBUG_PRINTF4(
+ "\nVA_DB_LOOKUP: bucket index %d head %d next %d\n",
+ bucket_index, index, next)
+
+ /* loop through the entries in the bucket */
+ while( index != next) {
+
+ if(PREDICT_TRUE(memcmp(&bucket->va_entry[index], key, VA_KEY_SIZE)==0)) {
+
+ entry_p = &bucket->va_entry[index];
+            /* In the add-frag-entry function we assign the key's src
+               port to entry_p's src port again. So when a main DB entry
+               is deleted or timed out, and another entry is later created
+               for the same src IP and src port pair, the frag's entry_p
+               would otherwise keep the previous (stale) port info.
+               Hence the line below is not required. */
+
+ /* *(u32*)&key->e.src_port = *(u32*)&entry_p->src_port; */
+ /* do two ports as u32 :) */
+
+ break;
+ }
+
+ index = (index +1) & VA_BUCKET_MASK;
+
+ }
+
+#ifdef FRAG_DEBUG
+ if (PREDICT_TRUE(entry_p)) {
+ FRAG_DEBUG_PRINTF3("VA_DB_LOOKUP: bucket index %d entry index %d\n",
+ bucket_index, index)
+ FRAG_DEBUG_PRINTF5("VA_DB_LOOKUP: SRC-->DST [0x%x, %d] [0x%x, %d]\n",
+ entry_p->src_ip, entry_p->src_port,
+ entry_p->dst_ip, entry_p->dst_port)
+ FRAG_DEBUG_PRINTF3("[vrf 0x%x, id 0x%x]\n",
+ entry_p->vrf, entry_p->ip_id)
+ } else {
+ FRAG_DEBUG_PRINTF1("\nNULL ENTRY\n")
+ }
+#endif
+
+ return entry_p;
+
+}
+
+inline
+int va_db_delete_entry (u32 bucket_index, va_lookup_key * key)
+{
+
+ u32 index, next;
+ int entry_found = 0;
+ va_bucket_t * bucket;
+
+ bucket = &va_bucket[bucket_index];
+ index = bucket->head_entry;
+ next = bucket->next_available_entry;
+
+ FRAG_DEBUG_PRINTF4(
+ "\nVA_DB_DELETE_ENTRY: bucket index %d head %d next %d\n",
+ bucket_index, index, next);
+
+ /* loop through the entries in the bucket */
+ while( index != next) {
+ if(PREDICT_TRUE(memcmp(&bucket->va_entry[index], key,
+ VA_KEY_SIZE)==0)) {
+ /* Clear the entry */
+ FRAG_DEBUG_PRINTF1("Entry found in delete API");
+ memset(&bucket->va_entry[index], 0, sizeof(va_entry_t));
+ entry_found = 1;
+ break;
+ }
+ index = (index +1) & VA_BUCKET_MASK;
+ }
+ return entry_found;
+}
+
+
+
+void cnat_va_bucket_used (int argc, unsigned long * argv)
+{
+
+    u32 i, sum = 0;
+
+ for(i=0;i<VA_BUCKETS;i++) {
+
+ if(PREDICT_TRUE(va_bucket[i].new_entry_counter)) sum++;
+
+ }
+
+ if (PREDICT_FALSE(!sum)) {
+ printf("no bucket in use\n");
+ return;
+ }
+
+ printf("index head next counter (%d bucket in use)\n", sum);
+
+ for(i=0;i<VA_BUCKETS;i++) {
+
+ if (PREDICT_FALSE(!va_bucket[i].new_entry_counter)) continue;
+
+ printf(" %04d %04d %04d %d\n", i,
+ va_bucket[i].head_entry,
+ va_bucket[i].next_available_entry,
+ va_bucket[i].new_entry_counter);
+
+ }
+}
+
+void cnat_va_dump (int argc, unsigned long * argv)
+{
+
+ u32 i, sum, index ;
+
+ PLATFORM_DEBUG_PRINT("====== SUMMARY ======\n");
+ PLATFORM_DEBUG_PRINT("Total buckets: %d\n", VA_BUCKETS);
+ PLATFORM_DEBUG_PRINT("Entries per bucket: %d\n", VA_ENTRY_PER_BUCKET);
+
+ sum = 0;
+
+ for(i=0; i<VA_BUCKETS; i++) {
+ if (PREDICT_TRUE(va_bucket[i].new_entry_counter > 0)) sum ++;
+ }
+
+ PLATFORM_DEBUG_PRINT("buckets in use: %d\n", sum);
+
+ sum = 0;
+ for(i=0; i<VA_BUCKETS; i++) {
+
+ if ( PREDICT_FALSE(((va_bucket[i].next_available_entry+1) & VA_BUCKET_MASK)
+ == va_bucket[i].head_entry)) {
+
+ sum ++;
+ }
+ }
+
+ PLATFORM_DEBUG_PRINT("bucket full: %d\n", sum);
+
+ /* dump per bucket info */
+
+ if (argc == 0 ) return;
+
+ index = (u32) argv[0];
+
+ if (PREDICT_FALSE(index >= VA_BUCKETS)) {
+ PLATFORM_DEBUG_PRINT("invalid bucket index %d\n", index);
+ return;
+ }
+
+ PLATFORM_DEBUG_PRINT("\n====== Bucket %d ======\n", index);
+
+ PLATFORM_DEBUG_PRINT("bucket head index %d\n", va_bucket[index].head_entry);
+
+ PLATFORM_DEBUG_PRINT("bucket next index %d\n", va_bucket[index].next_available_entry);
+
+ PLATFORM_DEBUG_PRINT(" source IP dest IP VRF ip-id srcP dstP\n");
+
+ for(i=0;i<VA_ENTRY_PER_BUCKET;i++) {
+ hex_dump((u8*)&va_bucket[index].va_entry[i], sizeof(va_entry_t));
+ }
+
+}
diff --git a/plugins/vcgn-plugin/vcgn/cnat_va_db.h b/plugins/vcgn-plugin/vcgn/cnat_va_db.h
new file mode 100644
index 00000000000..6e0051b46f7
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/cnat_va_db.h
@@ -0,0 +1,121 @@
+/*
+ *------------------------------------------------------------------
+ * cnat_va_db.h - definition for virtual assembly database
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __CNAT_VA_DB_H__
+#define __CNAT_VA_DB_H__
+
+#include <clib_lite.h>
+
+#define FRAG_DEBUG 1
+
+/* virtual assembly hash database size ~ 16B x 64K = 1MB */
+
+#define VA_TOTAL_ENTRIES (64*1024)
+#define VA_ENTRY_PER_BUCKET (8) /* make sure size is power of 2 for circular FIFO */
+#define VA_BUCKET_MASK (VA_ENTRY_PER_BUCKET -1)
+#define VA_BUCKETS (VA_TOTAL_ENTRIES / VA_ENTRY_PER_BUCKET)
+#define VA_KEY_SIZE 12
+
+typedef struct _va_entry {
+ /* key: top 12 bytes */
+ u32 src_ip;
+ u32 dst_ip;
+    u16 vrf;     /* overloaded with protocol info in the top two bits */
+ u16 ip_id;
+
+ /* values */
+ u16 src_port;
+ u16 dst_port;
+} va_entry_t;
+
+typedef struct _va_keys {
+ u64 key64; /* src & dst IP */
+ u32 key32; /* vrf, protocol and ip_id */
+} va_keys;
+
+typedef union {
+ va_entry_t e;
+ va_keys k;
+} va_lookup_key;
+
+typedef struct _va_bucket_t {
+ u32 head_entry;
+ u32 next_available_entry; /* ~0 for empty bucket */
+ u32 new_entry_counter; /* for debug purpose */
+ va_entry_t va_entry[VA_ENTRY_PER_BUCKET];
+} va_bucket_t;
+
+extern va_bucket_t va_bucket[]; /* hash table in cnat_va_db.c */
+
+void va_bucket_init ();
+
+inline void va_db_add_new_entry (u32 bucket_index, va_lookup_key * );
+inline int va_db_delete_entry (u32 bucket_index, va_lookup_key * );
+inline va_entry_t * va_db_lookup (u32 bucket_index, va_lookup_key * key);
+
+#ifdef FRAG_DEBUG
+
+#define FRAG_DEBUG_PRINTF1(a) \
+ if (frag_debug_flag) { \
+ PLATFORM_DEBUG_PRINT(a); \
+ }
+
+#define FRAG_DEBUG_PRINTF2(a, b) \
+ if (frag_debug_flag) { \
+ PLATFORM_DEBUG_PRINT(a, b); \
+ }
+
+#define FRAG_DEBUG_PRINTF3(a, b, c) \
+ if (frag_debug_flag) { \
+ PLATFORM_DEBUG_PRINT(a, b, c); \
+ }
+
+#define FRAG_DEBUG_PRINTF4(a, b, c, d) \
+ if (frag_debug_flag) { \
+ PLATFORM_DEBUG_PRINT(a, b, c, d); \
+ }
+
+#define FRAG_DEBUG_PRINTF5(a, b, c, d, e) \
+ if (frag_debug_flag) { \
+ PLATFORM_DEBUG_PRINT(a, b, c, d, e); \
+ }
+
+#define FRAG_DEBUG_PRINTF6(a, b, c, d, e, f) \
+ if (frag_debug_flag) { \
+ PLATFORM_DEBUG_PRINT(a, b, c, d, e, f); \
+ }
+#else
+
+#define FRAG_DEBUG_PRINTF1(a)
+
+#define FRAG_DEBUG_PRINTF2(a, b)
+
+#define FRAG_DEBUG_PRINTF3(a, b, c)
+
+#define FRAG_DEBUG_PRINTF4(a, b, c, d)
+
+#define FRAG_DEBUG_PRINTF5(a, b, c, d, e)
+
+#define FRAG_DEBUG_PRINTF6(a, b, c, d, e, f)
+
+#endif
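+
+/*
+ * Usage sketch (illustrative, not from the original source): each
+ * FRAG_DEBUG_PRINTFn macro takes a format string plus n-1 arguments and is
+ * gated at run time on frag_debug_flag, which is assumed to be declared
+ * elsewhere in the vcgn code:
+ *
+ *     FRAG_DEBUG_PRINTF2("va bucket %d is full\n", bucket_index);
+ */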
+
+#endif /* __CNAT_VA_DB_H__ */
+
+
diff --git a/plugins/vcgn-plugin/vcgn/dslite_db.h b/plugins/vcgn-plugin/vcgn/dslite_db.h
new file mode 100644
index 00000000000..2269b98c989
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/dslite_db.h
@@ -0,0 +1,170 @@
+/*
+ *------------------------------------------------------------------
+ * dslite_db.h - Stateful DSLITE translation database definitions
+ *
+ * Copyright (c) 2010-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#ifndef __DSLITE_DB_H__
+#define __DSLITE_DB_H__
+
+#include "cnat_cli.h"
+#include "index_list.h"
+#include "cnat_ports.h"
+#include "cnat_db.h"
+#include "dslite_defs.h"
+
+#define DSLITE_PRINTF(level, ...) \
+ if (dslite_debug_level > level) PLATFORM_DEBUG_PRINT(__VA_ARGS__);
+/*
+#define DSLITE_PRINTF(lvl, ...) \
+{ \
+ avsm_dispatlib_debug (__VA_ARGS__); \
+}
+*/
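+
+/*
+ * Usage sketch (illustrative): DSLITE_PRINTF(level, ...) prints only when
+ * dslite_debug_level is greater than the given level, e.g.
+ * DSLITE_PRINTF(1, "hash %x\n", hash).  Since the macro expands to a bare
+ * if statement, avoid using it as the unbraced body of an if/else in
+ * caller code.
+ */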
+
+#define HASH_ENHANCE 4
+//#define DSLITE_DEF
+#define DSLITE_MAIN_DB_SIZE (20000000 / PLATFORM_CNAT_INSTS)
+#define DSLITE_MAIN_HASH_SIZE \
+ (HASH_ENHANCE * PLATFORM_CNAT_MAIN_PRELIM_HASH_SIZE)
+
+#define DSLITE_MAIN_HASH_MASK (DSLITE_MAIN_HASH_SIZE-1)
+
+
+/* nb: 200000 users / 64 CNAT = 3125, 76% occupancy */
+#define DSLITE_USER_HASH_SIZE CNAT_USER_HASH_SIZE
+#define DSLITE_USER_HASH_MASK (DSLITE_USER_HASH_SIZE-1)
+
+/* The number of per-IP/port timeout configs is limited to 1000 */
+#define DSLITE_TIMEOUT_HASH_SIZE 1000
+#define DSLITE_TIMEOUT_HASH_MASK (DSLITE_TIMEOUT_HASH_SIZE - 1)
+#define DSLITE_TIMEOUT_FULL_MASK 0xFFFFFFFFFFFFFFFF
+
+#define CNAT_MAX_SESSIONS_PER_BIB 0xFFFF
+
+#define FORCE_DEL 1 /* Delete static BIB entries as well */
+
+/* default timeout values */
+#define DSLITE_UDP_DEFAULT 300 /* 5 min */
+#define DSLITE_UDP_MIN 120 /* 2 min */
+#define DSLITE_TCP_TRANS 240 /* 4 min */
+#define DSLITE_TCP_EST 7200 /* 2 hrs */
+#define DSLITE_TCP_V4_SYN 6 /* 6 sec */
+#define DSLITE_FRAG_MIN 2 /* 2 sec */
+#define DSLITE_ICMP_DEFAULT 60 /* 1 min */
+
+extern u32 dslite_translation_create_count;
+extern u32 dslite_translation_delete_count;
+extern u32 dslite_translation_create_rate;
+extern u32 dslite_translation_delete_rate;
+extern u32 dslite_in2out_forwarding_count;
+extern u32 dslite_in2out_forwarding_rate;
+extern u32 dslite_out2in_forwarding_count;
+extern u32 dslite_out2in_forwarding_rate;
+
+#define DSLITE_V6_GET_HASH(in_key, hash, mask) \
+ a = in_key->ipv6[0] ^ in_key->ipv6[1] ^ in_key->ipv6[2] ^ in_key->ipv6[3] \
+ ^ in_key->ipv4_key.k.ipv4 ^ ((in_key->ipv4_key.k.port << 16) | in_key->ipv4_key.k.vrf); \
+ DSLITE_PRINTF(1, "%x:%x:%x:%x:%x:%x:%x\n", in_key->ipv6[0], in_key->ipv6[1], in_key->ipv6[2], in_key->ipv6[3], \
+ in_key->ipv4_key.k.ipv4, in_key->ipv4_key.k.port, in_key->ipv4_key.k.vrf); \
+ b = c = 0x9e3779b9;\
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask; \
+
+
+#define DSLITE_V6_GET_USER_HASH(ipv6, hash, mask) \
+ a = ipv6[0] ^ ipv6[1] ^ ipv6[2] ^ ipv6[3]; \
+ b = c = 0x9e3779b9;\
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask; \
+
+#define DSLITE_V4_GET_HASH(in_key, hash, mask) \
+ a = in_key.ipv4 ^ ((in_key.port << 16) | in_key.vrf); \
+ b = c = 0x9e3779b9; \
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask;
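+
+/*
+ * Illustrative usage sketch (assumed, not from the original source): the
+ * *_GET_HASH macros above use a, b and c as Jenkins-hash scratch variables
+ * that the caller is expected to declare in the enclosing scope:
+ *
+ *     u32 a, b, c, hash;
+ *     DSLITE_V4_GET_HASH(v4_key, hash, DSLITE_MAIN_HASH_MASK);
+ *
+ * where v4_key is any key struct exposing .ipv4, .port and .vrf fields.
+ */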
+
+#define PRIVATE_V4_ADDR_CHECK(addr, invalid) \
+ invalid = 0; \
+ int range1 = ((addr & 0xFF000000) >> 24); \
+ int range2 = ((addr & 0xFFF00000) >> 20); \
+ int range3 = ((addr & 0xFFFF0000) >> 16); \
+ int range4 = ((addr & 0xFFFFFFF8) >> 3); \
+ if(range1 != 0xa && range2 != 0xac1 && range3 != 0xc0a8 && range4 != 0x18000000) \
+ invalid = 1;
+
+#define V4_MAPPED_V6_CHECK(v6_addr, invalid) \
+ invalid = 0; \
+ int word1 = v6_addr[0]; \
+ int word2 = v6_addr[1]; \
+ int word3 = v6_addr[2]; \
+ if(!((word1 == 0) && (word2 == 0) && (word3 == 0x0000FFFF))) \
+ invalid = 1;
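+
+/*
+ * Reading of the bit masks above (added note, not in the original source):
+ * PRIVATE_V4_ADDR_CHECK flags an address as invalid unless it falls in
+ * 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 or 192.0.0.0/29, and
+ * V4_MAPPED_V6_CHECK flags a v6 address as invalid unless it is an
+ * IPv4-mapped address of the form ::ffff:a.b.c.d.  Both macros declare
+ * locals, so they can only be used where declarations are allowed.
+ */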
+
+
+extern dslite_table_entry_t dslite_table_array[DSLITE_MAX_DSLITE_ENTRIES];
+extern dslite_table_entry_t *dslite_table_ptr;
+
+#define DSLITE_CMP_V6_KEY(key1, key2) \
+ memcmp(key1, key2, sizeof(dslite_v6_key_t))
+
+#define DSLITE_CMP_V4_KEY(key1, key2) \
+ memcmp(key1, key2, sizeof(dslite_v4_key_t))
+
+
+#define DSLITE_CMP_V6_IP(ip1, ip2) \
+ memcmp(ip1, ip2, (sizeof(u32) * 4))
+
+
+#define DSLITE_CMP_V6_KEY1(key1, key2) \
+ (key1.ipv6[0] == key2.ipv6[0]) && (key1.ipv6[1] == key2.ipv6[1]) && \
+ (key1.ipv6[2] == key2.ipv6[2]) && (key1.ipv6[3] == key2.ipv6[3]) && \
+ (key1.port == key2.port) && (key1.vrf == key2.vrf)
+
+
+#define DSLITE_CMP_V6_IP1(ip1, ip2) \
+ ((ip1[0] == ip2[0]) && (ip1[1] == ip2[1]) && \
+ (ip1[2] == ip2[2]) && (ip1[3] == ip2[3]))
+
+#define DSLITE_CMP_V4_KEY1(key1, key2) \
+ (key1.key64 == key2.key64)
+
+cnat_main_db_entry_t*
+dslite_get_main_db_entry_v2(dslite_db_key_bucket_t *ki,
+ port_pair_t port_pair_type,
+ port_type_t port_type,
+ cnat_gen_icmp_info *info,
+ dslite_table_entry_t *dslite_entry_ptr,
+ cnat_key_t *dest_info);
+
+cnat_main_db_entry_t*
+dslite_main_db_lookup_entry(dslite_db_key_bucket_t *ki);
+
+
+cnat_user_db_entry_t*
+dslite_user_db_lookup_entry(dslite_db_key_bucket_t *uki);
+
+cnat_user_db_entry_t*
+dslite_user_db_create_entry(dslite_db_key_bucket_t *uki, u32 portmap_index);
+
+cnat_main_db_entry_t*
+dslite_create_main_db_entry_and_hash(dslite_db_key_bucket_t *ki,
+ cnat_db_key_bucket_t *ko,
+ cnat_user_db_entry_t *udb);
+
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/dslite_defs.h b/plugins/vcgn-plugin/vcgn/dslite_defs.h
new file mode 100644
index 00000000000..4860adcb77d
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/dslite_defs.h
@@ -0,0 +1,336 @@
+/*
+ *------------------------------------------------------------------
+ * dslite_defs.h - DSLITE structure definitions
+ *
+ * Copyright (c) 2011-2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __DSLITE_DEFS_H__
+#define __DSLITE_DEFS_H__
+
+#ifdef TOBE_PORTED
+#include "spp_platform_common.h"
+#include "cgse_defs.h"
+#endif
+#include "cnat_cli.h"
+#include "cnat_config.h"
+#include "cnat_ports.h"
+#include "cnat_bulk_port_defs.h"
+
+extern u32 ds_lite_config_debug_level;
+
+#define SWAP_IPV6_ADDR(ipv6_hdr, dslite_entry_ptr) \
+ ipv6_hdr->dst_addr[0] = ipv6_hdr->src_addr[0]; \
+ ipv6_hdr->dst_addr[1] = ipv6_hdr->src_addr[1]; \
+ ipv6_hdr->dst_addr[2] = ipv6_hdr->src_addr[2]; \
+ ipv6_hdr->dst_addr[3] = ipv6_hdr->src_addr[3]; \
+ ipv6_hdr->src_addr[0] = spp_host_to_net_byte_order_32(dslite_entry_ptr->AFTR_v6_address[0]); \
+ ipv6_hdr->src_addr[1] = spp_host_to_net_byte_order_32(dslite_entry_ptr->AFTR_v6_address[1]); \
+ ipv6_hdr->src_addr[2] = spp_host_to_net_byte_order_32(dslite_entry_ptr->AFTR_v6_address[2]); \
+ ipv6_hdr->src_addr[3] = spp_host_to_net_byte_order_32(dslite_entry_ptr->AFTR_v6_address[3]);
+
+#define DSLITE_SET_TX_PKT_TYPE(type) { \
+ ctx->ru.tx.packet_type = type; \
+}
+
+#define DSLITE_INC_STATS_V4(PTR, COUNTER, IPV4_SRC_ADDR) { \
+ PTR->COUNTER++; \
+}
+
+#define DSLITE_INC_STATS_V6(PTR, COUNTER, IPV6_DEST_ADDR) { \
+ PTR->COUNTER++; \
+}
+
+
+#define DSLITE_INVALID_UIDX 0xffff /*invalid svi app uidb index */
+#define DSLITE_INVALID_VRFID 0xffffffff /*invalid vrf id */
+
+#define DSLITE_VRF_MASK 0x3fff
+#define DSLITE_MAX_VRFMAP_ENTRIES (DSLITE_VRF_MASK + 1)
+
+#define DSLITE_VRFMAP_ENTRY_INVALID 0xffff
+
+#define DSLITE_V6_PREFIX_MASK_MIN 16
+#define DSLITE_V6_PREFIX_MASK_MAX 96
+#define DSLITE_V6_PREFIX_MASK_MULTIPLE 8
+
+#define DSLITE_TUNNEL_MTU_MIN 1280
+#define DSLITE_TUNNEL_MTU_MAX 9216
+
+#define DSLITE_TUNNEL_TTL_MIN 0
+#define DSLITE_TUNNEL_TTL_MAX 255
+
+#define DSLITE_TUNNEL_TOS_MIN 0
+#define DSLITE_TUNNEL_TOS_MAX 255
+
+#define DSLITE_V4_MASK_MAX 32
+
+//#define XLAT_MAX_FRAG_ID_COUNTERS (256)
+#define DSLITE_AFTR_IPV4_ADDR 0xC0000001
+
+#define DSLITE_MAX_TAP_RG_ENTRIES 2
+#define DSLITE_MAX_DSLITE_ENTRIES (256)
+#define DSLITE_MAX_DSLITE_ID (DSLITE_MAX_DSLITE_ENTRIES-1)
+/* Define the value below as 64 if the first 64 entries are for NAT44 */
+#define DSLITE_INDEX_OFFSET 1
+
+#define DSLITE_INVALID_DSLITE_ID (0)
+
+#define DSLITE_TABLE_ENTRY_DELETED 0
+#define DSLITE_TABLE_ENTRY_ACTIVE 1
+#define DSLITE_TABLE_ENTRY_DORMANT 2
+#define DSLITE_TABLE_ENTRY_INVALID_UIDB 3
+
+typedef struct {
+ u16 tcp_initial_setup_timeout;
+ u16 tcp_active_timeout;
+ u16 udp_init_session_timeout;
+ u16 udp_act_session_timeout;
+ u16 icmp_session_timeout;
+ u16 temp;
+} dslite_timeout_info_t;
+
+
+typedef struct {
+
+ u16 state; /* To use nat44 enums ?? TBD */
+ u16 dslite_id; /* DSLITE_ID value for this table entry - for easy access */
+
+ u16 i_vrf; /* V6 uidb index */
+ u16 o_vrf; /* V4 uidb index */
+
+ u16 cnat_main_db_max_ports_per_user; /* port limit */
+ u16 tcp_mss; /*tcp max segment size for this inside vrf */
+
+ u32 delete_time;
+
+ cnat_portmap_v2_t *portmap_list;
+
+ u32 nfv9_logging_index;
+ u32 syslog_logging_index;
+ u32 AFTR_v6_address[4];
+
+#define DSLITE_IPV4_TOS_OVERRIDE_FLAG 0x00000001
+#define DSLITE_IPV6_TOS_OVERRIDE_FLAG 0x00000002
+#define DSLITE_IPV4_TTL_OVERRIDE_FLAG 0x00000004
+#define DSLITE_IPV6_TTL_OVERRIDE_FLAG 0x00000008
+#define DSLITE_IPV6_FRAG_REASSEMB_ENG 0x00000010
+#define DSLITE_FTP_ALG_ENABLE 0x00000020
+#define DSLITE_RTSP_ALG_ENABLE 0x00000040
+#define DSLITE_NETFLOW_ENABLE 0x00000080
+#define DSLITE_SYSLOG_ENABLE 0x00000100
+
+ u16 feature_flags;
+ u16 tunnel_mtu;
+
+ u8 ipv4_ttl_value;
+ u8 ipv6_ttl_value;
+ u8 ipv4_tos_value;
+ u8 ipv6_tos_value;
+
+ u32 v4_if_num; /* V4 SVI ifnum */
+ u32 v6_if_num; /* V6 SVI ifnum */
+ u32 i_vrf_id; //inside vrf id
+ u32 o_vrf_id; //outside vrf id
+
+ dslite_timeout_info_t timeout_info;
+ u16 cnat_static_port_range;
+ u16 dyn_start_port;
+
+ u32 AFTR_v4_addr;
+ bulk_alloc_size_t bulk_size; /* should be equivalent to u16 - 2 bytes */
+ u32 pcp_server_addr;
+ u16 pcp_server_port;
+ u8 mapping_refresh_both_direction;
+ u8 pad;
+ u16 rtsp_port;
+#define DSLITE_BIDIR_REFRESH 1
+ u8 dslite_enable; /* DS-Lite enable check flag */
+ u8 syslog_logging_policy; /* DS-Lite Session Logging check flag */
+ u8 nf_logging_policy;
+
+ u8 temp1;
+ u16 temp2;
+ u32 temp3;
+ u32 rseed_ip;
+} dslite_table_entry_t;
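+
+/*
+ * Illustrative sketch (hypothetical caller code, not from the original
+ * source): per-instance features are tested against feature_flags, e.g.:
+ *
+ *     if (dslite_entry_ptr->feature_flags & DSLITE_FTP_ALG_ENABLE) {
+ *         ... take the FTP ALG path ...
+ *     }
+ */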
+
+typedef struct {
+ u64 v4_to_v6_invalid_uidb_drop_count;
+ u64 v6_to_v4_invalid_uidb_drop_count;
+ u64 v4_to_v6_frag_invalid_uidb_drop_count;
+} dslite_global_counters_t;
+
+typedef struct {
+ u32 tap_enable;
+ u32 ipv4_addr;
+ u32 ipv6_addr[4];
+} dslite_tap_rg_t;
+
+extern dslite_table_entry_t *dslite_table_db_ptr;
+
+
+#define DSLITE_ADD_UIDB_INDEX_DSLITE_ID_MAPPING(uidb_index, dslite_id) \
+ *(cgse_uidb_index_cgse_id_mapping_ptr + uidb_index) = dslite_id;
+
+extern u8 my_instance_number;
+
+extern void dslite_clear_counters(u16 dslite_id);
+extern void dslite_clear_per_RG_counters();
+extern dslite_global_counters_t dslite_global_counters;
+extern u32 dslite_config_debug_level;
+extern u32 dslite_data_path_debug_level;
+extern u32 dslite_defrag_debug_level;
+extern u32 dslite_debug_level;
+
+typedef struct {
+ u64 v6_to_v4_tcp_input_count;
+ u64 v6_to_v4_tcp_nat_error;
+ u64 v6_to_v4_tcp_output_count;
+} dslite_v6_to_v4_tcp_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_tcp_input_count;
+ u64 v4_to_v6_tcp_no_entry;
+ u64 v4_to_v6_tcp_output_count;
+} dslite_v4_to_v6_tcp_counter_t;
+
+typedef struct {
+ u64 v6_to_v4_udp_input_count;
+ u64 v6_to_v4_udp_nat_error;
+ u64 v6_to_v4_udp_output_count;
+} dslite_v6_to_v4_udp_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_udp_input_count;
+ u64 v4_to_v6_udp_no_entry;
+ u64 v4_to_v6_udp_output_count;
+} dslite_v4_to_v6_udp_counter_t;
+
+typedef struct {
+ u64 v6_to_v4_icmp_qry_input_count;
+ u64 v6_to_v4_icmp_qry_nat_error;
+ u64 v6_to_v4_icmp_qry_output_count;
+} dslite_v6_to_v4_icmp_qry_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_icmp_qry_input_count;
+ u64 v4_to_v6_icmp_qry_no_nat_entry;
+ u64 v4_to_v6_icmp_qry_output_count;
+} dslite_v4_to_v6_icmp_qry_counter_t;
+
+typedef struct {
+ u64 v6_to_v4_icmp_error_input_count;
+ u64 v6_to_v4_icmp_error_nat_error;
+ u64 v6_to_v4_icmp_error_output_count;
+} dslite_v6_to_v4_icmp_error_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_icmp_error_input_count;
+ u64 v4_to_v6_icmp_error_no_nat_entry;
+ u64 v4_to_v6_icmp_error_output_count;
+} dslite_v4_to_v6_icmp_error_counter_t;
+
+typedef struct {
+ u64 v6_icmp_error_input_count;
+ u64 v6_AFTR_echo_reply_count;
+ u64 v6_to_v4_icmp_error_unsupported_type_drop_count;
+ u64 v6_to_v4_icmp_error_no_db_entry_count;
+ u64 v6_to_v4_icmp_err_throttled_count;
+ u64 v6_to_v4_icmp_error_xlated_count;
+} dslite_v6_icmp_error_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_ttl_gen_count;
+ u64 v4_to_v6_icmp_throttle_count;
+ u64 v4_to_v6_ptb_gen_count;
+ u64 v4_to_v6_aftr_v4_echo_reply_count;
+ u64 v6_to_v4_ttl_gen_count;
+ u64 v6_to_v4_icmp_throttle_count;
+ u64 v6_to_v4_admin_prohib_icmp_count;
+ u64 v6_to_v4_aftr_v4_echo_reply_count;
+ u64 v6_icmp_gen_count;
+} dslite_icmp_gen_counter_t;
+
+typedef struct {
+ u64 dslite_input_tunnel_pkt;
+ u64 dslite_encap_count;
+ u64 dslite_decap_count;
+ u64 dslite_sec_check_failed;
+ u64 dslite_unsupp_packet;
+} dslite_common_counter_t;
+
+typedef struct {
+
+ dslite_v6_to_v4_tcp_counter_t v64_tcp_counters;
+ dslite_v4_to_v6_tcp_counter_t v46_tcp_counters;
+ dslite_v6_to_v4_udp_counter_t v64_udp_counters;
+ dslite_v4_to_v6_udp_counter_t v46_udp_counters;
+ dslite_v6_to_v4_icmp_qry_counter_t v64_icmp_counters;
+ dslite_v4_to_v6_icmp_qry_counter_t v46_icmp_counters;
+ dslite_v6_to_v4_icmp_error_counter_t v64_icmp_error_counters;
+ dslite_v4_to_v6_icmp_error_counter_t v46_icmp_error_counters;
+ dslite_v6_icmp_error_counter_t dslite_v6_icmp_err_counters;
+ dslite_icmp_gen_counter_t dslite_icmp_gen_counters;
+ dslite_common_counter_t dslite_common_counters;
+} dslite_counters_t;
+
+typedef struct {
+ u32 active_translations;
+ u32 translation_create_rate;
+ u32 translation_delete_rate;
+ u32 in2out_forwarding_rate;
+ u32 out2in_forwarding_rate;
+ u32 in2out_drops_port_limit_exceeded;
+ u32 in2out_drops_system_limit_reached;
+ u32 in2out_drops_resource_depletion;
+ u32 no_translation_entry_drops;
+ u32 pool_address_totally_free;
+ u32 num_subscribers;
+ u32 dummy;
+ u64 drops_sessiondb_limit_exceeded;
+} dslite_common_stats_t;
+
+typedef struct {
+ u16 msg_id;
+ u8 rc;
+ u8 pad[5];
+ dslite_counters_t counters;
+} dslite_show_statistics_summary_resp;
+
+
+#define CMD_GENERATE_PTB 0x1
+#define CMD_GENERATE_TTL 0x2
+
+/*
+ * This structure provides an abstraction for data handed from one VPP node
+ * to its disposition node, or further along the dslite node graph.
+ */
+typedef struct {
+ u32 icmp_gen_type; // ctx->feature_data[0]
+ u32 reserved1; // ctx->feature_data[1]
+ u32 reserved2; // ctx->feature_data[2]
+ u32 reserved3; // ctx->feature_data[3]
+} dslite_feature_data_t;
+
+extern dslite_counters_t dslite_all_counters[DSLITE_MAX_DSLITE_ENTRIES];
+//extern dslite_inst_gen_counter_t dslite_inst_gen_counters[DSLITE_MAX_DSLITE_ENTRIES];
+
+
+ extern void dslite_show_config(void);
+#define STAT_PORT_RANGE_FROM_INST_PTR(inst) ((inst)->cnat_static_port_range)
+
+#endif /* __DSLITE_DEFS_H__ */
+
diff --git a/plugins/vcgn-plugin/vcgn/index_list.c b/plugins/vcgn-plugin/vcgn/index_list.c
new file mode 100644
index 00000000000..ec1b83b0b30
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/index_list.c
@@ -0,0 +1,336 @@
+/*
+ *------------------------------------------------------------------
+ * index_list.c - vector-index-based lists. 64-bit pointers suck.
+ *
+ * Copyright (c) 2008-2009, 2011 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdio.h>
+#include <string.h>
+//#include <clib_lite.h>
+#include <vppinfra/vec.h>
+#include "index_list.h"
+
+/*
+ * index_slist_addhead
+ *
+ * args: headp -- pointer to e.g. a hash bucket
+ * vector -- vector containing the list
+ * elsize -- size of an element in this vector
+ * offset -- offset in each vector element of this list thread
+ * index_to_add -- index in the vector to add to the list
+ *
+ * Adds new items to the head of the list. Try not to screw up the args!
+ */
+void index_slist_addhead (index_slist_t *headp,
+ u8 *vector, u32 elsize, u32 offset, u32 index_to_add)
+{
+ return (index_slist_addhead_inline(headp, vector, elsize, offset,
+ index_to_add));
+}
+
+/*
+ * index_slist_remelem
+ *
+ * args: headp -- pointer to e.g. a hash bucket
+ * vector -- vector containing the list
+ * elsize -- size of an element in this vector
+ * offset -- offset in each vector element of this list thread
+ * index_to_del -- index in the vector to delete from the list
+ *
+ * Try not to screw up the args!
+ */
+
+int index_slist_remelem (index_slist_t *headp,
+ u8 *vector, u32 elsize, u32 offset,
+ u32 index_to_delete)
+{
+ return (index_slist_remelem_inline(headp, vector, elsize, offset,
+ index_to_delete));
+}
+
+
+/*
+ * index_dlist_addtail
+ *
+ * Append the indicated vector element to the doubly-linked list
+ * whose listhead is the vector element at head_index.
+ *
+ * args: head_index -- listhead vector element index.
+ * vector -- vector containing the list
+ * elsize -- size of an element in this vector
+ * offset -- offset in each vector element of this list thread
+ * index_to_add -- index in the vector to add to the list
+ *
+ * Do not call this routine to create the listhead. Simply set
+ * index_dlist->next = index_dlist->prev = index of item.
+ *
+ * Try not to screw up the args.
+ */
+
+void index_dlist_addtail (u32 head_index, u8 *vector, u32 elsize,
+ u32 offset, u32 index_to_add)
+{
+ index_dlist_t *elp;
+ index_dlist_t *elp_next;
+ index_dlist_t *headp;
+
+ headp = (index_dlist_t *)(vector + offset + elsize*head_index);
+ elp = (index_dlist_t *)(vector + offset + elsize*index_to_add);
+ elp->next = index_to_add;
+ elp->prev = index_to_add;
+
+ elp->next = headp->next;
+ headp->next = index_to_add;
+
+ elp_next = (index_dlist_t *)(vector + offset + elsize*elp->next);
+ elp->prev = elp_next->prev;
+ elp_next->prev = index_to_add;
+}
+
+u32 index_dlist_remelem (u32 head_index,
+ u8 *vector, u32 elsize, u32 offset,
+ u32 index_to_delete)
+{
+ u32 rv = head_index;
+ index_dlist_t *headp, *elp, *elp_next;
+
+ elp = (index_dlist_t *)(vector + offset + elsize*index_to_delete);
+
+ /* Deleting the head index? */
+ if (PREDICT_FALSE(head_index == index_to_delete)) {
+ rv = elp->next;
+ /* The only element on the list? */
+ if (PREDICT_FALSE(rv == head_index))
+ rv = EMPTY;
+ }
+
+ headp = (index_dlist_t *)(vector + offset + elsize*elp->prev);
+ headp->next = elp->next;
+ elp_next = (index_dlist_t *)(vector + offset + elsize*elp->next);
+ elp_next->prev = elp->prev;
+
+ elp->next = elp->prev = EMPTY;
+
+ return rv;
+}
+
+
+#ifdef TEST_CODE2
+
+typedef struct tv_ {
+ char junk[43];
+ index_dlist_t l;
+} tv_t;
+
+
+void index_list_test_cmd(int argc, unsigned long *argv)
+{
+ int i, j;
+ u32 head_index;
+ index_dlist_t *headp;
+ tv_t *tp=0;
+
+ vec_validate(tp, 3);
+ head_index = 3;
+
+ memset(tp, 0xa, sizeof(tp[0])*vec_len(tp));
+
+ /* Here's how to set up the head element... */
+ headp = &((tp + head_index)->l);
+ headp->next = headp->prev = head_index;
+
+ for (i = 0; i < 3; i++) {
+ index_dlist_addtail(head_index, (u8 *)tp, sizeof(tp[0]),
+ STRUCT_OFFSET_OF(tv_t, l), i);
+ printf("headp next %d prev %d\n",
+ headp->next, headp->prev);
+ for (j = 0; j <= 3; j++) {
+ printf ("[%d]: next %d prev %d\n", j,
+ tp[j].l.next, tp[j].l.prev);
+ }
+ printf("---------------\n");
+
+ }
+
+ printf("After all adds:\n");
+
+ printf("headp next %d prev %d\n",
+ headp->next, headp->prev);
+
+ for (j = 0; j <= 3; j++) {
+ printf ("[%d]: next %d prev %d\n", j,
+ tp[j].l.next, tp[j].l.prev);
+ }
+ printf("---------------\n");
+
+ head_index = index_dlist_remelem (head_index, (u8 *)tp, sizeof(tp[0]),
+ STRUCT_OFFSET_OF(tv_t, l), 1);
+
+ printf("after delete 1, head index %d\n", head_index);
+ headp = &((tp + head_index)->l);
+ printf("headp next %d prev %d\n",
+ headp->next, headp->prev);
+ for (j = 0; j <= 3; j++) {
+ printf ("[%d]: next %d prev %d\n", j,
+ tp[j].l.next, tp[j].l.prev);
+ }
+ printf("---------------\n");
+
+ index_dlist_addtail(head_index, (u8 *)tp, sizeof(tp[0]),
+ STRUCT_OFFSET_OF(tv_t, l), 1);
+
+ printf("after re-add 1, head index %d\n", head_index);
+ headp = &((tp + head_index)->l);
+ printf("headp next %d prev %d\n",
+ headp->next, headp->prev);
+ for (j = 0; j <= 3; j++) {
+ printf ("[%d]: next %d prev %d\n", j,
+ tp[j].l.next, tp[j].l.prev);
+ }
+ printf("---------------\n");
+
+ for (i = 3; i >= 0; i--) {
+ head_index = index_dlist_remelem (head_index, (u8 *)tp, sizeof(tp[0]),
+ STRUCT_OFFSET_OF(tv_t, l), i);
+ printf("after delete, head index %d\n", head_index);
+ if (head_index != EMPTY) {
+ headp = &((tp + head_index)->l);
+ printf("headp next %d prev %d\n",
+ headp->next, headp->prev);
+ for (j = 0; j <= 3; j++) {
+ printf ("[%d]: next %d prev %d\n", j,
+ tp[j].l.next, tp[j].l.prev);
+ }
+ } else {
+ printf("empty list\n");
+ }
+ printf("---------------\n");
+ }
+}
+#endif /* test code 2 */
+
+#ifdef TEST_CODE
+
+typedef struct tv_ {
+ char junk[43];
+ index_slist_t l;
+} tv_t;
+
+
+void index_list_test_cmd(int argc, unsigned long *argv)
+{
+ int i, j;
+ tv_t *tp = 0;
+ index_slist_t *buckets = 0;
+
+ vec_add1((u32 *)buckets, EMPTY);
+ vec_validate(tp, 9);
+
+ for (i = 0; i < 10; i++) {
+ index_slist_addhead(buckets, (u8 *)tp, sizeof(*tp),
+ STRUCT_OFFSET_OF(tv_t, l), i);
+ }
+
+ printf ("after adds, buckets[0] = %u\n", buckets[0]);
+
+ for (j = 0; j < 10; j++) {
+ printf("tp[%d] next %u\n", j, tp[j].l);
+
+ }
+
+ for (i = 0; i < 10; i++) {
+ if (PREDICT_FALSE(index_slist_remelem(buckets, (u8 *) tp, sizeof(*tp),
+ STRUCT_OFFSET_OF(tv_t, l), i))) {
+ printf("OUCH: remelem failure at index %d\n", i);
+ }
+ if (PREDICT_FALSE(tp[i].l.next != EMPTY)) {
+ printf("OUCH: post-remelem next not EMPTY, index %d\n", i);
+ }
+ }
+
+ printf ("after deletes, buckets[0] = %x\n", buckets[0]);
+
+ for (i = 0; i < 10; i++) {
+ index_slist_addhead(buckets, (u8 *)tp, sizeof(*tp),
+ STRUCT_OFFSET_OF(tv_t, l), i);
+ }
+
+ printf ("after adds, buckets[0] = %u\n", buckets[0]);
+
+ for (j = 0; j < 10; j++) {
+ printf("tp[%d] next %u\n", j, tp[j].l);
+
+ }
+
+ for (i = 9; i >= 0; i--) {
+ if (PREDICT_FALSE(index_slist_remelem(buckets, (u8 *) tp, sizeof(*tp),
+ STRUCT_OFFSET_OF(tv_t, l), i))) {
+ printf("OUCH: remelem failure at index %d\n", i);
+ }
+ if ((tp[i].l.next != EMPTY)) {
+ printf("OUCH: post-remelem next not EMPTY, index %d\n", i);
+ }
+ }
+
+ printf ("after deletes, buckets[0] = %x\n", buckets[0]);
+
+ printf("add evens, then odds...\n");
+
+ for (i = 0; i < 10; i += 2) {
+ index_slist_addhead(buckets, (u8 *)tp, sizeof(*tp),
+ STRUCT_OFFSET_OF(tv_t, l), i);
+
+ printf ("head = buckets[0].next = %d\n", buckets[0].next);
+ for (j = 0; j < 10; j++) {
+ printf("tp[%d] next %u\n", j, tp[j].l);
+ }
+ printf("-------------\n");
+ }
+
+ for (i = 1; i < 10; i += 2) {
+ index_slist_addhead(buckets, (u8 *)tp, sizeof(*tp),
+ STRUCT_OFFSET_OF(tv_t, l), i);
+
+ printf ("head = buckets[0].next = %d\n", buckets[0].next);
+ for (j = 0; j < 10; j++) {
+ printf("tp[%d] next %u\n", j, tp[j].l);
+ }
+ printf("-------------\n");
+ }
+
+ printf ("after adds, buckets[0] = %u\n", buckets[0]);
+
+ for (j = 0; j < 10; j++) {
+ printf("tp[%d] next %u\n", j, tp[j].l);
+
+ }
+
+ for (i = 9; i >= 0; i--) {
+ if (PREDICT_FALSE(index_slist_remelem(buckets, (u8 *) tp, sizeof(*tp),
+ STRUCT_OFFSET_OF(tv_t, l), i))) {
+ printf("OUCH: remelem failure at index %d\n", i);
+ }
+ if (PREDICT_FALSE(tp[i].l.next != EMPTY)) {
+ printf("OUCH: post-remelem next not EMPTY, index %d\n", i);
+ }
+ }
+
+ printf ("after deletes, buckets[0] = %x\n", buckets[0]);
+
+ vec_free(buckets);
+ vec_free(tp);
+}
+#endif /* test code */
diff --git a/plugins/vcgn-plugin/vcgn/index_list.h b/plugins/vcgn-plugin/vcgn/index_list.h
new file mode 100644
index 00000000000..498cd7eb7ad
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/index_list.h
@@ -0,0 +1,118 @@
+/*
+ *------------------------------------------------------------------
+ * index_list.h - vector-index-based singly- and doubly-linked lists
+ *
+ * Copyright (c) 2008-2009 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef _INDEX_LIST_H_
+#define _INDEX_LIST_H_ 1
+
+/* An index we can't possibly see in practice... */
+#define EMPTY ((u32)~0)
+
+typedef struct index_slist_ {
+ u32 next;
+} index_slist_t;
+
+/*
+ * index_slist_addhead
+ *
+ * args: headp -- pointer to e.g. a hash bucket
+ * vector -- vector containing the list
+ * elsize -- size of an element in this vector
+ * offset -- offset in each vector element of this list thread
+ * index_to_add -- index in the vector to add to the list
+ *
+ * Adds new items to the head of the list. Try not to screw up the args!
+ */
+static inline void
+ index_slist_addhead_inline (index_slist_t *headp,
+ u8 *vector, u32 elsize,
+ u32 offset, u32 index_to_add)
+{
+ index_slist_t *addme;
+
+ addme = (index_slist_t *)(vector + offset + elsize*index_to_add);
+ addme->next = EMPTY;
+
+ if (headp->next == EMPTY) {
+ headp->next = index_to_add;
+ return;
+ } else {
+ addme->next = headp->next;
+ headp->next = index_to_add;
+ }
+}
+
+/*
+ * index_slist_remelem
+ *
+ * args: headp -- pointer to e.g. a hash bucket
+ * vector -- vector containing the list
+ * elsize -- size of an element in this vector
+ * offset -- offset in each vector element of this list thread
+ * index_to_del -- index in the vector to delete from the list
+ *
+ * Try not to screw up the args!
+ */
+
+static inline int
+ index_slist_remelem_inline (index_slist_t *headp,
+ u8 *vector, u32 elsize,
+ u32 offset, u32 index_to_delete)
+{
+ index_slist_t *findme;
+ index_slist_t *prev;
+ index_slist_t *cur;
+
+ findme = (index_slist_t *)(vector + offset + elsize*index_to_delete);
+
+ if (headp->next == index_to_delete) {
+ headp->next = findme->next;
+ findme->next = EMPTY;
+ return 0;
+ }
+
+ prev = (index_slist_t *)(vector + offset + elsize*headp->next);
+ cur = (index_slist_t *)(vector + offset + elsize*prev->next);
+ while (cur != findme) {
+ if (cur->next == EMPTY)
+ return (1);
+ prev = cur;
+ cur = (index_slist_t *)(vector + offset + elsize*cur->next);
+ }
+ prev->next = findme->next;
+ findme->next = EMPTY;
+ return 0;
+}
+
+void index_slist_addhead (index_slist_t *headp,
+ u8 *vector, u32 elsize, u32 offset, u32 index);
+int index_slist_remelem (index_slist_t *headp,
+ u8 *vector, u32 elsize, u32 offset, u32 index);
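+
+/*
+ * Usage sketch (illustrative; mirrors the TEST_CODE in index_list.c): a hash
+ * bucket is simply an index_slist_t, and elements of a vector that embed an
+ * index_slist_t member named l can be chained onto it with:
+ *
+ *     index_slist_addhead(&buckets[b], (u8 *)pool, sizeof(pool[0]),
+ *                         STRUCT_OFFSET_OF(elem_t, l), elem_index);
+ *
+ * where buckets, pool, elem_t and elem_index are hypothetical caller names.
+ */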
+
+typedef struct index_dlist_ {
+ u32 next;
+ u32 prev;
+} index_dlist_t;
+
+void index_dlist_addtail (u32 head_index, u8 *vector, u32 elsize,
+ u32 offset, u32 index_to_add);
+
+u32 index_dlist_remelem (u32 head_index,
+ u8 *vector, u32 elsize, u32 offset,
+ u32 index_to_delete);
+#endif /* _INDEX_LIST_H_ */
diff --git a/plugins/vcgn-plugin/vcgn/nat64_db.h b/plugins/vcgn-plugin/vcgn/nat64_db.h
new file mode 100644
index 00000000000..837464f6940
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/nat64_db.h
@@ -0,0 +1,480 @@
+/*
+ *------------------------------------------------------------------
+ * nat64_db.h - Stateful NAT64 translation database definitions
+ *
+ * Copyright (c) 2010-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#ifndef __NAT64_DB_H__
+#define __NAT64_DB_H__
+
+#include "cnat_cli.h"
+#include "index_list.h"
+#include "cnat_ports.h"
+#include "cnat_db.h"
+#include "nat64_defs.h"
+#include "cnat_bulk_port_defs.h"
+
+nat64_vrfmap_t *nat64_map_by_vrf;
+
+#define SESSION_OPT
+
+#define HASH_ENHANCE 4
+
+
+#define NAT64_MAIN_DB_SIZE \
+ (PLATFORM_NAT64_MAX_SESSIONS / PLATFORM_CNAT_INSTS)
+#define NAT64_MAIN_HASH_SIZE \
+ (HASH_ENHANCE * PLATFORM_CNAT_MAIN_PRELIM_HASH_SIZE)
+
+#define NAT64_MAIN_HASH_MASK (NAT64_MAIN_HASH_SIZE-1)
+
+
+/* nb: 200000 users / 64 CNAT = 3125, 76% occupancy */
+#define NAT64_USER_HASH_SIZE CNAT_USER_HASH_SIZE
+#define NAT64_USER_HASH_MASK (NAT64_USER_HASH_SIZE-1)
+
+/* Number of sessions per BIB entry/NAT64 translation
+   - nsessions is a u16, hence the limit of 0xFFFF
+   - ideally, sessions per translation will not reach this limit;
+     only a DoS could, and the limit bounds that case */
+#define NAT64_MAX_SESSIONS_PER_BIB 0xFFFF
+
+/* The number of per-IP/port timeout configs is limited to 1000 */
+/* (25K in total across all instances) */
+#define NAT64_TIMEOUT_HASH_SIZE \
+ PLATFORM_NAT64_TIMEOUT_HASH_SIZE
+
+#define NAT64_TIMEOUT_HASH_MASK (NAT64_TIMEOUT_HASH_SIZE - 1)
+#define NAT64_TIMEOUT_FULL_MASK 0xFFFFFFFFFFFFFFFF
+
+
+#define FORCE_DEL 1 /* Delete static BIB entries as well */
+
+/* default timeout values */
+#define NAT64_UDP_DEFAULT 300 /* 5 min */
+#define NAT64_UDP_MIN 120 /* 2 min */
+#define NAT64_TCP_TRANS 240 /* 4 min */
+#define NAT64_TCP_EST 7200 /* 2 hrs */
+#define NAT64_TCP_V4_SYN 6 /* 6 sec */
+#define NAT64_FRAG_MIN 2 /* 2 sec */
+#define NAT64_ICMP_DEFAULT 60 /* 1 min */
+
+
+#define NAT64_V6_GET_HASH(in_key, hash, mask) \
+ a = in_key->ipv6[0] ^ in_key->ipv6[1] ^ in_key->ipv6[2] ^ in_key->ipv6[3] \
+ ^ ((in_key->port << 16) | in_key->vrf); \
+ b = c = 0x9e3779b9;\
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask; \
+
+
+#define NAT64_V4_GET_HASH(in_key, hash, mask) \
+ a = in_key.ipv4 ^ ((in_key.port << 16) | in_key.vrf); \
+ b = c = 0x9e3779b9; \
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask;
+
+
+
+#define NAT64_V6_GET_SESSION_HASH(bib_index, in_addr, port, vrf, hash, mask) \
+ a = bib_index ^ in_addr[0] ^ in_addr[1] ^ in_addr[2] ^ in_addr[3] \
+ ^ port ^ vrf; \
+ b = c = 0x9e3779b9; \
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask;
+
+#define NAT64_V4_GET_SESSION_HASH(bib_index, in_addr, port, vrf, hash, mask) \
+ a = bib_index ^ in_addr ^ port ^ vrf; \
+ b = c = 0x9e3779b9; \
+ /* Jenkins hash, arbitrarily use c as the "answer" */ \
+ hash_mix32(a, b, c); \
+ hash = c & mask;
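+
+/*
+ * Note (added, not in the original source): as with the DS-Lite variants,
+ * these hash macros use a, b and c as scratch variables that the caller
+ * must declare, e.g.:
+ *
+ *     u32 a, b, c, bucket;
+ *     NAT64_V6_GET_HASH(ki, bucket, NAT64_MAIN_HASH_MASK);
+ *
+ * where ki points to a nat64_v6_key_t.
+ */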
+
+
+extern index_slist_t *nat64_bib_out2in_hash;
+extern index_slist_t *nat64_bib_in2out_hash;
+extern index_slist_t *nat64_bib_user_hash;
+extern index_slist_t *nat64_session_out2in_hash;
+#ifndef SESSION_OPT
+extern index_slist_t *nat64_session_in2out_hash;
+#endif
+extern index_slist_t *nat64_frag_out2in_hash;
+extern index_slist_t *nat64_frag_in2out_hash;
+extern index_slist_t *nat64_timeout_hash;
+
+
+/*
+ * nat64_bib_entry_t
+ * This structure represents the Binding Information Base (BIB) of NAT64
+ * sessions.
+ * It stores information about the inside v6 source transport address and
+ * corresponding outside v4 source transport address for each protocol.
+ */
+
+typedef struct {
+
+ index_slist_t nat64_bib_out2in_hash;
+ index_slist_t nat64_bib_in2out_hash;
+
+ /* 0x08 */
+ u16 flags; /* flags in cnat_db.h (cnat_main_db_entry_t) */
+#define NAT64_DB_FLAG_STATIC_PORT CNAT_DB_FLAG_STATIC_PORT
+#define NAT64_DB_NAT64_FLAG CNAT_DB_NAT64_FLAG
+#define NAT64_DB_FLAG_ALG_ENTRY CNAT_DB_FLAG_ALG_ENTRY
+#define NAT64_DB_FLAG_PCPI CNAT_DB_FLAG_PCPI
+#define NAT64_DB_FLAG_PCPE CNAT_DB_FLAG_PCPE
+
+ /* 0x0A */
+ u16 nat64_inst_id;
+ /* 0x0C */
+ u32 user_index;
+
+ /* 0x10 */
+ nat64_v4_key_t v4_out_key;
+
+ /* 0x18 */
+ nat64_v6_key_t v6_in_key;
+
+ /* 0x2C */
+ index_dlist_t user_ports;
+ /* 0x34 */
+ u32 session_head_index;
+ /* 0x38 - 56B*/
+ u16 nsessions;
+ u16 pad2;
+
+ /* 0x3C - 60B */
+ u32 in2outpkts;
+ u32 out2inpkts;
+ /* 0x44 - 68B */
+
+ /* 0x42 - 70B */
+ union { /* used by FTP ALG, pkt len delta due to FTP PORT cmd */
+ u16 delta;
+ i8 alg_dlt[2]; /* two delta values, 0 for previous, 1 for current */
+ u16 il; /* Used to indicate if interleaved mode is used
+ in case of RTSP ALG */
+ } alg;
+
+ u16 temp1;
+
+ u32 entry_expires;
+
+ u32 temp3;
+    /* unused, temp1, temp2 and temp3 are padding to keep this in sync with the nat44 main db entry size */
+ /* size of = 0x54 = 84 B */
+ u32 unused;
+
+} nat64_bib_entry_t ;
+
+/*
+ * nat64_bib_user_entry_t
+ * This structure stores information about translations of a particular user
+ * (User here refers to a same inside source address)
+ */
+typedef struct {
+ /* 0x00 */
+ index_slist_t user_hash;
+ /* 0x04 */
+ u16 ntranslations;
+ /* 0x06 */
+ u8 icmp_msg_count;
+ /* 0x07 */
+ u8 flags;
+#define NAT64_USER_DB_NAT64_FLAG CNAT_USER_DB_NAT64_FLAG
+
+ /* 0x08 */
+ u32 translation_list_head_index;
+ /* 0x0C */
+ u32 portmap_index;
+ /* 0x10 */
+ nat64_v6_key_t v6_in_key;
+ /* 0x24 = 36 B */
+
+ u32 align1; /* Make it 8B boundary and in sync with nat44 user db entry size */
+#ifndef NO_BULK_LOGGING
+ /* size of = 0x28 = 40 B */
+    /* Now adding 8 more bytes for bulk allocation. This makes it
+     * 0x30 (48). For stateful nat64, we may support bulk allocation
+     * later */
+ /* Indicates the currently used bulk port range */
+ i16 bulk_port_range_cache[BULK_RANGE_CACHE_SIZE];
+#endif /* NO_BULK_LOGGING */
+} nat64_bib_user_entry_t;
+
+/*
+ * nat64_session_entry_t
+ * This structure represents the session table. It maintains the information
+ * about the flow of the packets. It holds the source and destination
+ * (inside and outside) IPv4 and IPv6 transport addresses.
+ */
+typedef struct {
+
+ /* 0x00 */
+ index_slist_t nat64_session_out2in_hash;
+
+ /* 0x04 */
+ u32 bib_index; /* would point to v4/v6 src transport address */
+
+ /* 0x08 */
+ nat64_v4_key_t v4_dest_key;
+
+#ifndef SESSION_OPT
+ index_slist_t nat64_session_in2out_hash;
+ nat64_v6_key_t v6_dest_key;
+#endif
+
+ /* 0x10 */
+ u16 flags;/* Will be used for flags same as nat44 session */
+
+ /* 0x12 */
+ u16 timeout;
+
+ /* 0x14 */
+ u32 entry_expires;
+ /* 0x18 */
+ index_dlist_t bib_list;
+ /* 0x20 = 32 B */
+
+ union { /* alg same as cnat_main_db_t */
+ u16 delta;
+ i8 alg_dlt[2];
+ u16 il;
+ } alg;
+
+ /* 0x22 */
+ u16 tcp_flags; /* Mainly TCP events - check nat64_tcp_sm.h */
+
+ /* 0x24 */
+ u32 tcp_seq_num;
+
+ /* 0x28 */ /* unused1, unused2 and unused3 are put to make it in sync with
+ * cnat_session_db */
+ u32 unused1;
+
+ /* 0x2C */
+ u32 unused2;
+
+ /* 0x30 */
+ u16 unused3;
+
+ /* 0x32 - 50B */
+
+} nat64_session_entry_t;
+
+/*
+ * nat64_session_tcp_init_entry_t
+ * This structure will be used to store information about v4-initiated
+ * TCP entries.
+ */
+typedef struct {
+ nat64_v6_key_t v6_in_key;
+ nat64_v4_key_t v4_out_key;
+} nat64_session_tcp_init_entry_t;
+
+/*
+ * nat64_in_v6_frag_entry_t
+ * This structure will be used to store information about fragment flows
+ * that are coming from inside v6 hosts.
+ */
+typedef struct {
+ index_slist_t nat64_frag_in2out_hash;
+
+ u32 v6_src_addr[4];
+ u32 v6_destn_addr[4];
+ u32 frag_iden;
+ u16 vrf;
+ u16 pad1;
+} nat64_in_v6_frag_entry_t ;
+
+/*
+ * nat64_out_v4_frag_entry_t
+ * This structure will be used to store information about fragment flows
+ * that are coming from outside v4 machines.
+ */
+typedef struct {
+ index_slist_t nat64_frag_out2in_hash;
+
+ u32 v4_src_addr;
+ u32 v4_destn_addr;
+ u16 frag_iden;
+ u16 vrf;
+} nat64_out_v4_frag_entry_t ;
+
+/*
+ * nat64_timeout_t
+ * The following structures store the configured per-destination
+ * timeouts.
+ */
+typedef struct {
+ nat64_v4_key_t timeout_key;
+ u16 timeout_value;
+} nat64_timeout_t;
+
+/*
+ * nat64_timeout_db_entry_t
+ */
+typedef struct {
+ nat64_timeout_t t_key;
+ index_slist_t t_hash;
+} nat64_timeout_db_entry_t;
+
+
+typedef union {
+ cnat_main_db_entry_t nat44_main_db;
+ nat64_bib_entry_t nat64_bib_db;
+} cgse_nat_db_entry_t;
+
+typedef union {
+ cnat_session_entry_t nat44_session_db;
+ nat64_session_entry_t nat64_session_db;
+} cgse_nat_session_db_entry_t;
+
+typedef union {
+ cnat_user_db_entry_t nat44_user_db;
+ nat64_bib_user_entry_t nat64_user_db;
+} cgse_nat_user_db_entry_t;
+
+extern index_slist_t *nat64_bib_out2in_hash;
+extern index_slist_t *nat64_bib_in2out_hash;
+extern index_slist_t *nat64_bib_user_hash;
+extern index_slist_t *nat64_session_out2in_hash;
+extern index_slist_t *nat64_session_in2out_hash;
+extern index_slist_t *nat64_frag_out2in_hash;
+extern index_slist_t *nat64_frag_in2out_hash;
+extern index_slist_t *nat64_timeout_hash;
+
+extern nat64_bib_entry_t *nat64_bib_db;
+extern nat64_bib_user_entry_t *nat64_bib_user_db;
+extern nat64_session_entry_t *nat64_session_db;
+extern nat64_in_v6_frag_entry_t *nat64_in_frag_db;
+extern nat64_out_v4_frag_entry_t *nat64_out_frag_db;
+extern nat64_session_tcp_init_entry_t *nat64_tcp_init_db ;
+extern nat64_timeout_db_entry_t *nat64_timeout_db;
+
+extern nat64_table_entry_t nat64_table_array[NAT64_MAX_NAT64_ENTRIES];
+extern nat64_table_entry_t *nat64_table_ptr;
+
+extern cgse_nat_db_entry_t *cgse_nat_db;
+extern cgse_nat_user_db_entry_t *cgse_user_db;
+extern cgse_nat_session_db_entry_t *cgse_session_db;
+
+void nat64_bib_user_db_delete (nat64_bib_user_entry_t *up);
+
+nat64_bib_user_entry_t*
+nat64_bib_user_db_create_entry(nat64_v6_key_t *uki, u32 bucket,
+ u32 portmap_index);
+
+nat64_bib_user_entry_t*
+nat64_bib_user_db_lookup_entry(nat64_v6_key_t *uki, u32 *bucket);
+
+
+nat64_bib_entry_t*
+nat64_bib_db_lookup_entry(nat64_v6_key_t *ki);
+
+void nat64_bib_db_in2out_hash_delete (nat64_bib_entry_t *ep);
+
+void nat64_bib_db_out2in_hash_delete (nat64_bib_entry_t *ep);
+
+nat64_bib_entry_t *
+nat64_create_bib_db_entry_and_hash(nat64_v6_key_t *ki,
+ nat64_v4_key_t *ko,
+ nat64_bib_user_entry_t *udb);
+
+
+void nat64_delete_bib_db_entry (nat64_bib_entry_t *ep, u8 force);
+
+nat64_bib_entry_t *
+nat64_bib_db_lookup_entry_out2in (nat64_v4_key_t *ko);
+
+nat64_bib_entry_t *
+nat64_get_bib_db_entry (nat64_v6_key_t *ki,
+ port_pair_t port_pair_type,
+ port_type_t port_type,
+ cnat_gen_icmp_info *info);
+
+
+nat64_bib_entry_t*
+nat64_create_static_bib_db_entry (nat64_v6_key_t *ki,
+ nat64_v4_key_t *ko,
+ nat64_table_entry_t *my_table,
+ cnat_gen_icmp_info *info);
+
+
+
+//void nat64_session_db_in2out_hash_delete (nat64_session_entry_t *ep);
+void nat64_session_db_out2in_hash_delete (nat64_session_entry_t *ep);
+
+/*nat64_session_entry_t *
+nat64_session_db_lookup_entry(nat64_v6_key_t *ki, u32 bib_index); */
+
+
+nat64_session_entry_t *
+nat64_session_db_lookup_entry_out2in (nat64_v4_key_t *ko,u32 bib_index);
+
+/*
+nat64_session_entry_t *
+nat64_create_session_db_entry(nat64_v6_key_t *ki,
+ nat64_v4_key_t *ko,
+ nat64_bib_entry_t *bdb);
+*/
+nat64_session_entry_t *
+nat64_create_session_db_entry_v2( nat64_v4_key_t *ko,
+ nat64_bib_entry_t *bdb);
+
+
+//void nat64_delete_session_db_entry (nat64_session_entry_t *ep);
+void nat64_delete_session_db_entry_v2 (nat64_session_entry_t *ep, u8 force);
+
+u32 nat64_timeout_db_hash_lookup (nat64_v4_key_t t_key);
+
+u16 query_and_update_db_timeout_nat64(nat64_session_entry_t *db);
+
+void nat64_timeout_db_hash_add (nat64_timeout_db_entry_t *t_entry);
+
+u16 nat64_timeout_db_create (nat64_timeout_t t_entry);
+
+void nat64_timeout_db_delete(nat64_v4_key_t t_key);
+
+#define NAT64_CMP_V6_KEY(key1, key2) \
+ memcmp(key1, key2, sizeof(nat64_v6_key_t))
+
+#define NAT64_CMP_V4_KEY(key1, key2) \
+ memcmp(key1, key2, sizeof(nat64_v4_key_t))
+
+
+#define NAT64_CMP_V6_IP(ip1, ip2) \
+ memcmp(ip1, ip2, (sizeof(u32) * 4))
+
+
+#define NAT64_CMP_V6_KEY1(key1, key2) \
+ (key1.ipv6[0] == key2.ipv6[0]) && (key1.ipv6[1] == key2.ipv6[1]) && \
+ (key1.ipv6[2] == key2.ipv6[2]) && (key1.ipv6[3] == key2.ipv6[3]) && \
+ (key1.port == key2.port) && (key1.vrf == key2.vrf)
+
+
+#define NAT64_CMP_V6_IP1(ip1, ip2) \
+ ((ip1[0] == ip2[0]) && (ip1[1] == ip2[1]) && \
+ (ip1[2] == ip2[2]) && (ip1[3] == ip2[3]))
+
+#define NAT64_CMP_V4_KEY1(key1, key2) \
+ (key1.key64 == key2.key64)
+
+
+extern u8 nat64_timeout_dirty_flag[NAT64_MAX_NAT64_ENTRIES];
+
+#endif
diff --git a/plugins/vcgn-plugin/vcgn/nat64_defs.h b/plugins/vcgn-plugin/vcgn/nat64_defs.h
new file mode 100644
index 00000000000..47e431a7462
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/nat64_defs.h
@@ -0,0 +1,576 @@
+/*
+ *------------------------------------------------------------------
+ * nat64_defs.h - NAT64 structure definitions
+ *
+ * Copyright (c) 2007-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __NAT64_DEFS_H__
+#define __NAT64_DEFS_H__
+
+#ifdef TOBE_PORTED
+#include "spp_platform_common.h"
+#include "cgse_defs.h"
+#include "xlat_defs.h"
+#endif
+#include "cnat_cli.h"
+#include "cnat_ports.h"
+#include "tcp_header_definitions.h"
+#include "nat64_tcp_sm.h"
+#include "cnat_db.h"
+
+#define NAT64_MAX_FRAG_ID_COUNTERS (256)
+
+#define NAT64_MAX_NAT64_ENTRIES 500
+
+#define NAT64_MAX_ID (NAT64_MAX_NAT64_ENTRIES-1)
+
+#define NAT64_INVALID_ID (0)
+
+#define NAT64_MAX_CFG_INSTANCES 64
+
+#define NAT64_TABLE_ENTRY_DELETED 0
+#define NAT64_TABLE_ENTRY_ACTIVE 1
+#define NAT64_TABLE_ENTRY_DORMANT 2
+#define NAT64_TABLE_ENTRY_INVALID_UIDB 3
+
+#define NAT64_MAX_TRANSLATION_ENTRIES PLATFORM_MAX_TRANSLATION_ENTRIES
+
+#define NAT64_WKP_PREFIX_LEN 96
+#define NAT64_WKP_PREFIX_0 0x0064FF9B
+#define NAT64_WKP_PREFIX_1 0x00000000
+#define NAT64_WKP_PREFIX_2 0x00000000
+#define NAT64_WKP_PREFIX_3 0x00000000
+
+
+/* Reset the expiry time only if it is not 0;
+** if it is 0, the entry is queued for deletion by the clear command.
+**/
+
+#define NAT64_TIMEOUT_RST(db) \
+ if(PREDICT_TRUE(db->entry_expires !=0 )) \
+ db->entry_expires = cnat_current_time;
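+
+/*
+ * Usage sketch (illustrative): forwarding paths refresh an entry with
+ * NAT64_TIMEOUT_RST(db_ptr), where db_ptr points to an entry carrying an
+ * entry_expires field; cnat_current_time is assumed to be the global
+ * timestamp maintained by the cnat code.
+ */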
+
+extern u32 nat64_config_debug_level;
+extern u32 nat64_data_path_debug_level;
+
+extern u32 nat64_translation_create_count[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_translation_delete_count[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_translation_create_rate[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_translation_delete_rate[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_in2out_forwarding_count[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_in2out_forwarding_rate[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_out2in_forwarding_count[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_out2in_forwarding_rate[NAT64_MAX_NAT64_ENTRIES];
+
+extern u32 nat64_translation_create_count_old[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_translation_delete_count_old[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_in2out_forwarding_count_old[NAT64_MAX_NAT64_ENTRIES];
+extern u32 nat64_out2in_forwarding_count_old[NAT64_MAX_NAT64_ENTRIES];
+
+extern u16 *nat64_frag_id_counter_ptr;
+
+typedef struct {
+ u64 v6_to_v4_tcp_input_count;
+ u64 v6_to_v4_tcp_non_translatable_drop_count;
+ u64 v6_to_v4_tcp_state_drop_count;
+ u64 v6_to_v4_tcp_no_db_drop_count;
+ u64 v6_to_v4_tcp_output_count;
+} nat64_v6_to_v4_tcp_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_tcp_input_count;
+ u64 v4_to_v6_tcp_no_db_drop_count;
+ u64 v4_to_v6_tcp_v4_init_policy_drop_count;
+ u64 v4_to_v6_tcp_state_drop_count;
+ u64 v4_to_v6_tcp_output_count;
+ u64 v4_to_v6_tcp_filter_drop_count;
+} nat64_v4_to_v6_tcp_counter_t;
+
+typedef struct {
+ u64 v6_to_v4_udp_input_count;
+ u64 v6_to_v4_udp_non_translatable_drop_count;
+ u64 v6_to_v4_udp_no_db_drop_count;
+ u64 v6_to_v4_udp_output_count;
+ u64 v6_to_v4_udp_checksum_zero_count;
+} nat64_v6_to_v4_udp_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_udp_input_count;
+ u64 v4_to_v6_udp_no_db_drop_count;
+ u64 v4_to_v6_udp_filter_drop_count;
+ u64 v4_to_v6_udp_output_count;
+ u64 v4_to_v6_udp_crc_zero_drop_count;
+ u64 v4_to_v6_udp_frag_crc_zero_drop_count;
+ u64 v4_to_v6_udp_crc_zero_recycle_sent_count;
+ u64 v4_to_v6_udp_crc_zero_recycle_drop_count;
+} nat64_v4_to_v6_udp_counter_t;
+
+typedef struct {
+ u64 v6_to_v4_icmp_input_count;
+ u64 v6_to_v4_icmp_no_db_drop_count;
+ u64 v6_to_v4_icmp_non_translatable_drop_count;
+ u64 v6_to_v4_icmp_qry_output_count;
+} nat64_v6_to_v4_icmp_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_icmp_input_count;
+ u64 v4_to_v6_icmp_no_db_drop_count;
+ u64 v4_to_v6_icmp_filter_drop;
+ u64 v4_to_v6_icmp_qry_output_count;
+} nat64_v4_to_v6_icmp_counter_t;
+
+typedef struct {
+ u64 v6_to_v4_icmp_error_input_count;
+ u64 v6_to_v4_icmp_error_no_db_drop_count;
+ u64 v6_to_v4_icmp_error_invalid_next_hdr_drop_count;
+ u64 v6_to_v4_icmp_error_non_translatable_drop_count;
+ u64 v6_to_v4_icmp_error_unsupported_type_drop_count;
+ u64 v6_to_v4_icmp_error_output_count;
+} nat64_v6_to_v4_icmp_error_counter_t;
+
+typedef struct {
+ u64 v4_to_v6_icmp_error_input_count;
+ u64 v4_to_v6_icmp_error_no_db_drop_count;
+ u64 v4_to_v6_icmp_error_unsupported_type_drop_count;
+ u64 v4_to_v6_icmp_error_unsupported_protocol_drop_count;
+ u64 v4_to_v6_icmp_error_output_count;
+} nat64_v4_to_v6_icmp_error_counter_t;
+
+
+
+typedef struct {
+ u64 nat64_v4_frag_input_count;
+ u64 nat64_v4_frag_forward_count;
+ u64 nat64_v4_frag_drop_count;
+ u64 nat64_v4_frag_throttled_count;
+ u64 nat64_v4_frag_timeout_drop_count;
+ u64 nat64_v4_frag_tcp_input_count;
+ u64 nat64_v4_frag_udp_input_count;
+ u64 nat64_v4_frag_icmp_input_count;
+
+ u64 nat64_v6_frag_input_count;
+ u64 nat64_v6_frag_forward_count;
+ u64 nat64_v6_frag_drop_count;
+ u64 nat64_v6_frag_throttled_count;
+ u64 nat64_v6_frag_timeout_drop_count;
+ u64 nat64_v6_frag_tcp_input_count;
+ u64 nat64_v6_frag_udp_input_count;
+ u64 nat64_v6_frag_icmp_input_count;
+ u64 nat64_v6_frag_invalid_input_count;
+} nat64_frag_counter_t;
+
+typedef struct {
+ u64 v6_to_v4_options_input_count;
+ u64 v6_to_v4_options_drop_count;
+ u64 v6_to_v4_options_forward_count;
+ u64 v6_to_v4_options_no_db_drop_count;
+ u64 v6_to_v4_unsupp_proto_count;
+
+ u64 v4_to_v6_options_input_count;
+ u64 v4_to_v6_options_drop_count;
+ u64 v4_to_v6_options_forward_count;
+ u64 v4_to_v6_options_no_db_drop_count;
+ u64 v4_to_v6_unsupp_proto_count;
+} nat64_options_counter_t;
+
+typedef struct {
+ u64 v4_icmp_gen_count;
+ u64 v6_icmp_gen_count;
+} nat64_icmp_gen_counter_t;
+
+typedef struct{
+ u32 nat64_num_translations;
+ u32 nat64_num_dynamic_translations;
+ u32 nat64_num_static_translations;
+ u32 nat64_sessions;
+ u64 nat64_port_limit_exceeded;
+ u64 nat64_system_limit_reached;
+ u64 nat64_resource_depletion_drops;
+ u64 nat64_no_translation_entry_drops;
+ u64 nat64_filtering_drops ;
+ u64 nat64_invalid_ipv6_prefix_drops;
+ u32 num_subscribers;
+ u32 dummy;
+ u64 drops_sessiondb_limit_exceeded;
+} nat64_inst_gen_counter_t;
+
+typedef struct {
+
+ nat64_v6_to_v4_tcp_counter_t v64_tcp_counters;
+ nat64_v4_to_v6_tcp_counter_t v46_tcp_counters;
+ nat64_v6_to_v4_udp_counter_t v64_udp_counters;
+ nat64_v4_to_v6_udp_counter_t v46_udp_counters;
+ nat64_v6_to_v4_icmp_counter_t v64_icmp_counters;
+ nat64_v4_to_v6_icmp_counter_t v46_icmp_counters;
+ nat64_v6_to_v4_icmp_error_counter_t v64_icmp_error_counters;
+ nat64_v4_to_v6_icmp_error_counter_t v46_icmp_error_counters;
+ nat64_frag_counter_t nat64_frag_counters;
+ nat64_options_counter_t nat64_options_counters;
+ nat64_icmp_gen_counter_t nat64_icmp_gen_counters;
+
+} nat64_counters_t;
+
+/*
+ * nat64_portmap_v2_t
+ * This structure stores information about the IP address and ports
+ * available for NAT for this nat64 instance.
+ */
+
+typedef struct {
+ u32 delete_time;
+ u32 last_sent_timestamp;
+ u32 inuse;
+ u32 ipv4_address; /* native bit order */
+ uword bm[(BITS_PER_INST + BITS(uword)-1)/BITS(uword)];
+} nat64_portmap_t;
+
+/*
+ * nat64_v4_db_key_t
+ * This structure gives information about the v4 transport address
+ * (ipv4, port, protocol)
+ */
+typedef struct {
+ u32 ipv4;
+ u16 port;
+ u16 vrf; //bit0-12:inst_id, bit13:unused, bit14-15:protocol
+} nat64_v4_db_key_t;
+
+/* The union makes key comparison and hashing easier */
+typedef union {
+ nat64_v4_db_key_t k;
+ u64 key64;
+} nat64_v4_key_t;
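+
+/*
+ * Illustrative note (added): the union lets a full (ipv4, port, vrf) key be
+ * compared as a single 64-bit value, which is what NAT64_CMP_V4_KEY1() in
+ * nat64_db.h relies on:
+ *
+ *     nat64_v4_key_t k1, k2;
+ *     ...
+ *     if (k1.key64 == k2.key64)
+ *         matched = 1;
+ */
+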
+/*
+ * nat64_v6_db_key_t
+ * This structure gives information about the v6 transport address
+ * (ipv6, port, protocol)
+ */
+typedef struct {
+ u32 ipv6[4];
+ u16 port;
+ u16 vrf; //bit0-12:inst_id, bit13:unused, bit14-15:protocol
+} nat64_v6_key_t;
+
+
+typedef struct {
+ u16 udp_timeout;
+ u16 tcp_trans_timeout;
+ u16 tcp_est_timeout;
+ u16 tcp_v4_init_timeout;
+ u16 frag_timeout;
+ u16 icmp_timeout;
+} nat64_timeout_info_t;
+
+#define NAT64_UDP_DEF 300 /* 5min */
+#define NAT64_TCP_TRANS_DEF 240 /* 4min */
+#define NAT64_TCP_EST_DEF 7200 /* 2Hrs */
+#define NAT64_TCP_V4_DEF 6 /* 6 sec */
+#define NAT64_FRAG_DEF 2 /* 2 sec */
+#define NAT64_ICMP_DEF 60 /* 60 sec */
+
+/*
+ * nat64_table_entry_t
+ * This structure is used to store information regarding every nat64 instance.
+ */
+
+/* This structure holds the L4 information of a particular fragment stream set:
+ * src_port     - holds the original src port
+ * dst_port     - holds the original dst port
+ * total_length - useful only in ICMP nodes
+ * cnat_prot    - value used for lookups
+ * next_prot    - protocol after translation */
+
+typedef struct l4_frag_info {
+ u16 next_node_idx;
+ u16 src_port;
+ u16 dst_port;
+ u16 total_length;
+ u8 protocol;
+ u16 cnat_prot;
+ u16 next_prot;
+} l4_frag_info_t;
+
+typedef struct {
+ u16 state;
+ u16 nat64_id; /* nat64_id value for this table entry - for easy access */
+
+ u16 v4_uidb_index; /* V4 uidb index */
+ u16 v6_uidb_index; /* V6 uidb index */
+
+ u8 octet0_position;
+ u8 octet1_position;
+ u8 octet2_position;
+ u8 octet3_position;
+
+ u16 v4_to_v6_tcp_mss; /* TCP MSS */
+ u16 v6_to_v4_tcp_mss; /* TCP MSS */
+
+ /*
+ * V6 NAT64 prefix value and mask size
+ */
+ u32 v6_prefix[4];
+ u32 v6_prefix_mask[4];
+
+ u8 v6_prefix_mask_len;
+ u8 ubits_reserved_on;
+#define IPV4_TOS_OVERRIDE_FLAG 0x1
+#define IPV6_TOS_OVERRIDE_FLAG 0x2
+#define NAT64_STFUL_RTSP_ALG_ENABLE 0x4
+ u8 feature_flags;
+
+ u8 ipv4_tos_value;
+ u8 ipv6_tos_value;
+ u8 df_bit_clear;
+ u8 ipv6_mtu_set;
+
+ u8 filtering_policy;
+#define NAT64_ADDRESS_DEPENDENT_ENABLE 1
+ u8 tcp_policy;
+#define NAT64_TCP_SECURITY_FLAG_DISABLE 1
+ u8 ftp_flags;
+
+ u8 tcp_v4_init_enable;
+#define NAT64_TCP_V4_INIT_ENABLE 1
+
+ u8 logging_policy;
+#define NAT64_BIB_LOG_ENABLE 0 /* Default */
+#define NAT64_SESSION_LOG_ENABLE 1
+
+#define NAT64_BIDIR_REFRESH 1 /* 1 - timer refresh in both direction */
+#define NAT64_UNIDIR_REFRESH 0 /* 0 - default (only v6 side refresh timer)*/
+
+ u8 nat64_refresh_both_direction; /* 0 - default (only v6 side refresh timer) */
+#define NAT64_BIDIR_REFRESH 1 /* 1 - timer refresh in both direction */
+
+ u8 udp_zero_checksum; /* 0 - default (calc checksum) */
+#define NAT64_UDP_ZERO_CHECKSUM_DROP 1 /* 1 -drop */
+
+ u16 port_limit;
+
+ cnat_portmap_v2_t *port_map;
+
+ u32 logging_index;
+
+ nat64_timeout_info_t timeout_info;
+ /*
+     * These fields are not used much, so let us keep them at the end
+ */
+ u32 v4_vrf_id; /* V4 vrf id */
+ u32 v6_vrf_id; /* V6 vrf id */
+
+ u32 v4_if_num; /* V4 SVI ifnum */
+ u32 v6_if_num; /* V6 SVI ifnum */
+
+ u16 dyn_start_port;
+
+ u16 pcp_server_port;
+ u32 pcp_server_addr[4];
+ u32 rseed_ip;
+#define NAT64_FRAG_ENABLE 1
+#define NAT64_FRAG_DISABLE 0
+ u8 frag_state;
+ u8 nat64_enable; /* Enable/Disable this instance. */
+
+ u16 rtsp_port;
+
+} nat64_table_entry_t;
+
+
+
+extern nat64_table_entry_t nat64_table_array[NAT64_MAX_NAT64_ENTRIES];
+extern nat64_table_entry_t *nat64_table_ptr;
+extern nat64_counters_t nat64_all_counters[NAT64_MAX_NAT64_ENTRIES];
+extern nat64_inst_gen_counter_t nat64_inst_gen_counters[NAT64_MAX_NAT64_ENTRIES];
+
+typedef struct nat64_common_pipeline_data_ {
+#ifdef TOBE_PORTED
+ spp_node_main_vector_t *nmv;
+#endif
+
+ u16 *nat64_id_ptr;
+
+ nat64_table_entry_t *nat64_entry_ptr;
+
+} nat64_common_pipeline_data_t;
+
+typedef struct nat64_v6_to_v4_pipeline_data_ {
+ nat64_common_pipeline_data_t common_data;
+
+ u32 bib_bucket;
+ u32 session_bucket;
+
+ nat64_v6_key_t v6_in_key;
+ nat64_v6_key_t v6_dest_key;
+
+ /*
+   * IPv6 Data, everything in host order except for the addr fields
+ */
+ u32 version_trafficclass_flowlabel;
+
+ u16 payload_length;
+ u8 next_header;
+ u8 hop_limit;
+
+ /*
+ * These Address fields are in Network Order, so that
+ * it is easy to extract the IPv4 address from them
+ */
+ u32 ipv6_src[4];
+
+ u32 ipv6_dst[4];
+
+ u8 frag_next_header;
+ u8 frag_reserved;
+ u16 frag_offset_res_m;
+ u32 frag_identification;
+
+ ipv4_header *ipv4_header;
+ union {
+ struct _v4_l4_info {
+ u8 *ipv4_l4_header;
+ u8 pad0;
+ u8 pad1;
+ u8 pad2;
+ u8 pad3;
+ } v4_l4_info;
+ struct _v4_icmp_info {
+ icmp_v4_t *ipv4_icmp_header;
+ u8 old_icmp_type;
+ u8 new_icmp_type;
+ u8 old_icmp_code;
+ u8 new_icmp_code;
+ u16 checksum;
+ u16 old_iden; // length (ICMP extn), ptr (param)
+ u16 new_iden; // ----- do -------------
+ u16 old_seq; // MTU for PTB case
+ u16 new_seq; // ----- do -------------
+ } v4_icmp_info;
+ struct _v4_udp_info {
+ udp_hdr_type_t *ipv4_udp_header;
+ u8 pad0;
+ u8 pad1;
+ u8 pad2;
+ u8 pad3;
+ } v4_udp_info;
+ struct _v4_tcp_info {
+ tcp_hdr_type *ipv4_tcp_header;
+ u16 old_src_port;
+ u16 new_src_port;
+ u16 dest_port;
+ nat64_tcp_events tcp_event;
+ } v4_tcp_info;
+ } l4_u;
+
+
+ l4_frag_info_t *frag_info; /* port for tcp/udp, ident - icmp */
+
+
+ /* Counters will be added here */
+ union {
+ nat64_v6_to_v4_tcp_counter_t *tcp_counter;
+ nat64_v6_to_v4_udp_counter_t *udp_counter;
+ nat64_v6_to_v4_icmp_counter_t *icmp_counter;
+ nat64_v6_to_v4_icmp_error_counter_t *icmp_error_counter;
+ nat64_frag_counter_t *frag_counter;
+ nat64_options_counter_t *options_counter;
+ } nat64_ctr_u;
+ nat64_icmp_gen_counter_t *icmp_gen_counter;
+} nat64_v6_to_v4_pipeline_data_t;
+
+
+typedef struct nat64_v4_to_v6_pipeline_data_ {
+ nat64_common_pipeline_data_t common_data;
+
+ u32 bib_bucket;
+ u32 session_bucket;
+
+ nat64_v4_key_t v4_src_key; /* Will be translated using Prefix */
+ nat64_v4_key_t v4_dest_key; /* will be the out key for NAT64 */
+
+ /*
+ * IPv4 data
+ */
+ u8 version_hdr_len_words;
+ u8 tos;
+ u16 total_len_bytes;
+
+ u16 identification;
+ u16 frag_flags_offset;
+
+ u8 ttl;
+ u8 protocol;
+ u16 l4_checksum;
+
+ u32 ipv4_src_addr;
+ u32 ipv4_dst_addr;
+
+ /*
+ * Pointers to IPv6 headers
+ */
+ ipv6_header_t *ipv6_header;
+ ipv6_frag_header_t *ipv6_frag_header;
+
+ union {
+ struct _v6_l4_info {
+ u8 *ipv6_l4_header;
+ u8 pad0;
+ u8 pad1;
+ u8 pad2;
+ u8 pad3;
+ } v6_l4_info;
+ struct _v6_icmp_info {
+ icmp_v6_t *ipv6_icmp_header;
+ u8 old_icmp_type;
+ u8 new_icmp_type;
+ u8 old_icmp_code;
+ u8 new_icmp_code;
+ u16 old_iden; // length (ICMP extn), ptr (param)
+ u16 new_iden; // ----- do -------------
+ u16 old_seq; // MTU for PTB case
+ u16 new_seq; // ----- do -------------
+ } v6_icmp_info;
+ struct _v6_udp_info {
+ udp_hdr_type_t *ipv6_udp_header;
+ u8 pad0;
+ u8 pad1;
+ u8 pad2;
+ u8 pad3;
+ } v6_udp_info;
+ struct _v6_tcp_info {
+ tcp_hdr_type *ipv6_tcp_header;
+ u16 old_dest_port;
+ u16 new_dest_port;
+ u16 src_port;
+ nat64_tcp_events tcp_event;
+ } v6_tcp_info;
+ } l4_u;
+
+ l4_frag_info_t *frag_info; /* port for tcp/udp, ident - icmp */
+
+ /* Need to add counters here */
+ union {
+ nat64_v4_to_v6_tcp_counter_t *tcp_counter;
+ nat64_v4_to_v6_udp_counter_t *udp_counter;
+ nat64_v4_to_v6_icmp_counter_t *icmp_counter;
+ nat64_v4_to_v6_icmp_error_counter_t *icmp_error_counter;
+ nat64_frag_counter_t *frag_counter;
+ nat64_options_counter_t *options_counter;
+ } nat64_ctr_u;
+ nat64_icmp_gen_counter_t *icmp_gen_counter;
+
+} nat64_v4_to_v6_pipeline_data_t;
+
+#endif
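
The two pipeline-data structures above keep protocol-specific fields in the l4_u union, so only the member matching the parsed protocol is meaningful for a given packet. A hedged sketch of filling the TCP member of the v4-to-v6 context; the helper name is illustrative, and the port values are whatever the calling node parsed:

    /* Illustrative only: record the TCP ports in the v4-to-v6 pipeline
     * context.  Only meaningful when pctx->protocol indicates TCP. */
    static inline void
    nat64_v4_to_v6_set_tcp_ports (nat64_v4_to_v6_pipeline_data_t *pctx,
                                  u16 old_dport, u16 new_dport, u16 sport)
    {
        pctx->l4_u.v6_tcp_info.old_dest_port = old_dport;
        pctx->l4_u.v6_tcp_info.new_dest_port = new_dport;
        pctx->l4_u.v6_tcp_info.src_port      = sport;
    }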
diff --git a/plugins/vcgn-plugin/vcgn/nat64_tcp_sm.h b/plugins/vcgn-plugin/vcgn/nat64_tcp_sm.h
new file mode 100644
index 00000000000..3a505bc1649
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/nat64_tcp_sm.h
@@ -0,0 +1,91 @@
+/*
+ *------------------------------------------------------------------
+ * nat64_tcp_sm.h - Stateful NAT64 translation TCP State machine
+ *
+ * Copyright (c) 2011 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#ifndef __NAT64_TCP_SM_H__
+#define __NAT64_TCP_SM_H__
+
+
+/* TCP States */
+typedef enum {
+ TCP_CLOSED,
+ TCP_V4_INIT,
+ TCP_V6_INIT,
+ TCP_ESTABLISHED,
+ TCP_V4_FIN_RCV,
+ TCP_V6_FIN_RCV,
+ TCP_V4V6_FIN_RCV,
+ TCP_TRANS,
+ TCP_NONE
+} nat64_tcp_states;
+
+/* TCP Events */
+typedef enum {
+ TCP_TIMEOUT_EV,
+ TCP_V6_SYN_EV,
+ TCP_V4_SYN_EV,
+ TCP_V6_FIN_EV,
+ TCP_V4_FIN_EV,
+ TCP_V6_RST_EV,
+ TCP_V4_RST_EV,
+ TCP_DEFAULT_EV,
+ TCP_EV_COUNT
+} nat64_tcp_events;
+
+/* TCP Actions */
+typedef enum {
+ TCP_FORWARD,
+ TCP_COND_FORWARD, /* Conditional forward, based on presence of
+ * session and bib entries */
+ TCP_STORE,
+ TCP_PROBE,
+ TCP_CREATE_SESSION,
+ TCP_DELETE_SESSION,
+ TCP_DROP,
+ TCP_ACTION_NONE,
+ TCP_ACTION_COUNT
+} nat64_tcp_actions;
+
+typedef struct {
+ nat64_tcp_states next_state;
+ nat64_tcp_actions action;
+} nat64_tcp_trans_t;
+
+typedef struct {
+ nat64_tcp_trans_t event[TCP_EV_COUNT];
+} nat64_tcp_state_trans_t;
+
+extern nat64_tcp_state_trans_t nat64_tcp_sm_lookup[TCP_NONE];
+
+/*
+inline void
+nat64_update_v6_to_v4_tcp (nat64_v6_to_v4_pipeline_data_t *pctx_ptr,
+ nat64_bib_entry_t *bib_ptr);
+
+inline u8 nat64_v6_to_v4_tcp_perform_action (
+ spp_ctx_t *ctx,
+ nat64_v6_to_v4_pipeline_data_t *pctx_ptr,
+ nat64_bib_entry_t *bib_db,
+ nat64_session_entry_t *session_db);
+
+inline void
+nat64_copy_tcp_into_pctx (nat64_v6_to_v4_pipeline_data_t *pctx_ptr);
+*/
+
+
+
+#endif
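
nat64_tcp_sm_lookup[] together with the state, event and action enums forms a table-driven state machine: index by the current state, then by the incoming event, and the entry yields the next state plus the action to perform. A minimal hedged sketch of driving it; the table itself is populated in a .c file that is not part of this hunk:

    /* Illustrative only: assumes nat64_tcp_sm.h is included and that
     * nat64_tcp_sm_lookup[] has been populated elsewhere. */
    static inline nat64_tcp_actions
    nat64_tcp_sm_step (nat64_tcp_states *state, nat64_tcp_events ev)
    {
        nat64_tcp_trans_t trans;

        if (*state >= TCP_NONE || ev >= TCP_EV_COUNT)
            return TCP_ACTION_NONE;

        trans  = nat64_tcp_sm_lookup[*state].event[ev];
        *state = trans.next_state;
        return trans.action;
    }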
diff --git a/plugins/vcgn-plugin/vcgn/platform_common.h b/plugins/vcgn-plugin/vcgn/platform_common.h
new file mode 100644
index 00000000000..2805b6078ce
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/platform_common.h
@@ -0,0 +1,136 @@
+/*
+ *---------------------------------------------------------------------------
+ * platform_common.h -- defines all platform-related macros as no-ops;
+ * the included "platform_common_override.h" provides the actual
+ * platform-specific defines
+ *
+ * Copyright (c) 2011-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+#ifndef __PLATFORM_COMMON_H__
+#define __PLATFORM_COMMON_H__
+
+/* $$$ FIXME causes printf format warnings */
+#define PLATFORM_DEBUG_PRINT(...) /* printf(__VA_ARGS__) */
+#define PLATFORM_FILL_DST_ADDR_PORT_TABLE
+#define PLATFORM_SET_CTX_RU_TX_FROM_NODE(ctx, value)
+#define PLATFORM_SET_CTX_RU_TX_DST_IPPORT_IDX(ctx, value)
+#define PLATFORM_SET_CTX_RU_TX_PKT_TYPE(ctx, type)
+#define PLATFORM_SET_RX_VRF(ctx, rx_vrf, hardcode, mask)
+#define PLATFORM_SET_TX_VRF(ctx, tx_vrf)
+/* PLATFORM_CNAT_SET_RX_VRF definition is not same as PLATFORM_SET_RX_VRF,
+ * So, maintaining two different definitions
+ */
+#define PLATFORM_CNAT_SET_RX_VRF(ctx, rx_vrf, proto)
+#define PLATFORM_CNAT_SET_TX_VRF(ctx, tx_vrf)
+
+#define PLATFORM_PRINT_TESTING_PG()
+#define PLATFORM_INIT_TEMP_SENSORS()
+#define PLATFORM_READ_CPU_SENSORS(value)
+#define PLATFORM_SET_TEMP_READ_BLOCK(var, val)
+#define PLATFORM_NFV9_DISP_NODE_IDX
+
+
+/* The assumption is that syslog packets
+ * are sent out via the same channel as NFV9 packets.
+ * Override this if that assumption does not hold.
+ */
+#define PLATFORM_SYSLOG_DISP_NODE_IDX PLATFORM_NFV9_DISP_NODE_IDX
+
+#define PLATFORM_CNAT_DB_DUMP_POLICY_PRINT()
+#define PLATFORM_PRINT_CTX_VALUES(ctx)
+#define PLATFORM_ADD_VRF_MAP_HANDLE_PARTITION(uidb_index, partition_id)
+#define PLATFORM_DEL_VRF_MAP_HANDLE_PARTITION(uidb_index, partition_id)
+#define PLATFORM_ALLOC_NFV9_PKT_BUFFER(ctx, to_lc_cpu)
+#define PLATFORM_SET_DSLITE_ENABLE_FLAG(uidb_index, dslite_id)
+#define PLATFORM_CHECK_DSLITE_ENABLE_FLAG
+
+#define PLATFORM_CNAT_INSTS 1
+#define PLATFORM_HANDLE_TTL_DECREMENT 0 // Don't handle TTL in NAT44 Application (default).
+
+// For ISM, we need to copy the ipv6->hop_limit to ipv4 ttl.
+#define PLATFORM_6RD_COPY_TTL_IPV6_TO_IPV4 0
+
+// For the ISM case, allow TTL of 1, since the TTL decrement happens at the ingress LC
+#define PLATFORM_6RD_ALLOW_TTL_1 0
+
+#define PLATFORM_HANDLE_ICMP_TTL_EXPIRED 0 // Don't handle ICMP_ERROR msg for TTL <=1 in NAT44 App (default).
+
+#define PLATFORM_IPV4_FRAG_FRAG_HOLD_LIMIT 1
+#define PLATFORM_MAX_IPV4_CTX_ENTRIES 1
+#define PLATFORM_MAPE_FRAG 0
+
+#define PLATFORM_ADDR_MASK_PER_CORE 0
+#define PLATFORM_ADDR_MASK_PER_CORE_PER_PARTITION 0
+#define PLATFORM_MAX_CORES 1
+#define PLATFORM_MAX_CORES_PER_PARTITION 1
+#define PLATFORM_MAX_NAT_ENTRIES 1
+#define PLATFORM_MAX_USER_ENTRIES 1
+#define PLATFORM_CNAT_MAX_ADDR_POOL_SIZE 0x1
+#define PLATFORM_DBL_SUPPORT 0 /* Default no DBL support, no NAT44 session table */
+
+#define PLATFORM_MAX_DB_ENTRY_PER_SCAN 1
+#define PLATFORM_MAX_DB_ENTRY_SELECTED_PER_SCAN 1
+#define MAX_COMBINED_DB_ENTRIES_PER_SCAN 0
+
+#define PLATFORM_CNAT_TIMEOUT_IPPROT_MASK 0
+#define PLATFORM_CNAT_TIMEOUT_PORTPROT_MASK 0
+
+#define PLATFORM_MAX_SHOW_BUFFER_SIZE 1700
+#define PLATFORM_MAX_TRANSLATION_ENTRIES (50)
+#define PLATFORM_MAX_UTIL_ENTRIES (100)
+#define PLATFORM_MAX_NAT44_UTIL_ENTRIES ((64)/PLATFORM_MAX_CORES)
+
+#define PLATFORM_CNAT_NFV9_SHIM_HDR_OFFSET 0
+#define PLATFORM_CNAT_NFV9_L2_ENCAPS_OFFSET 0
+
+
+/* Below are the stateful NAT64 related defines */
+#define PLATFORM_NAT64_SET_RX_VRF(rx_vrf, proto, inst_id) \
+ rx_vrf = proto | (inst_id & CNAT_VRF_MASK);
+
+#define PLATFORM_NAT64_MAX_TRANSLATION_ENTRIES (30)
+#define PLATFORM_DS_LITE_MAX_TRANSLATION_ENTRIES (30)
+
+#define PLATFORM_SET_NAT64_ENABLE_FLAG(uidb_index, nat64_id) \
+ { \
+ nat64_set_enable_flag(nat64_id, ENABLE); \
+ }
+
+#define PLATFORM_CHECK_NAT64_ENABLE_FLAG 1
+#define PLATFORM_SET_MAPE_ENABLE_FLAG(uidb_index, mape_id)
+#define PLATFORM_CHECK_MAPE_ENABLE_FLAG 1
+
+/* Deliberately small defaults; the platform-specific (PD) code has the
+   correct values. If a platform does not support NAT64, it should not
+   reserve much memory for it. */
+#define PLATFORM_NAT64_MAX_SESSIONS 10
+#define PLATFORM_NAT64_TIMEOUT_HASH_SIZE 10
+#define PLATFORM_MAP_ADDR_PER_CORE 1024
+
+#define ENABLE 1
+#define DISABLE 0
+
+/* Platform Xlat inline learn function */
+#define PLATFORM_INLINE_LEARN_FUNC(a,b,c)
+
+
+/* Checksum calculation to be done in software */
+#define PLATFORM_XLAT_SW_CHECKSUM_CALC 0
+
+
+/* The include below overrides the null definitions above with the
+   platform-specific definitions */
+#include "platform_common_override.h"
+
+#endif /* __PLATFORM_COMMON_H__ */
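
The pattern in platform_common.h is that every platform hook gets a harmless default, and platform_common_override.h (included last) re-defines only what a given target needs. A hedged sketch of adding a hypothetical new hook under the same convention; the names are illustrative and not part of the plugin:

    /* In platform_common.h: no-op default so common code can always call it. */
    #define PLATFORM_EXAMPLE_HOOK(ctx)

    /* In platform_common_override.h: a target that needs real behaviour
     * replaces the default. */
    #ifdef TARGET_RODDICK
    #undef  PLATFORM_EXAMPLE_HOOK
    #define PLATFORM_EXAMPLE_HOOK(ctx) example_platform_hook(ctx)
    #endif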
diff --git a/plugins/vcgn-plugin/vcgn/platform_common_override.h b/plugins/vcgn-plugin/vcgn/platform_common_override.h
new file mode 100644
index 00000000000..d6d3b0785b5
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/platform_common_override.h
@@ -0,0 +1,304 @@
+/*
+ *---------------------------------------------------------------------------
+ * platform_common_override.h -- has the actual platform-specific defines.
+ * Included only by platform_common.h
+ *
+ * Copyright (c) 2011-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#ifndef __PLATFORM_COMMON_OVERRIDE_H__
+#define __PLATFORM_COMMON_OVERRIDE_H__
+
+extern unsigned char my_octeon_id;
+
+#undef PLATFORM_DBL_SUPPORT
+#define PLATFORM_DBL_SUPPORT 1 // Destination Based logging support
+ // NAT44 session table required.
+
+#undef PLATFORM_ADDR_MASK_PER_CORE
+/* Currently only a single core is considered */
+//#define PLATFORM_ADDR_MASK_PER_CORE 0x3f // Using 64 cores
+#define PLATFORM_ADDR_MASK_PER_CORE 0x01
+
+#undef MAX_COMBINED_DB_ENTRIES_PER_SCAN
+#define MAX_COMBINED_DB_ENTRIES_PER_SCAN 128
+
+#undef PLATFORM_MAX_CORES
+#define PLATFORM_MAX_CORES (PLATFORM_ADDR_MASK_PER_CORE + 1)
+
+// Roddick does not have any partition of cores
+#undef PLATFORM_ADDR_MASK_PER_CORE_PER_PARTITION
+#define PLATFORM_ADDR_MASK_PER_CORE_PER_PARTITION \
+ PLATFORM_ADDR_MASK_PER_CORE
+
+#undef PLATFORM_MAX_CORES_PER_PARTITION
+#define PLATFORM_MAX_CORES_PER_PARTITION PLATFORM_MAX_CORES
+
+#undef PLATFORM_CNAT_INSTS
+//#define PLATFORM_CNAT_INSTS 64
+#define PLATFORM_CNAT_INSTS 1 /* currently only a single instance */
+
+#undef PLATFORM_MAX_NAT_ENTRIES
+//#define PLATFORM_MAX_NAT_ENTRIES 20000000 // 20M
+#define PLATFORM_MAX_NAT_ENTRIES 1666660 // ~80M/48 (79999680/48)
+
+#undef PLATFORM_MAX_USER_ENTRIES
+#define PLATFORM_MAX_USER_ENTRIES 20800 // ~1M/48 (998400/48)
+
+
+/* 524288:
+ (20000000 translations) / (64 CNAT INSTANCES) = 312500
+ nearest higher number which is power of 2 next to 312500
+*/
+#undef PLATFORM_CNAT_MAIN_PRELIM_HASH_SIZE
+//#define PLATFORM_CNAT_MAIN_PRELIM_HASH_SIZE 524288
+#define PLATFORM_CNAT_MAIN_PRELIM_HASH_SIZE (5<<20)
+/* 4096:
+ (200000 users) / (64 CNAT INSTANCES) = 3125
+ nearest higher number which is power of 2 next to 3125
+*/
+#undef PLATFORM_CNAT_USER_PRELIM_HASH_SIZE
+#define PLATFORM_CNAT_USER_PRELIM_HASH_SIZE 4096
+
+#undef PLATFORM_CNAT_MAX_ADDR_POOL_SIZE
+#define PLATFORM_CNAT_MAX_ADDR_POOL_SIZE 0x10000 // max /16
+
+#undef PLATFORM_MAX_DB_ENTRY_PER_SCAN
+#define PLATFORM_MAX_DB_ENTRY_PER_SCAN 400
+
+#undef PLATFORM_MAX_DB_ENTRY_SELECTED_PER_SCAN
+#define PLATFORM_MAX_DB_ENTRY_SELECTED_PER_SCAN 100 // 1/4th of above
+
+#undef PLATFORM_CNAT_TIMEOUT_IPPROT_MASK
+#define PLATFORM_CNAT_TIMEOUT_IPPROT_MASK 0xFFFFFFFF0000FFFF
+
+#undef PLATFORM_CNAT_TIMEOUT_PORTPROT_MASK
+#define PLATFORM_CNAT_TIMEOUT_PORTPROT_MASK 0x00000000FFFFFFFF
+
+#ifdef TARGET_RODDICK /* EVB doesn't need it */
+#undef PLATFORM_FILL_DST_ADDR_PORT_TABLE
+#define PLATFORM_FILL_DST_ADDR_PORT_TABLE fill_dst_addr_port_table();
+#endif
+
+
+#ifndef RODDICK_ON_LINUX_OR_EVB
+#undef PLATFORM_SET_CTX_RU_TX_FROM_NODE
+#undef PLATFORM_SET_CTX_RU_TX_DST_IPPORT_IDX
+#undef PLATFORM_SET_CTX_RU_TX_PKT_TYPE
+
+#define PLATFORM_SET_CTX_RU_TX_FROM_NODE(ctx, value) \
+ (vnet_buffer(ctx))->vcgn_uii.ru.tx.from_node = value;
+#define PLATFORM_SET_CTX_RU_TX_DST_IPPORT_IDX(ctx, value) \
+ (vnet_buffer(ctx))->vcgn_uii.ru.tx.dst_ip_port_idx = value;
+#define PLATFORM_SET_CTX_RU_TX_PKT_TYPE(ctx, type) \
+ (vnet_buffer(ctx))->vcgn_uii.ru.tx.packet_type = type;
+#endif
+
+#undef PLATFORM_SET_RX_VRF
+#undef PLATFORM_SET_TX_VRF
+#ifdef TARGET_RODDICK
+#define PLATFORM_SET_RX_VRF(ctx, rx_vrf, hardcode, mask) \
+ rx_vrf = (ctx->ru.rx.uidb_index & CNAT_VRF_MASK);
+#define PLATFORM_SET_TX_VRF(ctx, tx_vrf) \
+ ctx->ru.tx.uidb_index = tx_vrf;
+#else /*EVB */
+#define PLATFORM_SET_RX_VRF(ctx, rx_vrf, hardcode, mask) \
+ rx_vrf = hardcode;
+#define PLATFORM_SET_TX_VRF(ctx, tx_vrf)
+#endif
+
+#undef PLATFORM_CNAT_SET_RX_VRF
+#undef PLATFORM_CNAT_SET_TX_VRF
+
+#define PLATFORM_CNAT_SET_RX_VRF(if_index, rx_vrf, proto) \
+ rx_vrf = proto | ((if_index) & CNAT_VRF_MASK);
+
+#define PLATFORM_CNAT_SET_TX_VRF(if_index, tx_vrf) \
+ (if_index) = ((tx_vrf) & CNAT_VRF_MASK);
+
+
+
+#undef PLATFORM_NAT64_SET_RX_VRF
+
+#ifdef TARGET_RODDICK
+
+#define PLATFORM_NAT64_SET_RX_VRF(rx_vrf, proto, inst_id) \
+ rx_vrf = proto | (inst_id & CNAT_VRF_MASK);
+
+#else /* EVB */
+
+#define PLATFORM_NAT64_SET_RX_VRF(rx_vrf, proto, inst_id) \
+ rx_vrf = proto | inst_id;
+
+#endif
+
+#ifdef TARGET_EVB
+#define VRF_MAP_CONFIG
+#endif
+
+#undef PLATFORM_PRINT_TESTING_PG
+#if defined(TARGET_LINUX_UDVR) || defined(CNAT_PG)
+#define PLATFORM_PRINT_TESTING_PG() printf("testing pg\n");
+#else
+#define PLATFORM_PRINT_TESTING_PG()
+#endif
+
+#ifdef TARGET_RODDICK
+#undef PLATFORM_INIT_TEMP_SENSORS
+#undef PLATFORM_READ_CPU_SENSORS
+#undef PLATFORM_SET_TEMP_READ_BLOCK
+
+#define PLATFORM_INIT_TEMP_SENSORS() Init_temperature_sensors();
+#define PLATFORM_READ_CPU_SENSORS(value) read_octeon_sensors(value);
+#define PLATFORM_SET_TEMP_READ_BLOCK(var, val) var = &val->param[0];
+#endif
+
+#undef PLATFORM_HANDLE_ICMP_TTL_EXPIRED
+#define PLATFORM_HANDLE_ICMP_TTL_EXPIRED 1 // handle TTL in NAT44 Application (for AVSM)
+
+#undef PLATFORM_NFV9_DISP_NODE_IDX
+#ifdef TARGET_RODDICK
+#define PLATFORM_NFV9_DISP_NODE_IDX "roddick_infra_l3_tx"
+#else /* EVB */
+#define PLATFORM_NFV9_DISP_NODE_IDX "cnat_rewrite_output"
+#endif
+
+#undef PLATFORM_CNAT_DB_DUMP_POLICY_PRINT
+#define PLATFORM_CNAT_DB_DUMP_POLICY_PRINT() \
+ printf("my instance:%d\n" \
+ "icmp timeout %d udp init timeout %d act timeout %d\n" \
+ "tcp init timeout %d act timeout %d mapping refresh %d\n" \
+ "port limit per user %d ftp alg %d lb debug %d\n" \
+ "icmp rate limit 0x%x config delete timer 0x%x\n" \
+ "global debug flag 0x%x\n" \
+ "icmp rate limit (pkts/per sec) %d\n" \
+ "dynamic port range start %d\n" \
+ "debug ivrf 0x%x flag 0x%x start_addr 0x%x end_addr 0x%x\n" \
+ "debug ovrf 0x%x flag 0x%x start_addr 0x%x end_addr 0x%x\n", \
+ my_instance_number, \
+ icmp_session_timeout, udp_init_session_timeout, udp_act_session_timeout, \
+ tcp_initial_setup_timeout, tcp_active_timeout, \
+ mapping_refresh_both_direction, cnat_main_db_max_ports_per_user, \
+ ftp_alg_enabled, lb_debug_enable, per_user_icmp_msg_limit, \
+ config_delete_timeout, \
+ global_debug_flag, \
+ cnat_main_db_icmp_rate_limit, \
+ cnat_static_port_range, \
+ debug_i_vrf, debug_i_flag, debug_i_addr_start, debug_i_addr_end, \
+ debug_o_vrf, debug_o_flag, debug_o_addr_start, debug_o_addr_end);
+
+
+#undef PLATFORM_PRINT_CTX_VALUES
+#ifdef TARGET_RODDICK
+#define PLATFORM_PRINT_CTX_VALUES(ctx) \
+ printf("\nAFTER: %s cur_hdr %p, uidb %d, pkt_type %d, cur_len %d\n", \
+ type_str, \
+ ctx->current_header, \
+ ctx->ru.tx.uidb_index, \
+ ctx->ru.tx.packet_type, \
+ ctx->current_length);
+#else /* EVB */
+#define PLATFORM_PRINT_CTX_VALUES(ctx) \
+ printf("\nAFTER: %s cur_hdr %p, cur_len %d\n", \
+ type_str,\
+ ctx->current_header, \
+ ctx->current_length);
+#endif
+
+#undef PLATFORM_ADD_VRF_MAP_HANDLE_PARTITION
+#define PLATFORM_ADD_VRF_MAP_HANDLE_PARTITION(uidb_index, partition_id)
+
+#undef PLATFORM_DEL_VRF_MAP_HANDLE_PARTITION
+#define PLATFORM_DEL_VRF_MAP_HANDLE_PARTITION(uidb_index, partition_id)
+
+#undef PLATFORM_ALLOC_NFV9_PKT_BUFFER
+#define PLATFORM_ALLOC_NFV9_PKT_BUFFER(ctx, to_lc_cpu)
+
+#undef PLATFORM_CNAT_NFV9_SHIM_HDR_OFFSET
+#ifdef TARGET_RODDICK
+// This corresponds to the length of the IMETRO SHIM Header for RODDICK
+#define PLATFORM_CNAT_NFV9_SHIM_HDR_OFFSET 8
+#else
+#define PLATFORM_CNAT_NFV9_SHIM_HDR_OFFSET 0
+#endif
+
+#undef PLATFORM_CNAT_NFV9_L2_ENCAPS_OFFSET
+#ifdef TARGET_RODDICK
+#define PLATFORM_CNAT_NFV9_L2_ENCAPS_OFFSET 0
+#else
+#define PLATFORM_CNAT_NFV9_L2_ENCAPS_OFFSET 16
+#endif
+
+#undef PLATFORM_MAX_SHOW_BUFFER_SIZE
+#undef PLATFORM_MAX_TRANSLATION_ENTRIES
+#undef PLATFORM_MAX_UTIL_ENTRIES
+
+#define PLATFORM_MAX_SHOW_BUFFER_SIZE 1700
+#define PLATFORM_MAX_TRANSLATION_ENTRIES (50)
+#define PLATFORM_NAT64_MAX_TRANSLATION_ENTRIES (30)
+#define PLATFORM_MAX_UTIL_ENTRIES (100)
+
+
+#undef PLATFORM_NAT64_MAX_SESSIONS
+#undef PLATFORM_NAT64_TIMEOUT_HASH_SIZE
+#define PLATFORM_NAT64_MAX_SESSIONS 20000000
+#define PLATFORM_NAT64_TIMEOUT_HASH_SIZE 24001 /* Ref: CSCtr36242 */
+
+#undef PLATFORM_CHECK_DSLITE_ENABLE_FLAG
+#define PLATFORM_CHECK_DSLITE_ENABLE_FLAG 1
+
+/* Fragment hold limit is Platform specific */
+/* For Roddick, it is 63 due to hardware limitation */
+#undef PLATFORM_IPV4_FRAG_FRAG_HOLD_LIMIT
+#define PLATFORM_IPV4_FRAG_FRAG_HOLD_LIMIT 63
+
+#undef PLATFORM_MAX_IPV4_CTX_ENTRIES
+#define PLATFORM_MAX_IPV4_CTX_ENTRIES 80
+
+#undef PLATFORM_DIRN_IN_TO_OUT
+#undef PLATFORM_DIRN_OUT_TO_IN
+#undef PLATFORM_SET_SVI_PARAMS_FIELD
+
+#define PLATFORM_DIRN_IN_TO_OUT
+#define PLATFORM_DIRN_OUT_TO_IN
+#define PLATFORM_SET_SVI_PARAMS_FIELD(var, value)
+
+#undef PLATFORM_GET_NFV9_L3_HDR_OFFSET
+#define PLATFORM_GET_NFV9_L3_HDR_OFFSET \
+ ((u8 *)ctx + ctx->data + CNAT_NFV9_IP_HDR_OFFSET);
+
+#undef PLATFORM_GET_NFV9_L4_HDR_OFFSET
+#define PLATFORM_GET_NFV9_L4_HDR_OFFSET \
+ ((u8 *) ctx + ctx->data + CNAT_NFV9_UDP_HDR_OFFSET);
+
+#undef PLATFORM_MEMSET_CNAT_LOG_PKT_DATA
+#define PLATFORM_MEMSET_CNAT_LOG_PKT_DATA
+
+/*
+ Index 0 -- SE_P2MP
+ Index 1 -- HA Destination 1
+ Index 2 -- HA Destination 2
+ Index 3 -- EXT_LOG_SRVR
+*/
+enum {
+ NODE_CGNCFG,
+ NODE_HA,
+ NODE_PD_CONFIG,
+ NODE_LOGGING,
+ NODE_TRACE_BACKUP,
+ NODE_MAX,
+};
+
+#endif /* __PLATFORM_COMMON_OVERRIDE_H__ */
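
Several of the overrides above build a VRF value by OR-ing a protocol selector into the low bits of an interface index (PLATFORM_CNAT_SET_RX_VRF, PLATFORM_NAT64_SET_RX_VRF). A hedged sketch of using the macro; CNAT_VRF_MASK is assumed to be defined in other vcgn headers, and the wrapper function is illustrative only:

    /* Illustrative only: the macro expands to
     *   rx_vrf = proto_selector | (if_index & CNAT_VRF_MASK);
     * CNAT_VRF_MASK comes from other vcgn headers. */
    static inline u16
    example_rx_vrf (u16 if_index, u16 proto_selector)
    {
        u16 rx_vrf;

        PLATFORM_CNAT_SET_RX_VRF(if_index, rx_vrf, proto_selector);
        return rx_vrf;
    }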
diff --git a/plugins/vcgn-plugin/vcgn/spp_ctx.h b/plugins/vcgn-plugin/vcgn/spp_ctx.h
new file mode 100644
index 00000000000..2d3c95c8887
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/spp_ctx.h
@@ -0,0 +1,76 @@
+/*
+ *------------------------------------------------------------------
+ * spp_ctx.h - packet / context definitions
+ *
+ * Copyright (c) 2007-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __SPP_CTX_H__
+#define __SPP_CTX_H__
+
+/* Packet header / data */
+
+/* Any change to spp_ctx_t structure should be updated in vnet/buffer.h
+ * as well.
+ */
+typedef struct _spp_ctx {
+#ifdef TOBE_PORTED
+ /* Following fields are required to handle multibuffer */
+ u32 num_buffers; /* Number of buffers part of packet */
+ vlib_buffer_t *next_ctx_this_packet;
+
+ /* Following is used by non-UDP protocols */
+#define SPP_CTX_FEATURE_DATA_SIZE 16
+
+ u8 feature_data[SPP_CTX_FEATURE_DATA_SIZE];
+#endif
+
+ union { /* Roddick specific */
+ u32 roddick_info;
+ struct __tx_pkt_info { /* Used by PI to PI communication for TX */
+ u32 uidb_index:16; /* uidb_index to transmit */
+ u32 packet_type:2; /* 1 - IPv4, 2 - IPv6, 0/3 - Unused */
+ u32 ipv4_defrag:1; /* 0 - Normal, 1 - update first
+ * segment size
+ * (set by 6rd defrag node)
+ */
+
+ u32 dst_ip_port_idx:4;/* Index to dst_ip_port_table */
+ u32 from_node:4;
+ u32 calc_chksum:1;
+ u32 reserved:4;
+ } tx;
+ struct __rx_pkt_info { /* Used by PD / PI communication */
+ u32 uidb_index:16; /* uidb_index received in packet */
+ u32 packet_type:2; /* 1 - IPv4, 2 - IPv6, 0/3 - Unused */
+ u32 icmp_type:1; /* 0-ICMP query type, 1-ICMP error type */
+ u32 protocol_type:2; /* 1-TCP, 2-UDP, 3-ICMP, 0 - Unused */
+ u32 ipv4_defrag:1; /* 0 - Normal, 1 - update first
+ * segment size
+ * (set by 6rd defrag node)
+ */
+
+ u32 direction:1; /* 0-Outside, 1-Inside */
+ u32 frag:1; /*IP fragment-1, Otherwise-0*/
+ u32 option:1; /* 0-No IP option (v4) present, non-fragHdr
+ * option hdr present (v6)
+ */
+ u32 df_bit:1; /* IPv4 DF bit copied here */
+ u32 reserved1:6;
+ } rx;
+ } ru;
+} spp_ctx_t;
+
+#endif
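
spp_ctx_t packs per-packet metadata into 32-bit bitfield words; the PLATFORM_SET_CTX_RU_TX_* macros earlier in this patch write the tx leg through vnet_buffer(). A hedged sketch of setting the tx fields directly, for illustration only:

    /* Illustrative only: assumes spp_ctx.h is included. */
    static inline void
    spp_ctx_mark_ipv4_tx (spp_ctx_t *ctx, u16 uidb_index)
    {
        ctx->ru.tx.uidb_index  = uidb_index;  /* 16-bit field              */
        ctx->ru.tx.packet_type = 1;           /* 1 = IPv4, per the comment */
        ctx->ru.tx.calc_chksum = 0;           /* no checksum recalculation */
    }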
diff --git a/plugins/vcgn-plugin/vcgn/spp_platform_trace_log.c b/plugins/vcgn-plugin/vcgn/spp_platform_trace_log.c
new file mode 100644
index 00000000000..a96894f935d
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/spp_platform_trace_log.c
@@ -0,0 +1,989 @@
+/*
+ *------------------------------------------------------------------
+ * spp_platform_trace_log.c
+ *
+ * Copyright (c) 2008-2011, 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *---------------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <stdio.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+#include <vlib/main.h>
+
+#include "tcp_header_definitions.h"
+#include "platform_common.h"
+#include "spp_platform_trace_log.h"
+
+#define WORD_SIZE sizeof(u32)
+
+int temperature_read_blocked = 1;
+
+spp_cnat_logger_tbl_t spp_cnat_logger_table[] =
+{
+ { CNAT_ERROR_SUCCESS,
+ 3,
+ 0,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_NO_CONFIG_ERROR,
+ 3,
+ 180,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_NO_VRF_RUN_ERROR,
+ 3,
+ 180,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_NO_POOL_FOR_ANY_ERROR,
+ 3,
+ 180,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_NO_PORT_FOR_ANY_ERROR,
+ 3,
+ 60,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_BAD_INUSE_ANY_ERROR,
+ 3,
+ 60,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_NOT_FOUND_ANY_ERROR,
+ 3,
+ 60,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_INV_PORT_FOR_DIRECT_ERROR,
+ 3,
+ 60,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_BAD_INUSE_DIRECT_ERROR,
+ 3,
+ 1,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_NOT_FOUND_DIRECT_ERROR,
+ 3,
+ 1,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_OUT_OF_PORT_LIMIT_ERROR,
+ 3,
+ 60,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_MAIN_DB_CREATE_ERROR,
+ 0,
+ 30,
+ {""}
+ },
+ { CNAT_LOOKUP_ERROR,
+ 1,
+ 30,
+ {"Type"}
+ },
+ { CNAT_INDEX_MISMATCH_ERROR,
+ 2,
+ 30,
+ {"in2out_index",
+ "out2in_index"}
+ },
+ { CNAT_PACKET_DROP_ERROR,
+ 3,
+ 15,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_INV_UNUSED_USR_INDEX,
+ 1,
+ 10,
+ {"invalid/unused user index"}
+ },
+ { CNAT_INVALID_VRFMAP_INDEX,
+ 0,
+ 60,
+ {""}
+ },
+ { CNAT_USER_OUT_OF_PORTS,
+ 2,
+ 1800,
+ {"i-vrf",
+ "ipv4 addr"}
+ },
+ { CNAT_EXT_PORT_THRESH_EXCEEDED,
+ 2,
+ 180,
+ {"i-vrf",
+ "ipv4 address"}
+ },
+ { CNAT_EXT_PORT_THRESH_NORMAL,
+ 2,
+ 180,
+ {"vrf",
+ "ipv4 address"}
+ },
+ { CNAT_NO_EXT_PORT_AVAILABLE,
+ 0,
+ 1,
+ {"",}
+ },
+ { CNAT_SESSION_THRESH_EXCEEDED,
+ 2,
+ 1800,
+ {"vrf",
+ "ipv4 address"}
+ },
+ { CNAT_SESSION_THRESH_NORMAL,
+ 2,
+ 30, /* changed to 30 */
+ {"vrf",
+ "ipv4 address"}
+ },
+ { WQE_ALLOCATION_ERROR,
+ 0,
+ 180, /* changed to 180 */
+ {""}
+ },
+ { ERROR_PKT_DROPPED,
+ 2,
+ 60, /* changed to 60 */
+ {"spi-port",
+ "error-code"}
+ },
+ { SYSMGR_PD_KEY_CREATION_ERROR,
+ 0,
+ 30,
+ {""}
+ },
+ { SYSMGR_PD_SHMEM_ID_ERROR,
+ 0,
+ 1,
+ {""}
+ },
+ { SYSMGR_PD_SHMEM_ATTACH_ERROR,
+ 0,
+ 1,
+ {""}
+ },
+ { OCTEON_CKHUM_SKIPPED,
+ 2,
+ 60, /* changed to 60 */
+ {"version",
+ "protocol"}
+ },
+ { PK0_SEND_STATUS,
+ 1,
+ 15,
+ {"status"}
+ },
+ { CMD_BUF_ALLOC_ERR,
+ 0,
+ 60,
+ {""}
+ },
+ { SPP_CTX_ALLOC_FAILED,
+ 1,
+ 300, /* every 5 min */
+ {"node"}
+ },
+ { SPP_MAX_DISPATCH_REACHED,
+ 1,
+ 60,
+ {"node"}
+ },
+ { HA_SIGCHILD_RECV,
+ 3,
+ 1,
+ {"pid",
+ "uid",
+ "signal",}
+ },
+ { SIGACTION_ERR,
+ 0,
+ 1,
+ {""}
+ },
+ { HA_INVALID_SEQ_OR_CONFIG_OR_TYPE,
+ 2,
+ 10,
+ {"seq-id or config option",
+ "Type"}
+ },
+ { NODE_CREATION_ERROR,
+ 1,
+ 1,
+ {"node"}
+ },
+
+ { CNAT_CLI_INVALID_INPUT,
+ 4,
+ 0,
+ {"Error Type",
+ "Passed",
+ "Expected",
+ "Type"}
+ },
+ { CNAT_DUMMY_HANDLER_HIT,
+ 1,
+ 0,
+ {"Handler"}
+ },
+ { CNAT_CONFIG_ERROR,
+ 5,
+ 0,
+ {"Sub code",
+ "Param 1",
+ "Param 2",
+ "Param 3",
+ "Param 4"}
+ },
+ { CNAT_NFV9_ERROR,
+ 1,
+ 180, /* changed to 180 */
+ {"Sub code"}
+ },
+ { CNAT_CMVX_TWSI_READ_WRITE_FAIL,
+ 3,
+ 180,
+ {"Operation",
+ "Location",
+ "Data"}
+ },
+ { CNAT_TEMP_SENSOR_TIMEOUT,
+ 0,
+ 180,
+ {""}
+ },
+ { CNAT_TEMP_SENSOR_DATA_MISMATCH,
+ 2,
+ 180,
+ {"Actual",
+ "Expected"}
+ },
+ { CNAT_TEMP_SENSOR_CONFIG_FAILED,
+ 1,
+ 180,
+ {"Glik"}
+ },
+ { HA_APP_NOT_RESPONDING,
+ 2,
+ 180,
+ {"CPU",
+ "Core"}
+ },
+ { HA_DATA_PATH_TEST_FAILED,
+ 0,
+ 30,
+ {""}
+ },
+ { CNAT_WRONG_PORT_ALLOC_TYPE,
+ 3,
+ 60,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_NEW_PORT_ALLOC_ERROR,
+ 3,
+ 60,
+ {"i-vrf",
+ "ipv4 addr",
+ "port"}
+ },
+ { CNAT_INVALID_INDEX_TO_FREE_PORT,
+ 0,
+ 60,
+ {""}
+ },
+ { CNAT_DELETE_DB_ENTRY_NO_PORTMAP,
+ 0,
+ 60,
+ {""}
+ },
+ { CNAT_MAIN_DB_LIMIT_ERROR,
+ 0,
+ 180,
+ {""}
+ },
+ { CNAT_USER_DB_LIMIT_ERROR,
+ 0,
+ 180,
+ {""}
+ },
+ { CNAT_FRAG_DB_ERROR,
+ 1,
+ 180,
+ {"Type"}
+ },
+
+ { DROP_PKT_DUMP,
+ 0,
+ 20,
+ {""}
+ }
+};
+
+#define LOG_TABLE_MAX_ENTRIES \
+ (sizeof(spp_cnat_logger_table)/sizeof(spp_cnat_logger_table[0]))
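
Each row of spp_cnat_logger_table pairs an error code with the number of u32 arguments it carries and a rate-limit interval in seconds; spp_printf() further down uses that interval to suppress repeats. A hedged usage sketch with placeholder argument values:

    /* Illustrative only: report CNAT_USER_OUT_OF_PORTS, which per its table
     * row above carries two arguments (i-vrf and ipv4 addr). */
    static void
    example_report_out_of_ports (void)
    {
        u32 args[2];

        args[0] = 100;        /* i-vrf     (placeholder) */
        args[1] = 0x0a000001; /* 10.0.0.1  (placeholder) */
        spp_printf(CNAT_USER_OUT_OF_PORTS, 2, args);
    }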
+
+u32 error_code_timestamps[LOG_TABLE_MAX_ENTRIES];
+spp_timer_t sensor_timer;
+spp_trace_log_global_info_t spp_trace_log_global_info;
+spp_global_counters_t spp_global_counters;
+
+/*
+ * Logging information structures
+ */
+spp_trace_log_info_t spp_default_trace_log_info;
+spp_trace_log_info_t *spp_trace_log_info_pool;
+
+#ifdef TOBE_PORTED
+/*
+ * The following 2 functions are temporary hacks until
+ * we have RTC support from the PD nodes
+ */
+inline
+u32 spp_trace_log_get_sys_up_time_in_ms (void)
+{
+ spp_node_main_vector_t *nmv;
+ u32 sys_up_time;
+
+ nmv = spp_get_node_main_vectorized_inline();
+
+ sys_up_time = (u32) (nmv->ticks / nmv->ticks_per_ms);
+
+ return (sys_up_time);
+}
+
+u32 spp_trace_log_get_unix_time_in_seconds (void)
+{
+ spp_node_main_vector_t *nmv;
+ u32 unix_time;
+
+ nmv = spp_get_node_main_vectorized_inline();
+
+ unix_time = (u32) (nmv->ticks / nmv->ticks_per_second);
+
+ return (unix_time);
+}
+
+/*
+ * edt: * * spp_trace_log_send_queued_pkt
+ *
+ * Tries to send a logging pkt that has been queued earlier
+ * because it could not be sent due to downstream constipation
+ *
+ * Argument: spp_trace_log_info_t *trace_logging_info
+ * structure that contains the packet context
+ */
+inline
+void spp_trace_log_send_queued_pkt (spp_trace_log_info_t *trace_logging_info)
+{
+ spp_node_t *output_node;
+
+ output_node = spp_get_nodes() +
+ spp_trace_log_global_info.spp_trace_log_disp_node_index;
+
+ if (PREDICT_TRUE(output_node->sf.nused < SPP_MAXDISPATCH)) {
+ /*
+ * Move the logging context to output node
+ */
+ spp_dispatch_make_node_runnable(output_node);
+ output_node->sf.ctxs[output_node->sf.nused++] =
+ trace_logging_info->queued_logging_context;
+
+ /*
+ * Context has been queued, it will be freed after the pkt
+ * is sent. Clear this from the logging_context_info structure
+ */
+ trace_logging_info->queued_logging_context = NULL;
+
+ } else {
+ /*
+ * Can't do much, just return, may be we can send it later
+ */
+ spp_global_counters.spp_trace_log_downstream_constipation_count++;
+ }
+}
+
+/*
+ * edt: * * spp_trace_log_send_pkt
+ *
+ * Tries to send a logging pkt. If the packet cannot be sent
+ * because of rewrite_output node cannot process it, queue
+ * it temporarily and try to send it later.
+ *
+ * Argument: spp_trace_log_info_t *trace_logging_info
+ * structure that contains the packet context
+ */
+inline
+void spp_trace_log_send_pkt (spp_trace_log_info_t *trace_logging_info)
+{
+ spp_node_t *output_node;
+
+
+ output_node = spp_get_nodes() +
+ spp_trace_log_global_info.spp_trace_log_disp_node_index;
+
+ if (PREDICT_TRUE(output_node->sf.nused < SPP_MAXDISPATCH)) {
+ /*
+ * Move the logging context to output node
+ */
+ spp_dispatch_make_node_runnable(output_node);
+ output_node->sf.ctxs[output_node->sf.nused++] =
+ trace_logging_info->current_logging_context;
+
+ } else {
+ /*
+ * Queue the context into the logging_info structure,
+ * We will try to send it later. Currently, we will
+ * restrict to only one context queued.
+ */
+ spp_global_counters.spp_trace_log_downstream_constipation_count++;
+
+ /*
+ * Attach the current logging context which is full to the
+ * queued context list in trace_logging_info structure
+ */
+ trace_logging_info->queued_logging_context =
+ trace_logging_info->current_logging_context;
+
+ /*
+ * Whether the context is queued or not, set the current context index
+ * to EMPTY, as the earlier context can no longer be used to send
+ * more logging records.
+ */
+ }
+
+ trace_logging_info->current_logging_context = NULL;
+}
+
+/*
+ * edt: * * spp_trace_log_send_pkt_always_success
+ *
+ * Tries to send a logging pkt. This cannot fail due to downstream
+ * constipation because we have already checked if the rewrite_output
+ * node can accept it.
+ *
+ * Argument: spp_trace_log_info_t *trace_logging_info
+ * structure that contains the packet context
+ *
+ * Argument: spp_node_t *output_node
+ * spp_node_t structure for rewrite_output node
+ */
+inline
+void spp_trace_log_send_pkt_always_success (
+ spp_trace_log_info_t *trace_logging_info,
+ spp_node_t *output_node)
+{
+ /*
+ * At this point we either have a current or queued logging context
+ */
+ if (PREDICT_TRUE(trace_logging_info->current_logging_context != NULL)) {
+
+ output_node->sf.ctxs[output_node->sf.nused++] =
+ trace_logging_info->current_logging_context;
+
+ trace_logging_info->current_logging_context = NULL;
+ } else {
+ /*
+ * For queued logging context
+ */
+ output_node->sf.ctxs[output_node->sf.nused++] =
+ trace_logging_info->queued_logging_context;
+
+ trace_logging_info->queued_logging_context = NULL;
+ }
+
+ /*
+ * Move the logging context to output node
+ */
+ spp_dispatch_make_node_runnable(output_node);
+
+}
+
+/*
+ * edt: * * spp_create_trace_log_context
+ *
+ * Tries to create a logging context with packet buffer
+ * to send a new logging packet
+ *
+ * Argument: spp_trace_log_info_t *trace_logging_info
+ * structure that contains the trace logging info and will store
+ * the packet context as well.
+ */
+inline
+void spp_create_trace_log_context (
+ spp_trace_log_info_t *trace_logging_info)
+{
+ spp_ctx_t *ctx;
+
+ /*
+ * If queued_logging_context is non-NULL, we already have a logging
+ * packet queued to be sent. First try sending this before allocating
+ * a new context. We can have only one active packet context per
+ * trace_logging_info structure
+ */
+ if (PREDICT_FALSE(trace_logging_info->queued_logging_context != NULL)) {
+ spp_trace_log_send_queued_pkt(trace_logging_info);
+ /*
+ * If we cannot still send the queued pkt, just return
+ * Downstream Constipation count would have increased anyway
+ */
+ if (trace_logging_info->queued_logging_context != NULL) {
+ spp_global_counters.spp_trace_log_context_creation_deferred_count++;
+ return;
+ }
+ }
+
+
+ /*
+ * No context can be allocated, return silently
+ * calling routine will handle updating the error counters
+ */
+ if (spp_ctx_alloc(&ctx, 1) < 1) {
+ spp_global_counters.spp_trace_log_context_creation_fail_count++;
+ return;
+ }
+
+ trace_logging_info->current_logging_context = ctx;
+ trace_logging_info->pkt_length = 0;
+
+ trace_logging_info->current_logging_context_timestamp =
+ spp_trace_log_get_sys_up_time_in_ms();
+
+ ctx->flags = SPP_CTX_END_OF_PACKET;
+ ctx->ru.tx.from_node = NODE_TRACE_BACKUP;
+ ctx->ru.tx.dst_ip_port_idx = EXT_TRACE_BACKUP_INDEX;
+ ctx->next_ctx_this_packet = (spp_ctx_t*) SPP_CTX_NO_NEXT_CTX;
+ ctx->current_header = &ctx->packet_data[SPP_TRACE_LOG_HDR_OFFSET];
+ ctx->current_length = 0;
+
+ trace_logging_info->log_record = 0;
+ trace_logging_info->total_record_count = 0;
+ trace_logging_info->next_data_ptr =
+ (u8 *) &ctx->packet_data[SPP_TRACE_LOG_HDR_OFFSET];
+
+}
+
+/*
+ * edt: * * spp_trace_log_add_record_create
+ *
+ * Tries to add a record to the trace log packet
+ *
+ * Argument: spp_trace_log_info_t *trace_logging_info
+ * structure that contains the trace logging info and will store
+ * the packet context as well.
+ */
+inline
+void spp_trace_log_add_record_create (spp_trace_log_info_t *trace_logging_info)
+{
+
+ trace_logging_info->log_header =
+ (spp_trace_log_hdr_t *) (trace_logging_info->next_data_ptr);
+
+ /*
+ * Initialize the number of traces recorded
+ */
+ trace_logging_info->log_header->num_traces =
+ spp_host_to_net_byte_order_32(0);
+
+
+ trace_logging_info->log_record =
+ (spp_trace_log_t *) (trace_logging_info->log_header + 1);
+
+ /*
+ * Update the length of the total pkt
+ */
+ trace_logging_info->pkt_length +=
+ SPP_LOG_TRACE_HEADER_LENGTH;
+
+ /*
+ * Set the data pointer beyond the trace header field
+ */
+ trace_logging_info->next_data_ptr =
+ (u8 *) (trace_logging_info->log_header + 1);
+
+}
+
+/*
+ * edt: * * spp_trace_logger
+ *
+ * Tries to log spp/cnat event/errors
+ *
+ * Argument: u16 error_code
+ * Error code passed
+ *
+ * Argument: optional arguments
+ */
+void spp_trace_logger (u16 error_code, u16 num_args, u32 *arg)
+{
+ spp_trace_log_info_t *trace_logging_info = 0;
+ u8 i;
+
+ trace_logging_info =
+ spp_trace_log_info_pool +
+ spp_trace_log_global_info.spp_log_pool_index[SPP_LOG_LTRACE];
+
+ if (PREDICT_FALSE(trace_logging_info->current_logging_context == NULL)) {
+ spp_create_trace_log_context(trace_logging_info);
+
+ /*
+ * If still NULL, just return (the create routine updated the counters)
+ */
+ if (PREDICT_FALSE(trace_logging_info->current_logging_context == NULL)) {
+ return;
+ }
+ }
+
+ if (PREDICT_FALSE(trace_logging_info->log_record == NULL)) {
+ spp_trace_log_add_record_create(trace_logging_info);
+ }
+
+ /*
+ * We should definitely have add_record now, no need to sanitize
+ */
+ trace_logging_info->log_record->error_code =
+ spp_host_to_net_byte_order_16(error_code);
+ trace_logging_info->log_record->num_args =
+ spp_host_to_net_byte_order_16(num_args);
+
+ for (i = 0; i < num_args; i++) {
+ trace_logging_info->log_record->arg[i] =
+ spp_host_to_net_byte_order_32(*(arg + i));
+ }
+
+ trace_logging_info->pkt_length += SPP_TRACE_LOG_RECORD_LENGTH + WORD_SIZE*num_args;
+ trace_logging_info->current_logging_context->current_length =
+ trace_logging_info->pkt_length;
+ trace_logging_info->total_record_count += 1;
+
+ trace_logging_info->next_data_ptr =
+ (u8 *) (trace_logging_info->next_data_ptr + WORD_SIZE + WORD_SIZE*num_args);
+
+ trace_logging_info->log_record =
+ (spp_trace_log_t *) (trace_logging_info->next_data_ptr);
+
+ /*
+ * Initialize the number of traces recorded
+ */
+ trace_logging_info->log_header->num_traces =
+ spp_host_to_net_byte_order_32(trace_logging_info->total_record_count);
+
+
+
+ /*
+ * If we have exceeded the packet length, let us send the
+ * packet now. There is buffer of additional bytes beyond
+ * max_pkt_length to ensure that the last add/delete record
+ * can be stored safely.
+ */
+ if (trace_logging_info->pkt_length >
+ trace_logging_info->max_length_minus_max_record_size) {
+ spp_trace_log_send_pkt(trace_logging_info);
+ }
+}
+
+
+/*
+ * edt: * * spp_trace_log_timer_handler
+ *
+ * Timer handler for sending any pending trace log record
+ *
+ * Argument: spp_timer_t * timer_p
+ * Timer handler structure
+ */
+inline
+void spp_trace_log_timer_handler (spp_timer_t * timer_p)
+{
+ spp_node_t *output_node;
+ spp_trace_log_info_t *trace_logging_info = 0;
+ u32 current_timestamp = spp_trace_log_get_sys_up_time_in_ms();
+ i16 sf_nused;
+
+ output_node = spp_get_nodes() +
+ spp_trace_log_global_info.spp_trace_log_disp_node_index;
+
+ sf_nused = output_node->sf.nused;
+
+ pool_foreach (trace_logging_info, spp_trace_log_info_pool, ({
+ /*
+ * Check if no more logging contexts can be queued
+ */
+ if (PREDICT_FALSE(sf_nused >= SPP_MAXDISPATCH)) {
+ break;
+ }
+
+ /*
+ * If there is a current logging context and timestamp
+ * indicates it is pending for long, send it out
+ * Also if there is a queued context send it out as well
+ */
+ if (trace_logging_info->queued_logging_context ||
+ (trace_logging_info->current_logging_context &&
+ (current_timestamp -
+ trace_logging_info->current_logging_context_timestamp)
+ > 1000)) {
+ spp_trace_log_send_pkt_always_success(trace_logging_info,
+ output_node);
+ sf_nused++;
+ }
+ }));
+
+ timer_p->expires =
+ spp_timer_in_n_ms_inline(1000); /* every 1 sec */
+ spp_timer_start(timer_p);
+
+}
+inline
+void spp_sensor_timer_handler (spp_timer_t * timer_p)
+{
+#ifdef TARGET_RODDICK
+ if (!temperature_read_blocked) {
+ Init_temperature_sensors();
+ read_octeon_sensors(TEMPERATURE_SENSOR_QUIET_MODE);
+ }
+
+ timer_p->expires =
+ spp_timer_in_n_ms_inline(60000); /* every 60 sec */
+ spp_timer_start(timer_p);
+
+#endif
+}
+void init_trace_log_buf_pool (void)
+{
+ spp_trace_log_info_t *my_spp_log_info;
+ u8 found;
+ spp_log_type_t log_type;
+
+ /*
+ * Init SPP logging info as needed, this will be done only once
+ */
+ spp_trace_log_init();
+
+ found = 0;
+
+ for (log_type = SPP_LOG_LTRACE; log_type < SPP_LOG_MAX; log_type++ ) {
+ /* Do we already have a map for this log type? */
+ pool_foreach (my_spp_log_info, spp_trace_log_info_pool, ({
+ if (my_spp_log_info->log_type == log_type) {
+ found = 1;
+ break;
+ }
+ }));
+
+ /*
+ * Entry not present
+ */
+ if (!found) {
+ pool_get(spp_trace_log_info_pool, my_spp_log_info);
+ memset(my_spp_log_info, 0, sizeof(*my_spp_log_info));
+
+ /*
+ * Mark the current and queued logging contexts as EMPTY.
+ * They are set correctly when the first logging happens.
+ */
+ my_spp_log_info->current_logging_context = NULL;
+ my_spp_log_info->queued_logging_context = NULL;
+
+ my_spp_log_info->log_type = log_type;
+ my_spp_log_info->max_length_minus_max_record_size =
+ SPP_TRACE_LOG_MAX_PKT_LENGTH;
+
+ spp_trace_log_global_info.spp_log_pool_index[log_type] =
+ my_spp_log_info - spp_trace_log_info_pool;
+ }
+
+ }
+
+ return;
+}
+
+
+/*
+ * one time function
+ * has to be called at the init time
+ */
+void spp_trace_log_init (void)
+{
+ if (!spp_trace_log_global_info.spp_trace_log_init_done) {
+
+#ifdef TARGET_RODDICK
+ spp_trace_log_global_info.spp_trace_log_disp_node_index =
+ spp_lookup_node_index("roddick_infra_l3_tx");
+#elif defined(TARGET_BOOSTER)
+ spp_trace_log_global_info.spp_trace_log_disp_node_index =
+ spp_lookup_node_index("booster_infra_l3_tx");
+#endif
+ ASSERT(spp_trace_log_global_info.spp_trace_log_disp_node_index != (u16)~0);
+
+ spp_trace_log_global_info.log_timer.cb_index =
+ spp_timer_register_callback(spp_trace_log_timer_handler);
+ spp_trace_log_global_info.log_timer.expires =
+ spp_timer_in_n_ms_inline(1000); /* every 1 sec */
+ spp_timer_start(&spp_trace_log_global_info.log_timer);
+
+ if (!my_core_id) {
+ sensor_timer.cb_index =
+ spp_timer_register_callback(spp_sensor_timer_handler);
+ sensor_timer.expires =
+ spp_timer_in_n_ms_inline(60000); /* every 60 sec */
+ spp_timer_start(&sensor_timer);
+ }
+
+ spp_trace_log_global_info.spp_trace_log_init_done = 1;
+
+ /*
+ * Set MSC ip_addr, port values
+ */
+#ifdef TARGET_RODDICK
+ dst_ipv4_port_table[EXT_TRACE_BACKUP_INDEX].ipv4_address =
+ vpp_boot_params.msc_ip_address;
+ switch(vpp_boot_params.octeon_number) {
+ case 0:
+ dst_ipv4_port_table[EXT_TRACE_BACKUP_INDEX].port = 0x15BF;
+ break;
+ case 1:
+ dst_ipv4_port_table[EXT_TRACE_BACKUP_INDEX].port = 0x15BF;
+ break;
+ case 2:
+ dst_ipv4_port_table[EXT_TRACE_BACKUP_INDEX].port = 0x15BF;
+ break;
+ case 3:
+ dst_ipv4_port_table[EXT_TRACE_BACKUP_INDEX].port = 0x15BF;
+ break;
+ }
+#else
+ dst_ipv4_port_table[EXT_TRACE_BACKUP_INDEX].ipv4_address = 0x01020304;
+ dst_ipv4_port_table[EXT_TRACE_BACKUP_INDEX].port = 0x15BF;
+#endif
+
+ }
+}
+
+void spp_printf (u16 error_code, u16 num_args, u32 *arg)
+{
+ u32 current_timestamp;
+ spp_node_main_vector_t *nmv;
+
+ if (PREDICT_FALSE(error_code >= LOG_TABLE_MAX_ENTRIES))
+ {
+ /* printf("Error code invalid %d, %d, %d, %d\n",
+ error_code, LOG_TABLE_MAX_ENTRIES,
+ sizeof(spp_cnat_logger_table), sizeof(spp_cnat_logger_table[0]));
+ */
+ return; /* Should not happen */
+ }
+
+ nmv = spp_get_node_main_vectorized_inline();
+ current_timestamp = nmv->ticks / nmv->ticks_per_second;
+
+ /* Check if any further hashing is required */
+
+ if (PREDICT_FALSE(error_code == DUMP_PKT_IDX)) {
+#if defined(TARGET_RODDICK) || defined(TARGET_BOOSTER)
+ spp_trace_logger(error_code, num_args, arg);
+#else
+ u8 j ;
+
+ printf("PKT DUMP :: ");
+ for (j = 0 ; j < num_args; j++) {
+ printf("0x%x ", arg[j]);
+ if (j == (num_args - 1)) {
+ printf("\n");
+ }
+ }
+#endif
+ } else if (PREDICT_TRUE((current_timestamp - error_code_timestamps[error_code]) >=
+ spp_cnat_logger_table[error_code].rate_limit_time)) {
+ /* update timestamp */
+ error_code_timestamps[error_code] = current_timestamp;
+
+#if defined(TARGET_RODDICK) || defined(TARGET_BOOSTER)
+ spp_trace_logger(error_code, num_args, arg);
+#else
+ u8 j ;
+
+ for (j = 0 ; j < num_args; j++) {
+ printf("%s: %d ", spp_cnat_logger_table[error_code].param_name[j], arg[j]);
+ if (j == (num_args - 1)) {
+ printf("\n");
+ }
+ }
+#endif
+ }
+}
+
+#else /* TOBE_PORTEED */
+void spp_trace_logger(u16 error_code, u16 num_args, u32 *arg)
+{
+ /* To be filled */
+}
+
+void spp_trace_log_init(void)
+{
+ /* To be filled */
+}
+
+void init_trace_log_buf_pool(void)
+{
+ /* To be filled */
+}
+
+void spp_printf(u16 error_code, u16 num_args, u32 *arg)
+{
+ /* To be filled */
+}
+
+inline u32 spp_trace_log_get_unix_time_in_seconds (void)
+{
+ vlib_main_t *vlib_main;
+
+ vlib_main = vlib_get_main();
+ return(vlib_time_now((vlib_main_t *) vlib_main));
+}
+
+#endif /* TOBE_PORTED */
+
diff --git a/plugins/vcgn-plugin/vcgn/spp_platform_trace_log.h b/plugins/vcgn-plugin/vcgn/spp_platform_trace_log.h
new file mode 100644
index 00000000000..36da710f28c
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/spp_platform_trace_log.h
@@ -0,0 +1,358 @@
+/*
+ *------------------------------------------------------------------
+ * spp_platform_trace_log.h
+ *
+ * Copyright (c) 2009-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __SPP_PLATFORM_TRACE_LOG_H__
+#define __SPP_PLATFORM_TRACE_LOG_H__
+
+#include <stdio.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/clib.h>
+
+#include "spp_ctx.h"
+#include "spp_timers.h"
+
+
+typedef enum {
+ SPP_LOG_LTRACE,
+ SPP_LOG_MAX
+} spp_log_type_t;
+
+typedef struct {
+ u32 num_traces;
+} spp_trace_log_hdr_t;
+
+typedef struct {
+ u16 error_code;
+ u16 num_args;
+ u32 arg[0];
+} spp_trace_log_t;
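
spp_trace_log_t ends with a zero-length arg[] array (the pre-C99 flexible-array idiom), so a record's size is the fixed 4-byte header plus one 32-bit word per argument. A small hedged helper showing the sizing arithmetic; the function is illustrative only:

    /* Illustrative only: byte size of a trace record carrying num_args words. */
    static inline u32
    spp_trace_log_record_size (u16 num_args)
    {
        return (u32) (sizeof (spp_trace_log_t) + num_args * sizeof (u32));
    }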
+
+#define DUMP_PKT_IDX 61
+#define OCTEON_SENSOR_READ 62
+
+typedef enum {
+ CNAT_ERROR_SUCCESS,
+ CNAT_NO_CONFIG_ERROR,
+ CNAT_NO_VRF_RUN_ERROR,
+ CNAT_NO_POOL_FOR_ANY_ERROR,
+ CNAT_NO_PORT_FOR_ANY_ERROR,
+ CNAT_BAD_INUSE_ANY_ERROR,
+ CNAT_NOT_FOUND_ANY_ERROR,
+ CNAT_INV_PORT_FOR_DIRECT_ERROR,
+ CNAT_BAD_INUSE_DIRECT_ERROR,
+ CNAT_NOT_FOUND_DIRECT_ERROR,
+ CNAT_OUT_OF_PORT_LIMIT_ERROR,
+ CNAT_MAIN_DB_CREATE_ERROR,
+ CNAT_LOOKUP_ERROR,
+ CNAT_INDEX_MISMATCH_ERROR,
+ CNAT_PACKET_DROP_ERROR,
+ CNAT_INV_UNUSED_USR_INDEX,
+ CNAT_INVALID_VRFMAP_INDEX,
+ CNAT_USER_OUT_OF_PORTS,
+ CNAT_EXT_PORT_THRESH_EXCEEDED,
+ CNAT_EXT_PORT_THRESH_NORMAL,
+ CNAT_NO_EXT_PORT_AVAILABLE,
+ CNAT_SESSION_THRESH_EXCEEDED,
+ CNAT_SESSION_THRESH_NORMAL,
+ WQE_ALLOCATION_ERROR,
+ ERROR_PKT_DROPPED,
+ SYSMGR_PD_KEY_CREATION_ERROR,
+ SYSMGR_PD_SHMEM_ID_ERROR,
+ SYSMGR_PD_SHMEM_ATTACH_ERROR,
+ OCTEON_CKHUM_SKIPPED,
+ PK0_SEND_STATUS,
+ CMD_BUF_ALLOC_ERR,
+ SPP_CTX_ALLOC_FAILED,
+ SPP_MAX_DISPATCH_REACHED,
+ HA_SIGCHILD_RECV,
+ SIGACTION_ERR,
+ HA_INVALID_SEQ_OR_CONFIG_OR_TYPE,
+ NODE_CREATION_ERROR,
+ CNAT_CLI_INVALID_INPUT, /* newly added as part of CSCto04510, see sub codes below */
+ CNAT_DUMMY_HANDLER_HIT, /* has sub codes, see spp_dummy_handler_sub_cdes_t */
+ CNAT_CONFIG_ERROR, /* has sub codes, see spp_config_error_sub_codes_t below */
+ CNAT_NFV9_ERROR, /* has sub codes, see spp_nfv9_error_sub_codes_t below */
+ CNAT_CMVX_TWSI_READ_WRITE_FAIL, /* has sub codes, see spp_cmvx_error_sub_codes_t */
+ CNAT_TEMP_SENSOR_TIMEOUT,
+ CNAT_TEMP_SENSOR_DATA_MISMATCH,
+ CNAT_TEMP_SENSOR_CONFIG_FAILED,
+ HA_APP_NOT_RESPONDING,
+ HA_DATA_PATH_TEST_FAILED,
+ CNAT_WRONG_PORT_ALLOC_TYPE,
+ CNAT_NEW_PORT_ALLOC_ERROR,
+ CNAT_INVALID_INDEX_TO_FREE_PORT,
+ CNAT_DELETE_DB_ENTRY_NO_PORTMAP,
+ CNAT_MAIN_DB_LIMIT_ERROR,
+ CNAT_USER_DB_LIMIT_ERROR,
+ CNAT_FRAG_DB_ERROR, /* see spp_frag_db_error_sub_codes_t below */
+
+ DROP_PKT_DUMP,
+ CNAT_NAT64_SYSTEM_LIMIT_ERROR,
+ CNAT_ERROR_MAX
+} spp_error_codes_t;
+
+typedef enum {
+
+ TCP_MSS_INVALID_IVRF = 10, /* 1 param - vrf id */
+ NFV9_LOG_INVALID_IP_OR_PORT = 20, /* 2 params - nfv9 server ip and port */
+ NFV9_LOG_INVALID_PARAMS_OTHERS, /* 3 params, ref rate, time out, path mtu */
+ NFV9_LOG_PATH_MTU_TOO_SMALL, /* 1 param, path mtu passed */
+ NFV9_LOG_CANNOT_ADD_VRF_NOT_FOUND, /* 1 param, in vrf id */
+
+ VRF_MAP_ADDR_POOL_START_ADDR_GT_END_ADDR = 30, /* 2 params, start and end addr */
+ VRF_MAP_ADDR_POOL_ADDR_POOL_TOO_LARGE, /* 2 params, start and end addr */
+ VRF_MAP_ADDR_POOL_INVALID_IN_OR_OUT_VRF, /* 2 params, in vrf and out vrf */
+ VRF_MAP_ADDR_POOL_TOO_LARGE_FOR_CORE, /* 2 params, pool size, core instance */
+ VRF_MAP_DEL_POOL_START_ADDR_GT_END_ADDR, /* 2 params, start and end addr */
+ VRF_MAP_DEL_POOL_ADDR_POOL_NOT_FOUND, /* 2 params, start and end addr */
+ VRF_MAP_DEL_POOL_VRF_MAP_EMPTY, /* 2 params, start and end addr */
+
+ ADD_SVI_ADDR_INVALID_VRF = 40, /* 2 params, vrf passed and ipv4 addr */
+ ADD_SVI_INDEX_INVALID_VRF, /* 2 params, vrf, uidb_index */
+
+ MAPPED_STAT_PORT_INVALID_OUTPUT_PARAMS = 50,
+ /* 3 params, out vrf, out ip, out port */
+ MAPPED_STAT_PORT_UDP_PORT_POLARITY_MISMATCH, /* 2 params, in port and out port */
+ MAPPED_STAT_PORT_IN_VRF_MAP_EMPTY, /* 1 param, in vrf id passed */
+ MAPPED_STAT_PORT_VRF_MAP_NOT_IN_S_RUN, /* 1 param, vrf map status */
+ MAPPED_STAT_PORT_INVALID_OUT_VRF_ID, /* 1 param, out vrf id passed */
+ MAPPED_STAT_PORT_FAILED_TO_ADD_STAT_PORT, /* 4 params, in vrf, in ip, in port, error code */
+
+ STAT_PORT_INVALID_IN_PARAMS = 60, /* 4 params, in vrf, in ip, in port, proto */
+ STAT_PORT_FAILED_TO_ADD_STAT_PORT, /* 4 params, in vrf, in ip, in port, error code */
+ STAT_PORT_CONFIG_IN_USE, /* 4 params, in vrf, in ip, in port, proto */
+
+ DEL_STAT_PORT_IN_VRF_MAP_EMPTY = 70, /* 1 param, in vrf id passed */
+ DEL_STAT_PORT_INVALID_IN_PARAMS, /* 4 params, in vrf, in ip, in port, proto */
+ DEL_STAT_PORT_CANNOT_DELETE_NO_ENTRY, /* 4 params, in vrf, in ip, in port, proto */
+ DEL_STAT_PORT_CANNOT_DELETE_NOT_STATIC_PORT, /* 4 params, in vrf, in ip, in port, proto*/
+
+ XLAT_SVI_CFG_INVALID_INDEX = 80, /* 1 param - uidb_index */
+ XLAT_WRONG_V6_PREFIX_MASK, /* 1 param - v6 prefix mask */
+ XLAT_INVALID_XLAT_ID_ERROR, /* 1 param - id */
+
+ V6RD_INVALID_6RD_ID_ERROR = 90, /*1 param - id */
+ MAPE_INVALID_MAPE_ID_ERROR = 100 /* param - id */
+} spp_config_error_sub_codes_t;
+
+typedef enum {
+ CONFIG_DUMMY,
+ CONFIG_DUMMY_MAX,
+ SHOW_DUMMY,
+ SHOW_DUMMY_MAX,
+ DEBUG_DUMMY,
+ DEBUG_DUMMY_MAX
+} spp_dummy_handler_sub_cdes_t;
+
+typedef enum {
+ CMVX_READ,
+ CMVX_WRITE
+} spp_cmvx_error_sub_codes_t;
+
+typedef enum {
+ FRAG_DB_INVALID_BUCKET,
+ FRAG_DB_NO_ENTRY
+} spp_frag_db_error_sub_codes_t;
+
+typedef enum {
+ CLI_INVALID_PAYLOAD_SIZE,
+ CLI_INVALID_MSG_ID
+} spp_cli_error_sub_codes_t;
+
+typedef enum {
+ NFV9_DOWNSTREAM_CONGESTION,
+ NFV9_FAILED_TO_CREATE_CONTEXT
+} spp_nfv9_error_sub_codes_t;
+
+typedef struct spp_cnat_logger_tbl_t_ {
+ u16 error_code; // Error code for this log entry
+ u16 num_args;
+ u16 rate_limit_time; // If we need to rate_limit logging
+ u8 param_name[7][32];// Parameter name for debug purposes
+} spp_cnat_logger_tbl_t;
+
+extern spp_cnat_logger_tbl_t spp_cnat_logger_table[];
+
+/*
+ * This corresponds to the length of the IMETRO SHIM Header for RODDICK
+ * For non-roddick cases, introduce an Ethernet header as well
+ */
+#if defined(RODDICK)
+#define SPP_TRACE_LOG_SHIM_HDR_OFFSET 8
+#define SPP_TRACE_LOG_ENCAPS_OFFSET 0
+#else
+#define SPP_TRACE_LOG_SHIM_HDR_OFFSET 0
+#define SPP_TRACE_LOG_ENCAPS_OFFSET 16
+#endif
+
+#define SPP_LOG_TRACE_HEADER_LENGTH \
+ (sizeof(spp_trace_log_hdr_t))
+
+
+#define SPP_TRACE_LOG_IP_HDR_OFFSET \
+ (SPP_TRACE_LOG_ENCAPS_OFFSET + \
+ SPP_TRACE_LOG_SHIM_HDR_OFFSET)
+
+
+#define SPP_TRACE_LOG_UDP_HDR_OFFSET \
+ (SPP_TRACE_LOG_IP_HDR_OFFSET + sizeof(ipv4_header))
+
+#define SPP_TRACE_LOG_HDR_OFFSET \
+ (SPP_TRACE_LOG_UDP_HDR_OFFSET + sizeof(udp_hdr_type_t))
+
+#define SPP_TRACE_LOG_RECORD_LENGTH 4
+
+/*
+ * Maximum length of the log data in a single packet
+ */
+#define SPP_TRACE_LOG_MAX_PKT_LENGTH 800
+
+/* Structures and defines to store log info for MSC */
+#define SPP_TRACE_LOG_INVALID_LOGGING_INDEX 0xffffffff
+
+/*
+ * This structure stores the Logging information on per LOG TYPE
+ * basis. This structure is allocated from a pool and index
+ * to this structure based on log type
+ */
+typedef struct {
+ /*
+ * This field determines the maximum size of the trace log information
+ * that can be stored in a logging packet
+ */
+ u16 max_length_minus_max_record_size;
+
+ u32 sequence_num; /* Sequence number of the logging packet */
+ u32 last_pkt_sent_count;
+ u16 pkt_length; /* Length of the current trace log information */
+ u16 log_record_length; /* Length of add record */
+ u16 total_record_length; /* Total length of the trace records */
+ u16 total_record_count; /* Number of trace records */
+ spp_log_type_t log_type;
+ /*
+ * current logging context
+ */
+ spp_ctx_t *current_logging_context;
+
+ /*
+ * Timestamp in UNIX seconds corresponding to when the current
+ * logging packet was created
+ */
+ u32 current_logging_context_timestamp;
+
+ /*
+ * Queued logging context waiting to be sent to the l3 infra node
+ */
+ spp_ctx_t *queued_logging_context;
+
+ /*
+ * Headers corresponding to various records in the
+ * current trace logging context
+ */
+ spp_trace_log_t *log_record;
+ spp_trace_log_hdr_t *log_header;
+ u8 *next_data_ptr;
+
+} spp_trace_log_info_t;
+
+typedef struct {
+ /*
+ * spp_ctx_alloc() call failed
+ */
+ u64 spp_trace_log_context_creation_fail_count;
+
+ /*
+ * Cannot send the existing logging pkt, so cannot create
+ * any additional packets for logging purposes
+ */
+ u64 spp_trace_log_context_creation_deferred_count;
+
+ /*
+ * Cannot send the existing logging pkt due to cnat_rewrite_output
+ * superframe being full.
+ */
+ u64 spp_trace_log_downstream_constipation_count;
+} spp_global_counters_t;
+
+
+/*
+ * Global structure for SPP LOGS
+ */
+typedef struct {
+ /* A timer structure to periodically send log packets
+ * that have been waiting to be full for a long time. This will
+ * ensure event/error logs don't get delayed too much before they
+ * are sent to the MSC.
+ */
+ spp_timer_t log_timer;
+
+ /*
+ * Node index corresponding to the infra L3 output node
+ * to which the trace logging node will send the packet
+ */
+ u16 spp_trace_log_disp_node_index;
+
+ /*
+ * Whether the trace log information has been initialized
+ */
+ u8 spp_trace_log_init_done;
+
+ /*
+ * pool index in global pool based on log type
+ */
+ u32 spp_log_pool_index[SPP_LOG_MAX];
+
+} spp_trace_log_global_info_t;
+
+
+extern spp_timer_t sensor_timer;
+extern spp_trace_log_info_t spp_default_trace_log_info;
+extern spp_trace_log_info_t *spp_trace_log_info_pool;
+
+extern spp_trace_log_global_info_t spp_trace_log_global_info;
+
+void spp_trace_logger(u16 error_code, u16 num_args, u32 *arg);
+void spp_trace_log_init(void);
+void init_trace_log_buf_pool(void);
+void spp_printf(u16 error_code, u16 num_args, u32 *arg);
+
+/*
+ * The following 2 functions are temporary hacks until
+ * we have RTC support from the PD nodes
+ */
+#if 0
+inline
+u32 spp_trace_log_get_sys_up_time_in_ms (void);
+#endif
+extern
+u32 spp_trace_log_get_unix_time_in_seconds (void);
+
+enum {
+ TEMPERATURE_SENSOR_TEST_MODE,
+ TEMPERATURE_SENSOR_QUIET_MODE,
+};
+
+extern int temperature_read_blocked;
+
+void read_octeon_sensors(u8 mode);
+void Init_temperature_sensors();
+#endif /* __SPP_PLATFORM_TRACE_LOG_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/spp_timers.h b/plugins/vcgn-plugin/vcgn/spp_timers.h
new file mode 100644
index 00000000000..afb0147b2ed
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/spp_timers.h
@@ -0,0 +1,139 @@
+/*
+ *------------------------------------------------------------------
+ * spp_timers.h
+ *
+ * Copyright (c) 2008-2009 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#ifndef __SPP_TIMERS_H__
+#define __SPP_TIMERS_H__
+
+
+typedef struct d_list_el_ {
+ struct d_list_el_ *next;
+ struct d_list_el_ *prev;
+} d_list_el_t;
+
+/*
+ * d_list_init
+ */
+
+static inline void d_list_init (d_list_el_t *headp)
+{
+ headp->prev = headp->next = headp;
+}
+
+/*
+ * d_list_add_head - add element at head of list
+ */
+
+static inline void d_list_add_head (d_list_el_t *headp,
+ d_list_el_t *elp)
+{
+ ASSERT(elp->prev == elp); /* multiple enqueue, BAD! */
+ ASSERT(elp->next == elp);
+
+ elp->next = headp->next;
+ headp->next = elp;
+ elp->prev = elp->next->prev;
+ elp->next->prev = elp;
+}
+
+/*
+ * d_list_add_tail - add element at tail of list
+ */
+static inline void d_list_add_tail (d_list_el_t *headp,
+ d_list_el_t *elp)
+{
+ ASSERT(elp->prev == elp); /* multiple enqueue, BAD! */
+ ASSERT(elp->next == elp);
+
+ headp = headp->prev;
+
+ elp->next = headp->next;
+ headp->next = elp;
+ elp->prev = elp->next->prev;
+ elp->next->prev = elp;
+}
+
+/*
+ * d_list_rem_head - removes first element from list
+ */
+static inline d_list_el_t *d_list_rem_head (d_list_el_t *headp)
+{
+ d_list_el_t *elp;
+
+ elp = headp->next;
+ if (elp == headp)
+ return (NULL);
+ headp->next = elp->next;
+ elp->next->prev = elp->prev;
+
+ elp->next = elp->prev = elp;
+ return (elp);
+}
+
+/*
+ * d_list_rem_elem - removes specific element from list.
+ */
+static inline void d_list_rem_elem (d_list_el_t *elp)
+{
+ d_list_el_t *headp;
+
+ headp = elp->prev;
+
+ headp->next = elp->next;
+ elp->next->prev = elp->prev;
+ elp->next = elp->prev = elp;
+}
+
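The four inline helpers above implement a circular, sentinel-headed doubly-linked list. A minimal usage sketch (variable names are illustrative; note that every element, not just the head, must be made self-pointing via d_list_init() before it is enqueued, as the ASSERTs require):

    d_list_el_t head, a, b;

    d_list_init (&head);            /* sentinel head */
    d_list_init (&a);               /* elements must self-point before enqueue */
    d_list_init (&b);

    d_list_add_tail (&head, &a);    /* list: head -> a */
    d_list_add_head (&head, &b);    /* list: head -> b -> a */

    d_list_el_t *first = d_list_rem_head (&head);   /* returns &b */
    d_list_rem_elem (&a);                           /* list is empty again */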
+#define TIMER_BKTS_PER_WHEEL 128 /* power of 2, please */
+#define TIMER_NWHEELS 4
+
+typedef struct {
+ i32 curindex; /* current index for this wheel */
+ d_list_el_t *bkts; /* vector of bucket listheads */
+} spp_timer_wheel_t;
+
+
+typedef struct {
+ u64 next_run_ticks; /* Next time we expire timers */
+ spp_timer_wheel_t **wheels; /* pointers to wheels */
+} spp_timer_axle_t;
+
+
+typedef struct {
+ d_list_el_t el;
+ u16 cb_index;
+ u16 flags;
+ u64 expires;
+} spp_timer_t;
+
+#define SPP_TIMER_RUNNING 0x0001
+
+
+/*
+ * prototypes
+ */
+void spp_timer_set_ticks_per_ms(u64);
+void spp_timer_axle_init (spp_timer_axle_t *ta);
+void spp_timer_expire(spp_timer_axle_t *ta, u64 now);
+void spp_timer_final_init(void);
+
+void spp_timer_start(spp_timer_t *tp);
+void spp_timer_start_axle(spp_timer_axle_t *ta, spp_timer_t *tp);
+void spp_timer_stop(spp_timer_t *tp);
+u16 spp_timer_register_callback (void (*fp)(spp_timer_t *));
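A sketch of how these primitives appear to fit together; it assumes 'expires' holds an absolute tick value, that spp_timer_start() places the timer on the default axle, and the callback name and interval below are hypothetical:

    static spp_timer_t my_timer;

    static void my_expiry_cb (spp_timer_t *tp)
    {
        /* handle the expiry here; re-arm for a periodic timer */
        tp->expires += 1000;          /* hypothetical interval, in ticks */
        spp_timer_start (tp);
    }

    static void arm_my_timer (u64 now_ticks)
    {
        my_timer.cb_index = spp_timer_register_callback (my_expiry_cb);
        my_timer.expires  = now_ticks + 1000;
        spp_timer_start (&my_timer);
    }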
+
+#endif /* __SPP_TIMERS_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/tcp_header_definitions.h b/plugins/vcgn-plugin/vcgn/tcp_header_definitions.h
new file mode 100644
index 00000000000..02920bcc8ee
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/tcp_header_definitions.h
@@ -0,0 +1,1582 @@
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * Filename: tcp_header_definitions.h
+ *
+ * Description: Layer 2, 3, 4 definitions and header types
+ *
+ * Assumptions and Constraints:
+ *
+ * Copyright (c) 2012-2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *-----------------------------------------------------------------------------
+ */
+
+#ifndef __TCP_HEADER_DEFINITIONS_H__
+#define __TCP_HEADER_DEFINITIONS_H__
+
+/*
+ * A general list of Layer 3 protocols, used by many Layer 2 encaps.
+ *
+ * formerly:
+ * TYPE_IP TYPE_IP10MB
+ * TYPE_ARP TYPE_RFC826_ARP
+ * TYPE_RARP TYPE_REVERSE_ARP
+ * TYPE_MPLS TYPE_TAGSWITCH
+ */
+#define TYPE_IP 0x0800
+#define TYPE_IP_V6 0x86DD
+#define TYPE_ARP 0x0806
+#define TYPE_RARP 0x8035
+#define TYPE_MPLS 0x8847
+#define TYPE_CDP 0x2000
+#define TYPE_CGMP 0x2001
+#define TYPE_LACP 0x8808 /* 802.3ad */
+#define TYPE_CLNS 0xFEFE
+
+#define TYPE_PPPOE_SESSION 0x8864 /* PTA plus */
+#define TYPE_PPPOE_DISCOVERY 0x8863 /* PTA plus */
+
+/*
+ * for atm arp handling
+ */
+#define IN_ATM_ARP_BIT 0x0008
+
+/*
+ * The Layer 2 header structures.
+ */
+
+
+/*
+** HDLC
+*/
+
+typedef struct hdlc_hdr_type {
+ u16 addr;
+ u16 type;
+ u8 data[0];
+} hdlc_hdr_type;
+
+#define HDLC_ADDR_CMD 0x0F00
+#define HDLC_HDR_LEN 4
+#define HDLC_BROADCAST_BIT 31
+#define TYPE_KEEP 0x8035
+
+#define HDLC_CLNS (HDLC_ADDR_CMD<<16|TYPE_CLNS)
+#define HDLC_CDP (HDLC_ADDR_CMD<<16|TYPE_CDP)
+#define HDLC_MPLS (HDLC_ADDR_CMD<<16|TYPE_MPLS)
+#define HDLC_IP (HDLC_ADDR_CMD<<16|TYPE_IP)
+#define HDLC_IP_V6 (HDLC_ADDR_CMD<<16|TYPE_IP_V6)
+#define HDLC_KEEPALIVE_CMD (HDLC_ADDR_CMD<<16|TYPE_KEEP)
+
+/*
+** PPP
+*/
+
+typedef struct ppp_comp_hdr_type {
+ union {
+ u8 ppp_u8[4];
+ u16 ppp_u16[2];
+ u32 ppp_u32;
+ } ppp_comp_u;
+} ppp_comp_hdr_type;
+
+#define PPP_STATION 0xFF03
+#define PPP_STATION_LEN 0x2
+#define PPP_ENDPROTO 0x01
+#define PPP_NOT_ENDPROTO 0xfffffffe
+#define PPP_CONTROL_PROTOCOL_MASK 0x8000
+#define PPP_CONTROL_PROTOCOL_BIT 15
+#define PPP_CSCO_LEN 4
+#define PPP_RFC1661_LEN 2
+#define PPP_RFC1661_COMP_LEN 1
+
+#define TYPE_PPP_IP 0x0021
+#define TYPE_PPP_IP_V6 0x0057
+#define TYPE_PPP_MPLS_UNICAST 0x0281
+#define TYPE_PPP_MPLS_CONTROL 0x8281
+#define TYPE_PPP_CLNS 0x0023
+#define TYPE_PPP_CDP 0x0207
+
+#define TYPE_PPP_IPCP 0x8021
+#define TYPE_PPP_LCP 0xC021
+#define TYPE_PPP_PAP 0xC023
+#define TYPE_PPP_LQR 0xC025
+#define TYPE_PPP_CHAP 0xC223
+
+
+#define TYPE_PPP_LCP_ECHO_REQUEST 0x09
+/*
+** MultiLink PPP
+*/
+
+#define MLPPP_FLAGS_FIELD_LEN 4
+#define MLPPP_BEGIN_MASK 0x80000000
+#define MLPPP_END_MASK 0x40000000
+#define MLPPP_BEGIN_END_MASK (MLPPP_BEGIN_MASK|MLPPP_END_MASK)
+#define MLPPP_BEGIN_END_SHIFT 30
+#define MLPPP_SEQUENCE_NUM_MASK 0x00FFFFFF
+#define MLPPP_MC_CLASS_ID_MASK 0x3C000000
+#define MLPPP_MC_CLASS_SHIFT 26
+
+#define TYPE_PPP_MULTILINK 0x003D
+
+/* these are needed in the micro-code, for optimizations */
+#define TYPE_PPP_FULL_IP_4 0xff030021
+#define TYPE_PPP_FULL_IP_3 0xff0321
+#define TYPE_PPP_FULL_IP_2 0x0021
+#define TYPE_PPP_FULL_IP_1 0x21
+
+#define MLPPP_BEGIN_END_MASK_BYTE 0xC0
+#define MLPPP_BEGIN_BIT 7
+#define MLPPP_END_BIT 6
+#define MLPPP_MC_CLASS_ID_MASK_BYTE 0x3C
+#define MLPPP_MC_CLASS_ID_SHIFT_BYTE 2
+
+#define MLPOA_BEGIN_END_SHIFT 24
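A sketch of how the MLPPP masks above are meant to be applied ('mlppp_frag_hdr' is a hypothetical pointer to the 4-byte fragment header):

    u32 frag_word = clib_net_to_host_u32 (*(u32 *) mlppp_frag_hdr);
    u8  begin_end = (frag_word & MLPPP_BEGIN_END_MASK)   >> MLPPP_BEGIN_END_SHIFT;
    u8  mc_class  = (frag_word & MLPPP_MC_CLASS_ID_MASK) >> MLPPP_MC_CLASS_SHIFT;
    u32 seq_num   =  frag_word & MLPPP_SEQUENCE_NUM_MASK;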
+
+/*
+** Ethernet ARPA
+*/
+
+
+typedef struct ethernet_arpa_hdr_type {
+ u8 daddr[6];
+ u8 saddr[6];
+ u16 type;
+ u8 data[0];
+} ethernet_arpa_hdr_type;
+
+typedef struct extension_802p3_type {
+ u16 type;
+ u8 ctl;
+ u8 data[0];
+} extension_802p3_type;
+
+typedef struct ethernet_802p3_hdr_type {
+ u8 daddr[6];
+ u8 saddr[6];
+ u16 length;
+ extension_802p3_type extension;
+} ethernet_802p3_hdr_type;
+
+
+typedef struct ethernet_vlan_802p3_hdr_type {
+ u8 daddr[6];
+ u8 saddr[6];
+ u16 type1;
+ u16 vlan_id;
+ u16 length;
+ extension_802p3_type extension;
+} ethernet_vlan_802p3_hdr_type;
+
+#define MIN_ETHERNET_PKT_LEN 60
+#define MAX_ETHERNET_PKT_LEN 1500
+#define ETHERNET_ARPA_HDR_LEN 14
+#define ETHERNET_TYPE_FIELD_SIZE 2
+
+
+/*
+** Ethernet 802.1q (VLAN)
+*/
+
+typedef struct ethernet_vlan_hdr_type {
+ u8 dest_addr[6];
+ u8 src_addr[6];
+ u16 type1;
+ u16 vlan_hdr;
+ u16 type2;
+ u8 data[0];
+} ethernet_vlan_hdr_type;
+
+
+/*
+** Ethernet 802.1.q-in-q (QinQ)
+*/
+
+typedef struct ethernet_qinq_hdr_type {
+ u8 dest_addr[6];
+ u8 src_addr[6];
+ u16 type1;
+ u16 vlan_hdr1;
+ u16 type2;
+ u16 vlan_hdr2;
+ u16 type3;
+ u8 data[0];
+} ethernet_qinq_hdr_type;
+
+
+/*
+** Ethernet 802.3ad EtherChannel control
+*/
+
+typedef struct ethernet_lacp_hdr_type {
+ u8 daddr[6];
+ u8 saddr[6];
+ u16 type;
+ u16 LAcmd;
+ u8 data[0];
+} ethernet_lacp_hdr_type;
+
+
+/*
+** Ethernet 802.1 Bridge (spanning tree) PDU
+*/
+
+typedef struct ethernet_bpdu_hdr_type {
+ u8 daddr[6];
+ u8 saddr[6];
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 more[0];
+} ethernet_bpdu_hdr_type;
+
+#define ETH_BPDU_DSAP 0x42
+#define ETH_BPDU_SSAP 0x42
+#define ETH_BPDU_CONTROL 0x03
+#define ETH_BPDU_MATCH 0x424203
+
+
+/************************************************************/
+/* PTA PLUS ETHERNET ENCAPSULATIONS */
+/*
+ * PPPoEoARPA 20 bytes
+ */
+typedef struct ethernet_pppoe_arpa_hdr_type {
+ u8 daddr[6];
+ u8 saddr[6];
+ u16 type;
+ /* pppoe hdr at beginning of enet payload */
+ u16 vtc; /* version(4b), type(4b) and code(8b) fields */
+ u16 sid;
+ u16 len;
+ u8 ppp_header[0]; /* PPP header start, no ff03 field present */
+} ethernet_pppoe_arpa_hdr_type;
+
+typedef struct pppoe_hdr_type {
+ /* pppoe hdr at beginning of enet payload */
+ u16 vtc; /* version(4b), type(4b) and code(8b) fields */
+ u16 sid;
+ u16 len;
+ u8 ppp_header[0]; /* PPP header start, no ff03 field present */
+} pppoe_hdr_type;
+
+/*
+** PPPoEoVLAN (802.1p or 802.1q) 24 bytes
+*/
+typedef struct ethernet_pppoe_vlan_hdr_type {
+ u8 dest_addr[6];
+ u8 src_addr[6];
+ u16 type1;
+ u16 vlan_hdr;
+ u16 type2;
+ /* pppoe hdr at beginning of enet payload */
+ u16 vtc; /* version(4b), type(4b) and code(8b) fields */
+ u16 sid;
+ u16 len;
+ u8 ppp_header[0]; /* PPP header start, no ff03 field present */
+} ethernet_pppoe_vlan_hdr_type;
+
+/*
+** PPPoEoQinQ 28 bytes
+*/
+typedef struct ethernet_pppoe_qinq_hdr_type {
+ u8 dest_addr[6];
+ u8 src_addr[6];
+ u16 type1;
+ u16 vlan_hdr1;
+ u16 type2;
+ u16 vlan_hdr2;
+ u16 type3;
+ /* pppoe hdr at beginning of enet payload */
+ u16 vtc; /* version(4b), type(4b) and code(8b) fields */
+ u16 sid;
+ u16 len;
+ u8 ppp_header[0]; /* PPP header start, no ff03 field present */
+} ethernet_pppoe_qinq_hdr_type;
+
+#define ETH_PPPOE_ARPA_HDR_LEN sizeof(ethernet_pppoe_arpa_hdr_type)
+#define ETH_PPPOE_VLAN_HDR_LEN sizeof(ethernet_pppoe_vlan_hdr_type)
+#define ETH_PPPOE_QINQ_HDR_LEN sizeof(ethernet_pppoe_qinq_hdr_type)
+#define PPPOE_HDR_LEN 6
+/* End PTA PLUS ETHERNET ENCAPSULATIONS */
+/****************************************************************/
+
+
+
+#define TYPE_DOT1Q 0x8100
+#define DOT1Q_HDR_LEN 18
+#define DOT1Q_VLAN_ID_MASK 0x0FFF
+#define DOT1Q_VLAN_ID_RES_0 0x0000
+#define DOT1Q_VLAN_ID_RES_4095 0x0FFF
+#define DOT1Q_ARPA_INDEX DOT1Q_VLAN_ID_RES_0
+
+#define TYPE_QINQ_91 0x9100
+#define TYPE_QINQ_92 0x9200
+#define TYPE_QINQ_88A8 0x88A8
+#define QINQ_HDR_LEN 22
+
+/*
+ * 802.1p support
+ */
+#define DOT1P_VLAN_COS_MASK 0xE000
+#define DOT1P_VLAN_COS_SHIFT 13
+#define DOT1P_MAX_COS_VALUE 7
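A small sketch of decoding the 802.1Q tag control field with the masks above ('vlan' is assumed to point at an ethernet_vlan_hdr_type):

    u16 tag     = clib_net_to_host_u16 (vlan->vlan_hdr);
    u16 vlan_id = tag & DOT1Q_VLAN_ID_MASK;
    u8  cos     = (tag & DOT1P_VLAN_COS_MASK) >> DOT1P_VLAN_COS_SHIFT;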
+
+/*
+** Frame Relay
+*/
+
+/*
+ * formerly:
+ * TYPE_FR_IETF_IPV4 ENCAPS_FR_IETF
+ * TYPE_FR_CISCO_IPV4 ENCAPS_FR_CISCO
+ * TYPE_FR_ISIS ENCAPS_FR_ISIS
+ *
+ * FR_LMI_DLCI_CISCO LMI_DLCI_CISCO
+ * FR_LMI_DLCI_IETF LMI_DLCI_ITUANSI
+ */
+
+typedef struct frame_relay_hdr_type {
+ u16 address;
+ u16 control_nlpid;
+ u8 data[0];
+} frame_relay_hdr_type;
+
+typedef struct fr_snap_hdr_type {
+ u16 address;
+ u8 control;
+ u8 pad;
+ u8 nlpid;
+ u8 oui[3];
+ u16 protocol_id;
+} fr_snap_hdr_type;
+
+#define FR_ADDR_LEN 2
+#define FR_CTL_NLPID_LEN 2
+#define FR_HDR_LEN (FR_ADDR_LEN+FR_CTL_NLPID_LEN)
+
+/*
+ * These defines are for the FR-SNAP header.
+ * The SNAP header is set up solely so that we can
+ * identify ARP packets, which look like this:
+ *
+ * control pad nlpid oui protocol_id
+ * 03 00 80 00 00 00 0806
+ */
+#define FR_ARP_CONTROL 0x03
+#define FR_ARP_PAD 0x00
+#define FR_ARP_NLPID 0x80
+#define FR_ARP_OUI_0 0x00
+#define FR_ARP_OUI_1 0x00
+#define FR_ARP_OUI_2 0x00
+/*
+ * these are used only in the tmc code
+ */
+#define FR_NLPID_OUI_LEN 4
+#define FR_ARP_CONTROL_PAD 0x0300
+#define FR_ARP_NLPID_OUI 0x80000000
+
+
+#define FR_DLCI_UPPER_MASK 0xFC00
+#define FR_DLCI_UPPER_SHIFT 6
+#define FR_DLCI_LOWER_MASK 0x00F0
+#define FR_DLCI_LOWER_SHIFT 4
+
+/*
+ * Defines for converting a DLCI for insertion into a synthesized FR address
+ * field for FRoMPLS disposition.
+
+ * bit 8 7 6 5 4 3 2 1
+ * +-------------------------------+
+ * | Flag |
+ * | 0 1 1 1 1 1 1 0 |
+ * +-------------------------------+
+ * | Upper DLCI |C/R| 0 |
+ * +-------------------------------+
+ * | Lower DLCI | F | B | DE| 1 |
+ * +-------------------------------+
+ * | |
+ * :Frame relay information field :
+ * : (i.e.payload) :
+ * | |
+ * +-------------------------------+
+ * | FCS (2 or 4 octets) |
+ * | |
+ * +-------------------------------+
+ * | Flag |
+ * | 0 1 1 1 1 1 1 0 |
+ * +-------------------------------+
+ *
+ * a) With 10 bits for the DLCI
+ */
+#define FR_DLCI_TO_HDR_UPPER_MASK 0x3f0
+#define FR_DLCI_TO_HDR_UPPER_SHIFT (10-4)
+#define FR_DLCI_TO_HDR_LOWER_MASK 0xf
+#define FR_DLCI_TO_HDR_LOWER_SHIFT 4
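Given the layout above, a 10-bit DLCI could be folded into the synthesized 2-byte address field roughly as follows (a sketch; FR_EA1_MASK is defined further below):

    static inline u16 fr_dlci_to_address_field (u16 dlci)
    {
        return ((dlci & FR_DLCI_TO_HDR_UPPER_MASK) << FR_DLCI_TO_HDR_UPPER_SHIFT) |
               ((dlci & FR_DLCI_TO_HDR_LOWER_MASK) << FR_DLCI_TO_HDR_LOWER_SHIFT) |
               FR_EA1_MASK;    /* address field extension bit in the low byte */
    }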
+
+#define TYPE_FR_IETF_IP 0x03CC
+#define TYPE_FR_IETF_IP_V6 0x038E
+#define TYPE_FR_CISCO_IP 0x0800
+#define TYPE_FR_CISCO_IP_V6 0x86DD
+#define TYPE_FR_ISIS 0x0383
+#define TYPE_FR_SNAP0PAD 0x0380
+#define TYPE_FR_SNAP1PAD 0x0300
+#define TYPE_FR_FRF12 0x03B1
+#define TYPE_FR_MLP 0x03CF
+#define TYPE_FR_EEK 0x8037
+
+#define FR_LMI_DLCI_CISCO 1023
+#define FR_LMI_DLCI_IETF 0
+
+#define FR_NOT_NOT_NOT 0
+#define FR_NOT_NOT_DE 1
+#define FR_NOT_BECN_NOT 2
+#define FR_NOT_BECN_DE 3
+#define FR_FECN_NOT_NOT 4
+#define FR_FECN_NOT_DE 5
+#define FR_FECN_BECN_NOT 6
+#define FR_FECN_BECN_DE 7
+
+#define FR_FECN_BECN_DE_MASK 0x000E
+#define FR_FECN_BECN_DE_SHIFT 1
+
+/* Address field extension bit for standard 2-byte FR address field */
+#define FR_EA1_MASK 0x0001
+#define FR_EA1_MASK_BIT 0
+
+/*
+ * these are needed in the micro-code, for optimizations
+ */
+
+/* the bit position (in the address field) of the LSB of the DLCI */
+#define FR_DLCI_LS_BIT 4
+
+
+/*
+**
+** MultiLink Frame Relay
+**
+*/
+
+typedef struct mlfr_hdr_type {
+ u16 frag_hdr;
+ u16 address;
+ u16 control_nlpid;
+ u8 data[0];
+} mlfr_hdr_type;
+
+/*
+ * LIP frames have B, E and C set--the other
+ * bits in the frag_hdr field are irrelevant.
+ *
+ * NOTE: Injected LIP packets have a frag_hdr of 0xE100.
+ *
+ */
+#define MLFR_LIP_FRAME 0xE100
+#define MLFR_LIP_MASK 0xE000
+#define MLFR_FRAG_HDR_LEN 2
+
+#define MLFR_BEGIN_MASK 0x8000
+#define MLFR_END_MASK 0x4000
+#define MLFR_BEGIN_END_MASK (MLFR_BEGIN_MASK|MLFR_END_MASK)
+#define MLFR_BEGIN_END_SHIFT 14
+
+#define MLFR_SEQ_NUM_HI_MASK 0x1E00
+#define MLFR_SEQ_NUM_HI_SHIFT 1
+#define MLFR_SEQ_NUM_LO_MASK 0x00FF
+
+/*
+ * these are needed in the micro-code, for optimizations
+ */
+#define MLFR_BEGIN_END_MASK_BYTE 0xC0
+
+
+/*
+ * FRF.12 definitions
+ */
+typedef struct frf12_hdr_type_ {
+ u16 address;
+ u16 control_nlpid;
+ u16 frag_hdr;
+ u8 data[0];
+} frf12_hdr_type;
+
+#define FRF12_FRAG_HDR_LEN sizeof(frf12_hdr_type)
+
+#define FRF12_BEGIN_MASK 0x8000
+#define FRF12_END_MASK 0x4000
+#define FRF12_BEGIN_END_MASK (FRF12_BEGIN_MASK|FRF12_END_MASK)
+#define FRF12_BEGIN_END_SHIFT 8
+
+#define FRF12_SEQ_NUM_HI_MASK 0x1E00
+#define FRF12_SEQ_NUM_HI_SHIFT 1
+#define FRF12_SEQ_NUM_LO_MASK 0x00FF
+#define FRF12_BEGIN_END_MASK_BYTE 0xC0
+
+
+
+/*
+**
+** MLP over Frame Relay
+** The ppp hdr can be either a
+** an MLP hdr or a PPP hdr
+**
+** MLP can be compressed or not:
+** a) 0xff03003d
+** b) 0x003d
+** c) 0x3d
+** followed by:
+** 1 byte with begin/end bits
+** 3 bytes of a sequence #
+**
+** PPP can be also be compressed or not.
+** Only these will be fwded:
+** a) 0xff030021
+** b) 0xff0321
+** c) 0x0021
+** d) 0x21
+**
+**
+*/
+typedef struct mlpofr_hdr_type {
+ u16 address;
+ u16 control_nlpid;
+ u8 ppp_header[0];
+} mlpofr_hdr_type;
+
+/*
+** ATM -
+*/
+
+/*
+ * channel_handle is defined as follows:
+ *
+ * bits 15 = reserved (must be 0)
+ * bits 14 - 0 = channel handle
+ *
+ *
+ * flags is a bitfield defined as follows:
+ *
+ * bits 15 - 13 = proto (PPPoA RFC1661 = 0,
+ * PPPoE = 1,
+ * RBE = 2,
+ * PPPoA Cisco = 3,
+ * MLPoATM RFC1661 = 4,
+ * MLPoATM Cisco = 5,
+ * Reserved = 6-7)
+ * bit 12 = encap (MUX=0,
+ * SNAP=1)
+ * bits 11 - 6 = reserved (must be 0)
+ * bits 5 - 3 = pkt_type (AAL5 pkt = 0,
+ * Raw cell (includes F4 OAM) = 1,
+ * F5 segment OAM cell = 2
+ * F5 end-to-end OAM cell = 3
+ * Reserved = 4-7)
+ * bit 2 = EFCI (congestion indication)
+ * bit 1 = reserved (must be 0)
+ * bit 0 = CLP (cell loss priority)
+ */
+
+typedef struct apollo_atm_generic_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+} apollo_atm_generic_hdr_type;
+
+typedef struct apollo_atm_aal5_snap_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 oui[3];
+ u16 type;
+ u8 data[0];
+} apollo_atm_aal5_snap_hdr_type;
+
+typedef struct atm_aal5_snap_hdr_type {
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 oui[3];
+ u16 pid;
+ u16 pad;
+ u8 data[0];
+} atm_aal5_snap_hdr_type;
+
+
+typedef struct apollo_atm_aal5_snap_hdr1_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 oui0;
+ u8 oui1;
+ u8 oui2;
+ u16 type;
+ u8 data[0];
+} apollo_atm_aal5_snap_hdr1_type;
+
+typedef struct apollo_atm_aal5_clns_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u16 type;
+ u16 data;
+} apollo_atm_aal5_clns_hdr_type;
+
+typedef struct apollo_atm_aal5_ilmi_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 data[0];
+} apollo_atm_aal5_ilmi_hdr_type;
+
+typedef struct apollo_atm_aal5_mux_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 data[0];
+} apollo_atm_aal5_mux_hdr_type;
+
+typedef struct apollo_atm_oam_f4_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ /*
+ * gcf_vpi_vci_pt_clp is a bitfield defined as follows:
+ *
+ * bits 31 - 28 = GCF
+ * bits 27 - 20 = VPI
+ * bits 19 - 4 = VCI
+ * bits 3 - 1 = PT
+ * bit 0 = CLP
+ */
+ u32 gcf_vpi_vci_pt_clp;
+ u8 data[0];
+} apollo_atm_oam_f4_hdr_type;
+
+#define APOLLO_ATM_OAM_F4_HDR_PT_MASK 0xE
+#define APOLLO_ATM_OAM_F4_HDR_PT_SHIFT 1
+
+typedef struct apollo_atm_oam_f5_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 data[0];
+} apollo_atm_oam_f5_hdr_type;
+
+#define APOLLO_IRONBUS_EXT_LESS_PROTO 0xFFFF0FFF
+#define APOLLO_CHANNEL_HANDLE_MASK 0xFFFF
+#define APOLLO_PKT_TYPE_MASK 0x0038
+#define APOLLO_PKT_TYPE_SHIFT 3
+#define APOLLO_FLAG_CLP_MASK 0x0001
+#define APOLLO_FLAG_CLP_BIT 0
+
+#define APOLLO_CHANNEL_HANDLE_RES_0 0x0000
+/*
+ * The 1 byte HEC field is removed by the line card.
+ */
+#define APOLLO_F4_RX_CELL_SIZE 52
+#define APOLLO_F5_RX_CELL_SIZE 52
+
+#define APOLLO_ATM_PACKET_TYPE_AAL5 0
+#define APOLLO_ATM_PACKET_TYPE_F4 1
+#define APOLLO_ATM_PACKET_TYPE_F5_SEG 2
+#define APOLLO_ATM_PACKET_TYPE_F5_E_TO_E 3
+#define APOLLO_ATM_PACKET_TYPE_4 4
+#define APOLLO_ATM_PACKET_TYPE_5 5
+#define APOLLO_ATM_PACKET_TYPE_6 6
+#define APOLLO_ATM_PACKET_RESERVED 7
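A sketch of classifying a received cell/packet from the flags field described earlier ('atm' is assumed to point at an apollo_atm_generic_hdr_type, with flags already in host byte order):

    u16 flags    = atm->flags;
    u32 pkt_type = (flags & APOLLO_PKT_TYPE_MASK) >> APOLLO_PKT_TYPE_SHIFT;

    if (pkt_type == APOLLO_ATM_PACKET_TYPE_AAL5) {
        /* reassembled AAL5 PDU */
    } else if (pkt_type == APOLLO_ATM_PACKET_TYPE_F5_SEG ||
               pkt_type == APOLLO_ATM_PACKET_TYPE_F5_E_TO_E) {
        /* F5 OAM cell */
    }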
+
+#define APOLLO_AAL5_MUX_IP_HDR_LEN 4
+#define APOLLO_AAL5_SNAP_HDR_LEN 12
+
+#define APOLLO_RCV_IRON_BUS_EXT_LEN 4
+#define APOLLO_TX_IRON_BUS_EXT_LEN 8
+
+/*
+ * MLPoA type definitions
+ */
+#define MLPOA_CISCO_HDR 0xFF03
+#define MLPOA_SNAP_HDR_LEN 4
+#define MLPOA_CISCO_HDR_LEN 2
+
+/************************************************************/
+/* PTA PLUS ATM ENCAPSULATIONS */
+
+/* RBE header 28 bytes*/
+typedef struct apollo_atm_aal5_llcsnap_rbe_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 oui[3];
+ u16 pid;
+ u16 pad;
+ /* enet header within */
+ u8 daddr[6];
+ u8 saddr[6];
+ u16 type;
+ u8 data[0]; /* start of IP */
+} apollo_atm_aal5_llcsnap_rbe_hdr_type;
+
+/* PPPoEoA header 34 bytes*/
+typedef struct apollo_atm_aal5_llcsnap_pppoe_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 oui[3];
+ u16 pid;
+ u16 pad;
+ /* enet header within */
+ u8 daddr[6];
+ u8 saddr[6];
+ u16 type;
+ /* pppoe hdr at beginning of enet payload */
+ u16 vtc; /* version(4b), type(4b) and code(8b) fields */
+ u16 sid;
+ u16 len;
+ u8 ppp_header[0]; /* PPP header start, no ff03 field present */
+} apollo_atm_aal5_llcsnap_pppoe_hdr_type;
+
+
+/* PPPoA MUX 4 bytes*/
+typedef struct apollo_atm_aal5_mux_pppoa_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 ppp_header[0];
+} apollo_atm_aal5_mux_pppoa_hdr_type;
+
+
+/* PPPoA SNAP LLC 8 bytes */
+typedef struct apollo_atm_aal5_llcsnap_pppoa_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 nlpid;
+ u8 ppp_header[0];
+} apollo_atm_aal5_llcsnap_pppoa_hdr_type;
+
+/* MLPoA MUX (generic) */
+typedef struct apollo_atm_aal5_mux_mlpoa_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 ppp_header[0];
+} apollo_atm_aal5_mux_mlpoa_hdr_type;
+
+/* MLPoA SNAP LLC */
+typedef struct apollo_atm_aal5_llcsnap_mlpoa_hdr_type {
+ u16 channel_handle;
+ u16 flags;
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 nlpid;
+ u8 ppp_header[0];
+} apollo_atm_aal5_llcsnap_mlpoa_hdr_type;
+
+
+#define PPPOA_SNAPLLC_HDR_LEN sizeof(apollo_atm_aal5_llcsnap_pppoa_hdr_type)
+#define PPPOA_MUX_HDR_LEN sizeof(apollo_atm_aal5_mux_pppoa_hdr_type)
+#define PPPOE_SNAPLLC_HDR_LEN sizeof(apollo_atm_aal5_llcsnap_pppoe_hdr_type)
+#define RBE_SNAPLLC_HDR_LEN sizeof(apollo_atm_aal5_llcsnap_rbe_hdr_type)
+
+/* End PTA PLUS ATM ENCAPSULATIONS */
+/****************************************************************/
+
+#define LLCSNAP_PID_DOT3_NOFCS 0x0007
+
+/*
+** the SNAP header
+*/
+
+/*
+ * Note that some of these definitions are split
+ * up along certain word or half word boundaries
+ * to help expedite the TMC code.
+ */
+#define LLC_SNAP_HDR_DSAP 0xAA
+#define LLC_SNAP_HDR_SSAP 0xAA
+#define LLC_SNAP_HDR_CONTROL 0x03
+#define LLC_SNAP_HDR_OUI_0 0x00
+#define LLC_SNAP_HDR_OUI_1 0x00
+#define LLC_SNAP_HDR_OUI_2 0x00
+#define LLC_SNAP_HDR_OUI_2_CDP 0x0C
+
+#define LLC_SNAP_HDR_DSAP_SSAP 0xAAAA
+#define LLC_SNAP_HDR_DSAP_SSAP_CTRL_OUI0 0xAAAA0300
+#define LLC_SNAP_HDR_CONTROL_OUI 0x03000000
+#define LLC_SNAP_HDR_OUI1_OUI2_CDP 0x000C2000
+
+
+
+/*
+** SRP
+*/
+
+/*
+ * The v2_gen_hdr is a 2-byte field that contains the following:
+ *
+ * [ ttl | ring_id | mode | priority | parity ]
+ * bits 8 1 3 3 1
+ */
+typedef struct srp_hdr_type {
+ u16 v2_gen_hdr;
+ u8 dest_addr[6];
+ u8 src_addr[6];
+ u16 protocol;
+ u8 data[0];
+} srp_hdr_type;
+
+#define SRP_HDR_LEN 16
+
+#define SRP_IB_CHANNEL_CONTROL 0x0000
+#define SRP_IB_CHANNEL_DATA_HI 0x0001
+#define SRP_IB_CHANNEL_DATA_LO 0x0002
+
+#define SRP_RING_ID_MASK 0x0080
+#define SRP_RING_ID_BIT 7
+
+#define SRP_MODE_BITS_MASK 0x0070
+#define SRP_MODE_BITS_SHIFT 4
+#define SRP_MODE_CONTROL_TOPOLOGY 4
+#define SRP_MODE_CONTROL_IPS 5
+#define SRP_MODE_DATA 7
+
+#define SRP_PRIORITY_BITS_MASK 0x000E
+#define SRP_PRIORITY_BITS_SHIFT 1
+#define SRP_PRIORITY_HIGH 7
+#define SRP_PRIORITY_PAK_PRIORITY 6
+
+/* this is for the tmc code */
+#define SRP_INV_PRIORITY_BITS_MASK 0xFFF1
+
+#define SRP_PROT_CONTROL_TOPOLOGY 0x2007
+#define SRP_PROT_CONTROL_IPS 0x2007
+
+/* this is for the tmc code */
+#define SRP_TRUE 1
+#define SRP_FALSE 0
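A sketch of unpacking the v2_gen_hdr bit layout documented above ('srp' is assumed to point at an srp_hdr_type):

    u16 v2   = clib_net_to_host_u16 (srp->v2_gen_hdr);
    u8  ring = (v2 & SRP_RING_ID_MASK)       >> SRP_RING_ID_BIT;
    u8  mode = (v2 & SRP_MODE_BITS_MASK)     >> SRP_MODE_BITS_SHIFT;
    u8  prio = (v2 & SRP_PRIORITY_BITS_MASK) >> SRP_PRIORITY_BITS_SHIFT;

    if (mode == SRP_MODE_DATA) {
        /* data packet; the 'protocol' field then selects IPv4, ARP, MPLS, ... */
    }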
+
+/*
+** MPLS
+*/
+#define MPLS_EOS_BIT 0x00000100
+#define MPLS_EOS_SHIFT 8
+#define MPLS_LABEL_SIZE 4
+#define MAX_MPLS_LABEL_STACK 6
+#define MPLS_LABEL_MASK 0xfffff000
+#define MPLS_LABEL_SHIFT 12
+#define MPLS_TTL_MASK 0x000000ff
+#define MPLS_EXP_MASK 0x00000e00
+#define MPLS_EXP_SHIFT 9
+#define MPLS_EXP_TTL_MASK 0x00000eff
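A sketch of decoding one 32-bit label stack entry with the masks above ('lse' is a hypothetical u32 pointer into the label stack):

    u32 entry = clib_net_to_host_u32 (*lse);
    u32 label = (entry & MPLS_LABEL_MASK) >> MPLS_LABEL_SHIFT;
    u8  exp   = (entry & MPLS_EXP_MASK)   >> MPLS_EXP_SHIFT;
    u8  eos   = (entry & MPLS_EOS_BIT)    >> MPLS_EOS_SHIFT;
    u8  ttl   =  entry & MPLS_TTL_MASK;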
+
+
+
+typedef union _layer2 {
+ hdlc_hdr_type hdlc;
+ ppp_comp_hdr_type ppp;
+ ethernet_arpa_hdr_type eth_arpa;
+ ethernet_vlan_hdr_type eth_vlan;
+ ethernet_qinq_hdr_type eth_qinq;
+ ethernet_lacp_hdr_type eth_lacp;
+ ethernet_bpdu_hdr_type eth_bpdu;
+ ethernet_802p3_hdr_type eth_802p3;
+ ethernet_vlan_802p3_hdr_type eth_vlan_802p3;
+ ethernet_pppoe_arpa_hdr_type eth_pppoe_arpa; /* PTA plus */
+ ethernet_pppoe_vlan_hdr_type eth_pppoe_vlan; /* PTA plus */
+ ethernet_pppoe_qinq_hdr_type eth_pppoe_qinq; /* PTA plus */
+ frame_relay_hdr_type frame_relay;
+ fr_snap_hdr_type fr_snap;
+ mlfr_hdr_type mlfr;
+ mlpofr_hdr_type mlpofr;
+ frf12_hdr_type frf12;
+ apollo_atm_generic_hdr_type atm_generic;
+ apollo_atm_aal5_snap_hdr_type atm_aal5_snap;
+ apollo_atm_aal5_snap_hdr1_type atm_aal5_snap1;
+ apollo_atm_aal5_clns_hdr_type atm_aal5_clns;
+ apollo_atm_aal5_ilmi_hdr_type atm_aal5_ilmi;
+ apollo_atm_aal5_mux_hdr_type atm_aal5_mux;
+ apollo_atm_oam_f4_hdr_type atm_oam_f4;
+ apollo_atm_oam_f5_hdr_type atm_oam_f5;
+ apollo_atm_aal5_llcsnap_rbe_hdr_type atm_aal5_rbe_snapllc; /* PTA plus */
+ apollo_atm_aal5_llcsnap_pppoe_hdr_type atm_aal5_pppoe_snapllc; /* PTA plus */
+ apollo_atm_aal5_mux_pppoa_hdr_type atm_aal5_pppoa_mux; /* PTA plus */
+ apollo_atm_aal5_llcsnap_pppoa_hdr_type atm_aal5_pppoa_snapllc; /* PTA plus */
+ apollo_atm_aal5_mux_mlpoa_hdr_type mlpoa_generic;
+ apollo_atm_aal5_llcsnap_mlpoa_hdr_type mlpoa_snapllc;
+ srp_hdr_type srp;
+} layer2_t;
+
+/*
+ * Define the Common OAM cell format - F4 & F5 cells
+ * For F4 cells:
+ * VPI == User VPI
+ * VCI == (3 == Segment), (4 == End-to-End)
+ *
+ * For F5 cells:
+ * VPI == User VPI
+ * VCI == User VCI
+ * PT == (100 == Segment, 101 == End-to-End)
+ *
+ * OAM Cell Type & Function Type:
+ *
+ * OAM_TYPE = (0001 == Fault management)
+ * OAM_FUNC == (0000 == AIS, 0001 == RDI, 0100 == CC,
+ * 1000 == loopback)
+ *
+ * OAM_TYPE = (0010 == Performance management)
+ * OAM_FUNC == (0000 == Forward Monitoring(FM),
+ * 0001 == Backward monitoring(BR),
+ * 0010 == Monitoring & reporting (FM+BR))
+ *
+ * OAM_TYPE = (1000 == Activation/Deactivation)
+ * OAM_FUNC == (0000 == Performance Monitoring,
+ * 0001 == Continuity Check)
+ *
+ * OAM_TYPE = (1111 == System Management)
+ * OAM_FUNC == (0001 == Security - non-real-time,
+ * 0010 == Security - real-time)
+ *
+ */
+#define ATM_OAM_FAULT_MGMT 0x1 /* OAM Fault mgmt. code */
+#define ATM_OAM_PRFRM_MGMT 0x2 /* performance mgmt code */
+#define ATM_OAM_ACT_DEACT 0x8 /* OAM Activation/Deactivation
+ code */
+#define ATM_OAM_SYSTEM_MGMT 0xF /* System Management code */
+
+#define ATM_OAM_AIS_FUNC 0x0 /* AIS function type */
+#define ATM_OAM_RDI_FUNC 0x1 /* RDI function type */
+#define ATM_OAM_CC_FUNC 0x4 /* OAM CC FM function code */
+#define ATM_OAM_LOOP_FUNC 0x8 /* Loopback function type */
+
+#define ATM_OAM_F5_SEGMENT 0x4 /* Segment function */
+#define ATM_OAM_F5_ENDTOEND 0x5 /* End-to-End function */
+#define ATM_OAM_F4_SEGMENT 0x3 /* Segment function */
+#define ATM_OAM_F4_ENDTOEND 0x4 /* End-to-End function */
+#define ATM_OAM_F4_PTI_ZERO 0x0 /* PTI=0 for F4 OAM */
+
+typedef struct atm_oam_hdr_t_ {
+ unsigned oam_gfc:4; /* GFC */
+ unsigned oam_vpi:8; /* VPI */
+ unsigned oam_vci_ms:4; /* VCI (Most Significant Bits) */
+
+ unsigned oam_vci_ls:12; /* VCI (Least Significant Bits) */
+ unsigned oam_pt:3; /* Payload Type */
+ unsigned oam_clp:1; /* Cell Loss Priority */
+ u8 data[0];
+} atm_oam_hdr_t;
+
+typedef struct atm_oam_type_func_t_ {
+ u8 oam_type:4;
+ u8 oam_func:4;
+ u8 data[0];
+} atm_oam_type_func_t;
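A sketch of testing for a fault-management loopback cell using the two structures above ('oam' is assumed to point at an atm_oam_hdr_t, whose payload starts with the type/function octet):

    atm_oam_type_func_t *tf = (atm_oam_type_func_t *) oam->data;
    int is_loopback = (tf->oam_type == ATM_OAM_FAULT_MGMT) &&
                      (tf->oam_func == ATM_OAM_LOOP_FUNC);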
+
+/*
+** IP Version 4 header
+*/
+
+/*
+ * version_hdr_len_words [7-4] IP Header Version
+ * [3-0] IP Header Length in 32-bit words
+ * tos Type of Service
+ * total_len_bytes Total IP datagram length in bytes
+ * (including IP header)
+ * identification Unique fragmentation identifier
+ * frag_flags_offset [15-13] Fragmentation flags
+ * [12-0] Fragmentation Offset
+ * ttl Time To Live
+ * protocol_id Protocol Identifier
+ * checksum 16-bit 1's complement IP Header checksum
+ * src_addr IP Source Address
+ * dest_addr IP Destination Address
+ */
+typedef struct ipv4_header {
+ u8 version_hdr_len_words;
+ u8 tos;
+ u16 total_len_bytes;
+ u16 identification;
+ u16 frag_flags_offset;
+ u8 ttl;
+ u8 protocol;
+ u16 checksum;
+ u32 src_addr;
+ u32 dest_addr;
+ u8 data[0];
+} ipv4_header;
+
+/*OPTIONS PACKET TYPE
+ * +-+-+-+-+-+-+-+-+
+ * |C| CL| OP |
+ * +-+-+-+-+-+-+-+-+
+ */
+typedef struct ipv4_options {
+ u8 copy :1 ;
+ u8 op_class :2 ;
+ u8 option :5 ;
+ u8 pad ;
+}ipv4_options;
+
+#define LOOSE_SOURCE_ROUTE 131
+#define STRICT_SOURCE_ROUTE 137
+#define IPV4_NO_OPTIONS_HDR_LEN (sizeof(ipv4_header))
+#define IPV4_VERSION 4
+#define IPV4_HEADER_LENGTH_WORDS 5
+#define IPV4_VERSION_HDR_LEN_FIELD ((u8) 0x45)
+#define IPV4_HEADER_LENGTH_WORDS 5
+#define IPV4_MIN_HEADER_LENGTH_BYTES 20
+#define IP_HDR_LEN sizeof(ipv4_header)
+#define IPV4_VERSION_VALUE_SHIFT 4
+
+#define IPV4_FRAG_OFFSET_MASK (0x1fff)
+#define IPV4_FRAG_MF_MASK (0x2000)
+#define IPV4_FRAG_MF_SHIFT (13)
+
+/* 0.0.0.0 */
+#define IP_BOOTP_SOURCE_ADDRESS 0
+/* 255.255.255.255 */
+#define IP_LIMITED_BROADCAST_ADDRESS 0xFFFFFFFF
+
+/*
+ * IPv4 header - version & length fields
+ */
+#define IP_VER_LEN 0x45
+#define IP_VER 0x4
+#define IP_MIN_LEN 0x5
+#define IP_VER_MASK 0xf0
+#define IP_LEN_MASK 0x0f
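A sketch of splitting the first IPv4 header byte using the masks above ('ip' is assumed to point at an ipv4_header):

    u8 vhl     = ip->version_hdr_len_words;
    u8 version = (vhl & IP_VER_MASK) >> IPV4_VERSION_VALUE_SHIFT;   /* expect IPV4_VERSION */
    u8 hdr_len = (vhl & IP_LEN_MASK) << 2;                          /* 32-bit words -> bytes */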
+
+/*
+ * IPv4 header - TOS field
+ */
+#define PS_IP_TOS_MASK 0xff
+#define IP_PRECEDENCE_SHIFT 5 /* shift value up to precedence bits */
+#define IP_DSCP_SHIFT 2 /* shift value up to dscp bits */
+
+#define IP_TOS_PRECEDENCE 0xe0 /* mask of precedence in tos byte */
+#define IP_TOS_NO_PRECEDENCE 0x1f
+#define IP_TOS_LOW_DELAY 8 /* values must be shifted 1 bit */
+#define IP_TOS_HIGH_TPUT 4 /* before using */
+#define IP_TOS_HIGH_RELY 2
+#define IP_TOS_LOW_COST 1
+#define IP_TOS_NORMAL 0
+#define IP_TOS_MASK 0x1e /* mask of tos in tos byte */
+#define IP_TOS_MBZ_MASK 0x01 /* mask for MBZ bit in tos byte */
+#define IP_TOS_DSCP 0xfc /* mask for dscp in tos byte */
+#define IP_TOS_NO_DSCP 0x03
+
+#define IP_TOS_METRIC_TYPES 8
+#define IP_TOS_SHIFT 1
+
+#define IP_TOS_PRECEDENCE_MASK (IP_TOS_PRECEDENCE | IP_TOS_MASK)
+
+/*
+ * IP TOS Precedence values (High order 3 bits)
+ */
+#define TOS_PREC_NET_CONTROL 0xe0
+#define TOS_PREC_INET_CONTROL 0xc0
+#define TOS_PREC_CRIT_ECP 0xa0
+#define TOS_PREC_FLASH_OVER 0x80
+#define TOS_PREC_FLASH 0x60
+#define TOS_PREC_IMMEDIATE 0x40
+#define TOS_PREC_PRIORITY 0x20
+#define TOS_PREC_ROUTINE 0x00
+#define TOS_PREC_ILLEGAL 0xff /* invalid precedence value */
+
+#define TOS_PREC_NET_CONTROL_NUM 7
+#define TOS_PREC_INET_CONTROL_NUM 6
+#define TOS_PREC_CRIT_ECP_NUM 5
+#define TOS_PREC_FLASH_OVER_NUM 4
+#define TOS_PREC_FLASH_NUM 3
+#define TOS_PREC_IMMEDIATE_NUM 2
+#define TOS_PREC_PRIORITY_NUM 1
+#define TOS_PREC_ROUTINE_NUM 0
+
+
+
+/*
+ * IPv4 header - flags and fragment offset fields
+ */
+#define IP_FRAG_OFFSET_MASK 0x1fff
+
+
+#define IP_FRAG_MORE_MASK 0x2000
+#define IP_FRAG_DF_MASK 0x4000
+#define IP_FRAG_UNDEF_MASK 0x8000
+#define IP_FRAG_NO_DF_SET 0x0000
+
+/* bit definitions for fragment flags */
+#define IP_FRAG_MORE_BIT 13
+#define IP_FRAG_DF_BIT 14
+#define IP_FRAG_UNDEF_BIT 15
+
+/*
+ * IPv4 header - TTL field
+ */
+#define TTL_DEFAULT 255
+#define TTL_1 1
+#define TTL_2 2
+#define TTL_255 255
+
+
+/*
+ * IPv4 header - protocol field
+ *
+ * ICMP_PROT 1 ICMP
+ * IGMP_PROT 2 group management
+ * GGP_PROT 3 GGP
+ * IPINIP_PROT 4 IPv4 in IPv4 encapsulation
+ * TCP_PROT 6 TCP
+ * EGP_PROT 8 EGP
+ * IGRP_PROT 9 IGRP
+ * UDP_PROT 17 UDP
+ * HMP_PROT 20 HMP
+ * RDP_PROT 27 RDP
+ * IPV6_INIP_PROT 41 IPV6 in IPv4 encapsulation
+ * RSVP_PROT 46 RSVP
+ * GRE_PROT 47 GRE
+ * ESP_PROT 50 ESP
+ * AHP_PROT 51 AHP
+ * SDNS0_PROT 53 SNDS
+ * NHRP_PROT 54 NHRP
+ * SDNS1_PROT 55 SDNS1
+ * HELLO_PROT 63 HELLO
+ * ND_PROT 77 ND
+ * EONIP_PROT 80 CLNS over IP
+ * VINES_PROT 83 Banyan Vines
+ * NEWIGRP_PROT 88 IGRP
+ * OSPF_PROT 89 OSPF
+ * FST_RSRB_PROT 90 RSRB
+ * FST_DLSW_PROT 91 DLSW
+ * NOSIP_PROT 94 KA9Q/NOS compatible IP over IP
+ * PIM_PROT 103 PIMv2
+ * PCP_PROT 108 PCP
+ * PGM_PROT 113 PGM
+ * MAX_PROT 113 maximum protocol number in the above list,
+ * used in creating case registry
+ */
+#define ICMP_PROT 1
+#define IGMP_PROT 2
+#define GGP_PROT 3
+#define IPINIP_PROT 4
+#define TCP_PROT 6
+#define EGP_PROT 8
+#define IGRP_PROT 9
+#define UDP_PROT 17
+#define HMP_PROT 20
+#define RDP_PROT 27
+#define IPV6_INIP_PROT 41
+#define RSVP_PROT 46
+#define GRE_PROT 47
+#define ESP_PROT 50
+#define AHP_PROT 51
+#define SDNS0_PROT 53
+#define NHRP_PROT 54
+#define SDNS1_PROT 55
+#define HELLO_PROT 63
+#define ND_PROT 77
+#define EONIP_PROT 80
+#define VINES_PROT 83
+#define NEWIGRP_PROT 88
+#define OSPF_PROT 89
+#define FST_RSRB_PROT 90
+#define FST_DLSW_PROT 91
+#define NOSIP_PROT 94
+#define PIM_PROT 103
+#define PCP_PROT 108
+#define PGM_PROT 113
+#define MAX_PROT 113
+
+/*Well Known Application ports */
+#define FTP_PORT 21 /* For control connection */
+/*
+ * TCP header
+ */
+typedef struct tcp_hdr_type {
+ u16 src_port;
+ u16 dest_port;
+ u32 seq_num;
+ u32 ack_num;
+ u8 hdr_len;
+ u8 flags;
+ u16 window_size;
+ u16 tcp_checksum;
+ u16 urgent_pointer;
+ u8 option_data[0];
+} tcp_hdr_type;
+
+#define TCP_FLAG_FIN 0x01
+#define TCP_FLAG_SYN 0x02
+#define TCP_FLAG_RST 0x04
+#define TCP_FLAG_PUSH 0x08
+#define TCP_FLAG_ACK 0x10
+#define TCP_FLAG_URG 0x20
+#define TCP_FLAG_ECE 0x40
+#define TCP_FLAG_CWR 0x80
+
+/*
+ * TCP Option
+ */
+typedef struct tcp_option_s {
+ u8 kind;
+ u8 length;
+ u8 data[0];
+} tcp_option_t;
+
+#define TCP_END_OPTIONS_LIST 0
+#define TCP_OPTION_NOP 1
+#define TCP_OPTION_MSS 2
+#define TCP_OPTION_WINDOW_SCALE 3
+#define TCP_OPTION_SACK_PERMITTED 4
+#define TCP_OPTION_SACK_DATA 5
+#define TCP_OPTION_ECHO 6
+#define TCP_OPTION_ECHO_REPLY 7
+#define TCP_OPTION_TSOPT 8
+/*
+ 9 2 Partial Order Connection Permitted. RFC 1693
+ 10 3 Partial Order Service Profile. RFC 1693
+ 11 6 CC, Connection Count. RFC 1644
+ 12 6 CC.NEW RFC 1644
+ 13 6 CC.ECHO RFC 1644
+ 14 3 TCP Alternate Checksum Request. RFC 1146
+ 15 Variable. TCP Alternate Checksum Data. RFC 1146
+ 16 Skeeter.
+ 17 Bubba.
+ 18 3 Trailer Checksum Option.
+*/
+#define TCP_OPTION_MD5_SIGNATURE 19
+/*
+ 20 SCPS Capabilities.
+ 21 Selective Negative Acknowledgements.
+ 22 Record Boundaries.
+ 23 Corruption experienced.
+ 24 SNAP.
+ 25
+ 26 TCP Compression Filter.
+*/
+#define TCP_OPTION_QUICK_START 27
+
+#define TCP_OPTION_NUM_MAX 27
+
+#ifdef TARGET_CISCO
+#define tcp_printf(format_str, params...) //printf(format_str, ## params)
+#else
+#define tcp_printf(format_str, params...) printf(format_str, ## params)
+#endif
+
+typedef struct udp_hdr_type {
+ u16 src_port;
+ u16 dest_port;
+ u16 udp_length;
+ u16 udp_checksum;
+} udp_hdr_type_t;
+
+#define TYPE_IPV6 0x86dd
+#define TYPE_IPV4 0x0800
+
+/*
+ * version_trafficclass_flowlabel [31:28] IP Header Version,
+ [27:20] traffic_class,
+ [19:0] flow_label[20]
+ * payload_length Length of packet in bytes excluding header size(s)
+ * next_header Identifies the type of header following the IPv6 header
+ * hop_limit Decremented by 1 by each forwarding node, packet discarded when zero
+ * src_addr IPv6 Source Address
+ * dst_addr IPv6 Destination Address
+ */
+typedef struct ipv6_header {
+ u32 version_trafficclass_flowlabel;
+ u16 payload_length;
+ u8 next_header;
+ u8 hop_limit;
+ u32 src_addr[4];
+ u32 dst_addr[4];
+ u8 data[0];
+} ipv6_header_t;
+
+#define IPV6_HDR_LEN 40
+#define IPV6_HDR_LEN_WORDS 10
+#define IPV6_FLABLE_MASK 0x000FFFFF
+#define IPV6_MIN_PATH_MTU (1280)
+
+#define IPV6_GET_IP_VER(ih) ((clib_net_to_host_u32((ih) \
+ ->version_trafficclass_flowlabel) >> 28) & 0xf)
+#define IPV6_GET_TOS(ih) ((clib_net_to_host_u32((ih) \
+ ->version_trafficclass_flowlabel) >> 20) & 0xff)
+#define IPV6_GET_FLOW_LABEL(ih) ((clib_net_to_host_u32((ih) \
+ ->version_trafficclass_flowlabel)) & 0xfffff)
+
+#define IPV6_VERSION_VALUE (6)
+#define IPV6_VERSION_VALUE_SHIFT (28)
+#define IPV6_TRAFFIC_CLASS_VALUE_SHIFT (20)
+#define IPV6_TRAFFIC_CLASS_VALUE_MASK (0xff)
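A sketch of composing the first IPv6 header word from the shift/mask definitions above ('ip6', 'tc' and 'flow_label' are hypothetical locals):

    ip6->version_trafficclass_flowlabel = clib_host_to_net_u32 (
        ((u32) IPV6_VERSION_VALUE << IPV6_VERSION_VALUE_SHIFT) |
        (((u32) tc & IPV6_TRAFFIC_CLASS_VALUE_MASK) << IPV6_TRAFFIC_CLASS_VALUE_SHIFT) |
        (flow_label & IPV6_FLABLE_MASK));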
+
+#define IPV6_PROTO_HOPOPTS 0
+#define IPV6_PROTO_TCP 6
+#define IPV6_PROTO_UDP 17
+#define IPV6_PROTO_IPV6 41
+#define IPV6_PROTO_ROUTING 43
+#define IPV6_PROTO_FRAGMENT 44
+#define IPV6_PROTO_DESTOPTS 60
+#define IPV6_PROTO_ESP 50
+#define IPV6_PROTO_AH 51
+#define IPV6_PROTO_ICMPV6 58
+#define IPV6_PROTO_NONE 59
+
+/* standard v6 extension headers begin with 2 bytes:
+ * one byte next header
+ * one byte header length
+ */
+
+typedef struct ipv6_frag_header {
+ u8 next_header;
+ u8 reserved;
+ u16 frag_offset_res_m;
+ u32 identification;
+} ipv6_frag_header_t;
+
+#define IPV6_FRAG_HDR_LEN (sizeof(ipv6_frag_header_t))
+
+#define IPV6_FRAG_OFFSET_MASK (0xFFF8)
+#define IPV6_FRAG_OFFSET_SHIFT (3)
+#define IPV6_FRAG_MORE_FRAG_MASK (0x0001)
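A sketch of decoding the fragment header fields defined above ('fh' is assumed to point at an ipv6_frag_header_t):

    u16 off_res_m  = clib_net_to_host_u16 (fh->frag_offset_res_m);
    u16 frag_units = (off_res_m & IPV6_FRAG_OFFSET_MASK) >> IPV6_FRAG_OFFSET_SHIFT;  /* 8-octet units */
    u8  more_frags =  off_res_m & IPV6_FRAG_MORE_FRAG_MASK;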
+
+#define IPV6_TOS_SHIFT 20
+#define IPV6_TOS_SHIFT_HLF_WD 4
+#define IPV6_NEXT_HDR_SHIFT 8
+
+typedef struct ipv6_routing_header {
+ u8 next_header;
+ u8 hdr_ext_len;
+ u8 routing_type;
+ u8 segments_left;
+ u8 data[0];
+} ipv6_routing_header_t;
+#define IPV6_ROUTING_HDR_LEN (sizeof(ipv6_routing_header_t))
+
+typedef struct ipv6_hop_header {
+ u8 next_header;
+ u8 hdr_ext_len;
+ u8 options[0];
+} ipv6_hop_header_t;
+#define IPV6_HOP_LEN (sizeof(ipv6_hop_header_t))
+
+typedef struct ipv6_dest_opt_header {
+ u8 next_header;
+ u8 hdr_ext_len;
+ u8 options[0];
+} ipv6_dest_opt_header_t;
+#define IPV6_DESTOPT_LEN (sizeof(ipv6_dest_opt_header_t))
+
+
+/* Definition of ICMP header */
+typedef struct icmp_v4_s {
+ u8 type;
+ u8 code;
+ u16 checksum;
+ u16 identifier;
+ u16 sequence;
+} icmp_v4_t;
+
+#define ICMPV4_HDR_SIZE (sizeof(icmp_v4_t))
+#define ICMPV4_ECHOREPLY 0 /* Type: echo reply */
+#define ICMPV4_ECHO 8 /* Type: echo request */
+
+#define ICMPV4_UNREACHABLE 3 /* Type: destination unreachable */
+#define ICMPV4_UNRNET 0 /* Code: Net unreachable */
+#define ICMPV4_UNRHOST 1 /* Code: host unreachable */
+#define ICMPV4_UNRPROT 2 /* Code: protocol unreachable */
+#define ICMPV4_UNRPORT 3 /* Code: port unreachable */
+#define ICMPV4_UNRFRAG 4 /* Code: frag req DF set */
+#define ICMPV4_UNRADMIN 13 /* Code: administratively prohib. */
+#define ICMPV4_SOURCEROUTE_FAILED 5 /* Code: source route failed */
+
+#define ICMPV4_SRC_ROUTE_FAIL 5 /* Code: Source Route Failed */
+#define ICMPV4_NO_ROUTE_DESTN_8 8 /* Code: No Route to Destn */
+#define ICMPV4_NO_ROUTE_DESTN_11 11 /* Code: No Route to Destn */
+#define ICMPV4_NO_ROUTE_DESTN_12 12 /* Code: No Route to Destn */
+
+#define ICMPV4_ADMIN_PROH_9 9 /* Code: Administratively Prohibited */
+#define ICMPV4_ADMIN_PROH_10 10 /* Code: Administratively Prohibited */
+#define ICMPV4_PREC_CUTOFF 15 /* Code: Precedence Cutoff */
+
+
+#define ICMPV4_TIMEEXCEEDED 11 /* Type: time exceeded */
+#define ICMPV4_TIMTTL 0 /* Code: ttl in transit code */
+
+#define ICMPV4_PARAMETER_PROBLEM 12 /* Type: Parameter Problem */
+#define ICMPV4_PARAM_ERROR 0 /* Code: Pointer to Error */
+#define ICMPV4_MISSING_OPTION_CODE 1 /* Code: Missing option */
+#define ICMPV4_PARAM_BAD_LEN 2 /* Code: Bad Length */
+
+#define ICMPV4_CONVERSION_ERROR 31
+#define ICMPV4_SOURCE_QUENCH 4
+#define ICMPV4_REDIRECT 5
+#define ICMPV4_TIMESTAMP 13
+#define ICMPV4_TIMESTAMP_REPLY 14
+#define ICMPV4_INFO_REQUEST 15
+#define ICMPV4_INFO_REPLY 16
+#define ICMPV4_ADDR_MASK_REQUEST 17
+#define ICMPV4_ADDR_MASK_REPLY 18
+
+typedef struct icmp_v6_s {
+
+ u8 type;
+ u8 code;
+ u16 checksum;
+
+ u32 data[0];
+} icmp_v6_t;
+
+typedef struct pseudo_v6_header {
+ u32 src_addr[4];
+ u32 dst_addr[4];
+ u16 payload_length;
+ u16 next_header;
+} pseudo_v6_header_t;
+
+
+#define ICMPV6_ECHO 128
+#define ICMPV6_ECHO_REPLY 129
+#define ICMPV6_PKT_TOO_BIG 2
+#define ICMPV6_TIMEEXCEEDED 3
+#define ICMPV6_TIMTTL 0
+#define ICMPV6_PARAMETER_PROBLEM 4
+#define ICMPV6_UNREACHABLE 1
+#define ICMPV6_NEIGHBOR_SOLICITAION 135
+#define ICMPV6_NEIGHBOR_ADVT 136
+/* ICMP V6 generated packet size */
+#define ICMPV6_ERR_SIZE 48
+#define ICMPV6_HDR_SIZE (sizeof(icmp_v6_t) +sizeof(u32))
+
+/* Code for Type 1 */
+#define ICMPV6_UNRDESTN 0 /* Code: No route to destination */
+#define ICMPV6_ADM_PROH 1 /* Code: Administratively prohibited */
+#define ICMPV6_SRC_ADD_SCOPE 2 /* Code: Source Address beyond scope */
+#define ICMPV6_UNRHOST 3 /* Code: Host Unreachable */
+#define ICMPV6_UNRPORT 4 /* Code: Port UnReachable */
+
+#define ICMPV6_UNRPROT 1 /* type 4 - Code: protocol unreachable (unrecognized next header) */
+
+#define ICMPV6_PTB_CODE 0 /* Code: For PTB */
+#define ICMPV6_PARAM_CODE 0 /* Code: For Parameter Problem */
+#define ICMPV6_UNREC_HDR 1 /* Code: For Parameter Problem */
+#define ICMPV6_SRC_ADD_FAIL 5 /* Code: For Source address failed */
+#define ICMP_ECHO_REPLY_CODE 0
+#define DEFAULT_TTL_HOPLIMIT_VAL 64
+
+typedef struct pptp_hdr_type {
+
+ u16 flags_ver;
+ u16 proto_type; /* PPP = 0x880B */
+ u16 payload_len;
+ u16 call_id;
+ u32 seq_no;
+ u32 ack_no;
+
+} pptp_hdr_type_t;
+
+/*
+ * NAME
+ *
+ * tcp_findoption
+ *
+ * SYNOPSIS
+ * u8* tcp_findoption (tcp_hdr_t *tcp, uchar option)
+ *
+ * PARAMETERS
+ * tcp - pointer to TCP header
+ * option - TCP option
+ *
+ * RETURNS
+ * This function returns a pointer to the option found,
+ * otherwise returns null.
+ *
+ *
+ * DESCRIPTION
+ * This function searches the option and returns a pointer to the
+ * matched option field containing option kind/length/data sub-fields.
+ *
+ */
+static inline u8* tcp_findoption (tcp_hdr_type *tcp, u8 option)
+{
+ u8*data;
+ u8 len, optlen;
+
+ data = tcp->option_data;
+ len = ((tcp->hdr_len>>4) << 2) - sizeof(tcp_hdr_type);
+
+#define MAXTCPOPTIONBYTES 40
+#define MINTCPOPTIONLENGTH 2
+
+ while (len) {
+ if (PREDICT_TRUE(option == data[0])) {
+ return (data);
+ } else {
+ switch (data[0]) {
+ case TCP_END_OPTIONS_LIST:
+ return (NULL);
+ case TCP_OPTION_NOP:
+ len -= 1;
+ data += 1;
+ break;
+ default:
+ /* Sanity check the length. */
+ optlen = data[1];
+ if ((optlen < MINTCPOPTIONLENGTH) ||
+ (optlen > MAXTCPOPTIONBYTES) ||
+ (optlen > len)) {
+ return (NULL);
+ }
+ len -= optlen;
+ data += optlen;
+ break;
+ }
+ }
+ }
+
+ return (NULL);
+}
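A usage sketch of tcp_findoption(), extracting the MSS value when present (the helper name below is hypothetical):

    static inline u16 tcp_get_mss (tcp_hdr_type *tcp)
    {
        u8 *opt = tcp_findoption (tcp, TCP_OPTION_MSS);
        if (opt && opt[1] == 4)                      /* kind(1) + len(1) + value(2) */
            return (u16) ((opt[2] << 8) | opt[3]);   /* MSS is carried in network order */
        return 0;
    }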
+
+
+static inline u32 crc_calc (ipv4_header *ipv4)
+{
+ u16 *ipv4_word_ptr = (u16 *) ipv4;
+ u32 crc32;
+ /*
+ * Add all fields except the checksum field
+ */
+ crc32 = (u32)clib_net_to_host_u16(*ipv4_word_ptr) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 1)) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 2)) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 3)) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 4)) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 6)) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 7)) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 8)) +
+ (u32)clib_net_to_host_u16(*(ipv4_word_ptr + 9));
+
+ /* Add in the carry of the original sum */
+ crc32 = (crc32 & 0xFFFF) + (crc32 >> 16);
+ /* Add in the carry of the final sum */
+ crc32 = (crc32 & 0xFFFF) + (crc32 >> 16);
+
+ return crc32;
+}
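Despite its name, crc_calc() returns the folded 16-bit one's-complement sum of the header (excluding the checksum field), so a caller could rewrite the checksum roughly as follows (a sketch; the helper name is hypothetical):

    static inline void ipv4_rewrite_checksum (ipv4_header *ip)
    {
        ip->checksum = clib_host_to_net_u16 ((u16) (~crc_calc (ip) & 0xFFFF));
    }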
+
+#endif /* __TCP_HEADER_DEFINITIONS_H__ */
diff --git a/plugins/vcgn-plugin/vcgn/vcgn_classify.c b/plugins/vcgn-plugin/vcgn/vcgn_classify.c
new file mode 100644
index 00000000000..18cc4ba0d1e
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/vcgn_classify.c
@@ -0,0 +1,1508 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/plugin/plugin.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vppinfra/pool.h>
+
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include "cnat_db.h"
+#include "cnat_global.h"
+#include "cnat_cli.h"
+#include "cnat_config.h"
+#include "cnat_logging.h"
+#include "cnat_config_api.h"
+#include "cnat_show_api.h"
+#include "cnat_show_response.h"
+#include "cnat_ipv4_udp.h"
+#include "cnat_common_api.h"
+
+#include <arpa/inet.h>
+
+typedef struct {
+ u32 cached_next_index;
+
+ /* inside, outside interface handles */
+ u32 * inside_sw_if_index_table;
+ u32 * outside_sw_if_index_table;
+
+ /* convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+ u8 cnat_db_initalized;
+} vcgn_classify_main_t;
+
+typedef struct {
+ /* $$$$ fill in with per-pkt trace data */
+ u32 next_index;
+ u32 sw_if_index;
+ u32 orig_dst_address;
+ u16 orig_dst_port;
+} vcgn_classify_trace_t;
+
+#define FIND_MY_VRF_USING_I_VRF_ID \
+ my_vrfmap_found = 0; \
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({ \
+ if (my_vrfmap->i_vrf_id == i_vrf_id) { \
+ my_vrfmap_found = 1; \
+ my_vrfmap_temp = my_vrfmap; \
+ break; \
+ } \
+ }));
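The macro above expects a specific set of locals in the calling scope. A hedged sketch of its intended use (the cnat_vrfmap_t type and the literal VRF id are assumptions based on the cnat headers included earlier):

    cnat_vrfmap_t *my_vrfmap, *my_vrfmap_temp;   /* assumed type from cnat_db.h */
    u8  my_vrfmap_found;
    u32 i_vrf_id = 100;                          /* hypothetical VRF id to look up */

    FIND_MY_VRF_USING_I_VRF_ID
    if (my_vrfmap_found) {
        /* my_vrfmap_temp now points at the matching VRF map entry */
    }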
+
+
+/* packet trace format function */
+static u8 * format_swap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vcgn_classify_trace_t * t = va_arg (*args, vcgn_classify_trace_t *);
+
+ s = format (s, "VCGN_CLASSIFY: dst %U dst_port %d sw_if_index %d next %d",
+ format_ip4_address, (ip4_header_t *) &t->orig_dst_address,
+ clib_net_to_host_u16(t->orig_dst_port),
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+vcgn_classify_main_t vcgn_classify_main;
+
+vlib_node_registration_t vcgn_classify_node;
+
+#define foreach_vcgn_classify_error \
+_(PACKETS_RECEIVED, "total packets received") \
+_(V4_PACKETS_PROCESSED, "ipv4 packets processed for vCGN") \
+_(V4_PACKETS_PUNTED, "ipv4 packets punted") \
+_(V6_PACKETS_PUNTED, "ipv6 packets punted") \
+_(MPLS_PACKETS_PUNTED, "mpls unicast packets punted") \
+_(ETH_PACKETS_PUNTED, "ethernet packets punted")
+
+
+typedef enum {
+#define _(sym,str) VCGN_CLASSIFY_ERROR_##sym,
+ foreach_vcgn_classify_error
+#undef _
+ VCGN_CLASSIFY_N_ERROR,
+} vcgn_classify_error_t;
+
+static char * vcgn_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_vcgn_classify_error
+#undef _
+};
+
+/*
+ * To drop a pkt and increment one of the previous counters:
+ *
+ * set b0->error = error_node->errors[VCGN_CLASSIFY_ERROR_EXAMPLE];
+ * set next0 to a disposition index bound to "error-drop".
+ *
+ * To manually increment the specific counter VCGN_CLASSIFY_ERROR_EXAMPLE:
+ *
+ * vlib_node_t *n = vlib_get_node (vm, vcgn_classify.index);
+ * u32 node_counter_base_index = n->error_heap_index;
+ * vlib_error_main_t * em = &vm->error_main;
+ * em->counters[node_counter_base_index + VCGN_CLASSIFY_ERROR_EXAMPLE] += 1;
+ *
+ */
+
+typedef enum {
+ VCGN_CLASSIFY_NEXT_IP4_INPUT,
+ VCGN_CLASSIFY_NEXT_IP6_INPUT,
+ VCGN_CLASSIFY_NEXT_MPLS_INPUT,
+ VCGN_CLASSIFY_NEXT_ETHERNET_INPUT,
+ VCGN_CLASSIFY_NEXT_UDP_INSIDE,
+ VCGN_CLASSIFY_NEXT_UDP_OUTSIDE,
+ VCGN_CLASSIFY_NEXT_TCP_INSIDE,
+ VCGN_CLASSIFY_NEXT_TCP_OUTSIDE,
+ VCGN_CLASSIFY_NEXT_ICMP_Q_INSIDE,
+ VCGN_CLASSIFY_NEXT_ICMP_Q_OUTSIDE,
+ VCGN_CLASSIFY_NEXT_ICMP_E_INSIDE,
+ VCGN_CLASSIFY_NEXT_ICMP_E_OUTSIDE,
+ VCGN_CLASSIFY_N_NEXT,
+} vcgn_classify_next_t;
+
+static uword
+vcgn_classify_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, * from, * to_next;
+ vcgn_classify_next_t next_index;
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ vlib_node_t *n = vlib_get_node (vm, vcgn_classify_node.index);
+ u32 node_counter_base_index = n->error_heap_index;
+ vlib_error_main_t * em = &vm->error_main;
+ u16 *l3_type;
+ int counter;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ #if 0
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ next0 = vcm->cached_next_index;
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
+ next1 = vcm->cached_next_index;
+
+ /* $$$$ your message in this space. Process 2 x pkts */
+ em->counters[node_counter_base_index + VCGN_CLASSIFY_ERROR_PACKETS_RECEIVED] += 2;
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ vcgn_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ vcgn_classify_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+ #endif /* if 0 */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ u32 sw_if_index0;
+ ip4_header_t * h0;
+ //ipv4_header *h0;
+ ethernet_header_t *eth0;
+ icmp_v4_t *icmp;
+ u8 icmp_type;
+ u8 ipv4_hdr_len;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ eth0 = (ethernet_header_t *) vlib_buffer_get_current(b0);
+ u16 *etype = &eth0->type;
+
+ /* vlan tag 0x8100 */
+ if (*etype == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) {
+ l3_type = (etype + 1); /* Skip 2 bytes of vlan id */
+ vlib_buffer_advance(b0, 18);
+ } else {
+ l3_type = etype;
+ vlib_buffer_advance(b0, 14);
+ }
+ /* Handling v4 pkts 0x800 */
+ if (*l3_type == clib_host_to_net_u16(ETHERNET_TYPE_IP4)) {
+
+ h0 = vlib_buffer_get_current (b0);
+
+ u8 protocol_type = h0->protocol;
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ next0 = VCGN_CLASSIFY_NEXT_IP4_INPUT;
+ counter = VCGN_CLASSIFY_ERROR_V4_PACKETS_PROCESSED;
+
+ if (protocol_type == 0x11) { /* UDP# 17 */
+ next0 = (sw_if_index0 < vec_len(vcm->inside_sw_if_index_table) &&
+ vcm->inside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_UDP_INSIDE : next0;
+
+ next0 = (sw_if_index0 < vec_len(vcm->outside_sw_if_index_table) &&
+ vcm->outside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_UDP_OUTSIDE : next0;
+
+ } else if (protocol_type == 0x06) { /* TCP# 6 */
+ next0 = (sw_if_index0 < vec_len(vcm->inside_sw_if_index_table) &&
+ vcm->inside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_TCP_INSIDE : next0;
+
+ next0 = (sw_if_index0 < vec_len(vcm->outside_sw_if_index_table) &&
+ vcm->outside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_TCP_OUTSIDE : next0;
+
+ } else if (protocol_type == 0x01) { /* ICMP # 1 */
+
+ ipv4_hdr_len = (h0->ip_version_and_header_length & 0xf) << 2;
+ icmp = (icmp_v4_t *)((u8*)h0 + ipv4_hdr_len);
+ icmp_type = icmp->type;
+
+ if ((icmp_type == ICMPV4_ECHO) ||
+ (icmp_type == ICMPV4_ECHOREPLY)) {
+ next0 = (sw_if_index0 < vec_len(vcm->inside_sw_if_index_table) &&
+ vcm->inside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_ICMP_Q_INSIDE : next0;
+
+ next0 = (sw_if_index0 < vec_len(vcm->outside_sw_if_index_table) &&
+ vcm->outside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_ICMP_Q_OUTSIDE : next0;
+
+ } else {
+ next0 = (sw_if_index0 < vec_len(vcm->inside_sw_if_index_table) &&
+ vcm->inside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_ICMP_E_INSIDE : next0;
+
+ next0 = (sw_if_index0 < vec_len(vcm->outside_sw_if_index_table) &&
+ vcm->outside_sw_if_index_table[sw_if_index0] != EMPTY) ?
+ VCGN_CLASSIFY_NEXT_ICMP_E_OUTSIDE : next0;
+ }
+ } else {
+ /* cannot do NATting with this L4 protocol */
+ counter = VCGN_CLASSIFY_ERROR_V4_PACKETS_PUNTED;
+ }
+
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
+ udp_header_t * u0 = (udp_header_t *)(h0+1);
+ vcgn_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ t->orig_dst_address = h0->dst_address.as_u32;
+ t->orig_dst_port = u0->dst_port;
+ }
+
+ } else if (*l3_type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
+
+ /* IPv6 0x86DD */
+ next0 = VCGN_CLASSIFY_NEXT_IP6_INPUT;
+ counter = VCGN_CLASSIFY_ERROR_V6_PACKETS_PUNTED;
+
+ } else if (*l3_type ==
+ clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
+
+ /* MPLS unicast 0x8847 */
+ next0 = VCGN_CLASSIFY_NEXT_MPLS_INPUT;
+ counter = VCGN_CLASSIFY_ERROR_MPLS_PACKETS_PUNTED;
+ } else { /* Remaining all should be pushed to "ethernet-input" */
+
+ next0 = VCGN_CLASSIFY_NEXT_ETHERNET_INPUT;
+ counter = VCGN_CLASSIFY_ERROR_ETH_PACKETS_PUNTED;
+ }
+
+ em->counters[node_counter_base_index + counter] += 1;
+ em->counters[node_counter_base_index +
+ VCGN_CLASSIFY_ERROR_PACKETS_RECEIVED] += 1;
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (vcgn_classify_node) = {
+ .function = vcgn_classify_node_fn,
+ .name = "vcgn-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_swap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(vcgn_classify_error_strings),
+ .error_strings = vcgn_classify_error_strings,
+
+ .n_next_nodes = VCGN_CLASSIFY_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [VCGN_CLASSIFY_NEXT_IP4_INPUT] = "ip4-input",
+ [VCGN_CLASSIFY_NEXT_IP6_INPUT] = "ip6-input",
+ [VCGN_CLASSIFY_NEXT_MPLS_INPUT] = "mpls-gre-input",
+ [VCGN_CLASSIFY_NEXT_ETHERNET_INPUT] = "ethernet-input",
+ [VCGN_CLASSIFY_NEXT_UDP_INSIDE] = "vcgn-v4-udp-i2o",
+ [VCGN_CLASSIFY_NEXT_UDP_OUTSIDE] = "vcgn-v4-udp-o2i",
+ [VCGN_CLASSIFY_NEXT_TCP_INSIDE] = "vcgn-v4-tcp-i2o",
+ [VCGN_CLASSIFY_NEXT_TCP_OUTSIDE] = "vcgn-v4-tcp-o2i",
+ [VCGN_CLASSIFY_NEXT_ICMP_Q_INSIDE] = "vcgn-v4-icmp-q-i2o",
+ [VCGN_CLASSIFY_NEXT_ICMP_Q_OUTSIDE] = "vcgn-v4-icmp-q-o2i",
+ [VCGN_CLASSIFY_NEXT_ICMP_E_INSIDE] = "vcgn-v4-icmp-e-i2o",
+ [VCGN_CLASSIFY_NEXT_ICMP_E_OUTSIDE] = "vcgn-v4-icmp-e-o2i"
+ },
+};
+
+
+/* A test function to init the vrf map */
+
+clib_error_t *vcgn_classify_init (vlib_main_t *vm)
+{
+ vcgn_classify_main_t * mp = &vcgn_classify_main;
+
+ mp->vlib_main = vm;
+ mp->vnet_main = vnet_get_main();
+ u32 inside_sw_if_index = 1;
+ u32 outside_sw_if_index = 0;
+
+ vec_validate_init_empty (mp->inside_sw_if_index_table,
+ inside_sw_if_index + 1, EMPTY);
+ vec_validate_init_empty (mp->outside_sw_if_index_table,
+ outside_sw_if_index + 1, EMPTY);
+
+ /*
+ * The inside_sw_if_index cell of the table stores outside_sw_if_index
+ * and vice versa. This ensures the pair of indices is remembered
+ * using a single memory location.
+ */
+ mp->inside_sw_if_index_table[inside_sw_if_index] = outside_sw_if_index;
+ mp->outside_sw_if_index_table[outside_sw_if_index] = inside_sw_if_index;
+
+#if DPDK==1
+ dpdk_set_next_node (DPDK_RX_NEXT_IP4_INPUT, "vcgn-classify");
+#endif
+
+ {
+ pg_node_t * pn;
+ pn = pg_get_node (vcgn_classify_node.index);
+ pn->unformat_edit = unformat_pg_ip4_header;
+ }
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vcgn_classify_init);
+
+/* Show command handlers */
+static clib_error_t *
+show_vcgn_stats_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ if (cnat_db_init_done) {
+ cnat_nat44_handle_show_stats(vm);
+ } else {
+ vlib_cli_output(vm, "vCGN is not configured !!\n");
+ }
+ return 0;
+}
+
+
+static clib_error_t *
+show_vcgn_config_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ cnat_nat44_handle_show_config(vm);
+ return 0;
+}
+
+static clib_error_t *
+show_vcgn_inside_translation_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ spp_api_cnat_v4_show_inside_entry_req_t inside_req;
+ u8 *proto;
+ ip4_address_t inside_addr;
+ u32 start_port = 1;
+ u32 end_port = 65535;
+ u32 inside_sw_if_index = EMPTY;
+
+ inside_req.start_port = start_port;
+ inside_req.end_port = end_port;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "protocol %s", &proto)) {
+ if (!strncmp((char *) proto, "udp", 3)) {
+ inside_req.protocol = 1;
+ } else if (!strncmp((char *) proto, "tcp", 3)) {
+ inside_req.protocol = 2;
+ } else {
+ inside_req.protocol = 3;
+ }
+ } else if (unformat (input, "interface %U",
+ unformat_vnet_sw_interface, vnm, &inside_sw_if_index)) {
+ if (inside_sw_if_index > vec_len(vcm->inside_sw_if_index_table) ||
+ vcm->inside_sw_if_index_table[inside_sw_if_index] == EMPTY) {
+ return clib_error_return (0, "Could not find the inside interface");
+ }
+ } else if (unformat (input, "inside-addr %U",
+ unformat_ip4_address, &inside_addr)) {
+ inside_req.ipv4_addr = clib_net_to_host_u32(inside_addr.as_u32);
+ } else if (unformat(input, "start-port %u", &start_port)) {
+ inside_req.start_port = start_port;
+ } else if (unformat(input, "end-port %u", &end_port)) {
+ inside_req.end_port = end_port;
+ } else { break;}
+ }
+ inside_req.vrf_id = inside_sw_if_index;
+ inside_req.flags |= CNAT_TRANSLATION_ENTRY_DYNAMIC; /* as of now only dynamic */
+ inside_req.all_entries = 0; /* we can see it later */
+#if DEBUG
+ vlib_cli_output(vm, "proto %d, inside-addr 0x%x, start_port %u, "
+ "end_port %u, vrf 0x%x\n",
+ inside_req.protocol,
+ inside_req.ipv4_addr,
+ inside_req.start_port,
+ inside_req.end_port,
+ inside_sw_if_index);
+#endif
+ if (cnat_db_init_done) {
+ cnat_v4_show_inside_entry_req_t_handler(&inside_req, vm);
+ } else {
+ vlib_cli_output(vm, "vCGN is not configured !!\n");
+ }
+ return 0;
+}
+
+
+static clib_error_t *
+show_vcgn_outside_translation_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ void cnat_v4_show_outside_entry_req_t_handler
+ (spp_api_cnat_v4_show_outside_entry_req_t *mp, vlib_main_t *vm);
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ spp_api_cnat_v4_show_outside_entry_req_t outside_req;
+ u8 *proto;
+ ip4_address_t outside_addr;
+ u32 start_port = 1;
+ u32 end_port = 65535;
+ u32 outside_sw_if_index = EMPTY;
+
+
+ outside_req.start_port = start_port;
+ outside_req.end_port = end_port;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "protocol %s", &proto)) {
+ if (!strncmp((char *) proto, "udp", 3)) {
+ outside_req.protocol = 1;
+ } else if (!strncmp((char *) proto, "tcp", 3)) {
+ outside_req.protocol = 2;
+ } else {
+ outside_req.protocol = 3;
+ }
+ } else if (unformat (input, "interface %U",
+ unformat_vnet_sw_interface, vnm, &outside_sw_if_index)) {
+ if (outside_sw_if_index > vec_len(vcm->outside_sw_if_index_table) ||
+ vcm->outside_sw_if_index_table[outside_sw_if_index] == EMPTY) {
+ return clib_error_return (0, "Could not find the outside interface");
+ }
+ } else if (unformat (input, "outside-addr %U",
+ unformat_ip4_address, &outside_addr)) {
+ outside_req.ipv4_addr = clib_net_to_host_u32(outside_addr.as_u32);
+ } else if (unformat(input, "start-port %u", &start_port)) {
+ outside_req.start_port = start_port;
+ } else if (unformat(input, "end-port %u", &end_port)) {
+ outside_req.end_port = end_port;
+ } else { break;}
+ }
+ outside_req.vrf_id = outside_sw_if_index;
+ outside_req.flags |= CNAT_TRANSLATION_ENTRY_DYNAMIC; /* as of now only dynamic */
+#if DEBUG
+ vlib_cli_output(vm, "proto %d, outside-addr 0x%x, start_port %u, "
+ "end_port %u, vrf 0x%x\n",
+ outside_req.protocol,
+ outside_req.ipv4_addr,
+ outside_req.start_port,
+ outside_req.end_port,
+ outside_sw_if_index);
+#endif
+ if (cnat_db_init_done) {
+ cnat_v4_show_outside_entry_req_t_handler(&outside_req, vm);
+ } else {
+ vlib_cli_output(vm, "vCGN is not configured !!\n");
+ }
+ return 0;
+}
+
+
+/* Config command handlers */
+static clib_error_t *
+set_vcgn_inside_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ u32 inside_sw_if_index = 1;
+ u32 outside_sw_if_index = ~0;
+ void cnat_db_v2_init (void );
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "%U",
+ unformat_vnet_sw_interface, vnm, &inside_sw_if_index))
+ ;
+ else if (unformat(input, "outside %U",
+ unformat_vnet_sw_interface, vnm, &outside_sw_if_index))
+ ;
+ else break;
+ }
+ if (inside_sw_if_index == ~0 ||
+ outside_sw_if_index == ~0)
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ if (inside_sw_if_index == outside_sw_if_index)
+ return clib_error_return (0, "inside and outside interfaces can't be the same...");
+
+ /*
+ * Initialize the in/out sw_if_index tables. A non-indexed table
+ * would use less memory, but these tables are consulted in
+ * vcgn_classify for every packet, so they are indexed directly
+ * by sw_if_index.
+ */
+ vec_validate_init_empty (vcm->inside_sw_if_index_table,
+ inside_sw_if_index + 1, EMPTY);
+ vec_validate_init_empty (vcm->outside_sw_if_index_table,
+ outside_sw_if_index + 1, EMPTY);
+
+ /*
+ * The inside_sw_if_index cell of the table stores outside_sw_if_index
+ * and vice versa, so the pair of indices is remembered
+ * using a single memory location.
+ */
+ vcm->inside_sw_if_index_table[inside_sw_if_index] = outside_sw_if_index;
+ vcm->outside_sw_if_index_table[outside_sw_if_index] = inside_sw_if_index;
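+
+ /*
+ * Illustrative lookup sketch (an assumption about usage, not code from
+ * the original plugin): either table yields the peer interface in O(1),
+ * which is how the classify path and the show/config handlers validate
+ * an interface, e.g.
+ *
+ *   u32 peer = vcm->inside_sw_if_index_table[sw_if_index];
+ *   if (peer != EMPTY)
+ *       ;   // sw_if_index is a configured inside interface; peer is its outside pair
+ */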
+
+ if (! vcm->cnat_db_initalized) {
+ int i;
+ cnat_db_v2_init();
+
+ for (i = 0; i < CNAT_MAX_VRFMAP_ENTRIES; i++) {
+ vrf_map_array[i] = VRF_MAP_ENTRY_EMPTY;
+ }
+ /* Turn on the db scanner process */
+ cnat_scanner_db_process_turn_on(vm);
+ vcm->cnat_db_initalized = 1;
+ }
+ return 0;
+}
+
+static clib_error_t *
+set_vcgn_map_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ ip4_address_t lo, hi;
+ spp_api_cnat_v4_add_vrf_map_t map;
+ u32 inside_sw_if_index = EMPTY;
+ u32 outside_sw_if_index;
+
+ vnet_hw_interface_t *inside_hw_if_index = NULL;
+ vnet_hw_interface_t *outside_hw_if_index = NULL;
+
+ if (! unformat(input, "inside %U",
+ unformat_vnet_sw_interface, vnm, &inside_sw_if_index))
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ if (!unformat (input, "%U", unformat_ip4_address, &lo))
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+
+ if (unformat (input, "- %U", unformat_ip4_address, &hi))
+ ;
+
+ /* $$$$ remember to set i_vrf, i_vrf_id as needed */
+
+ /* Fill the structure spp_api_cnat_v4_add_vrf_map_t & let this API handle it */
+ /* i_vrf_id & o_vrf_id are 32-bit & i_vrf, o_vrf are 16 bit */
+
+ if (inside_sw_if_index > vec_len(vcm->inside_sw_if_index_table) ||
+ vcm->inside_sw_if_index_table[inside_sw_if_index] == EMPTY) {
+ return clib_error_return (0, "Could not find the inside interface");
+ }
+ outside_sw_if_index = vcm->inside_sw_if_index_table[inside_sw_if_index];
+
+ map.i_vrf_id = inside_sw_if_index;
+ map.o_vrf_id = outside_sw_if_index;
+ map.i_vrf = inside_sw_if_index;
+ map.o_vrf = outside_sw_if_index;
+
+ map.start_addr[0] = clib_net_to_host_u32(lo.as_u32);
+ map.end_addr[0] = clib_net_to_host_u32(hi.as_u32);
+
+ cnat_nat44_add_vrf_map_t_handler(&map, vm);
+
+#if 1
+ inside_hw_if_index = vnet_get_sup_hw_interface(vcm->vnet_main, inside_sw_if_index);
+ if (inside_hw_if_index) {
+ vnet_hw_interface_rx_redirect_to_node(vcm->vnet_main,
+ inside_hw_if_index->hw_if_index, vcgn_classify_node.index);
+ }
+ outside_hw_if_index = vnet_get_sup_hw_interface(vcm->vnet_main, outside_sw_if_index);
+ if (outside_hw_if_index) {
+ vnet_hw_interface_rx_redirect_to_node(vcm->vnet_main,
+ outside_hw_if_index->hw_if_index, vcgn_classify_node.index);
+ }
+#endif
+ return 0;
+}
+
+static clib_error_t *
+set_vcgn_tcp_timeout_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /*
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ */
+ u32 act_timeout = 0;
+ u32 init_timeout = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "active %u", &act_timeout))
+ tcp_active_timeout = act_timeout;
+ else if (unformat(input, "init %u", &init_timeout))
+ tcp_initial_setup_timeout = init_timeout;
+ else break;
+ }
+ return 0;
+}
+
+static clib_error_t *
+set_vcgn_udp_timeout_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /*
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ */
+ u32 act_timeout = 0;
+ u32 init_timeout = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "active %u", &act_timeout))
+ udp_act_session_timeout = act_timeout;
+ else if (unformat(input, "init %u", &init_timeout))
+ udp_init_session_timeout = init_timeout;
+ else break;
+ }
+ return 0;
+}
+
+
+static clib_error_t *
+set_vcgn_icmp_timeout_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /*
+ * vnet_main_t * vnm = vnet_get_main();
+ * vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ */
+ u32 timeout = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "%u", &timeout))
+ ;
+ else break;
+ }
+ icmp_session_timeout = timeout;
+ return 0;
+}
+
+
+static clib_error_t *
+set_vcgn_protocol_default_timeout_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /*
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ */
+ u8 *protocol;
+ u8 reset = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "%s", &protocol))
+ ;
+ else break;
+ }
+ cnat_nat44_set_protocol_timeout_value(0, 0, protocol, reset, vm);
+ return 0;
+}
+
+static clib_error_t *
+set_vcgn_dynamic_port_start_range_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /*
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ */
+ u32 port = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "%u", &port))
+ ;
+ else break;
+ }
+ if (port != 0 && port > 65535) {
+ vlib_cli_output(vm, "Error !! Invalid port\n");
+ } else {
+ cnat_static_port_range = port;
+ vlib_cli_output(vm, "Dynamic Port Range Config Successful !!\n");
+ }
+ return 0;
+}
+
+static clib_error_t *
+set_vcgn_port_limit_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ /*
+ vnet_main_t * vnm = vnet_get_main();
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ */
+ u32 port = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat(input, "%u", &port))
+ ;
+ else break;
+ }
+ if (port != 0 && port > 65535) {
+ vlib_cli_output(vm, "Error !! Invalid port\n");
+ } else {
+ cnat_main_db_max_ports_per_user = port;
+ vlib_cli_output(vm, "Port Limit Config Successful !!\n");
+ }
+ return 0;
+}
+
+static inline void nfv9_init_pkt_sent_data(cnat_nfv9_logging_info_t *nfv9_info)
+{
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ nfv9_info->server_index;
+
+ /*
+ * Reset the pkts_since_last_template and sent_time
+ * so that template will be sent next time
+ */
+ server->last_template_sent_time = 0;
+ server->pkts_since_last_template = 0xffffffff;
+}
+
+static inline u16 nfv9_get_max_length_minus_max_record_size(u16 path_mtu)
+{
+ u16 max_length_minus_max_record_size;
+ if(!path_mtu) /* Use default */
+ path_mtu = NFV9_DEF_PATH_MTU;
+
+ max_length_minus_max_record_size = path_mtu -
+ CNAT_NFV9_DATAFLOW_RECORD_HEADER_LENGTH -
+ NFV9_PAD_VALUE -
+ CNAT_NFV9_MAX_SINGLE_RECORD_LENGTH; /* Note: as of now this record
+ * requires the maximum number of bytes. If more records are added,
+ * this needs to be re-checked */
+ if (max_length_minus_max_record_size < CNAT_NFV9_MIN_RECORD_SIZE) {
+ max_length_minus_max_record_size = CNAT_NFV9_MIN_RECORD_SIZE;
+ }
+ return max_length_minus_max_record_size;
+}
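+
+/*
+ * Usage sketch (restating how the CLI handler below uses this helper;
+ * not additional code from the original plugin):
+ *
+ *   my_nfv9_logging_info->max_length_minus_max_record_size =
+ *       nfv9_get_max_length_minus_max_record_size (nfv9_conf.path_mtu);
+ *
+ * A path_mtu of 0 falls back to NFV9_DEF_PATH_MTU, and the result is
+ * clamped to at least CNAT_NFV9_MIN_RECORD_SIZE.
+ */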
+
+/* This function checks whether the netflow server indicated by
+ * new_server_info is already configured for some other instance.
+ * If so, it returns the same pointer so that the info sent to the
+ * server is consistent. If the server is not found, a new instance
+ * is created and returned. If an existing server is reused, its reference
+ * count is incremented (indicating the number of instances using the
+ * same server).
+ */
+ /* #define DEBUG_NF_SERVER_CONFIG 1 */
+static u16 nfv9_get_server_instance(
+ cnat_nfv9_logging_info_t *nfv9_info, nfv9_server_info_t *new_server_info)
+{
+
+ /* Check if the instance has a server already and if yes, does it match */
+ nfv9_server_info_t *server;
+ if(nfv9_info->server_index != EMPTY) {
+ server = nfv9_server_info_pool + nfv9_info->server_index;
+
+ if((server->ipv4_address == new_server_info->ipv4_address) &&
+ (server->port == new_server_info->port)) {
+ /* Same server.. just check if refresh rate/timeouts are reduced */
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ printf("\n Server match for %x and port %d\n",
+ new_server_info->ipv4_address, new_server_info->port);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ goto adjust_refresh_rate;
+ } else { /* The server is being changed */
+ server->ref_count--;
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ printf("\n Server change from %x, %d to %x, %d"
+ "Ref count %d\n",
+ server->ipv4_address,
+ server->port,
+ new_server_info->ipv4_address, new_server_info->port,
+ server->ref_count);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ if(!server->ref_count) {
+ /* Return this server to pool */
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ PLATFORM_DEBUG_PRINT("Deleting Server %x, %d at %d\n",
+ server->ipv4_address,
+ server->port,
+ nfv9_info->server_index);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ pool_put(nfv9_server_info_pool, server);
+ }
+ }
+ }
+
+ /* Now check if the server is already present in the pool */
+ u8 found = 0;
+ server = 0;
+ pool_foreach (server, nfv9_server_info_pool, ({
+ if ((server->ipv4_address == new_server_info->ipv4_address) &&
+ (server->port == new_server_info->port)) {
+ server->ref_count++;
+ nfv9_info->server_index = server - nfv9_server_info_pool;
+ found = 1;
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ printf("Re-using server %x, %d Ref count %d\n",
+ server->ipv4_address, server->port, server->ref_count);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ break;
+ }
+ }));
+
+ if(!found) {
+ /* Create a new one, initialize and return */
+ server = 0;
+ pool_get(nfv9_server_info_pool, server);
+ clib_memcpy(server, new_server_info, sizeof(nfv9_server_info_t));
+ server->ref_count = 1;
+ nfv9_info->server_index = server - nfv9_server_info_pool;
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ printf("Create new server for at %d %x and port %d\n",
+ nfv9_info->server_index,
+ new_server_info->ipv4_address, new_server_info->port);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ return CNAT_SUCCESS;
+ }
+
+adjust_refresh_rate:
+ if(server->refresh_rate >
+ new_server_info->refresh_rate) {
+ server->refresh_rate =
+ new_server_info->refresh_rate;
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ printf("Reset refresh rate to %d\n",
+ server->refresh_rate);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ }
+
+ if(server->timeout_rate >
+ new_server_info->timeout_rate) {
+ server->timeout_rate =
+ new_server_info->timeout_rate;
+#ifdef DEBUG_NF_SERVER_CONFIG
+ if(my_instance_number == 1) {
+ printf("Reset timeout rate to %d\n",
+ server->timeout_rate);
+ }
+#endif /* #ifdef DEBUG_NF_SERVER_CONFIG */
+ }
+
+ return CNAT_SUCCESS;
+}
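+
+/*
+ * Usage sketch (mirrors the CLI handler below; the values shown are
+ * assumptions for illustration only):
+ *
+ *   nfv9_server_info_t srv;
+ *   memset(&srv, 0, sizeof(srv));
+ *   srv.ipv4_address = nfv9_conf.ipv4_address;
+ *   srv.port         = nfv9_conf.port;
+ *   srv.refresh_rate = nfv9_conf.refresh_rate;      // packets between templates
+ *   srv.timeout_rate = 60 * nfv9_conf.timeout_rate; // minutes -> seconds
+ *   if (nfv9_get_server_instance(my_nfv9_logging_info, &srv) == CNAT_SUCCESS)
+ *       nfv9_init_pkt_sent_data(my_nfv9_logging_info);
+ */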
+static clib_error_t *
+set_vcgn_nfv9_logging_cofig_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vcgn_classify_main_t * vcm = &vcgn_classify_main;
+ spp_api_cnat_v4_config_nfv9_logging_t nfv9_conf;
+ ip4_address_t server_addr;
+ u32 ip_addr = 0;
+ u32 port;
+ u32 refresh_rate = 0;
+ u32 timeout = 0;
+ u32 pmtu = 0;
+ u8 enable = 1;
+/* vcgn changes start*/
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info = NULL;
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info_tmp = NULL;
+ cnat_vrfmap_t *my_vrfmap = 0, *my_vrfmap_temp = 0;
+ u16 i_vrf = ~0;
+ u32 i_vrf_id = ~0;
+ u8 found;
+ u32 inside_sw_if_index = EMPTY;
+ /*
+ * Init NFv9 logging info as needed; this is done only once
+ */
+ cnat_nfv9_logging_init();
+
+ found = 0;
+
+/* vcgn changes end*/
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
+ if (unformat (input, "inside %U",
+ unformat_vnet_sw_interface, &inside_sw_if_index)) {
+ /* Do nothing */
+ } else if (unformat (input, "server %U", unformat_ip4_address, &server_addr))
+ ip_addr = clib_net_to_host_u32(server_addr.as_u32);
+ else if (unformat(input, "port %u", &port))
+ ;
+ else if (unformat(input, "refresh-rate %u", &refresh_rate))
+ ;
+ else if (unformat(input, "timeout %u", &timeout))
+ ;
+ else if (unformat(input, "pmtu %u", &pmtu))
+ ;
+ else if (unformat(input, "del"))
+ enable = 0;
+ else break;
+ }
+
+ if (inside_sw_if_index > vec_len(vcm->inside_sw_if_index_table) ||
+ vcm->inside_sw_if_index_table[inside_sw_if_index] == EMPTY) {
+ return clib_error_return (0, "Could not find the inside interface");
+ }
+ i_vrf = inside_sw_if_index;
+ i_vrf_id = inside_sw_if_index;
+
+ #if 0
+ vlib_cli_output(vm, "ip 0x%x, port %u, refresh %u, "
+ "timeout %u, pmtu %u enable %u\n",
+ ip_addr, port, refresh_rate,
+ timeout, pmtu, enable);
+ #endif
+ if (refresh_rate == 0) refresh_rate = 500; /* num of pkts */
+ if (timeout == 0) timeout = 30; /* in mins */
+
+ nfv9_conf.enable = enable;
+ nfv9_conf.ipv4_address = ip_addr;
+ nfv9_conf.i_vrf_id = inside_sw_if_index;
+ nfv9_conf.i_vrf = inside_sw_if_index;
+ nfv9_conf.port = port;
+ nfv9_conf.refresh_rate = refresh_rate;
+ nfv9_conf.timeout_rate = timeout;
+ nfv9_conf.path_mtu = pmtu;
+ nfv9_conf.nfv9_global_collector = 0;
+ nfv9_conf.session_logging = 0;
+
+ /*
+ * At this point the NFv9 global information should already be
+ * initialized, since we have called cnat_nfv9_logging_init()
+ */
+
+ if (nfv9_conf.nfv9_global_collector) {
+ if (cnat_nfv9_global_info.cnat_nfv9_global_collector_index != EMPTY) {
+ found = 1;
+ my_nfv9_logging_info = cnat_nfv9_logging_info_pool +
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index;
+ }
+ } else {
+ /* Do we already have a map for this VRF? */
+ pool_foreach (my_nfv9_logging_info, cnat_nfv9_logging_info_pool, ({
+ if (my_nfv9_logging_info->i_vrf_id == i_vrf_id) {
+ nfv9_server_info_t *server = nfv9_server_info_pool +
+ my_nfv9_logging_info->server_index;
+ if((server->ipv4_address == (nfv9_conf.ipv4_address)) && (server->port == (nfv9_conf.port))) {
+ found = 1;
+ my_nfv9_logging_info_tmp = my_nfv9_logging_info;
+ break;
+ }
+ }
+ }));
+ }
+
+ if ((nfv9_conf.ipv4_address == 0) ||
+ (nfv9_conf.port == 0)) {
+ vlib_cli_output(vm,
+ "Add NFv9 ivrf %d Logging Invalid values [IPv4 0x%x, PORT %d]\n",
+ i_vrf,
+ (nfv9_conf.ipv4_address),
+ (nfv9_conf.port));
+ goto done;
+ }
+
+ if (nfv9_conf.enable) {
+ if ((nfv9_conf.ipv4_address == 0) ||
+ (nfv9_conf.port == 0)) {
+ nfv9_conf.rc = CNAT_ERR_PARSER;
+ vlib_cli_output(vm,
+ "NFV9_logging i_vrf %d, Invalid [v4_addr 0x%x port %d]\n",
+ i_vrf,
+ (nfv9_conf.ipv4_address),
+ (nfv9_conf.port));
+ goto done;
+ }
+
+ nfv9_server_info_t new_server_info;
+ memset(&new_server_info, 0, sizeof(nfv9_server_info_t));
+ new_server_info.ipv4_address =
+ nfv9_conf.ipv4_address;
+ new_server_info.port =
+ (nfv9_conf.port);
+ new_server_info.refresh_rate =
+ (nfv9_conf.refresh_rate);
+ /*
+ * Store the timeout in seconds. User configures it in minutes
+ */
+ new_server_info.timeout_rate =
+ 60*(nfv9_conf.timeout_rate);
+ if (found && my_nfv9_logging_info) {
+ /*
+ * Entry already present, change it
+ */
+ my_nfv9_logging_info->max_length_minus_max_record_size =
+ nfv9_get_max_length_minus_max_record_size(
+ ((nfv9_conf.path_mtu)));
+ } else {
+ pool_get(cnat_nfv9_logging_info_pool, my_nfv9_logging_info);
+ memset(my_nfv9_logging_info, 0, sizeof(*my_nfv9_logging_info));
+ my_nfv9_logging_info->server_index = EMPTY;
+ my_nfv9_logging_info->nfv9_logging_next_index = EMPTY;
+ /*
+ * Make the current and queued logging contexts NULL.
+ * They are set correctly when the first logging happens.
+ */
+ my_nfv9_logging_info->current_logging_context = NULL;
+ my_nfv9_logging_info->queued_logging_context = NULL;
+#if 0
+ my_nfv9_logging_info->f = NULL;
+ my_nfv9_logging_info->to_next = NULL;
+ output_node = vlib_get_node_by_name (vm, (u8 *) "ip4-input");
+ my_nfv9_logging_info->ip4_input_node_index = output_node->index;
+ printf("ip4_input_node_index %d\n", my_nfv9_logging_info->ip4_input_node_index);
+#endif
+ my_nfv9_logging_info->i_vrf = i_vrf;
+ my_nfv9_logging_info->i_vrf_id = i_vrf_id;
+ my_nfv9_logging_info->max_length_minus_max_record_size =
+ nfv9_get_max_length_minus_max_record_size(
+ nfv9_conf.path_mtu);
+
+ /* my_nfv9_logging_info keeps a copy of logging_policy because
+ * it is quite possible that the nfv9 config arrives before
+ * the corresponding vrfmap is initialized. In that case
+ * this copy is used to update the vrfmap entry.
+ */
+ my_nfv9_logging_info->logging_policy = nfv9_conf.session_logging;
+
+ if (nfv9_conf.nfv9_global_collector) {
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index =
+ my_nfv9_logging_info - cnat_nfv9_logging_info_pool;
+
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ if (my_vrfmap->nfv9_logging_index == EMPTY) {
+ my_vrfmap->nfv9_logging_index =
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index;
+ }
+ }));
+ } else {
+ u32 my_vrfmap_found = 0;
+
+ FIND_MY_VRF_USING_I_VRF_ID
+ my_vrfmap = my_vrfmap_temp;
+ if (my_vrfmap_found) {
+ if(my_vrfmap->nfv9_logging_index == EMPTY) {
+ my_vrfmap->nfv9_logging_index =
+ my_nfv9_logging_info - cnat_nfv9_logging_info_pool;
+ // my_vrfmap->nf_logging_policy = mp->session_logging;
+ } else {
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info_temp = cnat_nfv9_logging_info_pool + my_vrfmap->nfv9_logging_index;
+ while(my_nfv9_logging_info_temp->nfv9_logging_next_index != EMPTY){
+ my_nfv9_logging_info_temp = cnat_nfv9_logging_info_pool + my_nfv9_logging_info_temp->nfv9_logging_next_index;
+ }
+ my_nfv9_logging_info_temp->nfv9_logging_next_index = my_nfv9_logging_info - cnat_nfv9_logging_info_pool;
+ }
+ }
+ }
+ }
+
+ /* Update logging policy */
+ my_nfv9_logging_info->logging_policy = nfv9_conf.session_logging;
+ if (nfv9_conf.nfv9_global_collector) {
+ if(PLATFORM_DBL_SUPPORT) {
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ if (my_vrfmap->nfv9_logging_index ==
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index) {
+ my_vrfmap->nf_logging_policy = nfv9_conf.session_logging;
+ }
+ }));
+ } else {
+ nfv9_conf.rc = CNAT_ERR_NO_SESSION_DB;
+ }
+ } else {
+ if(PLATFORM_DBL_SUPPORT) {
+ u32 my_vrfmap_found = 0;
+ my_vrfmap_temp = NULL;
+ FIND_MY_VRF_USING_I_VRF_ID
+ my_vrfmap = my_vrfmap_temp;
+ if (my_vrfmap_found) {
+ // my_vrfmap->nf_logging_policy = mp->session_logging;
+ }
+ } else {
+ nfv9_conf.rc = CNAT_ERR_NO_SESSION_DB;
+ }
+ }
+ u8 nfv9_logging_policy = 0;
+ u32 my_vrfmap_found = 0;
+ my_vrfmap_temp = NULL;
+ FIND_MY_VRF_USING_I_VRF_ID
+ my_vrfmap = my_vrfmap_temp;
+ if (my_vrfmap_found) {
+ u32 index_curr = my_vrfmap->nfv9_logging_index;
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info_temp;
+ while(index_curr != EMPTY) {
+ my_nfv9_logging_info_temp = cnat_nfv9_logging_info_pool + index_curr;
+ nfv9_logging_policy = nfv9_logging_policy || my_nfv9_logging_info_temp->logging_policy;
+ index_curr = (cnat_nfv9_logging_info_pool + index_curr)->nfv9_logging_next_index;
+ }
+ my_vrfmap->nf_logging_policy = nfv9_logging_policy;
+ }
+ //vlib_cli_output(vm,"Netflow logging policy = %d\n", my_vrfmap->nf_logging_policy);
+ if(nfv9_get_server_instance(my_nfv9_logging_info, &new_server_info)
+ != CNAT_SUCCESS) {
+ vlib_cli_output(vm, "Error to get server instance");
+ nfv9_conf.rc = CNAT_ERR_PARSER;
+ goto done;
+ }
+ nfv9_init_pkt_sent_data(my_nfv9_logging_info);
+
+ vlib_cli_output(vm,"Adding NFv9 Logging Succeeded\n");
+ nfv9_configured = 1;
+
+ } else {
+ /*Delete path*/
+ if (found) {
+ /* An entry was found above, so point my_nfv9_logging_info
+ * at it (my_nfv9_logging_info_tmp)
+ */
+ my_nfv9_logging_info = my_nfv9_logging_info_tmp;
+ if (i_vrf == INVALID_UIDX) {
+ /*
+ * We are deleting a global collector. Mark the collectors
+ * in those VRFs using the global collector
+ */
+ pool_foreach (my_vrfmap, cnat_map_by_vrf, ({
+ if (my_vrfmap->nfv9_logging_index ==
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index) {
+ my_vrfmap->nfv9_logging_index = EMPTY;
+ }
+ }));
+
+ cnat_nfv9_global_info.cnat_nfv9_global_collector_index = EMPTY;
+ } else {
+ u32 my_vrfmap_found = 0;
+ my_vrfmap_temp = NULL;
+ FIND_MY_VRF_USING_I_VRF_ID
+ my_vrfmap = my_vrfmap_temp;
+ if (my_vrfmap_found) {
+ // my_vrfmap->nfv9_logging_index = cnat_nfv9_global_info.cnat_nfv9_global_collector_index;
+ }
+ }
+ if (my_nfv9_logging_info->queued_logging_context ||
+ my_nfv9_logging_info->current_logging_context) {
+ /*
+ * If there is a pending context:
+ * Set the deleted flag to 1. This will ensure
+ * that the logging info structure gets freed after any
+ * pending packets are sent
+ */
+ my_nfv9_logging_info->deleted = 1;
+ } else {
+ /*
+ * No pending context, just free the logging info structure
+ */
+ u32 index = my_nfv9_logging_info - cnat_nfv9_logging_info_pool;
+ if(index == my_vrfmap->nfv9_logging_index) {
+ /* Deleting the first server */
+ my_vrfmap->nfv9_logging_index = my_nfv9_logging_info->nfv9_logging_next_index;
+ /* if(my_nfv9_logging_info->nfv9_logging_next_index != EMPTY){
+ my_vrfmap->nf_logging_policy = (cnat_nfv9_logging_info_pool + my_nfv9_logging_info->nfv9_logging_next_index)->logging_policy;
+ } else {
+ my_vrfmap->nf_logging_policy = EMPTY;
+ }*/
+ } else {
+ u32 index_curr = my_vrfmap->nfv9_logging_index;
+ u32 index_prev = EMPTY;
+ while(index_curr != EMPTY) {
+ index_prev = index_curr;
+ index_curr = (cnat_nfv9_logging_info_pool + index_curr)->nfv9_logging_next_index;
+ if(index == index_curr)
+ {
+ (cnat_nfv9_logging_info_pool + index_prev)->nfv9_logging_next_index = (cnat_nfv9_logging_info_pool + index_curr)->nfv9_logging_next_index;
+ break;
+ }
+ }
+ }
+ nfv9_delete_server_info(my_nfv9_logging_info);
+ pool_put(cnat_nfv9_logging_info_pool, my_nfv9_logging_info);
+ }
+
+ vlib_cli_output(vm, "Deleting NFv9 Logging Succeeded\n");
+ /*
+ * Search across all vrf and check if nfv9 logging is configured.
+ */
+ nfv9_configured = 0;
+ pool_foreach (my_nfv9_logging_info, cnat_nfv9_logging_info_pool, ({
+ nfv9_configured = 1;
+ break;
+ }));
+ } else {
+ nfv9_conf.rc = CNAT_NO_CONFIG;
+ vlib_cli_output(vm, "Add NFv9 Logging Failed (2) Non Existent vrf %d\n",
+ i_vrf);
+
+ }
+ u8 nfv9_logging_policy = 0;
+ u32 my_vrfmap_found = 0;
+ my_vrfmap_temp = NULL;
+ FIND_MY_VRF_USING_I_VRF_ID
+ my_vrfmap = my_vrfmap_temp;
+ if (my_vrfmap_found) {
+ u32 index_curr = my_vrfmap->nfv9_logging_index;
+ cnat_nfv9_logging_info_t *my_nfv9_logging_info_temp;
+ while(index_curr != EMPTY) {
+ my_nfv9_logging_info_temp = cnat_nfv9_logging_info_pool + index_curr;
+ nfv9_logging_policy = nfv9_logging_policy || my_nfv9_logging_info_temp->logging_policy;
+ index_curr = (cnat_nfv9_logging_info_pool + index_curr)->nfv9_logging_next_index;
+ }
+ my_vrfmap->nf_logging_policy = nfv9_logging_policy;
+ }
+ }
+
+done:
+ return 0;
+}
+
+/* config CLIs */
+VLIB_CLI_COMMAND (set_vcgn_map_command) = {
+ .path = "set vcgn map",
+ .short_help = "set vcgn map <lo-address> [- <hi-address>]",
+ .function = set_vcgn_map_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_inside_command) = {
+ .path = "set vcgn inside",
+ .short_help = "set vcgn inside <inside intfc> outside <outside intfc>",
+ .function = set_vcgn_inside_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_tcp_timeout_command) = {
+ .path = "set vcgn tcp timeout",
+ .short_help = "set vcgn tcp timeout active <1-65535> init <1-65535>",
+ .function = set_vcgn_tcp_timeout_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_udp_timeout_command) = {
+ .path = "set vcgn udp timeout",
+ .short_help = "set vcgn udp timeout active <1-65535> init <1-65535>",
+ .function = set_vcgn_udp_timeout_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_icmp_timeout_command) = {
+ .path = "set vcgn icmp timeout",
+ .short_help = "set vcgn icmp timeout <1-65535>",
+ .function = set_vcgn_icmp_timeout_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_protocol_default_timeout_command) = {
+ .path = "set vcgn default timeout",
+ .short_help = "set vcgn default timeout protocol <tcp/udp/icmp>",
+ .function = set_vcgn_protocol_default_timeout_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_dynamic_port_start_range_command) = {
+ .path = "set vcgn dynamic port start",
+ .short_help = "set vcgn dynamic port start <1-65535>",
+ .function = set_vcgn_dynamic_port_start_range_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_port_limit_command) = {
+ .path = "set vcgn port limit",
+ .short_help = "set vcgn port limit <1-65535>",
+ .function = set_vcgn_port_limit_command_fn,
+};
+
+VLIB_CLI_COMMAND (set_vcgn_nfv9_logging_cofig_command) = {
+ .path = "set vcgn nfv9",
+ .short_help = "set vcgn nfv9 [del] inside <interface> "
+ "server <ip-addr> port <port> [refresh-rate <n>] "
+ "[timeout <n>] [pmtu <n>]",
+ .function = set_vcgn_nfv9_logging_cofig_command_fn,
+};
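+
+/*
+ * Illustrative configuration sequence (interface names, addresses and
+ * values are assumptions, not taken from this plugin):
+ *
+ *   set vcgn inside GigabitEthernet0/1/0 outside GigabitEthernet0/2/0
+ *   set vcgn map inside GigabitEthernet0/1/0 100.64.0.1 - 100.64.0.254
+ *   set vcgn tcp timeout active 3600 init 120
+ *   set vcgn nfv9 inside GigabitEthernet0/1/0 server 192.0.2.10 port 2055
+ */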
+
+
+/* show CLIs */
+VLIB_CLI_COMMAND (show_vcgn_config_command) = {
+ .path = "show vcgn config",
+ .short_help = "show vcgn config",
+ .function = show_vcgn_config_command_fn,
+};
+
+VLIB_CLI_COMMAND (show_vcgn_stat_command) = {
+ .path = "show vcgn statistics",
+ .short_help = "show vcgn statistics",
+ .function = show_vcgn_stats_command_fn,
+};
+
+VLIB_CLI_COMMAND (show_vcgn_inside_translation_command) = {
+ .path = "show vcgn inside-translation",
+ .short_help = "show vcgn inside-translation protocol <tcp/udp/icmp> "
+ "interface <inside-if> inside-addr <ip-addr> "
+ "[start-port <n>] [end-port <n>]",
+ .function = show_vcgn_inside_translation_command_fn,
+};
+
+VLIB_CLI_COMMAND (show_vcgn_outside_translation_command) = {
+ .path = "show vcgn outside-translation",
+ .short_help = "show vcgn outside-translation protocol <tcp/udp/icmp> "
+ "interface <outside-if> outside-addr <ip-addr> "
+ "[start-port <n>] [end-port <n>]",
+ .function = show_vcgn_outside_translation_command_fn,
+};
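+
+/*
+ * Illustrative show commands (interface name and address are assumptions):
+ *
+ *   show vcgn config
+ *   show vcgn statistics
+ *   show vcgn inside-translation protocol tcp interface GigabitEthernet0/1/0
+ *       inside-addr 10.1.1.2 start-port 1024 end-port 2048
+ */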
+
+static clib_error_t *
+vcgn_init (vlib_main_t * vm)
+{
+ clib_error_t * error = 0;
+
+ if ((error = vlib_call_init_function
+ (vm, vcgn_classify_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_udp_inside_input_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_udp_outside_input_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_udp_inside_input_exc_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_db_scanner_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_tcp_inside_input_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_tcp_inside_input_exc_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_tcp_outside_input_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_icmp_q_inside_input_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_icmp_q_inside_input_exc_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_icmp_q_outside_input_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_icmp_e_inside_input_init)))
+ return error;
+ if ((error = vlib_call_init_function
+ (vm, cnat_ipv4_icmp_e_outside_input_init)))
+ return error;
+
+ return error;
+}
+
+/*
+ * This routine exists to convince the vlib plugin framework that
+ * we haven't accidentally copied a random .dll into the plugin
+ * directory. This is used in lieu of VLIB_INIT_FUNCTION(vcgn_init).
+ *
+ * Also collects global variable pointers passed from the vpp engine
+ */
+clib_error_t *
+vlib_plugin_register (vlib_main_t * vm, vnet_plugin_handoff_t * h,
+ int from_early_init)
+{
+ return vcgn_init(vm);
+}
diff --git a/plugins/vcgn-plugin/vcgn/vcgn_db.h b/plugins/vcgn-plugin/vcgn/vcgn_db.h
new file mode 100644
index 00000000000..cd7d835cba1
--- /dev/null
+++ b/plugins/vcgn-plugin/vcgn/vcgn_db.h
@@ -0,0 +1,117 @@
+/*
+ *------------------------------------------------------------------
+ * vcgn_db.h - translation database definitions
+ *
+ * Copyright (c) 2007-2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __VCGN_DB_H__
+#define __VCGN_DB_H__
+
+#include "index_list.h"
+
+/*
+ * The key structure. All fields are in NETWORK byte order!
+ */
+typedef struct {
+ u32 ipv4;
+ u16 port;
+ u16 vrf; //bit0-12:vrf, bit13:unused, bit14-15:protocol
+} cnat_db_key_t;
+
+/* bit14-15:protocol in cnat_db_key_t */
+#define CNAT_INVALID_PROTO 0x0000
+#define CNAT_UDP 0x4000
+#define CNAT_TCP 0x8000
+#define CNAT_ICMP 0xc000
+#define CNAT_VRF_MASK 0x3fff
+#define CNAT_PRO_MASK 0xc000
+#define CNAT_PRO_SHIFT 14
+
+/*
+ * Maximum number of VRF entries supported
+ */
+#define CNAT_MAX_VRFMAP_ENTRIES (CNAT_VRF_MASK + 1)
+/*
+ * for hashing purposes, fetch the key in one instr.
+ */
+typedef union {
+ cnat_db_key_t k;
+ u64 key64;
+} cnat_key_t;
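+
+/*
+ * Illustrative sketch (not part of the original header): packing and
+ * unpacking the 16-bit vrf field with the masks above, and fetching the
+ * whole key in one load via the union.
+ *
+ *   cnat_key_t key;
+ *   key.k.ipv4 = addr;                                  // network byte order
+ *   key.k.port = port;                                  // network byte order
+ *   key.k.vrf  = (vrf_id & CNAT_VRF_MASK) | CNAT_TCP;   // vrf + protocol bits
+ *
+ *   u16 proto = (key.k.vrf & CNAT_PRO_MASK) >> CNAT_PRO_SHIFT; // 1=UDP 2=TCP 3=ICMP
+ *   u64 hash_in = key.key64;                            // single-instruction fetch
+ */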
+
+/*
+ * Main translation database entries. Currently 0x50 = 80 bytes in length.
+ * Given 20,000,000 entries, it saves nearly 1 GB of SDRAM to pack the entries
+ * and pay the extra prefetch. So, that's what we do.
+ */
+
+typedef struct {
+ /* 0x00 */
+ index_slist_t out2in_hash; /* hash-and-chain, x2 */
+ index_slist_t in2out_hash;
+
+ /* 0x08 */
+ cnat_key_t out2in_key; /* network-to-user, outside-to-inside key */
+
+ /* 0x10 */
+ cnat_key_t in2out_key; /* user-to-network, inside-to-outside key */
+
+ /* 0x18 */
+ index_dlist_t user_ports; /* per-user translation list */
+
+ /* 0x20 */
+ u32 user_index; /* index of user that owns this entry */
+
+ /* 0x24 */
+ u16 vrfmap_index; /* index of vrfmap */
+
+ /* 0x26 */
+ u16 flags; /* Always need flags... */
+#define CNAT_DB_FLAG_PORT_PAIR (1<<0)
+#define CNAT_DB_FLAG_TCP_ACTIVE (1<<1)
+#define CNAT_DB_FLAG_ENTRY_FREE (1<<2)
+#define CNAT_DB_FLAG_UDP_ACTIVE (1<<3)
+#define CNAT_DB_FLAG_STATIC_PORT (1<<4)
+#define CNAT_DB_FLAG_ALG_ENTRY (1<<5)
+
+ /* 0x28 */
+ u32 dst_ipv4; /* pointer to ipv4 dst list, used in evil mode */
+
+ /* 0x2C */
+ u32 out2in_pkts; /* pkt counters */
+
+ /* 0x30 */
+ u32 in2out_pkts;
+
+ /* 0x34 */
+ u32 entry_expires; /* timestamp used to expire translations */
+
+ /* 0x38 */
+ union { /* used by FTP ALG, pkt len delta due to FTP PORT cmd */
+ u16 delta;
+ i8 alg_dlt[2]; /* two delta values, 0 for previous, 1 for current */
+ u16 il; /* Used to indicate if interleaved mode is used
+ in case of RTSP ALG */
+ } alg;
+
+ /* 0x48 */
+ u32 tcp_seq_num; /* last tcp (FTP) seq # that has pkt len change due to PORT */
+
+ cnat_timeout_t destn_key;
+
+ /* 0x4C... last byte -- 72 total */
+} cnat_main_db_entry_t;
+#endif