/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include
#include
#include
#include

#define foreach_lb_error \
  _(NONE, "no error") \
  _(PROTO_NOT_SUPPORTED, "protocol not supported")

typedef enum
{
#define _(sym,str) LB_ERROR_##sym,
  foreach_lb_error
#undef _
  LB_N_ERROR,
} lb_error_t;

static char *lb_error_strings[] = {
#define _(sym,string) string,
  foreach_lb_error
#undef _
};

typedef struct
{
  u32 vip_index;
  u32 as_index;
} lb_trace_t;

typedef struct
{
  u32 vip_index;
  u32 node_port;
} lb_nodeport_trace_t;

typedef struct
{
  u32 vip_index;
  u32 as_index;
  u32 rx_sw_if_index;
  u32 next_index;
} lb_nat_trace_t;

u8 *
format_lb_trace (u8 * s, va_list * args)
{
  lb_main_t *lbm = &lb_main;
  CLIB_UNUSED(vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED(vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  lb_trace_t *t = va_arg (*args, lb_trace_t *);

  if (pool_is_free_index(lbm->vips, t->vip_index))
    {
      s = format (s, "lb vip[%d]: This VIP was freed since capture\n",
                  t->vip_index);
    }
  else
    {
      s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
                  &lbm->vips[t->vip_index]);
    }
  if (pool_is_free_index(lbm->ass, t->as_index))
    {
      s = format (s, "lb as[%d]: This AS was freed since capture\n",
                  t->as_index);
    }
  else
    {
      s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
                  &lbm->ass[t->as_index]);
    }
  return s;
}

u8 *
format_lb_nat_trace (u8 * s, va_list * args)
{
  lb_main_t *lbm = &lb_main;
  CLIB_UNUSED(vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED(vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  lb_nat_trace_t *t = va_arg (*args, lb_nat_trace_t *);

  if (pool_is_free_index(lbm->vips, t->vip_index))
    {
      s = format (s, "lb vip[%d]: This VIP was freed since capture\n",
                  t->vip_index);
    }
  else
    {
      s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
                  &lbm->vips[t->vip_index]);
    }
  if (pool_is_free_index(lbm->ass, t->as_index))
    {
      s = format (s, "lb as[%d]: This AS was freed since capture\n",
                  t->as_index);
    }
  else
    {
      s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
                  &lbm->ass[t->as_index]);
    }

  s = format (s, "lb nat: rx_sw_if_index = %d, next_index = %d",
              t->rx_sw_if_index, t->next_index);
  return s;
}

lb_hash_t *
lb_get_sticky_table (u32 thread_index)
{
  lb_main_t *lbm = &lb_main;
  lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;

  //Check if size changed
  if (PREDICT_FALSE(
      sticky_ht
      && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht))))
    {
      //Dereference everything in there
      lb_hash_bucket_t *b;
      u32 i;
      lb_hash_foreach_entry(sticky_ht, b, i)
        {
          vlib_refcount_add (&lbm->as_refcount, thread_index, b->value[i], -1);
          vlib_refcount_add (&lbm->as_refcount, thread_index, 0, 1);
        }
      lb_hash_free (sticky_ht);
      sticky_ht = NULL;
    }

  //Create if necessary
  if (PREDICT_FALSE(sticky_ht == NULL))
    {
      lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc (
          lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
      sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
      clib_warning("Regenerated sticky table %p", sticky_ht);
    }

  ASSERT(sticky_ht);

  //Update timeout
  sticky_ht->timeout = lbm->flow_timeout;
  return sticky_ht;
}
u64
lb_node_get_other_ports4 (ip4_header_t *ip40)
{
  return 0;
}

u64
lb_node_get_other_ports6 (ip6_header_t *ip60)
{
  return 0;
}

static_always_inline void
lb_node_get_hash (lb_main_t *lbm, vlib_buffer_t *p, u8 is_input_v4,
                  u32 *hash, u32 *vip_idx, u8 per_port_vip)
{
  vip_port_key_t key;
  clib_bihash_kv_8_8_t kv, value;

  /* For the vip case, retrieve the vip index stored by the ip lookup */
  *vip_idx = vnet_buffer (p)->ip.adj_index[VLIB_TX];

  if (per_port_vip)
    {
      /* For the per-port-vip case, the ip lookup stores a dummy index */
      key.vip_prefix_index = *vip_idx;
    }

  if (is_input_v4)
    {
      ip4_header_t *ip40;
      u64 ports;

      ip40 = vlib_buffer_get_current (p);
      if (PREDICT_TRUE(
          ip40->protocol == IP_PROTOCOL_TCP
          || ip40->protocol == IP_PROTOCOL_UDP))
        ports = ((u64) ((udp_header_t *) (ip40 + 1))->src_port << 16)
            | ((u64) ((udp_header_t *) (ip40 + 1))->dst_port);
      else
        ports = lb_node_get_other_ports4 (ip40);

      *hash = lb_hash_hash (*((u64 *) &ip40->address_pair), ports, 0, 0, 0);

      if (per_port_vip)
        {
          key.protocol = ip40->protocol;
          key.port = (u16) (ports & 0xFFFF);
        }
    }
  else
    {
      ip6_header_t *ip60;
      u64 ports;

      ip60 = vlib_buffer_get_current (p);
      if (PREDICT_TRUE(
          ip60->protocol == IP_PROTOCOL_TCP
          || ip60->protocol == IP_PROTOCOL_UDP))
        ports = ((u64) ((udp_header_t *) (ip60 + 1))->src_port << 16)
            | ((u64) ((udp_header_t *) (ip60 + 1))->dst_port);
      else
        ports = lb_node_get_other_ports6 (ip60);

      *hash = lb_hash_hash (ip60->src_address.as_u64[0],
                            ip60->src_address.as_u64[1],
                            ip60->dst_address.as_u64[0],
                            ip60->dst_address.as_u64[1], ports);

      if (per_port_vip)
        {
          key.protocol = ip60->protocol;
          key.port = (u16) (ports & 0xFFFF);
        }
    }

  /* For the per-port-vip case, retrieve the vip index from the
   * vip_port_filter table */
  if (per_port_vip)
    {
      kv.key = key.as_u64;
      if (clib_bihash_search_8_8(&lbm->vip_index_per_port, &kv, &value) < 0)
        {
          /* return default vip */
          *vip_idx = 0;
          return;
        }
      *vip_idx = value.value;
    }
}

static_always_inline uword
lb_node_fn (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * frame,
            u8 is_input_v4,             //Compile-time parameter: input is v4 (or v6)
            lb_encap_type_t encap_type, //Compile-time parameter: encap is GRE4/GRE6/L3DSR/NAT4/NAT6
            u8 per_port_vip)            //Compile-time parameter: whether this is a per-port VIP node
{
  lb_main_t *lbm = &lb_main;
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  u32 thread_index = vm->thread_index;
  u32 lb_time = lb_hash_time_now (vm);

  lb_hash_t *sticky_ht = lb_get_sticky_table (thread_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->
#!/bin/sh

if [ $# -lt 2 ]; then
    cat - <<EOF
$0 FROM-DIR TO-DIR ENVIRONMENT

Copies files from one directory to another with possible
transformations.

Files named FILE.copyimgspp will be transformed via the spp preprocessor,
subject to environment definitions.  Source FILE.copyimgspp results in
destination file FILE in the corresponding destination directory.

Files named FILE.copyimgsh are sourced as shell scripts with the
corresponding destination directory as the working directory (i.e. via
chdir); they are not copied.

First, regular files are copied.  Then, transformations are performed.
Finally, shell scripts are run.
EOF
  exit 1;
fi
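# Example with hypothetical file names: given FROM-DIR src/img containing
#
#   README
#   boot.cfg.copyimgspp
#   setup.copyimgsh
#
# an invocation such as
#
#   ./copy-img.sh src/img build/img
#
# copies README verbatim, generates build/img/boot.cfg by running spp on
# boot.cfg.copyimgspp, and finally sources setup.copyimgsh with build/img
# as the working directory.  (The script name copy-img.sh is illustrative.)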

FROM_DIR=$1
TO_DIR=$2

FILTER=" -and -not -name '*~'";
FILTER="${FILTER} -and -not -name '.*~'";
FILTER="$FILTER -and -not -path '*/.git*'";
FILTER="$FILTER -and -not -path '*/.svn*'";
FILTER="$FILTER -and -not -path '*/.CVS*'";

FROM_FILES=`(cd $FROM_DIR; eval "find . -not -type d $FILTER")`;
 FROM_DIRS=`(cd $FROM_DIR; eval "find .      -type d $FILTER")`;

COPY_FILES=
SPP_FILES=
SH_FILES=
for f in $FROM_FILES; do
  case $f in
    *.copyimgspp) SPP_FILES="$SPP_FILES $f" ;;
    *.copyimgsh)   SH_FILES="$SH_FILES $f" ;;
    *)		 COPY_FILES="$COPY_FILES $f";;
  esac
done

# Make destination directories.
mkdir -p $TO_DIR;
if [ "$FROM_DIRS" != "" ]; then
  for d in $FROM_DIRS; do
    mkdir -p $TO_DIR/$d;
  done
fi

# Copy files
if [ "$COPY_FILES" != "" ]; then
    tar -cf - -C $FROM_DIR $COPY_FILES | tar --preserve-permissions -xf - -C $TO_DIR;
fi

# Use spp to transform any spp files
if [ "$SPP_FILES" != "" ]; then
  for f in $SPP_FILES; do
    d=`dirname $f`;
    b=`basename $f .copyimgspp`;
    mkdir -p $TO_DIR/$d;
    t=$TO_DIR/$d/$b;
    spp -o $t $FROM_DIR/$f || exit 1;
  done;
fi

# Now that all files have been copied/created we run any shell scripts
ABS_FROM_DIR=`(cd $FROM_DIR; pwd)`;
if [ "$SH_FILES" != "" ]; then
  # Allow directory to define some functions
  if [ -f $FROM_DIR/copyimgsh-functions.sh ]; then
    . $FROM_DIR/copyimgsh-functions.sh ;
  fi ;
  for f in $SH_FILES; do
    d=`dirname $f`;
    b=`basename $f`;
    mkdir -p $TO_DIR/$d;
    (cd $TO_DIR/$d; . $ABS_FROM_DIR/$d/$b) || exit 1;
  done;
fi;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm40->fib_index;

          old_addr0 = ip40->src_address.as_u32;
          ip40->src_address.as_u32 = new_addr0;

          csum = ip40->checksum;
          csum = ip_csum_sub_even (csum, old_addr0);
          csum = ip_csum_add_even (csum, new_addr0);
          ip40->checksum = ip_csum_fold (csum);

          if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
            {
              old_port0 = tcp0->src_port;
              tcp0->src_port = new_port0;

              csum = tcp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_addr0);
              csum = ip_csum_add_even (csum, new_port0);
              tcp0->checksum = ip_csum_fold (csum);
            }
          else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
            {
              old_port0 = udp0->src_port;
              udp0->src_port = new_port0;

              csum = udp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_addr0);
              csum = ip_csum_add_even (csum, new_port0);
              udp0->checksum = ip_csum_fold (csum);
            }

          pkts_processed += next0 != LB_NAT4_IN2OUT_NEXT_DROP;
        }
      else
        {
          ip6_header_t *ip60;
          ip6_address_t old_addr0, new_addr0;
          lb_snat6_key_t key60;
          lb_snat_mapping_t *sm60;
          u32 index60;

          ip60 = vlib_buffer_get_current (b0);
          udp0 = ip6_next_header (ip60);
          tcp0 = (tcp_header_t *) udp0;
          proto0 = lb_ip_proto_to_nat_proto (ip60->protocol);

          key60.addr.as_u64[0] = ip60->src_address.as_u64[0];
          key60.addr.as_u64[1] = ip60->src_address.as_u64[1];
          key60.protocol = proto0;
          key60.port = udp0->src_port;
          key60.fib_index = rx_fib_index0;

          if (lb_nat66_mapping_match (lbm, &key60, &index60))
            {
              next0 = LB_NAT6_IN2OUT_NEXT_DROP;
              goto trace0;
            }

          sm60 = pool_elt_at_index(lbm->snat_mappings, index60);
          new_addr0.as_u64[0] = sm60->src_ip.as_u64[0];
          new_addr0.as_u64[1] = sm60->src_ip.as_u64[1];
          new_port0 = sm60->src_port;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm60->fib_index;
          old_addr0.as_u64[0] = ip60->src_address.as_u64[0];
          old_addr0.as_u64[1] = ip60->src_address.as_u64[1];
          ip60->src_address.as_u64[0] = new_addr0.as_u64[0];
          ip60->src_address.as_u64[1] = new_addr0.as_u64[1];

          if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
            {
              old_port0 = tcp0->src_port;
              tcp0->src_port = new_port0;

              csum = tcp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_port0);
              tcp0->checksum = ip_csum_fold (csum);
            }
          else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
            {
              old_port0 = udp0->src_port;
              udp0->src_port = new_port0;

              csum = udp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_port0);
              udp0->checksum = ip_csum_fold (csum);
            }

          pkts_processed += next0 != LB_NAT6_IN2OUT_NEXT_DROP;
        }

      trace0:
        if (PREDICT_FALSE(
            (node->flags & VLIB_NODE_FLAG_TRACE)
            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
          {
            lb_nat_trace_t *t = vlib_add_trace (vm, node, b0, sizeof(*t));
            t->rx_sw_if_index = sw_if_index0;
            t->next_index = next0;
          }

        /* verify speculative enqueue, maybe switch current next frame */
        vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                        n_left_to_next, bi0, next0);
      }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, stats_node_index,
                               LB_NAT_IN2OUT_ERROR_IN2OUT_PACKETS,
                               pkts_processed);
  return frame->n_vectors;
}

/* Per-address-family / per-encap entry points: each wrapper instantiates
 * lb_node_fn with its compile-time parameters. */
static uword
lb6_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 0);
}

static uword
lb6_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 0);
}

static uword
lb4_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 0);
}

static uword
lb4_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 0);
}

static uword
lb6_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 1);
}

static uword
lb6_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 1);
}

static uword
lb4_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 1);
}

static uword
lb4_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 1);
}

static uword
lb4_l3dsr_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 0);
}

static uword
lb4_l3dsr_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 1);
}

static uword
lb6_nat6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_NAT6, 1);
}

static uword
lb4_nat4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_NAT4, 1);
}

static uword
lb_nat4_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame)
{
  return lb_nat_in2out_node_fn (vm, node, frame, 1);
}

static uword
lb_nat6_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame)
{
  return lb_nat_in2out_node_fn (vm, node, frame, 0);
}

VLIB_REGISTER_NODE (lb6_gre6_node) = {
  .function = lb6_gre6_node_fn,
  .name = "lb6-gre6",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb6_gre4_node) = {
  .function = lb6_gre4_node_fn,
  .name = "lb6-gre4",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_gre6_node) = {
  .function = lb4_gre6_node_fn,
  .name = "lb4-gre6",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_gre4_node) = {
  .function = lb4_gre4_node_fn,
  .name = "lb4-gre4",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};
VLIB_REGISTER_NODE (lb6_gre6_port_node) = {
  .function = lb6_gre6_port_node_fn,
  .name = "lb6-gre6-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb6_gre4_port_node) = {
  .function = lb6_gre4_port_node_fn,
  .name = "lb6-gre4-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_gre6_port_node) = {
  .function = lb4_gre6_port_node_fn,
  .name = "lb4-gre6-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_gre4_port_node) = {
  .function = lb4_gre4_port_node_fn,
  .name = "lb4-gre4-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_l3dsr_port_node) = {
  .function = lb4_l3dsr_port_node_fn,
  .name = "lb4-l3dsr-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_l3dsr_node) = {
  .function = lb4_l3dsr_node_fn,
  .name = "lb4-l3dsr",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb6_nat6_port_node) = {
  .function = lb6_nat6_port_node_fn,
  .name = "lb6-nat6-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_nat4_port_node) = {
  .function = lb4_nat4_port_node_fn,
  .name = "lb4-nat4-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = {
      [LB_NEXT_DROP] = "error-drop"
  },
};

static uword
lb4_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
{
  return lb_nodeport_node_fn (vm, node, frame, 1);
}

static uword
lb6_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
{
  return lb_nodeport_node_fn (vm, node, frame, 0);
}

VLIB_REGISTER_NODE (lb4_nodeport_node) = {
  .function = lb4_nodeport_node_fn,
  .name = "lb4-nodeport",
  .vector_size = sizeof(u32),
  .format_trace = format_nodeport_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB4_NODEPORT_N_NEXT,
  .next_nodes = {
      [LB4_NODEPORT_NEXT_IP4_NAT4] = "lb4-nat4-port",
      [LB4_NODEPORT_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (lb6_nodeport_node) = {
  .function = lb6_nodeport_node_fn,
  .name = "lb6-nodeport",
  .vector_size = sizeof(u32),
  .format_trace = format_nodeport_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB6_NODEPORT_N_NEXT,
  .next_nodes = {
      [LB6_NODEPORT_NEXT_IP6_NAT6] = "lb6-nat6-port",
      [LB6_NODEPORT_NEXT_DROP] = "error-drop",
  },
};

VNET_FEATURE_INIT (lb_nat4_in2out_node_fn, static) = {
  .arc_name = "ip4-unicast",
  .node_name = "lb-nat4-in2out",
  .runs_before = VNET_FEATURES("ip4-lookup"),
};

VLIB_REGISTER_NODE (lb_nat4_in2out_node) = {
  .function = lb_nat4_in2out_node_fn,
  .name = "lb-nat4-in2out",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_nat_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_NAT4_IN2OUT_N_NEXT,
  .next_nodes = {
      [LB_NAT4_IN2OUT_NEXT_DROP] = "error-drop",
      [LB_NAT4_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
  },
};

VNET_FEATURE_INIT (lb_nat6_in2out_node_fn, static) = {
  .arc_name = "ip6-unicast",
  .node_name = "lb-nat6-in2out",
  .runs_before = VNET_FEATURES("ip6-lookup"),
};

VLIB_REGISTER_NODE (lb_nat6_in2out_node) = {
  .function = lb_nat6_in2out_node_fn,
  .name = "lb-nat6-in2out",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_nat_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_NAT6_IN2OUT_N_NEXT,
  .next_nodes = {
      [LB_NAT6_IN2OUT_NEXT_DROP] = "error-drop",
      [LB_NAT6_IN2OUT_NEXT_LOOKUP] = "ip6-lookup",
  },
};