# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/shared/default.robot
| ...
| Force Tags | 2_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDR
| ... | NIC_Intel-X710 | ETH | L2BDMACLRN | BASE | MEMIF | DOCKER | 1R2C
| ... | NF_DENSITY | CHAIN | NF_VPPIP4 | 2DCR2T
| ...
| Suite Setup | Setup suite single link | performance
| Suite Teardown | Tear down suite | performance
| Test Setup | Setup test
| Test Teardown | Tear down test | performance | container
| ...
| Test Template | Local Template
| ...
| Documentation | *RFC2544: Pkt throughput L2BD test cases with memif 1 chain
| ... | 2 docker containers*
| ...
| ... | *[Top] Network Topologies:* TG-DUT1-TG 2-node circular topology with
| ... | single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 bridge domain.
| ... | *[Cfg] DUT configuration:* DUT1 is configured with two L2 bridge domains
| ... | and MAC learning enabled. DUT1 is tested with ${nic_name}.\
| ... | Containers are connected to VPP via Memif interfaces and run the same
| ... | VPP version as the DUT. Each container is limited via cgroup to use
| ... | cores allocated from the pool of isolated CPUs. There are no memory
| ... | constraints.
| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop\
| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\
| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\
| ... | of packets transmitted. NDR and PDR are discovered for different\
| ... | Ethernet L2 frame sizes using MLRsearch library.\
| ... | Test packets are generated by TG on links to DUTs. TG traffic profile
| ... | contains two L3 flow-groups (flow-group per direction, 254 flows per
| ... | flow-group) with all packets containing Ethernet header, IPv4 header
| ... | with IP protocol=61 and static payload. MAC addresses match the MAC
| ... | addresses of the TG node interfaces.
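# A rough sketch of the NF chain under test (our annotation only; the exact
# memif wiring is created by the initialization keywords below):
#
#   TG <-> DUT1 [BD1] <-memif-> DCR1 (VPP IPv4) <-memif->
#          DCR2 (VPP IPv4) <-memif-> [BD2] DUT1 <-> TG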

*** Variables ***
| @{plugins_to_enable}= | dpdk_plugin.so | memif_plugin.so
| ${osi_layer}= | L3
| ${nic_name}= | Intel-X710
| ${overhead}= | ${0}
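# NF dataplane resources (assumed semantics, following CSIT conventions:
# nf_dtc = dataplane thread count per NF, nf_dtcr = dataplane
# thread-to-core ratio used when computing the per-container CPU allocation):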
| ${nf_dtcr}= | ${1}
| ${nf_dtc}= | ${1}
# Traffic profile:
| ${traffic_profile}= | trex-sl-2n3n-ethip4-ip4src254-1c2n
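# (Our reading of the profile name: T-Rex stateless, usable on 2-node and
# 3-node topologies, Eth-IPv4, 254 IPv4 source addresses per flow-group,
# 1 chain with 2 NF nodes. This decoding is an annotation, not part of the
# profile definition.)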
# Container
| ${container_engine}= | Docker
| ${container_chain_topology}= | chain_ip4
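# chain_ip4 (as we read it): each container's VPP routes IPv4 between its
# two memif interfaces, matching the NF_VPPIP4 and CHAIN tags above.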

*** Keywords ***
| Local Template
| | [Documentation]
| | ... | [Cfg] DUT runs L2BD switching config.
| | ... | Each DUT uses ${phy_cores} physical core(s) for worker threads.
| | ... | [Ver] Measure NDR and PDR values using MLRsearch algorithm.\
| | ...
| | ... | *Arguments:*
| | ... | - frame_size - Frame size in bytes (integer) or string (IMIX_v4_1).
| | ... | Type: integer, string
| | ... | - phy_cores - Number of physical cores. Type: integer
| | ... | - rxq - Number of RX queues, default value: ${None}. Type: integer
| | ...
| | [Arguments] | ${frame_size} | ${phy_cores} | ${rxq}=${None}
| | ...
| | Set Test Variable | \${frame_size}
| | ...
| | Given Add worker threads and rxqueues to all DUTs | ${phy_cores} | ${rxq}
| | And Add PCI devices to all DUTs
| | And Set Max Rate And Jumbo And Handle Multi Seg
| | And Apply startup configuration on all VPP DUTs
| | And Start containers for performance test
| | ... | nf_chains=${1} | nf_nodes=${2} | auto_scale=${False}
| | And Initialize L2 Bridge Domain for multiple chains with memif pairs
| | ... | nf_chains=${1} | nf_nodes=${2} | auto_scale=${False}
| | Then Find NDR and PDR intervals using optimized search
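
# Usage note (our annotation): each test case below instantiates
# Local Template with a frame size and a worker-core count, e.g.
# frame_size=${64} | phy_cores=${1} maps to the 64B/1C case. A single
# case can be selected by name, e.g. (hypothetical invocation; real CSIT
# runs go through the bootstrap scripts and require a topology file):
#
#   robot --test tc01-64B-1c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr \
#       tests/vpp/perf/nfv_density/dcr_memif/chain/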

*** Test Cases ***
| tc01-64B-1c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 64B | 1C
| | frame_size=${64} | phy_cores=${1}

| tc02-64B-2c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 64B | 2C
| | frame_size=${64} | phy_cores=${2}

| tc03-64B-4c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 64B | 4C
| | frame_size=${64} | phy_cores=${4}

| tc04-1518B-1c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 1518B | 1C
| | frame_size=${1518} | phy_cores=${1}

| tc05-1518B-2c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 1518B | 2C
| | frame_size=${1518} | phy_cores=${2}

| tc06-1518B-4c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 1518B | 4C
| | frame_size=${1518} | phy_cores=${4}

| tc07-9000B-1c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 9000B | 1C
| | frame_size=${9000} | phy_cores=${1}

| tc08-9000B-2c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 9000B | 2C
| | frame_size=${9000} | phy_cores=${2}

| tc09-9000B-4c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | 9000B | 4C
| | frame_size=${9000} | phy_cores=${4}

| tc10-IMIX-1c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | IMIX | 1C
| | frame_size=IMIX_v4_1 | phy_cores=${1}

| tc11-IMIX-2c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | IMIX | 2C
| | frame_size=IMIX_v4_1 | phy_cores=${2}

| tc12-IMIX-4c-eth-l2bd-1ch-4mif-2dcr2t-vppip4-ndrpdr
| | [Tags] | IMIX | 4C
| | frame_size=IMIX_v4_1 | phy_cores=${4}