tests/vpp/perf/l2/10ge2p1x520-eth-l2xcbase-pdrchk.robot

# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/performance/performance_setup.robot
| Library | resources.libraries.python.InterfaceUtil
| Library | resources.libraries.python.NodePath
| ...
| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | PDRCHK
| ... | NIC_Intel-X520-DA2 | ETH | L2XCFWD | BASE | L2XCBASE
| ...
| Suite Setup | Set up 3-node performance topology with DUT's NIC model
| ... | L2 | Intel-X520-DA2
| Suite Teardown | Tear down 3-node performance topology
| ...
| Test Setup | Set up performance test
| Test Teardown | Tear down performance pdrchk test
| ...
| Documentation | *Reference PDR throughput L2XC verify test cases*
| ...
| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology
| ... | with single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 cross connect.
| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with L2 cross-
| ... | connect. DUT1 and DUT2 tested with 2p10GE NIC X520 Niantic by Intel.
| ... | *[Ver] TG verification:* In short performance tests, TG verifies
| ... | DUTs' throughput at ref-PDR (reference Partial Drop Rate) with
| ... | non-zero packet loss tolerance. Ref-PDR value is periodically updated
| ... | according to formula: ref-PDR = 0.9 x PDR, where PDR is found in
| ... | RFC2544 long performance tests for the same DUT configuration. Test
| ... | packets are
| ... | generated by TG on links to DUTs. TG traffic profile contains two L3
| ... | flow-groups (flow-group per direction, 254 flows per flow-group) with
| ... | all packets containing Ethernet header, IPv4 header with IP protocol=61
| ... | and static payload. MAC addresses match those of the TG node
| ... | interfaces.
| ... | *[Ref] Applicable standard specifications:* RFC2544.
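
# Illustrative ref-PDR arithmetic (the PDR figure here is an assumption for
# the worked example only, not a value from this suite): if the long RFC2544
# search for this DUT configuration found a 64B PDR of ~6.56 Mpps per
# direction, then:
#   ref-PDR = 0.9 x PDR = 0.9 x 6.56 Mpps ~= 5.9 Mpps
# which matches the 64B 1t1c rate used in tc01 below.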

*** Variables ***
# Traffic profile:
| ${traffic_profile} | trex-sl-3n-ethip4-ip4src254
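# The profile name likely decodes, per CSIT naming conventions, as: TRex
# stateless (sl), 3-node topology (3n), Eth-IPv4 packets (ethip4), 254 flows
# per flow-group generated by incrementing the IPv4 source address
# (ip4src254), one flow-group per direction, as described in the suite
# documentation above.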

*** Keywords ***
| Check PDR for L2 xconnect
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with ${wt} thread(s), ${wt} phy core(s),\
| | ... | ${rxq} receive queue(s) per NIC port.
| | ... | [Ver] Verify ref-PDR for ${framesize} Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Arguments] | ${framesize} | ${rate} | ${wt} | ${rxq}
| | ...
| | # Test Variables required for test and test teardown
| | Set Test Variable | ${framesize}
| | Set Test Variable | ${rate}
| | ${get_framesize}= | Get Frame Size | ${framesize}
| | ...
| | Given Add '${wt}' worker threads and '${rxq}' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
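| | # Note: 1522B is the max single-VLAN-tagged Ethernet frame (1518B + 4B
| | # tag); smaller frames fit a single buffer, so multi-segment (chained
| | # buffer) support is disabled below to avoid its overhead.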
| | And Run Keyword If | ${get_framesize} < ${1522}
| | ... | Add no multi seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with partial loss | ${perf_trial_duration}
| | ... | ${rate} | ${framesize} | ${traffic_profile}
| | ... | ${perf_pdr_loss_acceptance} | ${perf_pdr_loss_acceptance_type}
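| | # For reference, each [Template] data row in the test cases below expands
| | # to a call like (tc01):
| | # | Check PDR for L2 xconnect | framesize=${64} | rate=5.9mpps | wt=1 | rxq=1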

*** Test Cases ***
| tc01-64B-1t1c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Verify ref-PDR for 64 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 64B | 1T1C | STHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${64} | rate=5.9mpps | wt=1 | rxq=1
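| | # For scale: 10GE 64B line rate is 14.88 Mpps per direction, so the
| | # 5.9 Mpps ref-PDR target is roughly 40% of line rate.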

| tc02-1518B-1t1c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Verify ref-PDR for 1518 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 1518B | 1T1C | STHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${1518} | rate=812743pps | wt=1 | rxq=1

| tc03-9000B-1t1c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Verify ref-PDR for 9000 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 9000B | 1T1C | STHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${9000} | rate=138580pps | wt=1 | rxq=1

| tc04-64B-2t2c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Verify ref-PDR for 64 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 64B | 2T2C | MTHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${64} | rate=10.4mpps | wt=2 | rxq=1

| tc05-1518B-2t2c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Verify ref-PDR for 1518 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 1518B | 2T2C | MTHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${1518} | rate=812743pps | wt=2 | rxq=1

| tc06-9000B-2t2c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Verify ref-PDR for 9000 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 9000B | 2T2C | MTHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${9000} | rate=100000pps | wt=2 | rxq=1

| tc07-64B-4t4c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Verify ref-PDR for 64 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 64B | 4T4C | MTHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${64} | rate=10.4mpps | wt=4 | rxq=2

| tc08-1518B-4t4c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Verify ref-PDR for 1518 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 1518B | 4T4C | MTHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${1518} | rate=812743pps | wt=4 | rxq=2

| tc09-9000B-4t4c-eth-l2xcbase-pdrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Verify ref-PDR for 9000 Byte frames using single trial\
| | ... | throughput test at 2x ${rate}.
| | ...
| | [Tags] | 9000B | 4T4C | MTHREAD
| | ...
| | [Template] | Check PDR for L2 xconnect
| | framesize=${9000} | rate=138580pps | wt=4 | rxq=2