path: root/test/template_bd.py
blob: ae17135198462cad9b627025240e4384d9557290 (plain)
#!/usr/bin/env python

from abc import abstractmethod, ABCMeta

from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP

from util import ip4_range


class BridgeDomain(object):
    """ Bridge domain abstraction """
    __metaclass__ = ABCMeta
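
    # NOTE: concrete tests are expected to mix this template in with the
    # framework's test-case class, which supplies the pgN packet-generator
    # interfaces, pg_start()/get_capture() and the unittest assertions used
    # below, and to define the bridge-domain attributes the tests reference
    # (single_tunnel_bd, ucast_flood_bd, mcast_flood_bd, n_ucast_tunnels).
    # A hedged usage sketch is given at the end of the file.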

    @property
    def frame_request(self):
        """ Ethernet frame modeling a generic request """
        return (Ether(src='00:00:00:00:00:01', dst='00:00:00:00:00:02') /
                IP(src='1.2.3.4', dst='4.3.2.1') /
                UDP(sport=10000, dport=20000) /
                Raw('\xa5' * 100))

    @property
    def frame_reply(self):
        """ Ethernet frame modeling a generic reply """
        return (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
                IP(src='4.3.2.1', dst='1.2.3.4') /
                UDP(sport=20000, dport=10000) /
                Raw('\xa5' * 100))

    @abstractmethod
    def encap_mcast(self, pkt, src_ip, src_mac, vni):
        """ Encapsulate mcast packet """
        pass

    @abstractmethod
    def encapsulate(self, pkt, vni):
        """ Encapsulate packet """
        pass

    @abstractmethod
    def decapsulate(self, pkt):
        """ Decapsulate packet """
        pass

    @abstractmethod
    def check_encapsulation(self, pkt, vni, local_only=False):
        """ Verify the encapsulation """
        pass

    def assert_eq_pkts(self, pkt1, pkt2):
        """ Verify the Ether, IP, UDP, payload are equal in both
        packets
        """
        self.assertEqual(pkt1[Ether].src, pkt2[Ether].src)
        self.assertEqual(pkt1[Ether].dst, pkt2[Ether].dst)
        self.assertEqual(pkt1[IP].src, pkt2[IP].src)
        self.assertEqual(pkt1[IP].dst, pkt2[IP].dst)
        self.assertEqual(pkt1[UDP].sport, pkt2[UDP].sport)
        self.assertEqual(pkt1[UDP].dport, pkt2[UDP].dport)
        self.assertEqual(pkt1[Raw], pkt2[Raw])

    def test_decap(self):
        """ Decapsulation test
        Send encapsulated frames from pg0
        Verify receipt of decapsulated frames on pg1
        """

        encapsulated_pkt = self.encapsulate(self.frame_request,
                                            self.single_tunnel_bd)

        self.pg0.add_stream([encapsulated_pkt])

        self.pg1.enable_capture()

        self.pg_start()

        # Pick first received frame and check if it's the non-encapsulated
        # frame
        out = self.pg1.get_capture(1)
        pkt = out[0]
        self.assert_eq_pkts(pkt, self.frame_request)

    def test_encap(self):
        """ Encapsulation test
        Send frames from pg1
        Verify receipt of encapsulated frames on pg0
        """
        self.pg1.add_stream([self.frame_reply])

        self.pg0.enable_capture()

        self.pg_start()

        # Pick first received frame and check if it's correctly encapsulated.
        out = self.pg0.get_capture(1)
        pkt = out[0]
        self.check_encapsulation(pkt, self.single_tunnel_bd)

        payload = self.decapsulate(pkt)
        self.assert_eq_pkts(payload, self.frame_reply)

    def test_ucast_flood(self):
        """ Unicast flood test
        Send frames from pg3
        Verify receipt of encapsulated frames on pg0
        """
        self.pg3.add_stream([self.frame_reply])

        self.pg0.enable_capture()

        self.pg_start()

        # Get packet from each tunnel and assert it's correctly encapsulated.
        out = self.pg0.get_capture(self.n_ucast_tunnels)
        for pkt in out:
            self.check_encapsulation(pkt, self.ucast_flood_bd, True)
            payload = self.decapsulate(pkt)
            self.assert_eq_pkts(payload, self.frame_reply)

    def test_mcast_flood(self):
        """ Multicast flood test
        Send frames from pg2
        Verify receipt of encapsulated frames on pg0
        """
        self.pg2.add_stream([self.frame_reply])

        self.pg0.enable_capture()

        self.pg_start()

        # Pick first received frame and check if it's correctly encapsulated.
        out = self.pg0.get_capture(1)
        pkt = out[0]
        self.check_encapsulation(pkt, self.mcast_flood_bd, True)

        payload = self.decapsulate(pkt)
        self.assert_eq_pkts(payload, self.frame_reply)

    def test_mcast_rcv(self):
        """ Multicast receive test
        Send 20 encapsulated frames from pg0; only 10 match unicast tunnels
        Verify receipt of 10 decapsulated frames on pg2
        """
        mac = self.pg0.remote_mac
        ip_range_start = 10
        ip_range_end = 30
        mcast_stream = [
            self.encap_mcast(self.frame_request, ip, mac, self.mcast_flood_bd)
            for ip in ip4_range(self.pg0.remote_ip4,
                                ip_range_start, ip_range_end)]
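        # ip4_range yields one address per host id in [10, 30), i.e. 20
        # destination IPs; by the test design only half of them are expected
        # to match configured unicast tunnels, hence the capture of 10 below.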
        self.pg0.add_stream(mcast_stream)
        self.pg2.enable_capture()
        self.pg_start()
        out = self.pg2.get_capture(10)
        for pkt in out:
            self.assert_eq_pkts(pkt, self.frame_request)
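
# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the template).  A concrete
# test derives from BridgeDomain together with the framework's test-case
# class and fills in the abstract hooks for its encapsulation type.  The
# VXLAN layout below follows RFC 7348 (UDP port 4789), but the class name,
# import paths and attribute names are assumptions, not the framework's API;
# check_encapsulation and encap_mcast are omitted for brevity.
#
# from scapy.layers.vxlan import VXLAN
# from framework import VppTestCase          # hypothetical import path
#
# class TestVxlanBD(BridgeDomain, VppTestCase):
#     """ VXLAN bridge domain test (sketch) """
#
#     def encapsulate(self, pkt, vni):
#         # Wrap the inner L2 frame in outer Ether/IP/UDP/VXLAN headers.
#         return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
#                 IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
#                 UDP(sport=4789, dport=4789, chksum=0) /
#                 VXLAN(vni=vni) /
#                 pkt)
#
#     def decapsulate(self, pkt):
#         # Strip the outer headers; the inner frame is the VXLAN payload.
#         return pkt[VXLAN].payload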