aboutsummaryrefslogtreecommitdiffstats
path: root/test/test_memif.py
blob: aae5b3c1efbd36dff2ad8c0d29062ffbe423a4a9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
import socket
import unittest

from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, ICMP
import six

from framework import VppTestCase, VppTestRunner, running_extended_tests
from remote_test import RemoteClass, RemoteVppTestCase
from vpp_memif import MEMIF_MODE, MEMIF_ROLE, remove_all_memif_vpp_config, \
    VppSocketFilename, VppMemif
from vpp_ip_route import VppIpRoute, VppRoutePath


class TestMemif(VppTestCase):
    """ Memif Test Case """

    @classmethod
    def setUpClass(cls):
        """Start a remote VPP test case and bring up one local pg interface.

        The remote process is forked *before* the local client connects
        to VPP so the child does not inherit the API connection.
        """
        # fork new process before client connects to VPP
        cls.remote_test = RemoteClass(RemoteVppTestCase)
        cls.remote_test.start_remote()
        cls.remote_test.set_request_timeout(10)
        super(TestMemif, cls).setUpClass()
        cls.remote_test.setUpClass(cls.tempdir)
        cls.create_pg_interfaces(range(1))
        for pg in cls.pg_interfaces:
            pg.config_ip4()
            pg.admin_up()
            pg.resolve_arp()

    @classmethod
    def tearDownClass(cls):
        """Stop the remote VPP instance and bring down the pg interfaces."""
        cls.remote_test.tearDownClass()
        cls.remote_test.quit_remote()
        for pg in cls.pg_interfaces:
            pg.unconfig_ip4()
            pg.set_table_ip4(0)
            pg.admin_down()
        super(TestMemif, cls).tearDownClass()

    def tearDown(self):
        """Remove all memif config from both the local and the remote VPP."""
        remove_all_memif_vpp_config(self)
        remove_all_memif_vpp_config(self.remote_test)
        super(TestMemif, self).tearDown()

    def _check_socket_filename(self, dump, socket_id, filename):
        """Return True if (socket_id, filename) appears in *dump*.

        The filename reported by VPP is NUL-padded, so trailing NUL
        bytes are stripped before comparison.
        """
        return any(d.socket_id == socket_id and
                   d.socket_filename.rstrip(b"\0") == filename
                   for d in dump)

    def _memif_socket_configs(self):
        """Return the three socket-filename variants exercised by tests.

        Covers an absolute path in an existing directory, a bare
        filename resolved against the default folder (the test tempdir),
        and a path that creates a new sub-folder under the default
        folder.
        """
        tempdir = six.ensure_binary(self.tempdir, encoding='utf-8')
        return [
            # existing path
            VppSocketFilename(self, 1, b"%s/memif1.sock" % tempdir),
            # default path (test tempdir)
            VppSocketFilename(self, 2, b"memif2.sock",
                              add_default_folder=True),
            # create new folder in default folder
            VppSocketFilename(self, 3, b"sock/memif3.sock",
                              add_default_folder=True),
        ]

    def _add_remote_default_socket(self):
        """Register socket id 1 on the remote VPP, pointing at the local
        tempdir's default memif socket, and return the config object."""
        remote_socket = VppSocketFilename(
            self.remote_test, 1, b"%s/memif.sock" % six.ensure_binary(
                self.tempdir, encoding='utf-8'))
        remote_socket.add_vpp_config()
        return remote_socket

    def test_memif_socket_filename_add_del(self):
        """ Memif socket filename add/del """

        default_sock = b"%s/memif.sock" % six.ensure_binary(
            self.tempdir, encoding='utf-8')

        # dump default socket filename
        dump = self.vapi.memif_socket_filename_dump()
        self.assertTrue(
            self._check_socket_filename(dump, 0, default_sock))

        memif_sockets = self._memif_socket_configs()

        # each added socket filename must show up in its own dump
        for sock in memif_sockets:
            sock.add_vpp_config()
            dump = sock.query_vpp_config()
            self.assertTrue(
                self._check_socket_filename(
                    dump,
                    sock.socket_id,
                    sock.socket_filename))

        for sock in memif_sockets:
            sock.remove_vpp_config()

        # after removal, the default socket filename is still present
        dump = self.vapi.memif_socket_filename_dump()
        self.assertTrue(
            self._check_socket_filename(dump, 0, default_sock))

    def _create_delete_test_one_interface(self, memif):
        """Add *memif* to VPP, verify its dump, then remove and verify."""
        memif.add_vpp_config()

        dump = memif.query_vpp_config()

        self.assertTrue(dump)
        self.assertEqual(dump.sw_if_index, memif.sw_if_index)
        self.assertEqual(dump.role, memif.role)
        self.assertEqual(dump.mode, memif.mode)
        if memif.socket_id is not None:
            self.assertEqual(dump.socket_id, memif.socket_id)

        memif.remove_vpp_config()

        # after removal the interface must be gone from the dump
        dump = memif.query_vpp_config()

        self.assertFalse(dump)

    def _connect_test_one_interface(self, memif):
        """Wait for link-up and verify the dumped ring/buffer sizes.

        The slave's dump must reflect its requested sizes; a master is
        expected to report ring_size 1 and buffer_size 0 here.
        """
        self.assertTrue(memif.wait_for_link_up(5))
        dump = memif.query_vpp_config()

        if memif.role == MEMIF_ROLE.SLAVE:
            self.assertEqual(dump.ring_size, memif.ring_size)
            self.assertEqual(dump.buffer_size, memif.buffer_size)
        else:
            self.assertEqual(dump.ring_size, 1)
            self.assertEqual(dump.buffer_size, 0)

    def _connect_test_interface_pair(self, memif0, memif1):
        """Configure both interfaces, check they connect, then remove."""
        memif0.add_vpp_config()
        memif1.add_vpp_config()

        memif0.admin_up()
        memif1.admin_up()

        self._connect_test_one_interface(memif0)
        self._connect_test_one_interface(memif1)

        memif0.remove_vpp_config()
        memif1.remove_vpp_config()

    def test_memif_create_delete(self):
        """ Memif create/delete interface """

        memif = VppMemif(self, MEMIF_ROLE.SLAVE, MEMIF_MODE.ETHERNET)
        self._create_delete_test_one_interface(memif)
        # repeat with the opposite role
        memif.role = MEMIF_ROLE.MASTER
        self._create_delete_test_one_interface(memif)

    def test_memif_create_custom_socket(self):
        """ Memif create with non-default socket filename """

        memif = VppMemif(self, MEMIF_ROLE.SLAVE, MEMIF_MODE.ETHERNET)

        for sock in self._memif_socket_configs():
            sock.add_vpp_config()
            memif.socket_id = sock.socket_id
            # exercise both roles on each socket filename
            memif.role = MEMIF_ROLE.SLAVE
            self._create_delete_test_one_interface(memif)
            memif.role = MEMIF_ROLE.MASTER
            self._create_delete_test_one_interface(memif)

    def test_memif_connect(self):
        """ Memif connect """
        memif = VppMemif(self, MEMIF_ROLE.SLAVE, MEMIF_MODE.ETHERNET,
                         ring_size=1024, buffer_size=2048)

        self._add_remote_default_socket()

        remote_memif = VppMemif(self.remote_test, MEMIF_ROLE.MASTER,
                                MEMIF_MODE.ETHERNET, socket_id=1,
                                ring_size=1024, buffer_size=2048)

        # local slave <-> remote master ...
        self._connect_test_interface_pair(memif, remote_memif)

        # ... then the same pair with roles swapped
        memif.role = MEMIF_ROLE.MASTER
        remote_memif.role = MEMIF_ROLE.SLAVE

        self._connect_test_interface_pair(memif, remote_memif)

    def _create_icmp(self, pg, memif, num):
        """Build *num* ICMP echo-requests from *pg* towards *memif*.

        The ICMP id carries the memif interface id and seq numbers the
        packets 0..num-1 so replies can be matched in order.
        """
        return [
            (Ether(dst=pg.local_mac, src=pg.remote_mac) /
             IP(src=pg.remote_ip4, dst=memif.ip4_addr) /
             ICMP(id=memif.if_id, type='echo-request', seq=i))
            for i in range(num)]

    def _verify_icmp(self, pg, memif, rx, seq):
        """Check that *rx* is the echo-reply for request number *seq*."""
        ip = rx[IP]
        self.assertEqual(ip.src, memif.ip4_addr)
        self.assertEqual(ip.dst, pg.remote_ip4)
        self.assertEqual(ip.proto, 1)  # ICMP
        icmp = rx[ICMP]
        self.assertEqual(icmp.type, 0)  # echo-reply
        self.assertEqual(icmp.id, memif.if_id)
        self.assertEqual(icmp.seq, seq)

    def test_memif_ping(self):
        """ Memif ping """

        memif = VppMemif(self, MEMIF_ROLE.SLAVE, MEMIF_MODE.ETHERNET)

        self._add_remote_default_socket()

        remote_memif = VppMemif(self.remote_test, MEMIF_ROLE.MASTER,
                                MEMIF_MODE.ETHERNET, socket_id=1)

        memif.add_vpp_config()
        memif.config_ip4()
        memif.admin_up()

        remote_memif.add_vpp_config()
        remote_memif.config_ip4()
        remote_memif.admin_up()

        self.assertTrue(memif.wait_for_link_up(5))
        self.assertTrue(remote_memif.wait_for_link_up(5))

        # add routing to remote vpp so replies can reach the local pg subnet
        # NOTE(review): relies on the framework-private attribute
        # pg0._local_ip4_subnet -- no public accessor visible from here
        route = VppIpRoute(self.remote_test, self.pg0._local_ip4_subnet, 24,
                           [VppRoutePath(memif.ip4_addr, 0xffffffff)],
                           register=False)

        route.add_vpp_config()

        # create ICMP echo-request from local pg to remote memif
        packet_num = 10
        pkts = self._create_icmp(self.pg0, remote_memif, packet_num)

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(packet_num, timeout=2)
        # replies must arrive in order, matching the request sequence
        for seq, c in enumerate(capture):
            self._verify_icmp(self.pg0, remote_memif, c, seq)

        route.remove_vpp_config()


if __name__ == '__main__':
    # when executed directly, run via VPP's custom test runner
    unittest.main(testRunner=VppTestRunner)
pan> clib_host_to_net_u64(map_get_pfx(d, clib_net_to_host_u32(addr), clib_net_to_host_u16(port))); } /* * map_get_sfx */ static_always_inline u64 map_get_sfx (map_domain_t *d, u32 addr, u16 port) { u16 psid = (port >> d->psid_shift) & d->psid_mask; /* Shared 1:1 mode. */ if (d->ea_bits_len == 0 && d->rules) return clib_net_to_host_u64(d->rules[psid].as_u64[1]); if (d->ip6_prefix_len == 128) return clib_net_to_host_u64(d->ip6_prefix.as_u64[1]); /* IPv4 prefix */ if (d->flags & MAP_DOMAIN_PREFIX) return (u64) (addr & (0xFFFFFFFF << d->suffix_shift)) << 16; /* Shared or full IPv4 address */ return ((u64) addr << 16) | psid; } static_always_inline u64 map_get_sfx_net (map_domain_t *d, u32 addr, u16 port) { return clib_host_to_net_u64(map_get_sfx(d, clib_net_to_host_u32(addr), clib_net_to_host_u16(port))); } static_always_inline u32 map_get_ip4 (ip6_address_t *addr) { return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]) >> 16); } /* * Get the MAP domain from an IPv4 lookup adjacency. */ static_always_inline map_domain_t * ip4_map_get_domain (u32 mdi) { map_main_t *mm = &map_main; return pool_elt_at_index(mm->domains, mdi); } /* * Get the MAP domain from an IPv6 lookup adjacency. * If the IPv6 address or prefix is not shared, no lookup is required. * The IPv4 address is used otherwise. */ static_always_inline map_domain_t * ip6_map_get_domain (u32 mdi, ip4_address_t *addr, u32 *map_domain_index, u8 *error) { map_main_t *mm = &map_main; /* * Disable direct MAP domain lookup on decap, until the security check is updated to verify IPv4 SA. * (That's done implicitly when MAP domain is looked up in the IPv4 FIB) */ #ifdef MAP_NONSHARED_DOMAIN_ENABLED #error "How can you be sure this domain is not shared?" 
*map_domain_index = mdi; return pool_elt_at_index(mm->domains, mdi); #endif u32 lbi = ip4_fib_forwarding_lookup(0, addr); const dpo_id_t *dpo = load_balance_get_bucket(lbi, 0); if (PREDICT_TRUE(dpo->dpoi_type == map_dpo_type || dpo->dpoi_type == map_t_dpo_type)) { *map_domain_index = dpo->dpoi_index; return pool_elt_at_index(mm->domains, *map_domain_index); } *error = MAP_ERROR_NO_DOMAIN; return NULL; } map_ip4_reass_t * map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id, u8 protocol, u32 **pi_to_drop); void map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop); #define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {} #define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0) static_always_inline void map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi) { int i; for (i=0; i<MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) if(r->fragments[i] != ~0) { vec_add1(*pi, r->fragments[i]); r->fragments[i] = ~0; map_main.ip4_reass_buffered_counter--; } } int map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi); map_ip6_reass_t * map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id, u8 protocol, u32 **pi_to_drop); void map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop); #define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {} #define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0) int map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi, u16 data_offset, u16 next_data_offset, u8 *data_start, u16 data_len); void map_ip4_drop_pi(u32 pi); int map_ip4_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets); #define MAP_IP4_REASS_CONF_HT_RATIO_MAX 100 int map_ip4_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets); #define MAP_IP4_REASS_CONF_POOL_SIZE_MAX (0xfeff) int map_ip4_reass_conf_lifetime(u16 lifetime_ms); #define 
MAP_IP4_REASS_CONF_LIFETIME_MAX 0xffff int map_ip4_reass_conf_buffers(u32 buffers); #define MAP_IP4_REASS_CONF_BUFFERS_MAX (0xffffffff) void map_ip6_drop_pi(u32 pi); int map_ip6_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets); #define MAP_IP6_REASS_CONF_HT_RATIO_MAX 100 int map_ip6_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets); #define MAP_IP6_REASS_CONF_POOL_SIZE_MAX (0xfeff) int map_ip6_reass_conf_lifetime(u16 lifetime_ms); #define MAP_IP6_REASS_CONF_LIFETIME_MAX 0xffff int map_ip6_reass_conf_buffers(u32 buffers); #define MAP_IP6_REASS_CONF_BUFFERS_MAX (0xffffffff) #define u8_ptr_add(ptr, index) (((u8 *)ptr) + index) #define u16_net_add(u, val) clib_host_to_net_u16(clib_net_to_host_u16(u) + (val)) static_always_inline void ip4_map_t_embedded_address (map_domain_t *d, ip6_address_t *ip6, const ip4_address_t *ip4) { ASSERT(d->ip6_src_len == 96); //No support for other lengths for now ip6->as_u64[0] = d->ip6_src.as_u64[0]; ip6->as_u32[2] = d->ip6_src.as_u32[2]; ip6->as_u32[3] = ip4->as_u32; } static_always_inline u32 ip6_map_t_embedded_address (map_domain_t *d, ip6_address_t *addr) { ASSERT(d->ip6_src_len == 96); //No support for other lengths for now return addr->as_u32[3]; } static inline void map_domain_counter_lock (map_main_t *mm) { if (mm->counter_lock) while (__sync_lock_test_and_set(mm->counter_lock, 1)) /* zzzz */ ; } static inline void map_domain_counter_unlock (map_main_t *mm) { if (mm->counter_lock) *mm->counter_lock = 0; } static_always_inline void map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector, vlib_node_runtime_t *node, vlib_error_t *error, u32 next) { u32 n_left_from, *from, next_index, *to_next, n_left_to_next; //Deal with fragments that are ready from = pi_vector; n_left_from = vec_len(pi_vector); next_index = node->cached_next_index; while (n_left_from > 0) { vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next); while (n_left_from > 0 && n_left_to_next > 0) { u32 pi0 = 
to_next[0] = from[0]; from += 1; n_left_from -= 1; to_next += 1; n_left_to_next -= 1; vlib_buffer_t *p0 = vlib_get_buffer(vm, pi0); p0->error = *error; vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next); } vlib_put_next_frame(vm, node, next_index, n_left_to_next); } } /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */