path: root/test/vpp_ipip_tun_interface.py
from vpp_tunnel_interface import VppTunnelInterface
from ipaddress import ip_address


class VppIpIpTunInterface(VppTunnelInterface):
    """
    VPP IP-IP Tunnel interface
    """

    def __init__(self, test, parent_if, src, dst):
        super(VppIpIpTunInterface, self).__init__(test, parent_if)
        self.src = src
        self.dst = dst

    def add_vpp_config(self):
        """Create the tunnel in VPP and register it for test cleanup."""
        r = self.test.vapi.ipip_add_tunnel(
            tunnel={
                'src': self.src,
                'dst': self.dst,
                'table_id': 0,            # default FIB table
                'instance': 0xffffffff,   # let VPP pick the instance number
            })
        self.set_sw_if_index(r.sw_if_index)
        self.test.registry.register(self, self.test.logger)

    def remove_vpp_config(self):
        """Delete the tunnel from VPP."""
        self.test.vapi.ipip_del_tunnel(sw_if_index=self._sw_if_index)

    def query_vpp_config(self):
        """Return True if the tunnel is present in VPP's tunnel dump."""
        # sw_if_index of 0xffffffff requests a dump of all IPIP tunnels
        ts = self.test.vapi.ipip_tunnel_dump(sw_if_index=0xffffffff)
        for t in ts:
            if t.tunnel.sw_if_index == self._sw_if_index:
                return True
        return False

    def __str__(self):
        return self.object_id()

    def object_id(self):
        return "ipip-%d" % self._sw_if_index