path: root/test/test_geneve.py
#!/usr/bin/env python

import socket
from util import ip4_range
import unittest
from framework import VppTestCase, VppTestRunner
from template_bd import BridgeDomain

from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.geneve import GENEVE
from scapy.utils import atol
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_ip import INVALID_INDEX


class TestGeneve(BridgeDomain, VppTestCase):
    """ GENEVE Test Case """

    def __init__(self, *args):
        BridgeDomain.__init__(self)
        VppTestCase.__init__(self, *args)

    def encapsulate(self, pkt, vni):
        """
        Encapsulate the original payload frame with a GENEVE header and the
        outer UDP, IP and Ethernet headers
        """
        return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
                UDP(sport=self.dport, dport=self.dport, chksum=0) /
                GENEVE(vni=vni) /
                pkt)

    def ip_range(self, start, end):
        """ range of remote ip's """
        return ip4_range(self.pg0.remote_ip4, start, end)

    def encap_mcast(self, pkt, src_ip, src_mac, vni):
        """
        Encapsulate the original payload frame with a GENEVE header and the
        outer UDP, IP and Ethernet headers, addressed to the multicast group
        """
        return (Ether(src=src_mac, dst=self.mcast_mac) /
                IP(src=src_ip, dst=self.mcast_ip4) /
                UDP(sport=self.dport, dport=self.dport, chksum=0) /
                GENEVE(vni=vni) /
                pkt)

    def decapsulate(self, pkt):
        """
        Decapsulate the original payload frame by removing the GENEVE header
        """
        # check whether the I flag is set
        # self.assertEqual(pkt[GENEVE].flags, int('0x8', 16))
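        # A stricter check could also pin the header version, e.g. (a sketch,
        #  assuming the scapy GENEVE layer exposes a 'version' field):
        # self.assertEqual(pkt[GENEVE].version, 0)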
        return pkt[GENEVE].payload

    # Method for checking GENEVE encapsulation.
    #
    def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
        # TODO: add error messages
        # Verify that the source MAC is VPP_MAC and the destination MAC is
        #  MY_MAC, as resolved by VPP via ARP.
        self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
        if not local_only:
            if not mcast_pkt:
                self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
            else:
                self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
        # Verify GENEVE tunnel source IP is VPP_IP and destination IP is MY_IP.
        self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
        if not local_only:
            if not mcast_pkt:
                self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
            else:
                self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
        # Verify the UDP destination port is the GENEVE port 6081; the source
        #  UDP port may be arbitrary.
        self.assertEqual(pkt[UDP].dport, type(self).dport)
        # TODO: checksum check
        # Verify VNI
        self.assertEqual(pkt[GENEVE].vni, vni)

    @classmethod
    def create_geneve_flood_test_bd(cls, vni, n_ucast_tunnels):
        # Create n_ucast_tunnels unicast GENEVE tunnels under the BD
        ip_range_start = 10
        ip_range_end = ip_range_start + n_ucast_tunnels
        next_hop_address = cls.pg0.remote_ip4
        for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
                                  ip_range_end):
            # add host route so dest_ip4 will not be resolved
            rip = VppIpRoute(cls, dest_ip4, 32,
                             [VppRoutePath(next_hop_address,
                                           INVALID_INDEX)],
                             register=False)
            rip.add_vpp_config()
            r = cls.vapi.geneve_add_del_tunnel(
                local_address=cls.pg0.local_ip4, remote_address=dest_ip4,
                vni=vni)
            cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
                                                bd_id=vni)

    @classmethod
    def add_del_shared_mcast_dst_load(cls, is_add):
        """
        Add or delete tunnels sharing the same multicast destination
        to exercise the GENEVE ref_count mechanism
        """
        n_shared_dst_tunnels = 10
        vni_start = 10000
        vni_end = vni_start + n_shared_dst_tunnels
        for vni in range(vni_start, vni_end):
            r = cls.vapi.geneve_add_del_tunnel(
                local_address=cls.pg0.local_ip4,
                remote_address=cls.mcast_ip4, mcast_sw_if_index=1,
                is_add=is_add, vni=vni)
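            # ~0 (0xffffffff) is VPP's sentinel for an invalid sw_if_index,
            #  i.e. the add/del request failed to produce a usable interface.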
            if r.sw_if_index == 0xffffffff:
                raise ValueError("bad sw_if_index: ~0")

    @classmethod
    def add_shared_mcast_dst_load(cls):
        cls.add_del_shared_mcast_dst_load(is_add=1)

    @classmethod
    def del_shared_mcast_dst_load(cls):
        cls.add_del_shared_mcast_dst_load(is_add=0)

    @classmethod
    def add_del_mcast_tunnels_load(cls, is_add):
        """
        Add or delete multicast tunnels to test GENEVE stability
        """
        n_distinct_dst_tunnels = 10
        ip_range_start = 10
        ip_range_end = ip_range_start + n_distinct_dst_tunnels
        for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start,
                                  ip_range_end):
            vni = bytearray(socket.inet_pton(socket.AF_INET, dest_ip4))[3]
            cls.vapi.geneve_add_del_tunnel(local_address=cls.pg0.local_ip4,
                                           remote_address=dest_ip4,
                                           mcast_sw_if_index=1, is_add=is_add,
                                           vni=vni)

    @classmethod
    def add_mcast_tunnels_load(cls):
        cls.add_del_mcast_tunnels_load(is_add=1)

    @classmethod
    def del_mcast_tunnels_load(cls):
        cls.add_del_mcast_tunnels_load(is_add=0)

    # Class method to start the GENEVE test case.
    #  Overrides setUpClass method in VppTestCase class.
    #  A Python try..except statement is used to ensure that the class
    #  tear-down is executed even if an exception is raised.
    #  @param cls The class pointer.
    @classmethod
    def setUpClass(cls):
        super(TestGeneve, cls).setUpClass()

        try:
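            # 6081 is the IANA-assigned GENEVE UDP port (RFC 8926).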
            cls.dport = 6081

            # Create 4 pg interfaces.
            cls.create_pg_interfaces(range(4))
            for pg in cls.pg_interfaces:
                pg.admin_up()

            # Configure IPv4 addresses on VPP pg0.
            cls.pg0.config_ip4()

            # Resolve MAC address for VPP's IP address on pg0.
            cls.pg0.resolve_arp()

            # Our multicast address
            cls.mcast_ip4 = '239.1.1.1'
            iplong = atol(cls.mcast_ip4)
            cls.mcast_mac = "01:00:5e:%02x:%02x:%02x" % (
                (iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
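            # e.g. 239.1.1.1 -> 0xef010101; mapping the low 23 bits into the
            #  01:00:5e:00:00:00 multicast OUI yields 01:00:5e:01:01:01.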

            # Create GENEVE VTEP on VPP pg0, and put geneve_tunnel0 and pg1
            #  into BD.
            cls.single_tunnel_bd = 1
            r = cls.vapi.geneve_add_del_tunnel(
                local_address=cls.pg0.local_ip4,
                remote_address=cls.pg0.remote_ip4, vni=cls.single_tunnel_bd)
            cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
                                                bd_id=cls.single_tunnel_bd)
            cls.vapi.sw_interface_set_l2_bridge(
                rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)

            # Setup vni 2 to test multicast flooding
            cls.n_ucast_tunnels = 10
            cls.mcast_flood_bd = 2
            cls.create_geneve_flood_test_bd(cls.mcast_flood_bd,
                                            cls.n_ucast_tunnels)
            r = cls.vapi.geneve_add_del_tunnel(
                local_address=cls.pg0.local_ip4,
                remote_address=cls.mcast_ip4, mcast_sw_if_index=1,
                vni=cls.mcast_flood_bd)
            cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
                                                bd_id=cls.mcast_flood_bd)
            cls.vapi.sw_interface_set_l2_bridge(
                rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)

            # Add and delete mcast tunnels to check stability
            cls.add_shared_mcast_dst_load()
            cls.add_mcast_tunnels_load()
            cls.del_shared_mcast_dst_load()
            cls.del_mcast_tunnels_load()

            # Setup vni 3 to test unicast flooding
            cls.ucast_flood_bd = 3
            cls.create_geneve_flood_test_bd(cls.ucast_flood_bd,
                                            cls.n_ucast_tunnels)
            cls.vapi.sw_interface_set_l2_bridge(
                rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
        except Exception:
            super(TestGeneve, cls).tearDownClass()
            raise

    # Method to define VPP actions before tear down of the test case.
    #  Overrides tearDown method in VppTestCase class.
    #  @param self The object pointer.
    def tearDown(self):
        super(TestGeneve, self).tearDown()

    def show_commands_at_teardown(self):
        self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
        self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
        self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
        self.logger.info(self.vapi.cli("show geneve tunnel"))


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
class="mi">0; } static void vl_api_unmap_segment_t_handler (vl_api_unmap_segment_t * mp) { /* * XXX Need segment_name to session_id hash, * XXX - have sessionID by handle hash currently */ VDBG (1, "Unmapped segment '%s'", mp->segment_name); } static void vl_api_app_cut_through_registration_add_t_handler (vl_api_app_cut_through_registration_add_t * mp) { vcl_cut_through_registration_t *ctr; u32 mqc_index = ~0; vcl_worker_t *wrk; int *fds = 0; if (mp->n_fds) { ASSERT (mp->n_fds == 2); vec_validate (fds, mp->n_fds); vl_socket_client_recv_fd_msg (fds, mp->n_fds, 5); } wrk = vcl_worker_get (mp->wrk_index); ctr = vcl_ct_registration_lock_and_alloc (wrk); ctr->mq = uword_to_pointer (mp->evt_q_address, svm_msg_q_t *); ctr->peer_mq = uword_to_pointer (mp->peer_evt_q_address, svm_msg_q_t *); VDBG (0, "Adding ct registration %u", vcl_ct_registration_index (wrk, ctr)); if (mp->n_fds && (mp->fd_flags & SESSION_FD_F_MQ_EVENTFD)) { svm_msg_q_set_consumer_eventfd (ctr->mq, fds[0]); svm_msg_q_set_producer_eventfd (ctr->peer_mq, fds[1]); mqc_index = vcl_mq_epoll_add_evfd (wrk, ctr->mq); ctr->epoll_evt_conn_index = mqc_index; vec_free (fds); } vcl_ct_registration_lookup_add (wrk, mp->evt_q_address, vcl_ct_registration_index (wrk, ctr)); vcl_ct_registration_unlock (wrk); } static void vl_api_bind_sock_reply_t_handler (vl_api_bind_sock_reply_t * mp) { /* Expecting a similar message on mq. So ignore this */ VDBG (1, "VCL<%d>: bapi msg vpp handle 0x%llx, sid %u: bind retval: %u!", getpid (), mp->handle, mp->context, mp->retval); } static void vl_api_unbind_sock_reply_t_handler (vl_api_unbind_sock_reply_t * mp) { if (mp->retval) clib_warning ("VCL<%d>: ERROR: sid %u: unbind failed: %U", getpid (), mp->context, format_api_error, ntohl (mp->retval)); else VDBG (1, "VCL<%d>: sid %u: unbind succeeded!", getpid (), mp->context); } static void vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * mp) { if (mp->retval) clib_warning ("VCL<%d>: ERROR: sid %u: disconnect failed: %U", getpid (), mp->context, format_api_error, ntohl (mp->retval)); } static void vl_api_connect_session_reply_t_handler (vl_api_connect_sock_reply_t * mp) { if (mp->retval) clib_warning ("VCL<%d>: ERROR: sid %u: connect failed: %U", getpid (), mp->context, format_api_error, ntohl (mp->retval)); } #define foreach_sock_msg \ _(SESSION_ENABLE_DISABLE_REPLY, session_enable_disable_reply) \ _(BIND_SOCK_REPLY, bind_sock_reply) \ _(UNBIND_SOCK_REPLY, unbind_sock_reply) \ _(CONNECT_SESSION_REPLY, connect_session_reply) \ _(DISCONNECT_SESSION_REPLY, disconnect_session_reply) \ _(APPLICATION_ATTACH_REPLY, application_attach_reply) \ _(APPLICATION_DETACH_REPLY, application_detach_reply) \ _(MAP_ANOTHER_SEGMENT, map_another_segment) \ _(UNMAP_SEGMENT, unmap_segment) \ _(APP_CUT_THROUGH_REGISTRATION_ADD, app_cut_through_registration_add) \ _(APP_WORKER_ADD_DEL_REPLY, app_worker_add_del_reply) \ void vppcom_api_hookup (void) { #define _(N, n) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ vl_api_##n##_t_handler, \ vl_noop_handler, \ vl_api_##n##_t_endian, \ vl_api_##n##_t_print, \ sizeof(vl_api_##n##_t), 1); foreach_sock_msg; #undef _ } /* * VPP-API message functions */ void vppcom_send_session_enable_disable (u8 is_enable) { vl_api_session_enable_disable_t *bmp; bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_SESSION_ENABLE_DISABLE); bmp->client_index = vcm->my_client_index; bmp->context = htonl (0xfeedface); bmp->is_enable = is_enable; vl_msg_api_send_shmem (vcm->vl_input_queue, 
(u8 *) & bmp); } void vppcom_app_send_attach (void) { vl_api_application_attach_t *bmp; u8 nsid_len = vec_len (vcm->cfg.namespace_id); u8 app_is_proxy = (vcm->cfg.app_proxy_transport_tcp || vcm->cfg.app_proxy_transport_udp); bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); bmp->client_index = vcm->my_client_index; bmp->context = htonl (0xfeedface); bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_ACCEPT_REDIRECT | APP_OPTIONS_FLAGS_ADD_SEGMENT | (vcm->cfg.app_scope_local ? APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE : 0) | (vcm->cfg.app_scope_global ? APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE : 0) | (app_is_proxy ? APP_OPTIONS_FLAGS_IS_PROXY : 0) | APP_OPTIONS_FLAGS_USE_MQ_FOR_CTRL_MSGS | (vcm->cfg.use_mq_eventfd ? APP_OPTIONS_FLAGS_EVT_MQ_USE_EVENTFD : 0); bmp->options[APP_OPTIONS_PROXY_TRANSPORT] = (u64) ((vcm->cfg.app_proxy_transport_tcp ? 1 << TRANSPORT_PROTO_TCP : 0) | (vcm->cfg.app_proxy_transport_udp ? 1 << TRANSPORT_PROTO_UDP : 0)); bmp->options[APP_OPTIONS_SEGMENT_SIZE] = vcm->cfg.segment_size; bmp->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = vcm->cfg.add_segment_size; bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = vcm->cfg.rx_fifo_size; bmp->options[APP_OPTIONS_TX_FIFO_SIZE] = vcm->cfg.tx_fifo_size; bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = vcm->cfg.preallocated_fifo_pairs; bmp->options[APP_OPTIONS_EVT_QUEUE_SIZE] = vcm->cfg.event_queue_size; if (nsid_len) { bmp->namespace_id_len = nsid_len; clib_memcpy_fast (bmp->namespace_id, vcm->cfg.namespace_id, nsid_len); bmp->options[APP_OPTIONS_NAMESPACE_SECRET] = vcm->cfg.namespace_secret; } vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & bmp); } void vppcom_app_send_detach (void) { vl_api_application_detach_t *bmp; bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_DETACH); bmp->client_index = vcm->my_client_index; bmp->context = htonl (0xfeedface); vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & bmp); } void vcl_send_app_worker_add_del (u8 is_add) { vcl_worker_t *wrk = vcl_worker_get_current (); vl_api_app_worker_add_del_t *mp; u32 wrk_index = wrk->wrk_index; mp = vl_msg_api_alloc (sizeof (*mp)); memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_APP_WORKER_ADD_DEL); mp->client_index = vcm->my_client_index; mp->app_index = clib_host_to_net_u32 (vcm->app_index); mp->context = wrk_index; mp->is_add = is_add; if (!is_add) mp->wrk_index = clib_host_to_net_u32 (wrk_index); vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & mp); } void vppcom_send_connect_sock (vcl_session_t * session) { vcl_worker_t *wrk = vcl_worker_get_current (); vl_api_connect_sock_t *cmp; cmp = vl_msg_api_alloc (sizeof (*cmp)); memset (cmp, 0, sizeof (*cmp)); cmp->_vl_msg_id = ntohs (VL_API_CONNECT_SOCK); cmp->client_index = vcm->my_client_index; cmp->context = session->session_index; cmp->wrk_index = wrk->vpp_wrk_index; cmp->is_ip4 = session->transport.is_ip4; clib_memcpy_fast (cmp->ip, &session->transport.rmt_ip, sizeof (cmp->ip)); cmp->port = session->transport.rmt_port; cmp->proto = session->session_type; clib_memcpy_fast (cmp->options, session->options, sizeof (cmp->options)); vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & cmp); } void vppcom_send_disconnect_session (u64 vpp_handle) { vl_api_disconnect_session_t *dmp; dmp = vl_msg_api_alloc (sizeof (*dmp)); memset (dmp, 0, sizeof (*dmp)); dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); dmp->client_index = vcm->my_client_index; dmp->handle = vpp_handle; 
vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & dmp); } /* VPP combines bind and listen as one operation. VCL manages the separation * of bind and listen locally via vppcom_session_bind() and * vppcom_session_listen() */ void vppcom_send_bind_sock (vcl_session_t * session) { vcl_worker_t *wrk = vcl_worker_get_current (); vl_api_bind_sock_t *bmp; /* Assumes caller has acquired spinlock: vcm->sessions_lockp */ bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_BIND_SOCK); bmp->client_index = vcm->my_client_index; bmp->context = session->session_index; bmp->wrk_index = wrk->vpp_wrk_index; bmp->is_ip4 = session->transport.is_ip4; clib_memcpy_fast (bmp->ip, &session->transport.lcl_ip, sizeof (bmp->ip)); bmp->port = session->transport.lcl_port; bmp->proto = session->session_type; clib_memcpy_fast (bmp->options, session->options, sizeof (bmp->options)); vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & bmp); } void vppcom_send_unbind_sock (u64 vpp_handle) { vcl_worker_t *wrk = vcl_worker_get_current (); vl_api_unbind_sock_t *ump; ump = vl_msg_api_alloc (sizeof (*ump)); memset (ump, 0, sizeof (*ump)); ump->_vl_msg_id = ntohs (VL_API_UNBIND_SOCK); ump->client_index = vcm->my_client_index; ump->wrk_index = wrk->vpp_wrk_index; ump->handle = vpp_handle; vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & ump); } void vppcom_send_accept_session_reply (u64 handle, u32 context, int retval) { vl_api_accept_session_reply_t *rmp; rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); rmp->retval = htonl (retval); rmp->context = context; rmp->handle = handle; vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp); } u32 vcl_max_nsid_len (void) { vl_api_application_attach_t *mp; return (sizeof (mp->namespace_id) - 1); } void vppcom_init_error_string_table (void) { vcm->error_string_by_error_number = hash_create (0, sizeof (uword)); #define _(n, v, s) hash_set (vcm->error_string_by_error_number, -v, s); foreach_vnet_api_error; #undef _ hash_set (vcm->error_string_by_error_number, 99, "Misc"); } int vppcom_connect_to_vpp (char *app_name) { api_main_t *am = &api_main; vppcom_cfg_t *vcl_cfg = &vcm->cfg; if (vcl_cfg->vpp_api_socket_name) { if (vl_socket_client_connect ((char *) vcl_cfg->vpp_api_socket_name, app_name, 0 /* default rx/tx buffer */ )) { VERR ("app (%s) socket connect failed!", app_name); return VPPCOM_ECONNREFUSED; } if (vl_socket_client_init_shm (0)) { VERR ("app (%s) init shm failed!", app_name); return VPPCOM_ECONNREFUSED; } } else { if (!vcl_cfg->vpp_api_filename) vcl_cfg->vpp_api_filename = format (0, "/vpe-api%c", 0); VDBG (0, "app (%s) connecting to VPP api (%s)...", app_name, vcl_cfg->vpp_api_filename); if (vl_client_connect_to_vlib ((char *) vcl_cfg->vpp_api_filename, app_name, vcm->cfg.vpp_api_q_length) < 0) { VERR ("app (%s) connect failed!", app_name); return VPPCOM_ECONNREFUSED; } } vcm->vl_input_queue = am->shmem_hdr->vl_input_queue; vcm->my_client_index = (u32) am->my_client_index; vcm->app_state = STATE_APP_CONN_VPP; VDBG (0, "app (%s) is connected to VPP!", app_name); vcl_evt (VCL_EVT_INIT, vcm); return VPPCOM_OK; } /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */