# IPFIX support {#ipfix_doc}

VPP includes a high-performance IPFIX record exporter. This note
explains how to use the internal APIs to export IPFIX data, and how to
configure and send the required IPFIX templates.

As you'll see, a bit of typing is required. 
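
Note that the ipfix exporter itself (collector address, source
address, path MTU, and so on) is configured separately, for example
via the debug CLI. The exact argument list is defined by the "set
ipfix exporter" command; a typical invocation looks roughly like this
(addresses and values are examples):

```
    set ipfix exporter collector 192.168.1.1 port 4739 src 192.168.2.2 path-mtu 1450 template-interval 20 udp-checksum
```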

## First: create an ipfix "report"

Include the flow report header file, fill out a @ref
vnet_flow_report_add_del_args_t structure, and call vnet_flow_report_add_del.

```{.c}
   #include <vnet/ipfix-export/flow_report.h>
   /* Defined in flow_report.h, of interest when constructing reports */

   /* ipfix field definitions for a particular report */
   typedef struct
   {
     u32 info_element;
     u32 size;
   } ipfix_report_element_t;

   /* Report add/del argument structure */
   typedef struct
   {
     /* Callback to flush current ipfix packet / frame */
     vnet_flow_data_callback_t *flow_data_callback;

     /* Callback to build the template packet rewrite string */
     vnet_flow_rewrite_callback_t *rewrite_callback;

     /* List of ipfix elements in the report */
     ipfix_report_element_t *report_elements;
     u32 n_report_elements;
     /* Kept in flow report, used e.g. by flow classifier */
     opaque_t opaque;
     /* Add / delete a report */
     int is_add;
     /* Ipfix "domain-ID", see RFC, set as desired */
     u32 domain_id;
     /* ipfix packet source port, often set to UDP_DST_PORT_ipfix */
     u16 src_port;
     /* Set by ipfix infra, needed to send data packets */
     u32 *stream_indexp;
   } vnet_flow_report_add_del_args_t;

   /* Private header file contents */

   /* Report ipfix element definition */
   #define foreach_simple_report_ipfix_element     \
   _(sourceIPv4Address, 4)                         \
   _(destinationIPv4Address, 4)                    \
   _(sourceTransportPort, 2)                       \
   _(destinationTransportPort, 2)                  \
   _(protocolIdentifier, 1)                        \
   _(flowStartMicroseconds, 8)                     \
   _(flowEndMicroseconds, 8)

   static ipfix_report_element_t simple_report_elements[] = {
   #define _(a,b) {a,b},
     foreach_simple_report_ipfix_element
   #undef _
   };

   typedef struct
   {
     /** Buffers and frames, per thread */
     vlib_buffer_t **buffers_by_thread;
     vlib_frame_t **frames_by_thread;
     u32 *next_record_offset_by_thread;

     /** Template ID's */
     u16 *template_ids;

     /** Time reference pair */
     u64 usec_time_0;
     f64 vlib_time_0;

     /** Stream index */
     u32 stream_index;

     /* Convenience */
     flow_report_main_t *flow_report_main;
     vlib_main_t *vlib_main;
     vnet_main_t *vnet_main;
   } my_logging_main_t;
   
   extern my_logging_main_t my_logging_main;

   ...

   /* Recitations */
   flow_report_main_t *frm = &flow_report_main;
   my_logging_main_t *mlm = &my_logging_main;
   vnet_flow_report_add_del_args_t a;
   int rv;
   u16 template_id;

   ... 

   /* Init function: set up the time reference pair */
   mlm->vlib_time_0 = vlib_time_now (vm);
   mlm->usec_time_0 = unix_time_now_nsec () / 1000;

   ...

   /* Create a report */
   memset (&a, 0, sizeof (a));
   a.is_add = 1 /* to enable the report */;
   a.domain_id = 1 /* pick a domain ID */;
   a.src_port = UDP_DST_PORT_ipfix /* src port for reports */;

   /* Use the generic template packet rewrite string generator */
   a.rewrite_callback = vnet_flow_rewrite_generic_callback;

   /* Supply a list of ipfix report elements */
   a.report_elements = simple_report_elements;
   a.n_report_elements = ARRAY_LEN (simple_report_elements);

   /* Pointer to the ipfix stream index, set by the report infra */
   a.stream_indexp = &mlm->stream_index;
   a.flow_data_callback = my_flow_data_callback;

   /* Create the report */
   rv = vnet_flow_report_add_del (frm, &a, &template_id);
   if (rv) 
     oops...

   /* Save the template-ID for later use */
   vec_add1 (mlm->template_ids, template_id);

```
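
The time reference pair captured in the init function is what lets
data-plane code turn vlib timestamps into the absolute microsecond
timestamps carried in elements such as flowStartMicroseconds and
flowEndMicroseconds. A minimal sketch of that conversion, assuming
usec_time_0 holds unix time in microseconds at init (the helper name
is illustrative, not part of the infra):

```{.c}
   /* Sketch: absolute unix time in microseconds, derived from the
      time reference pair captured at init time. */
   static inline u64
   my_time_now_usec (my_logging_main_t * mlm)
   {
     f64 delta = vlib_time_now (mlm->vlib_main) - mlm->vlib_time_0;
     return mlm->usec_time_0 + (u64) (delta * 1e6);
   }
```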

Several things are worth describing in more detail.

### vnet_flow_rewrite_generic_callback programming

This generic callback helps build ipfix template packets.  When
registering an ipfix report, pass an (array, count)
of ipfix elements as shown above. 
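
The element names and sizes come from the ipfix information element
definitions shipped with VPP. To export additional fields, extend the
element list and re-register the report; the data records painted
later must match the new layout exactly. A minimal sketch, assuming
octetDeltaCount is available among those definitions (check the ipfix
headers for the exact names):

```{.c}
   /* Sketch: the simple report extended with a per-flow byte counter */
   static ipfix_report_element_t extended_report_elements[] = {
     { sourceIPv4Address, 4 },
     { destinationIPv4Address, 4 },
     { sourceTransportPort, 2 },
     { destinationTransportPort, 2 },
     { protocolIdentifier, 1 },
     { octetDeltaCount, 8 },          /* per-flow byte count */
     { flowStartMicroseconds, 8 },
     { flowEndMicroseconds, 8 },
   };
```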

### my_flow_data_callback

The ipfix flow export infrastructure calls this callback to flush the
current ipfix packet, to make sure that ipfix data is not retained for
an unreasonably long period of time.

We typically code it as shown below: call an application-specific
buffering function with don't-care arguments and "do_flush = 1":


```{.c}
   static vlib_frame_t *
   my_flow_data_callback (flow_report_main_t * frm,
                          flow_report_t * fr,
                          vlib_frame_t * f,
                          u32 * to_next, u32 node_index)
   {
     /* Pass a don't-care record pointer; all we want is the flush */
     my_buffer_flow_record (0 /* rp */, 1 /* do_flush */);
     return f;
   }
```

### my_flow_report_header

This function creates the packet header for an ipfix data packet.

```{.c}

   static inline void
   my_flow_report_header (flow_report_main_t * frm,
			  vlib_buffer_t * b0, u32 * offset)
   {
      my_logging_main_t *mlm = &my_logging_main;
      flow_report_stream_t *stream;
      ip4_ipfix_template_packet_t *tp;
      ipfix_message_header_t *h = 0;
      ipfix_set_header_t *s = 0;
      ip4_header_t *ip;
      udp_header_t *udp;

      stream = &frm->streams[mlm->stream_index];

      b0->current_data = 0;
      b0->current_length = sizeof (*ip) + sizeof (*udp) + sizeof (*h) +
        sizeof (*s);
      b0->flags |= (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_FLOW_REPORT);
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
      tp = vlib_buffer_get_current (b0);
      ip = (ip4_header_t *) & tp->ip4;
      udp = (udp_header_t *) (ip + 1);
      h = (ipfix_message_header_t *) (udp + 1);
      s = (ipfix_set_header_t *) (h + 1);

      ip->ip_version_and_header_length = 0x45;
      ip->ttl = 254;
      ip->protocol = IP_PROTOCOL_UDP;
      ip->flags_and_fragment_offset = 0;
      ip->src_address.as_u32 = frm->src_address.as_u32;
      ip->dst_address.as_u32 = frm->ipfix_collector.as_u32;
      udp->src_port = clib_host_to_net_u16 (stream->src_port);
      udp->dst_port = clib_host_to_net_u16 (frm->collector_port);
      udp->checksum = 0;

      h->export_time = clib_host_to_net_u32
        ((u32) (((f64) frm->unix_time_0) +
                (vlib_time_now (frm->vlib_main) - frm->vlib_time_0)));
      h->sequence_number = clib_host_to_net_u32 (stream->sequence_number++);
      h->domain_id = clib_host_to_net_u32 (stream->domain_id);

      *offset = (u32) (((u8 *) (s + 1)) - (u8 *) tp);
   }
```

### Fixup and transmit a flow record

```{.c}

      static inline void
      my_send_ipfix_pkt (flow_report_main_t * frm,
                         vlib_frame_t * f, vlib_buffer_t * b0, u16 template_id)
      {
        ip4_ipfix_template_packet_t *tp;
        ipfix_message_header_t *h = 0;
        ipfix_set_header_t *s = 0;
        ip4_header_t *ip;
        udp_header_t *udp;
        vlib_main_t *vm = frm->vlib_main;

        tp = vlib_buffer_get_current (b0);
        ip = (ip4_header_t *) & tp->ip4;
        udp = (udp_header_t *) (ip + 1);
        h = (ipfix_message_header_t *) (udp + 1);
        s = (ipfix_set_header_t *) (h + 1);

        s->set_id_length = ipfix_set_id_length
          (template_id, b0->current_length -
           (sizeof (*ip) + sizeof (*udp) + sizeof (*h)));
        h->version_length = version_length
          (b0->current_length - (sizeof (*ip) + sizeof (*udp)));

        ip->length = clib_host_to_net_u16 (b0->current_length);
        ip->checksum = ip4_header_checksum (ip);
        udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));

        if (frm->udp_checksum)
          {
            udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
            if (udp->checksum == 0)
              udp->checksum = 0xffff;
          }

        ASSERT (ip->checksum == ip4_header_checksum (ip));

        vlib_put_frame_to_node (vm, ip4_lookup_node.index, f);
      }  
```

### my_buffer_flow_record

This is the key routine which paints individual flow records into an
ipfix packet under construction. It's pretty straightforward (albeit
stateful) vpp data-plane code. The code shown below is thread-safe by
construction.

```{.c}
   static inline void
   my_buffer_flow_record_internal (my_flow_record_t * rp, int do_flush,
                                       u32 thread_index)
   {
     vlib_main_t *vm = vlib_mains[thread_index];
     my_logging_main_t *mlm = &my_logging_main;
     flow_report_main_t *frm = &flow_report_main;
     vlib_frame_t *f;
     vlib_buffer_t *b0 = 0;
     u32 bi0 = ~0;
     u32 offset;

     b0 = mlm->buffers_by_thread[thread_index];

     if (PREDICT_FALSE (b0 == 0))
       {
         if (do_flush)
           return;

         if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
           {
             clib_warning ("can't allocate ipfix data buffer");
             return;
           }

         b0 = vlib_get_buffer (vm, bi0);
         VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
         offset = 0;
         mlm->buffers_by_thread[thread_index] = b0;
       }
     else
       {
         bi0 = vlib_get_buffer_index (vm, b0);
         offset = mlm->next_record_offset_by_thread[thread_index];
       }

     f = mlm->frames_by_thread[thread_index];
     if (PREDICT_FALSE (f == 0))
       {
         u32 *to_next;
         f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
         to_next = vlib_frame_vector_args (f);
         to_next[0] = bi0;
         f->n_vectors = 1;
         mlm->frames_by_thread[thread_index] = f;
       }

     if (PREDICT_FALSE (offset == 0))
       my_flow_report_header (frm, b0, &offset);

     if (PREDICT_TRUE (do_flush == 0))
       {
         /* Paint the new ipfix data record into the buffer */
         clib_memcpy (b0->data + offset, rp, sizeof (*rp));
         offset += sizeof (*rp);
         b0->current_length += sizeof (*rp);
       }

     if (PREDICT_FALSE (do_flush || (offset + sizeof (*rp)) > frm->path_mtu))
       {
         /* Nothing to send? */
         if (offset == 0)
           return;

         my_send_ipfix_pkt (frm, f, b0, mlm->template_ids[0]);
         mlm->buffers_by_thread[thread_index] = 0;
         mlm->frames_by_thread[thread_index] = 0;
         offset = 0;
       }
     mlm->next_record_offset_by_thread[thread_index] = offset;
   }  

   static void
   my_buffer_flow_record (my_flow_record_t * rp, int do_flush)
   {
     u32 thread_index = vlib_get_thread_index();
     my_buffer_flow_record_internal (rp, do_flush, thread_index);
   }  

```
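
The records copied into the buffer must match the registered template
byte-for-byte, with multi-byte fields encoded as the corresponding
ipfix elements require (network byte order). A minimal sketch of a
record structure and a data-plane call site for the simple template
above; the struct name, packing, and the f_* variables are
illustrative, not part of the infra:

```{.c}
   /* Sketch: one data record, laid out exactly like the template above */
   typedef CLIB_PACKED (struct
   {
     ip4_address_t src_address;   /* sourceIPv4Address */
     ip4_address_t dst_address;   /* destinationIPv4Address */
     u16 src_port;                /* sourceTransportPort, net order */
     u16 dst_port;                /* destinationTransportPort, net order */
     u8 protocol;                 /* protocolIdentifier */
     u64 flow_start;              /* flowStartMicroseconds, per RFC 7011 */
     u64 flow_end;                /* flowEndMicroseconds, per RFC 7011 */
   }) my_flow_record_t;

   /* Data-plane call site, e.g. when a flow expires */
   my_flow_record_t r;
   r.src_address.as_u32 = f_src;     /* ip4 addresses already in net order */
   r.dst_address.as_u32 = f_dst;
   r.src_port = f_src_port;          /* tcp/udp ports already in net order */
   r.dst_port = f_dst_port;
   r.protocol = f_proto;
   r.flow_start = f_start;
   r.flow_end = f_end;
   my_buffer_flow_record (&r, 0 /* do_flush */);
```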