Age | Commit message (Collapse) | Author | Files | Lines |
|
Type: refactor
This patch does the following conversions
TLS_ENGINE_X -> CRYPTO_ENGINE_X
tls_engine_type_t -> crypto_engine_t
It does not change numbering of engines
Change-Id: I872dfaec3a6713bf4229c84d1ffd98b8b2419995
Signed-off-by: Nathan Skrzypczak <nathan.skrzypczak@gmail.com>
|
|
Type: refactor
Signed-off-by: Ole Troan <ot@cisco.com>
Change-Id: Ie1f0cccf0c9dc4c0d8ae1e1b5d8e6a75c325d1ce
|
|
Add .json output to format_vnet_sw_interface_cntrs(...)
Type: feature
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: Ied036ebfaaafbf1dfc2a4e396c00f09f40659400
|
|
Enforce that variable length fields are the last element of API messages.
Add a 'fixed' version of string type, since dealing with
multiple variable length strings turned out too painful
for the C language bindings.
The string type is now:
{
string name[64]; // NUL terminated C-string. Essentially decays to u8 name[64]
string name[]; // Variable length string with embedded len field (vl_api_string_t)
};
The latter notation could be made available to other types as well.
e.g.
{
vl_api_address_t addresses[];
}
instead of
{
u32 n_addr;
vl_api_address_t addresses[n_addr];
};
Type: fix
Change-Id: I18fa17ef47227633752ab50453e8d20a652a9f9b
Signed-off-by: Ole Troan <ot@cisco.com>
|
|
Type: fix
Fixes: 21231
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: Iefdd961ba1dcfd0a8d82e5dc1205b3cd4547943d
|
|
Type: refactor
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: I015390b703ef502b8b41efa08bff45e65b5eed83
|
|
Type: refactor
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: I45745e568cd943293d0015a61f67ec799b6804d8
|
|
If the corresponding vpp plugin is absent, return a non-zero
clib_error_t * from vat_plugin_register ("xxx plugin not loaded"). The
vat plugin calls dlclose on the vat plugin, and it disappears.
Depending on the plugin configuration, this can reduce the vpp virtual
size by several gigabytes.
Added a VAT_PLUGIN(<plugin-name>) macro to vat_helper_macros, clean up
boilerplate vat_plugin_register() implementations. Fixed a number of
non-standard vat_plugin_register methods.
Type: refactor
Change-Id: Iac908e5af7d5497c78d6aa9c3c51cdae08374045
Signed-off-by: Dave Barach <dave@barachs.net>
|
|
using inlines exposes the users to the internal data types used by VPP, namely vec.h. This file does not compile with a C++ compiler.
Type: feature
Change-Id: I1544fdd9eae998309f865df61df78571bdb96903
Signed-off-by: Neale Ranns <nranns@cisco.com>
|
|
Previous use of strndup() required user to remember to call free().
Now return a vector pointing directly to the API message string.
Of course user must remember to copy the string out if lifetime
is longer than API message lifetime.
Change-Id: Ib5e2b3d52d258e1a42ea9ea9a9e04abbe360e2bf
Type: fix
Signed-off-by: Ole Troan <ot@cisco.com>
|
|
Type: fix.
Change-Id: I48587c5eba6de6de820d348177b6733d0b048013
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>
|
|
This is not my core competency.
Reviews/feedback/suggestions welcomed. ;)
Tested with:
rv = self.vapi.http_static_enable(
fifo_size = 1,
cache_size_limit = 1000000,
prealloc_fifos = 0,
private_segment_size = 0,
uri="tcp://0.0.0.0/80",
www_root = "/var/tmp/run/vpp/html"
)
DBGvpp# show http static server
www_root /var/tmp/run/vpp/html, cache size 0 bytes, limit 1000000 bytes, evictions 0
Change-Id: I0f660753317ceedab89da1b65701a24d6f7145de
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>
|
|
More to the point: fix the Python API binding for the plugins.
The http_static and ct6 plugin setup_message_id_table calls didn't
have "_"'s between message names and CRCs. I would have expected this
to originate in the emacs skeleton, but the skeleton is correct.
Punctuation saves lives: "Let's eat Grandma..." != "Let's eat,
Grandma..." More or less.
Change-Id: Icd87813308701d203cf6f147b2bb90d39d51ddeb
Signed-off-by: Dave Barach <dave@barachs.net>
|
|
- Make plugin descriptions more consistent
so the output of "show plugin" can be
used in the wiki.
Change-Id: I4c6feb11e7dcc5a4cf0848eed37f1d3b035c7dda
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
|
|
Change-Id: Ie76c69641c8598164d0d00fd498018037258fd86
Signed-off-by: Florin Coras <fcoras@cisco.com>
|
|
Both firefox and chrome seem happy to browse a hugo-generated site
Change-Id: Id216ad9c781643df42ac4fbce598eb2afa600f4d
Signed-off-by: Dave Barach <dave@barachs.net>
|
|
Remove duplicate error check
Change-Id: I64463c7f606e198fe8553efea294f0e09bd8dbc3
Signed-off-by: Dave Barach <dave@barachs.net>
|
|
Good enough to serve the vpp sphinx and doxygen docs. Knows about
html, css, and javascript files.
Change-Id: Ib18c19f07f35f91ba935ea26ed7be406dacf2205
Signed-off-by: Dave Barach <dave@barachs.net>
|
7'>547
548
549
/*
*------------------------------------------------------------------
* lisp_gpe_api.c - lisp_gpe api
*
* Copyright (c) 2016 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#include <vnet/vnet.h>
#include <vlibmemory/api.h>
#include <vnet/interface.h>
#include <vnet/api_errno.h>
#include <vnet/lisp-gpe/lisp_gpe.h>
#include <vnet/lisp-gpe/lisp_gpe_adjacency.h>
#include <vnet/lisp-gpe/lisp_gpe_tunnel.h>
#include <vnet/lisp-gpe/lisp_gpe_fwd_entry.h>
#include <vnet/lisp-gpe/lisp_gpe_tenant.h>
#include <vnet/fib/fib_table.h>
#include <vnet/vnet_msg_enum.h>
#include <vnet/ip/ip_types_api.h>
#include <vnet/ethernet/ethernet_types_api.h>
#include <vnet/lisp-gpe/lisp_types_api.h>
#define vl_api_gpe_locator_pair_t_endian vl_noop_handler
#define vl_api_gpe_locator_pair_t_print vl_noop_handler
#define vl_api_gpe_add_del_fwd_entry_t_endian vl_noop_handler
#define vl_api_gpe_add_del_fwd_entry_t_print vl_noop_handler
#define vl_typedefs /* define message structures */
#include <vnet/vnet_all_api_h.h>
#undef vl_typedefs
#define vl_endianfun /* define message structures */
#include <vnet/vnet_all_api_h.h>
#undef vl_endianfun
/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vnet/vnet_all_api_h.h>
#undef vl_printfun
#include <vlibapi/api_helper_macros.h>
#define foreach_vpe_api_msg \
_(GPE_ADD_DEL_FWD_ENTRY, gpe_add_del_fwd_entry) \
_(GPE_FWD_ENTRIES_GET, gpe_fwd_entries_get) \
_(GPE_FWD_ENTRY_PATH_DUMP, gpe_fwd_entry_path_dump) \
_(GPE_ENABLE_DISABLE, gpe_enable_disable) \
_(GPE_ADD_DEL_IFACE, gpe_add_del_iface) \
_(GPE_FWD_ENTRY_VNIS_GET, gpe_fwd_entry_vnis_get) \
_(GPE_SET_ENCAP_MODE, gpe_set_encap_mode) \
_(GPE_GET_ENCAP_MODE, gpe_get_encap_mode) \
_(GPE_ADD_DEL_NATIVE_FWD_RPATH, gpe_add_del_native_fwd_rpath) \
_(GPE_NATIVE_FWD_RPATHS_GET, gpe_native_fwd_rpaths_get)
/**
 * Decode an API locator array into a vector of locator pairs.
 *
 * 'locs' holds 2 * rloc_num vl_api_gpe_locator_t elements: the first
 * rloc_num are local locators, the remaining rloc_num are the matching
 * remote ones (pair j = locs[j] + locs[rloc_num + j]).
 *
 * Returns a freshly-allocated vector; the caller owns it and must
 * vec_free() it.
 */
static locator_pair_t *
unformat_gpe_loc_pairs (void *locs, u32 rloc_num)
{
  vl_api_gpe_locator_t *api_locs = (vl_api_gpe_locator_t *) locs;
  locator_pair_t *pairs = 0;
  u32 j;

  for (j = 0; j < rloc_num; j++)
    {
      locator_pair_t lp;

      /* local locator */
      clib_memset (&lp, 0, sizeof (lp));
      ip_address_decode2 (&api_locs[j].addr, &lp.lcl_loc);
      lp.weight = api_locs[j].weight;
      vec_add1 (pairs, lp);
    }

  for (j = 0; j < rloc_num; j++)
    {
      /* remote locator, paired with local locator j */
      ip_address_decode2 (&api_locs[rloc_num + j].addr, &pairs[j].rmt_loc);
    }

  return pairs;
}
/* Convert the path-dump request's fields from network to host byte order. */
static void
gpe_fwd_entry_path_dump_t_net_to_host
  (vl_api_gpe_fwd_entry_path_dump_t * mp)
{
  mp->fwd_entry_index = clib_net_to_host_u32 (mp->fwd_entry_index);
}
/* Fill an API locator message field from an internal address and weight. */
static void
lisp_api_set_locator (vl_api_gpe_locator_t * loc,
		      const ip_address_t * addr, u8 weight)
{
  ip_address_encode2 (addr, &loc->addr);
  loc->weight = weight;
}
/*
 * Handler for GPE_FWD_ENTRY_PATH_DUMP: stream one
 * gpe_fwd_entry_path_details message per path of the requested
 * forwarding entry back to the client.
 */
static void
vl_api_gpe_fwd_entry_path_dump_t_handler
  (vl_api_gpe_fwd_entry_path_dump_t * mp)
{
  lisp_fwd_path_t *path;
  vl_api_gpe_fwd_entry_path_details_t *rmp = NULL;
  lisp_gpe_main_t *lgm = &lisp_gpe_main;
  vl_api_registration_t *reg;
  lisp_gpe_fwd_entry_t *lfe;

  /* fwd_entry_index arrives in network byte order */
  gpe_fwd_entry_path_dump_t_net_to_host (mp);

  reg = vl_api_client_index_to_registration (mp->client_index);
  if (!reg)
    return;

  /* a dump for an unknown index simply produces no details */
  if (pool_is_free_index (lgm->lisp_fwd_entry_pool, mp->fwd_entry_index))
    return;

  lfe = pool_elt_at_index (lgm->lisp_fwd_entry_pool, mp->fwd_entry_index);

  /* negative entries have no paths to report */
  if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE == lfe->type)
    return;

  vec_foreach (path, lfe->paths)
  {
    rmp = vl_msg_api_alloc (sizeof (*rmp));
    clib_memset (rmp, 0, sizeof (*rmp));
    const lisp_gpe_tunnel_t *lgt;

    rmp->_vl_msg_id =
      clib_host_to_net_u16 (VL_API_GPE_FWD_ENTRY_PATH_DETAILS);

    /* remote locator comes from the adjacency ... */
    const lisp_gpe_adjacency_t *ladj =
      lisp_gpe_adjacency_get (path->lisp_adj);
    lisp_api_set_locator (&rmp->rmt_loc, &ladj->remote_rloc, path->weight);
    /* ... local locator from the underlying tunnel's key */
    lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
    lisp_api_set_locator (&rmp->lcl_loc, &lgt->key->lcl, path->weight);

    rmp->context = mp->context;
    vl_api_send_msg (reg, (u8 *) rmp);
  }
}
/**
 * Copy internal fwd-entry records into the API reply array.
 *
 * 'src' is a vector of internal entries; 'dst' must point at storage for
 * at least vec_len (src) API entries.  EIDs are encoded as prefixes or
 * MACs according to the entry's fid address type.
 */
static void
gpe_fwd_entries_copy (vl_api_gpe_fwd_entry_t * dst,
		      lisp_api_gpe_fwd_entry_t * src)
{
  u32 n = vec_len (src);
  u32 i;

  for (i = 0; i < n; i++)
    {
      lisp_api_gpe_fwd_entry_t *e = &src[i];
      vl_api_gpe_fwd_entry_t *d = &dst[i];

      clib_memset (d, 0, sizeof (*d));
      d->dp_table = e->dp_table;
      d->fwd_entry_index = e->fwd_entry_index;
      d->vni = e->vni;
      d->action = e->action;

      switch (fid_addr_type (&e->leid))
	{
	case FID_ADDR_IP_PREF:
	  d->leid.type = EID_TYPE_API_PREFIX;
	  d->reid.type = EID_TYPE_API_PREFIX;
	  ip_prefix_encode2 (&fid_addr_ippref (&e->leid),
			     &d->leid.address.prefix);
	  ip_prefix_encode2 (&fid_addr_ippref (&e->reid),
			     &d->reid.address.prefix);
	  break;
	case FID_ADDR_MAC:
	  d->leid.type = EID_TYPE_API_MAC;
	  d->reid.type = EID_TYPE_API_MAC;
	  mac_address_encode ((mac_address_t *) fid_addr_mac (&e->leid),
			      d->leid.address.mac);
	  mac_address_encode ((mac_address_t *) fid_addr_mac (&e->reid),
			      d->reid.address.mac);
	  break;
	default:
	  /* leave the zeroed entry in place; just log the oddity */
	  clib_warning ("unknown fid type %d!", fid_addr_type (&e->leid));
	  break;
	}
    }
}
/* Convert the entries-get request's fields from network to host order. */
static void
gpe_fwd_entries_get_t_net_to_host (vl_api_gpe_fwd_entries_get_t * mp)
{
  mp->vni = clib_net_to_host_u32 (mp->vni);
}
/* Byte-swap the u32 fields of one fwd entry for the wire. */
static void
gpe_entry_t_host_to_net (vl_api_gpe_fwd_entry_t * e)
{
  e->vni = clib_host_to_net_u32 (e->vni);
  e->dp_table = clib_host_to_net_u32 (e->dp_table);
  e->fwd_entry_index = clib_host_to_net_u32 (e->fwd_entry_index);
}
/* Byte-swap the entries-get reply in place for transmission. */
static void
gpe_fwd_entries_get_reply_t_host_to_net
  (vl_api_gpe_fwd_entries_get_reply_t * mp)
{
  u32 n = mp->count;
  u32 i;

  /* swap each entry first; 'count' must stay in host order until the
   * loop is done, since it bounds the iteration */
  for (i = 0; i < n; i++)
    gpe_entry_t_host_to_net (&mp->entries[i]);

  mp->count = clib_host_to_net_u32 (n);
}
/*
 * Handler for GPE_FWD_ENTRY_VNIS_GET: reply with the set of VNIs that
 * currently have forwarding entries.
 */
static void
vl_api_gpe_fwd_entry_vnis_get_t_handler (vl_api_gpe_fwd_entry_vnis_get_t * mp)
{
  vl_api_gpe_fwd_entry_vnis_get_reply_t *rmp = 0;
  hash_pair_t *p;
  u32 i = 0;
  int rv = 0;
  /* 'vnis' is a hash used as a set of VNI keys; we own it and must
   * free it below */
  u32 *vnis = vnet_lisp_gpe_get_fwd_entry_vnis ();
  u32 size = hash_elts (vnis) * sizeof (u32);

  /* *INDENT-OFF* */
  REPLY_MACRO4 (VL_API_GPE_FWD_ENTRY_VNIS_GET_REPLY, size,
  {
    rmp->count = clib_host_to_net_u32 (hash_elts (vnis));
    hash_foreach_pair (p, vnis,
    ({
      rmp->vnis[i++] = clib_host_to_net_u32 (p->key);
    }));
  });
  /* *INDENT-ON* */

  hash_free (vnis);
}
/*
 * Handler for GPE_FWD_ENTRIES_GET: reply with all forwarding entries
 * for the requested VNI.
 */
static void
vl_api_gpe_fwd_entries_get_t_handler (vl_api_gpe_fwd_entries_get_t * mp)
{
  lisp_api_gpe_fwd_entry_t *e;
  vl_api_gpe_fwd_entries_get_reply_t *rmp = 0;
  u32 size = 0;
  int rv = 0;

  gpe_fwd_entries_get_t_net_to_host (mp);

  /* 'e' is a vector we own; freed below */
  e = vnet_lisp_gpe_fwd_entries_get_by_vni (mp->vni);
  size = vec_len (e) * sizeof (vl_api_gpe_fwd_entry_t);

  /* *INDENT-OFF* */
  REPLY_MACRO4 (VL_API_GPE_FWD_ENTRIES_GET_REPLY, size,
  {
    rmp->count = vec_len (e);
    gpe_fwd_entries_copy (rmp->entries, e);
    gpe_fwd_entries_get_reply_t_host_to_net (rmp);
  });
  /* *INDENT-ON* */

  vec_free (e);
}
/* Byte-swap the add/del request's u32 fields from network to host order. */
static void
gpe_add_del_fwd_entry_t_net_to_host (vl_api_gpe_add_del_fwd_entry_t * mp)
{
  mp->loc_num = clib_net_to_host_u32 (mp->loc_num);
  mp->dp_table = clib_net_to_host_u32 (mp->dp_table);
  mp->vni = clib_net_to_host_u32 (mp->vni);
}
/*
 * Handler for GPE_ADD_DEL_FWD_ENTRY: add or delete a GPE forwarding
 * entry.  Locators arrive as (local, remote) pairs, so loc_num must be
 * even; loc_num == 0 denotes a negative mapping.
 *
 * Fix: the original allocated the locator-pair vector before checking
 * the EID-decode result, leaking the vector on the error path.  The rv
 * check now precedes the allocation.
 */
static void
vl_api_gpe_add_del_fwd_entry_t_handler (vl_api_gpe_add_del_fwd_entry_t * mp)
{
  vl_api_gpe_add_del_fwd_entry_reply_t *rmp;
  vnet_lisp_gpe_add_del_fwd_entry_args_t _a, *a = &_a;
  locator_pair_t *pairs = 0;
  int rv = 0;

  gpe_add_del_fwd_entry_t_net_to_host (mp);
  clib_memset (a, 0, sizeof (a[0]));

  rv = unformat_lisp_eid_api (&a->rmt_eid, mp->vni, &mp->rmt_eid);
  rv |= unformat_lisp_eid_api (&a->lcl_eid, mp->vni, &mp->lcl_eid);

  /* locators come in (local, remote) pairs, so loc_num must be even */
  if (mp->loc_num % 2 != 0)
    {
      rv = -1;
      goto send_reply;
    }

  /* bail out before allocating the pair vector if EID decode failed;
   * allocating first would leak 'pairs' on this path */
  if (rv)
    goto send_reply;

  pairs = unformat_gpe_loc_pairs (mp->locs, mp->loc_num / 2);

  a->is_add = mp->is_add;
  a->locator_pairs = pairs;
  a->dp_table = mp->dp_table;
  a->vni = mp->vni;
  a->action = mp->action;
  if (mp->loc_num == 0)
    a->is_negative = 1;

  rv = vnet_lisp_gpe_add_del_fwd_entry (a, 0);
  vec_free (pairs);

send_reply:
  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_GPE_ADD_DEL_FWD_ENTRY_REPLY,
  {
    rmp->fwd_entry_index = clib_host_to_net_u32 (a->fwd_entry_index);
  });
  /* *INDENT-ON* */
}
/*
 * Handler for GPE_ENABLE_DISABLE: turn the GPE data plane on or off.
 *
 * Fix: zero-initialize the stack args struct before use, consistent
 * with the other handlers in this file; only is_en is set explicitly,
 * so any other fields would otherwise carry indeterminate values.
 */
static void
vl_api_gpe_enable_disable_t_handler (vl_api_gpe_enable_disable_t * mp)
{
  vl_api_gpe_enable_disable_reply_t *rmp;
  int rv = 0;
  vnet_lisp_gpe_enable_disable_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (a[0]));
  a->is_en = mp->is_enable;
  vnet_lisp_gpe_enable_disable (a);

  REPLY_MACRO (VL_API_GPE_ENABLE_DISABLE_REPLY);
}
/*
 * Handler for GPE_ADD_DEL_IFACE: lock (create) or unlock (delete) the
 * L2 or L3 GPE interface for a tenant VNI / data-plane table.
 * rv is 1 when an add fails (add_or_lock returned ~0).
 */
static void
vl_api_gpe_add_del_iface_t_handler (vl_api_gpe_add_del_iface_t * mp)
{
  vl_api_gpe_add_del_iface_reply_t *rmp;
  int rv = 0;
  u32 vni = clib_net_to_host_u32 (mp->vni);
  u32 dp_table = clib_net_to_host_u32 (mp->dp_table);

  if (mp->is_add)
    {
      u32 res;

      res = mp->is_l2 ?
	lisp_gpe_tenant_l2_iface_add_or_lock (vni, dp_table) :
	lisp_gpe_tenant_l3_iface_add_or_lock (vni, dp_table, 1);
      if (~0 == res)
	rv = 1;
    }
  else
    {
      if (mp->is_l2)
	lisp_gpe_tenant_l2_iface_unlock (vni);
      else
	lisp_gpe_tenant_l3_iface_unlock (vni);
    }

  REPLY_MACRO (VL_API_GPE_ADD_DEL_IFACE_REPLY);
}
/* Handler for GPE_SET_ENCAP_MODE: select the encapsulation mode. */
static void
vl_api_gpe_set_encap_mode_t_handler (vl_api_gpe_set_encap_mode_t * mp)
{
  vl_api_gpe_set_encap_mode_reply_t *rmp;
  int rv = 0;

  rv = vnet_gpe_set_encap_mode (mp->is_vxlan);
  REPLY_MACRO (VL_API_GPE_SET_ENCAP_MODE_REPLY);
}
/* Handler for GPE_GET_ENCAP_MODE: reply with the current encap mode. */
static void
vl_api_gpe_get_encap_mode_t_handler (vl_api_gpe_get_encap_mode_t * mp)
{
  vl_api_gpe_get_encap_mode_reply_t *rmp;
  int rv = 0;

  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_GPE_GET_ENCAP_MODE_REPLY,
  ({
    rmp->encap_mode = vnet_gpe_get_encap_mode ();
  }));
  /* *INDENT-ON* */
}
/*
 * Handler for GPE_ADD_DEL_NATIVE_FWD_RPATH: add or delete a native
 * forwarding route path.  Fails with INVALID_VALUE when the requested
 * table id does not map to an existing FIB.
 */
static void
vl_api_gpe_add_del_native_fwd_rpath_t_handler
  (vl_api_gpe_add_del_native_fwd_rpath_t * mp)
{
  vl_api_gpe_add_del_native_fwd_rpath_reply_t *rmp;
  vnet_gpe_native_fwd_rpath_args_t _a, *a = &_a;
  int rv = 0;

  clib_memset (a, 0, sizeof (a[0]));

  /* nh_addr.af: non-zero selects IPv6, zero selects IPv4 */
  if (mp->nh_addr.af)
    clib_memcpy (&a->rpath.frp_addr.ip6, mp->nh_addr.un.ip6,
		 sizeof (ip6_address_t));
  else
    clib_memcpy (&a->rpath.frp_addr.ip4, mp->nh_addr.un.ip4,
		 sizeof (ip4_address_t));

  a->is_add = mp->is_add;
  a->rpath.frp_proto = mp->nh_addr.af ? DPO_PROTO_IP6 : DPO_PROTO_IP4;
  /* resolve the API table id to a FIB index in the proper protocol */
  a->rpath.frp_fib_index =
    fib_table_find (dpo_proto_to_fib (a->rpath.frp_proto),
		    clib_net_to_host_u32 (mp->table_id));
  if (~0 == a->rpath.frp_fib_index)
    {
      rv = VNET_API_ERROR_INVALID_VALUE;
      goto done;
    }

  a->rpath.frp_sw_if_index = clib_net_to_host_u32 (mp->nh_sw_if_index);
  a->rpath.frp_weight = 1;

  rv = vnet_gpe_add_del_native_fwd_rpath (a);
done:
  REPLY_MACRO (VL_API_GPE_ADD_DEL_NATIVE_FWD_RPATH_REPLY);
}
/**
 * Copy internal FIB route paths into the API reply array.
 *
 * 'src' is a vector; 'dst' must have room for vec_len (src) entries.
 * The FIB index is translated back to the user-visible table id.
 */
static void
gpe_native_fwd_rpaths_copy (vl_api_gpe_native_fwd_rpath_t * dst,
			    fib_route_path_t * src)
{
  u32 n = vec_len (src);
  u32 i;

  for (i = 0; i < n; i++)
    {
      fib_route_path_t *rp = &src[i];
      fib_table_t *fib;

      clib_memset (&dst[i], 0, sizeof (dst[i]));
      fib = fib_table_get (rp->frp_fib_index,
			   dpo_proto_to_fib (rp->frp_proto));
      dst[i].fib_index = fib->ft_table_id;
      dst[i].nh_sw_if_index = rp->frp_sw_if_index;
      ip_address_encode (&rp->frp_addr, IP46_TYPE_ANY, &dst[i].nh_addr);
    }
}
/* Byte-swap one native-fwd rpath entry's u32 fields for the wire. */
static void
gpe_native_fwd_rpath_t_host_to_net (vl_api_gpe_native_fwd_rpath_t * e)
{
  e->nh_sw_if_index = clib_host_to_net_u32 (e->nh_sw_if_index);
  e->fib_index = clib_host_to_net_u32 (e->fib_index);
}
/* Byte-swap the native-fwd-rpaths reply in place for transmission. */
static void
gpe_native_fwd_rpaths_get_reply_t_host_to_net
  (vl_api_gpe_native_fwd_rpaths_get_reply_t * mp)
{
  u32 n = mp->count;
  u32 i;

  /* swap each entry first; 'count' must stay in host order until the
   * loop is done, since it bounds the iteration */
  for (i = 0; i < n; i++)
    gpe_native_fwd_rpath_t_host_to_net (&mp->entries[i]);

  mp->count = clib_host_to_net_u32 (n);
}
/*
 * Handler for GPE_NATIVE_FWD_RPATHS_GET: reply with the configured
 * native forwarding route paths for the requested address family.
 */
static void
vl_api_gpe_native_fwd_rpaths_get_t_handler (vl_api_gpe_native_fwd_rpaths_get_t
					    * mp)
{
  lisp_gpe_main_t *lgm = vnet_lisp_gpe_get_main ();
  vl_api_gpe_native_fwd_rpaths_get_reply_t *rmp;
  u32 size = 0;
  int rv = 0;
  /* native_fwd_rpath is indexed 1 for IPv4, 0 for IPv6 */
  u8 rpath_index = mp->is_ip4 ? 1 : 0;

  size = vec_len (lgm->native_fwd_rpath[rpath_index])
    * sizeof (vl_api_gpe_native_fwd_rpath_t);

  /* *INDENT-OFF* */
  REPLY_MACRO4 (VL_API_GPE_NATIVE_FWD_RPATHS_GET_REPLY, size,
  {
    rmp->count = vec_len (lgm->native_fwd_rpath[rpath_index]);
    gpe_native_fwd_rpaths_copy (rmp->entries,
				lgm->native_fwd_rpath[rpath_index]);
    gpe_native_fwd_rpaths_get_reply_t_host_to_net (rmp);
  });
  /* *INDENT-ON* */
}
/*
* gpe_api_hookup
* Add vpe's API message handlers to the table.
* vlib has already mapped shared memory and
* added the client registration handlers.
* See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
*/
#define vl_msg_name_crc_list
#include <vnet/vnet_all_api_h.h>
#undef vl_msg_name_crc_list
/* Register each lisp-gpe message's "name_crc" string with the API
 * message name/CRC table, so clients can resolve message ids. */
static void
setup_message_id_table (api_main_t * am)
{
#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
  foreach_vl_msg_name_crc_lisp_gpe;
#undef _
}
/* Install handler/endian/print functions for every message in
 * foreach_vpe_api_msg, then populate the name/CRC table.
 * Runs at init via VLIB_API_INIT_FUNCTION below. */
static clib_error_t *
gpe_api_hookup (vlib_main_t * vm)
{
  api_main_t *am = vlibapi_get_main ();

#define _(N,n)                                                  \
    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
                           vl_api_##n##_t_handler,              \
                           vl_noop_handler,                     \
                           vl_api_##n##_t_endian,               \
                           vl_api_##n##_t_print,                \
                           sizeof(vl_api_##n##_t), 1);
  foreach_vpe_api_msg;
#undef _

  /*
   * Set up the (msg_name, crc, message-id) table
   */
  setup_message_id_table (am);

  return 0;
}

VLIB_API_INIT_FUNCTION (gpe_api_hookup);
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
|