summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--src/cmake/library.cmake30
-rw-r--r--src/tools/vppapigen/vppapigen_c.py6
-rw-r--r--src/vat/CMakeLists.txt1
-rw-r--r--src/vat/api_format.c559
-rw-r--r--src/vlibapi/api_helper_macros.h10
-rw-r--r--src/vlibmemory/CMakeLists.txt8
-rw-r--r--src/vlibmemory/memclnt_api.c719
-rw-r--r--src/vlibmemory/vlib.api250
-rw-r--r--src/vlibmemory/vlib_api.c828
-rw-r--r--src/vlibmemory/vlibapi_test.c470
-rw-r--r--src/vnet/CMakeLists.txt6
-rw-r--r--src/vnet/arp/arp_test.c2
-rw-r--r--src/vnet/ip/ip_test.c2
-rw-r--r--src/vnet/ip6-nd/ip6_nd_test.c6
-rw-r--r--src/vpp/CMakeLists.txt2
-rw-r--r--src/vpp/api/api.c367
-rw-r--r--src/vpp/api/vpe.api243
17 files changed, 1744 insertions, 1765 deletions
diff --git a/src/cmake/library.cmake b/src/cmake/library.cmake
index d19eb3c80cc..c80d7f226d6 100644
--- a/src/cmake/library.cmake
+++ b/src/cmake/library.cmake
@@ -108,6 +108,36 @@ function (add_vpp_headers path)
endforeach()
endfunction()
+macro(add_vat_test_library lib)
+ cmake_parse_arguments(TEST
+ ""
+ ""
+ ${ARGN}
+ )
+
+ foreach(file ${ARGN})
+ get_filename_component(name ${file} NAME_WE)
+ set(test_lib ${lib}_${name}_plugin)
+ add_library(${test_lib} SHARED ${file})
+ target_compile_options(${test_lib} PUBLIC ${VPP_DEFAULT_MARCH_FLAGS})
+ if(NOT VPP_EXTERNAL_PROJECT)
+ add_dependencies(${test_lib} api_headers)
+ endif()
+ include_directories(${CMAKE_CURRENT_BINARY_DIR})
+ set_target_properties(${test_lib} PROPERTIES NO_SONAME 1)
+ set_target_properties(${test_lib} PROPERTIES
+ PREFIX ""
+ LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/vpp_api_test_plugins)
+
+ # install .so
+ install(
+ TARGETS ${test_lib}
+ DESTINATION ${VPP_LIBRARY_DIR}/vpp_api_test_plugins
+ COMPONENT ${ARG_COMPONENT}
+ )
+ endforeach()
+endmacro()
+
macro(add_vpp_test_library lib)
cmake_parse_arguments(TEST
""
diff --git a/src/tools/vppapigen/vppapigen_c.py b/src/tools/vppapigen/vppapigen_c.py
index 4227cae1ece..2d526c151b7 100644
--- a/src/tools/vppapigen/vppapigen_c.py
+++ b/src/tools/vppapigen/vppapigen_c.py
@@ -1478,11 +1478,7 @@ def generate_c_test_boilerplate(services, defines, file_crc, module, plugin,
.format(n=e, ID=e.upper()))
write('}\n')
- if plugin:
- write('clib_error_t * vat_plugin_register (vat_main_t *vam)\n')
- else:
- write('clib_error_t * vat_{}_plugin_register (vat_main_t *vam)\n'
- .format(module))
+ write('clib_error_t * vat_plugin_register (vat_main_t *vam)\n')
write('{\n')
write(' {n}_test_main_t * mainp = &{n}_test_main;\n'.format(n=module))
write(' mainp->vat_main = vam;\n')
diff --git a/src/vat/CMakeLists.txt b/src/vat/CMakeLists.txt
index 66ea69b8da4..e4f89458e12 100644
--- a/src/vat/CMakeLists.txt
+++ b/src/vat/CMakeLists.txt
@@ -32,7 +32,6 @@ add_vpp_executable(vpp_api_test ENABLE_EXPORTS
ip_types_api.c
ip_types.c
protocols.def
- ../vnet/arp/arp_test.c
DEPENDS api_headers
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index bb168f8459e..8f23d773d99 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -579,185 +579,6 @@ ip_set (ip46_address_t * dst, void *src, u8 is_ip4)
}
-static void
-vl_api_cli_reply_t_handler (vl_api_cli_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 retval = ntohl (mp->retval);
-
- vam->retval = retval;
- vam->shmem_result = uword_to_pointer (mp->reply_in_shmem, u8 *);
- vam->result_ready = 1;
-}
-
-static void
-vl_api_cli_reply_t_handler_json (vl_api_cli_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vat_json_node_t node;
- void *oldheap;
- u8 *reply;
-
- vat_json_init_object (&node);
- vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
- vat_json_object_add_uint (&node, "reply_in_shmem",
- ntohl (mp->reply_in_shmem));
- /* Toss the shared-memory original... */
- oldheap = vl_msg_push_heap ();
-
- reply = uword_to_pointer (mp->reply_in_shmem, u8 *);
- vec_free (reply);
-
- vl_msg_pop_heap (oldheap);
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
-
- vam->retval = ntohl (mp->retval);
- vam->result_ready = 1;
-}
-
-static void
-vl_api_cli_inband_reply_t_handler (vl_api_cli_inband_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 retval = ntohl (mp->retval);
-
- vec_reset_length (vam->cmd_reply);
-
- vam->retval = retval;
- if (retval == 0)
- vam->cmd_reply = vl_api_from_api_to_new_vec (mp, &mp->reply);
- vam->result_ready = 1;
-}
-
-static void
-vl_api_cli_inband_reply_t_handler_json (vl_api_cli_inband_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vat_json_node_t node;
- u8 *reply = 0; /* reply vector */
-
- reply = vl_api_from_api_to_new_vec (mp, &mp->reply);
- vec_reset_length (vam->cmd_reply);
-
- vat_json_init_object (&node);
- vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
- vat_json_object_add_string_copy (&node, "reply", reply);
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
- vec_free (reply);
-
- vam->retval = ntohl (mp->retval);
- vam->result_ready = 1;
-}
-
-static void vl_api_get_node_index_reply_t_handler
- (vl_api_get_node_index_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 retval = ntohl (mp->retval);
- if (vam->async_mode)
- {
- vam->async_errors += (retval < 0);
- }
- else
- {
- vam->retval = retval;
- if (retval == 0)
- errmsg ("node index %d", ntohl (mp->node_index));
- vam->result_ready = 1;
- }
-}
-
-static void vl_api_get_node_index_reply_t_handler_json
- (vl_api_get_node_index_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vat_json_node_t node;
-
- vat_json_init_object (&node);
- vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
- vat_json_object_add_uint (&node, "node_index", ntohl (mp->node_index));
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
-
- vam->retval = ntohl (mp->retval);
- vam->result_ready = 1;
-}
-
-static void vl_api_get_next_index_reply_t_handler
- (vl_api_get_next_index_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 retval = ntohl (mp->retval);
- if (vam->async_mode)
- {
- vam->async_errors += (retval < 0);
- }
- else
- {
- vam->retval = retval;
- if (retval == 0)
- errmsg ("next node index %d", ntohl (mp->next_index));
- vam->result_ready = 1;
- }
-}
-
-static void vl_api_get_next_index_reply_t_handler_json
- (vl_api_get_next_index_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vat_json_node_t node;
-
- vat_json_init_object (&node);
- vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
- vat_json_object_add_uint (&node, "next_index", ntohl (mp->next_index));
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
-
- vam->retval = ntohl (mp->retval);
- vam->result_ready = 1;
-}
-
-static void vl_api_add_node_next_reply_t_handler
- (vl_api_add_node_next_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 retval = ntohl (mp->retval);
- if (vam->async_mode)
- {
- vam->async_errors += (retval < 0);
- }
- else
- {
- vam->retval = retval;
- if (retval == 0)
- errmsg ("next index %d", ntohl (mp->next_index));
- vam->result_ready = 1;
- }
-}
-
-static void vl_api_add_node_next_reply_t_handler_json
- (vl_api_add_node_next_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vat_json_node_t node;
-
- vat_json_init_object (&node);
- vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
- vat_json_object_add_uint (&node, "next_index", ntohl (mp->next_index));
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
-
- vam->retval = ntohl (mp->retval);
- vam->result_ready = 1;
-}
-
static void vl_api_show_version_reply_t_handler
(vl_api_show_version_reply_t * mp)
{
@@ -796,80 +617,6 @@ static void vl_api_show_version_reply_t_handler_json
vam->result_ready = 1;
}
-static void vl_api_show_threads_reply_t_handler
- (vl_api_show_threads_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 retval = ntohl (mp->retval);
- int i, count = 0;
-
- if (retval >= 0)
- count = ntohl (mp->count);
-
- for (i = 0; i < count; i++)
- print (vam->ofp,
- "\n%-2d %-11s %-11s %-5d %-6d %-4d %-6d",
- ntohl (mp->thread_data[i].id), mp->thread_data[i].name,
- mp->thread_data[i].type, ntohl (mp->thread_data[i].pid),
- ntohl (mp->thread_data[i].cpu_id), ntohl (mp->thread_data[i].core),
- ntohl (mp->thread_data[i].cpu_socket));
-
- vam->retval = retval;
- vam->result_ready = 1;
-}
-
-static void vl_api_show_threads_reply_t_handler_json
- (vl_api_show_threads_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vat_json_node_t node;
- vl_api_thread_data_t *td;
- i32 retval = ntohl (mp->retval);
- int i, count = 0;
-
- if (retval >= 0)
- count = ntohl (mp->count);
-
- vat_json_init_object (&node);
- vat_json_object_add_int (&node, "retval", retval);
- vat_json_object_add_uint (&node, "count", count);
-
- for (i = 0; i < count; i++)
- {
- td = &mp->thread_data[i];
- vat_json_object_add_uint (&node, "id", ntohl (td->id));
- vat_json_object_add_string_copy (&node, "name", td->name);
- vat_json_object_add_string_copy (&node, "type", td->type);
- vat_json_object_add_uint (&node, "pid", ntohl (td->pid));
- vat_json_object_add_int (&node, "cpu_id", ntohl (td->cpu_id));
- vat_json_object_add_int (&node, "core", ntohl (td->id));
- vat_json_object_add_int (&node, "cpu_socket", ntohl (td->cpu_socket));
- }
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
-
- vam->retval = retval;
- vam->result_ready = 1;
-}
-
-static int
-api_show_threads (vat_main_t * vam)
-{
- vl_api_show_threads_t *mp;
- int ret;
-
- print (vam->ofp,
- "\n%-2s %-11s %-11s %-5s %-6s %-4s %-6s",
- "ID", "Name", "Type", "LWP", "cpu_id", "Core", "Socket");
-
- M (SHOW_THREADS, mp);
-
- S (mp);
- W (ret);
- return ret;
-}
-
#define vl_api_bridge_domain_details_t_endian vl_noop_handler
#define vl_api_bridge_domain_details_t_print vl_noop_handler
@@ -955,95 +702,6 @@ static void vl_api_get_first_msg_id_reply_t_handler_json
vam->result_ready = 1;
}
-static void vl_api_get_node_graph_reply_t_handler
- (vl_api_get_node_graph_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 retval = ntohl (mp->retval);
- u8 *pvt_copy, *reply;
- void *oldheap;
- vlib_node_t *node;
- int i;
-
- if (vam->async_mode)
- {
- vam->async_errors += (retval < 0);
- }
- else
- {
- vam->retval = retval;
- vam->result_ready = 1;
- }
-
- /* "Should never happen..." */
- if (retval != 0)
- return;
-
- reply = uword_to_pointer (mp->reply_in_shmem, u8 *);
- pvt_copy = vec_dup (reply);
-
- /* Toss the shared-memory original... */
- oldheap = vl_msg_push_heap ();
-
- vec_free (reply);
-
- vl_msg_pop_heap (oldheap);
-
- if (vam->graph_nodes)
- {
- hash_free (vam->graph_node_index_by_name);
-
- for (i = 0; i < vec_len (vam->graph_nodes[0]); i++)
- {
- node = vam->graph_nodes[0][i];
- vec_free (node->name);
- vec_free (node->next_nodes);
- vec_free (node);
- }
- vec_free (vam->graph_nodes[0]);
- vec_free (vam->graph_nodes);
- }
-
- vam->graph_node_index_by_name = hash_create_string (0, sizeof (uword));
- vam->graph_nodes = vlib_node_unserialize (pvt_copy);
- vec_free (pvt_copy);
-
- for (i = 0; i < vec_len (vam->graph_nodes[0]); i++)
- {
- node = vam->graph_nodes[0][i];
- hash_set_mem (vam->graph_node_index_by_name, node->name, i);
- }
-}
-
-static void vl_api_get_node_graph_reply_t_handler_json
- (vl_api_get_node_graph_reply_t * mp)
-{
- vat_main_t *vam = &vat_main;
- void *oldheap;
- vat_json_node_t node;
- u8 *reply;
-
- /* $$$$ make this real? */
- vat_json_init_object (&node);
- vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
- vat_json_object_add_uint (&node, "reply_in_shmem", mp->reply_in_shmem);
-
- reply = uword_to_pointer (mp->reply_in_shmem, u8 *);
-
- /* Toss the shared-memory original... */
- oldheap = vl_msg_push_heap ();
-
- vec_free (reply);
-
- vl_msg_pop_heap (oldheap);
-
- vat_json_print (vam->ofp, &node);
- vat_json_free (&node);
-
- vam->retval = ntohl (mp->retval);
- vam->result_ready = 1;
-}
-
/* Format hex dump. */
u8 *
format_hex_bytes (u8 * s, va_list * va)
@@ -1120,15 +778,8 @@ foreach_standard_reply_retval_handler;
#define foreach_vpe_api_reply_msg \
_ (GET_FIRST_MSG_ID_REPLY, get_first_msg_id_reply) \
- _ (GET_NODE_GRAPH_REPLY, get_node_graph_reply) \
_ (CONTROL_PING_REPLY, control_ping_reply) \
- _ (CLI_REPLY, cli_reply) \
- _ (CLI_INBAND_REPLY, cli_inband_reply) \
- _ (GET_NODE_INDEX_REPLY, get_node_index_reply) \
- _ (GET_NEXT_INDEX_REPLY, get_next_index_reply) \
- _ (ADD_NODE_NEXT_REPLY, add_node_next_reply) \
_ (SHOW_VERSION_REPLY, show_version_reply) \
- _ (SHOW_THREADS_REPLY, show_threads_reply) \
#define foreach_standalone_reply_msg \
@@ -1142,53 +793,6 @@ typedef struct
case L2_VTR_ ## op: \
return "" # op;
-/*
- * Pass CLI buffers directly in the CLI_INBAND API message,
- * instead of an additional shared memory area.
- */
-static int
-exec_inband (vat_main_t * vam)
-{
- vl_api_cli_inband_t *mp;
- unformat_input_t *i = vam->input;
- int ret;
-
- if (vec_len (i->buffer) == 0)
- return -1;
-
- if (vam->exec_mode == 0 && unformat (i, "mode"))
- {
- vam->exec_mode = 1;
- return 0;
- }
- if (vam->exec_mode == 1 && (unformat (i, "exit") || unformat (i, "quit")))
- {
- vam->exec_mode = 0;
- return 0;
- }
-
- /*
- * In order for the CLI command to work, it
- * must be a vector ending in \n, not a C-string ending
- * in \n\0.
- */
- M2 (CLI_INBAND, mp, vec_len (vam->input->buffer));
- vl_api_vec_to_api_string (vam->input->buffer, &mp->cmd);
-
- S (mp);
- W (ret);
- /* json responses may or may not include a useful reply... */
- if (vec_len (vam->cmd_reply))
- print (vam->ofp, "%v", (char *) (vam->cmd_reply));
- return ret;
-}
-
-int
-exec (vat_main_t *vam)
-{
- return exec_inband (vam);
-}
-
int
api_sw_interface_dump (vat_main_t *vam)
{
@@ -2442,140 +2046,6 @@ api_unformat_classify_match (unformat_input_t * input, va_list * args)
return 0;
}
-static int
-api_get_node_index (vat_main_t *vam)
-{
- unformat_input_t *i = vam->input;
- vl_api_get_node_index_t *mp;
- u8 *name = 0;
- int ret;
-
- while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (i, "node %s", &name))
- ;
- else
- break;
- }
- if (name == 0)
- {
- errmsg ("node name required");
- return -99;
- }
- if (vec_len (name) >= ARRAY_LEN (mp->node_name))
- {
- errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name));
- return -99;
- }
-
- M (GET_NODE_INDEX, mp);
- clib_memcpy (mp->node_name, name, vec_len (name));
- vec_free (name);
-
- S (mp);
- W (ret);
- return ret;
-}
-
-static int
-api_get_next_index (vat_main_t *vam)
-{
- unformat_input_t *i = vam->input;
- vl_api_get_next_index_t *mp;
- u8 *node_name = 0, *next_node_name = 0;
- int ret;
-
- while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (i, "node-name %s", &node_name))
- ;
- else if (unformat (i, "next-node-name %s", &next_node_name))
- break;
- }
-
- if (node_name == 0)
- {
- errmsg ("node name required");
- return -99;
- }
- if (vec_len (node_name) >= ARRAY_LEN (mp->node_name))
- {
- errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name));
- return -99;
- }
-
- if (next_node_name == 0)
- {
- errmsg ("next node name required");
- return -99;
- }
- if (vec_len (next_node_name) >= ARRAY_LEN (mp->next_name))
- {
- errmsg ("next node name too long, max %d", ARRAY_LEN (mp->next_name));
- return -99;
- }
-
- M (GET_NEXT_INDEX, mp);
- clib_memcpy (mp->node_name, node_name, vec_len (node_name));
- clib_memcpy (mp->next_name, next_node_name, vec_len (next_node_name));
- vec_free (node_name);
- vec_free (next_node_name);
-
- S (mp);
- W (ret);
- return ret;
-}
-
-static int
-api_add_node_next (vat_main_t *vam)
-{
- unformat_input_t *i = vam->input;
- vl_api_add_node_next_t *mp;
- u8 *name = 0;
- u8 *next = 0;
- int ret;
-
- while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (i, "node %s", &name))
- ;
- else if (unformat (i, "next %s", &next))
- ;
- else
- break;
- }
- if (name == 0)
- {
- errmsg ("node name required");
- return -99;
- }
- if (vec_len (name) >= ARRAY_LEN (mp->node_name))
- {
- errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name));
- return -99;
- }
- if (next == 0)
- {
- errmsg ("next node required");
- return -99;
- }
- if (vec_len (next) >= ARRAY_LEN (mp->next_name))
- {
- errmsg ("next name too long, max %d", ARRAY_LEN (mp->next_name));
- return -99;
- }
-
- M (ADD_NODE_NEXT, mp);
- clib_memcpy (mp->node_name, name, vec_len (name));
- clib_memcpy (mp->next_name, next, vec_len (next));
- vec_free (name);
- vec_free (next);
-
- S (mp);
- W (ret);
- return ret;
-}
-
#define foreach_vtr_op \
_ ("disable", L2_VTR_DISABLED) \
_ ("push-1", L2_VTR_PUSH_1) \
@@ -2637,21 +2107,6 @@ api_get_first_msg_id (vat_main_t *vam)
return ret;
}
-static int
-api_get_node_graph (vat_main_t *vam)
-{
- vl_api_get_node_graph_t *mp;
- int ret;
-
- M (GET_NODE_GRAPH, mp);
-
- /* send it... */
- S (mp);
- /* Wait for the reply */
- W (ret);
- return ret;
-}
-
#define foreach_pbb_vtr_op \
_("disable", L2_VTR_DISABLED) \
_("pop", L2_VTR_POP_2) \
@@ -3218,15 +2673,17 @@ echo (vat_main_t * vam)
return 0;
}
+int exec (vat_main_t *vam) __attribute__ ((weak));
+int
+exec (vat_main_t *vam)
+{
+ return -1;
+}
+
/* List of API message constructors, CLI names map to api_xxx */
#define foreach_vpe_api_msg \
-_(get_node_index, "node <node-name") \
-_(add_node_next, "node <node-name> next <next-node-name>") \
_(show_version, "") \
-_(show_threads, "") \
_(get_first_msg_id, "client <name>") \
-_(get_node_graph, " ") \
-_(get_next_index, "node-name <node-name> next-node-name <node-name>") \
_(sock_init_shm, "size <nnn>") \
/* List of command functions, CLI names map directly to functions */
#define foreach_cli_function \
@@ -3239,8 +2696,6 @@ _(elog_enable, "usage: elog_enable") \
_(elog_save, "usage: elog_save <filename>") \
_(get_msg_id, "usage: get_msg_id name_and_crc") \
_(echo, "usage: echo <message>") \
-_(exec, "usage: exec <vpe-debug-CLI-command>") \
-_(exec_inband, "usage: exec_inband <vpe-debug-CLI-command>") \
_(help, "usage: help") \
_(q, "usage: quit") \
_(quit, "usage: quit") \
diff --git a/src/vlibapi/api_helper_macros.h b/src/vlibapi/api_helper_macros.h
index d49282e9e65..2e0a62229f3 100644
--- a/src/vlibapi/api_helper_macros.h
+++ b/src/vlibapi/api_helper_macros.h
@@ -271,12 +271,8 @@ do { \
/* "trust, but verify" */
-
-static inline uword
-vnet_sw_if_index_is_api_valid (u32 sw_if_index)
-{
- return vnet_sw_interface_is_api_valid (vnet_get_main (), sw_if_index);
-}
+#define vnet_sw_if_index_is_api_valid(sw_if_index) \
+ vnet_sw_interface_is_api_valid (vnet_get_main (), sw_if_index)
#define VALIDATE_SW_IF_INDEX(mp) \
do { u32 __sw_if_index = ntohl((mp)->sw_if_index); \
@@ -423,7 +419,7 @@ typedef struct
/* convenience */
vlib_main_t *vlib_main;
- vnet_main_t *vnet_main;
+ struct vnet_main_t *vnet_main;
} vpe_api_main_t;
extern vpe_api_main_t vpe_api_main;
diff --git a/src/vlibmemory/CMakeLists.txt b/src/vlibmemory/CMakeLists.txt
index b48ff7b5766..456cba9baeb 100644
--- a/src/vlibmemory/CMakeLists.txt
+++ b/src/vlibmemory/CMakeLists.txt
@@ -18,8 +18,9 @@ add_vpp_library (vlibmemory
memory_client.c
socket_client.c
socket_api.c
- vlib_api.c
+ memclnt_api.c
vlib_api_cli.c
+ vlib_api.c
../vlibapi/api_shared.c
../vlibapi/node_serialize.c
@@ -35,6 +36,7 @@ add_vpp_library (vlibmemory
API_FILES
memclnt.api
+ vlib.api
LINK_LIBRARIES vppinfra svm vlib
)
@@ -51,3 +53,7 @@ add_vpp_library (vlibmemoryclient
LINK_LIBRARIES vppinfra svm
)
add_dependencies(vlibmemoryclient vlibmemory_api_headers)
+
+add_vat_test_library(vlib
+ vlibapi_test.c
+)
diff --git a/src/vlibmemory/memclnt_api.c b/src/vlibmemory/memclnt_api.c
new file mode 100644
index 00000000000..5ebc31f71dc
--- /dev/null
+++ b/src/vlibmemory/memclnt_api.c
@@ -0,0 +1,719 @@
+/*
+ *------------------------------------------------------------------
+ * memclnt_api.c VLIB API implementation
+ *
+ * Copyright (c) 2009 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>
+#include <vppinfra/byte_order.h>
+#include <vppinfra/elog.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+/**
+ * @file
+ * @brief Binary API messaging via shared memory
+ * Low-level, primary provisioning interface
+ */
+/*? %%clicmd:group_label Binary API CLI %% ?*/
+/*? %%syscfg:group_label Binary API configuration %% ?*/
+
+#define TRACE_VLIB_MEMORY_QUEUE 0
+
+#include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */
+
+#define vl_typedefs /* define message structures */
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_typedefs
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_printfun
+
+static inline void *
+vl_api_trace_plugin_msg_ids_t_print (vl_api_trace_plugin_msg_ids_t *a,
+ void *handle)
+{
+ vl_print (handle, "vl_api_trace_plugin_msg_ids: %s first %u last %u\n",
+ a->plugin_name, clib_host_to_net_u16 (a->first_msg_id),
+ clib_host_to_net_u16 (a->last_msg_id));
+ return handle;
+}
+
+/* instantiate all the endian swap functions we know about */
+#define vl_endianfun
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_endianfun
+
+static void
+vl_api_get_first_msg_id_t_handler (vl_api_get_first_msg_id_t *mp)
+{
+ vl_api_get_first_msg_id_reply_t *rmp;
+ vl_api_registration_t *regp;
+ uword *p;
+ api_main_t *am = vlibapi_get_main ();
+ vl_api_msg_range_t *rp;
+ u8 name[64];
+ u16 first_msg_id = ~0;
+ int rv = -7; /* VNET_API_ERROR_INVALID_VALUE */
+
+ regp = vl_api_client_index_to_registration (mp->client_index);
+ if (!regp)
+ return;
+
+ if (am->msg_range_by_name == 0)
+ goto out;
+ strncpy ((char *) name, (char *) mp->name, ARRAY_LEN (name));
+ name[ARRAY_LEN (name) - 1] = '\0';
+ p = hash_get_mem (am->msg_range_by_name, name);
+ if (p == 0)
+ goto out;
+
+ rp = vec_elt_at_index (am->msg_ranges, p[0]);
+ first_msg_id = rp->first_msg_id;
+ rv = 0;
+
+out:
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_GET_FIRST_MSG_ID_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = ntohl (rv);
+ rmp->first_msg_id = ntohs (first_msg_id);
+ vl_api_send_msg (regp, (u8 *) rmp);
+}
+
+void
+vl_api_api_versions_t_handler (vl_api_api_versions_t *mp)
+{
+ api_main_t *am = vlibapi_get_main ();
+ vl_api_api_versions_reply_t *rmp;
+ vl_api_registration_t *reg;
+ u32 nmsg = vec_len (am->api_version_list);
+ int msg_size = sizeof (*rmp) + sizeof (rmp->api_versions[0]) * nmsg;
+ int i;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ rmp = vl_msg_api_alloc (msg_size);
+ clib_memset (rmp, 0, msg_size);
+ rmp->_vl_msg_id = ntohs (VL_API_API_VERSIONS_REPLY);
+
+ /* fill in the message */
+ rmp->context = mp->context;
+ rmp->count = htonl (nmsg);
+
+ for (i = 0; i < nmsg; ++i)
+ {
+ api_version_t *vl = &am->api_version_list[i];
+ rmp->api_versions[i].major = htonl (vl->major);
+ rmp->api_versions[i].minor = htonl (vl->minor);
+ rmp->api_versions[i].patch = htonl (vl->patch);
+ strncpy ((char *) rmp->api_versions[i].name, vl->name,
+ ARRAY_LEN (rmp->api_versions[i].name));
+ rmp->api_versions[i].name[ARRAY_LEN (rmp->api_versions[i].name) - 1] =
+ '\0';
+ }
+
+ vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+#define foreach_vlib_api_msg \
+ _ (GET_FIRST_MSG_ID, get_first_msg_id) \
+ _ (API_VERSIONS, api_versions)
+
+/*
+ * vl_api_init
+ */
+static int
+vlib_api_init (void)
+{
+ vl_msg_api_msg_config_t cfg;
+ vl_msg_api_msg_config_t *c = &cfg;
+
+ clib_memset (c, 0, sizeof (*c));
+
+#define _(N, n) \
+ do \
+ { \
+ c->id = VL_API_##N; \
+ c->name = #n; \
+ c->handler = vl_api_##n##_t_handler; \
+ c->cleanup = vl_noop_handler; \
+ c->endian = vl_api_##n##_t_endian; \
+ c->print = vl_api_##n##_t_print; \
+ c->size = sizeof (vl_api_##n##_t); \
+ c->traced = 1; /* trace, so these msgs print */ \
+ c->replay = 0; /* don't replay client create/delete msgs */ \
+ c->message_bounce = 0; /* don't bounce this message */ \
+ vl_msg_api_config (c); \
+ } \
+ while (0);
+
+ foreach_vlib_api_msg;
+#undef _
+
+ return 0;
+}
+
+u64 vector_rate_histogram[SLEEP_N_BUCKETS];
+
+/*
+ * Callback to send ourselves a plugin numbering-space trace msg
+ */
+static void
+send_one_plugin_msg_ids_msg (u8 *name, u16 first_msg_id, u16 last_msg_id)
+{
+ vl_api_trace_plugin_msg_ids_t *mp;
+ api_main_t *am = vlibapi_get_main ();
+ vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+ svm_queue_t *q;
+
+ mp = vl_msg_api_alloc_as_if_client (sizeof (*mp));
+ clib_memset (mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_TRACE_PLUGIN_MSG_IDS);
+ strncpy ((char *) mp->plugin_name, (char *) name,
+ sizeof (mp->plugin_name) - 1);
+ mp->first_msg_id = clib_host_to_net_u16 (first_msg_id);
+ mp->last_msg_id = clib_host_to_net_u16 (last_msg_id);
+
+ q = shmem_hdr->vl_input_queue;
+
+ vl_msg_api_send_shmem (q, (u8 *) &mp);
+}
+
+void
+vl_api_save_msg_table (void)
+{
+ u8 *serialized_message_table;
+ api_main_t *am = vlibapi_get_main ();
+ u8 *chroot_file;
+ int fd, rv;
+
+ /*
+ * Snapshoot the api message table.
+ */
+ if (strstr ((char *) am->save_msg_table_filename, "..") ||
+ index ((char *) am->save_msg_table_filename, '/'))
+ {
+ clib_warning ("illegal save-message-table filename '%s'",
+ am->save_msg_table_filename);
+ return;
+ }
+
+ chroot_file = format (0, "/tmp/%s%c", am->save_msg_table_filename, 0);
+
+ fd = creat ((char *) chroot_file, 0644);
+
+ if (fd < 0)
+ {
+ clib_unix_warning ("creat");
+ return;
+ }
+
+ serialized_message_table = vl_api_serialize_message_table (am, 0);
+
+ rv =
+ write (fd, serialized_message_table, vec_len (serialized_message_table));
+
+ if (rv != vec_len (serialized_message_table))
+ clib_unix_warning ("write");
+
+ rv = close (fd);
+ if (rv < 0)
+ clib_unix_warning ("close");
+
+ vec_free (chroot_file);
+ vec_free (serialized_message_table);
+}
+
+clib_error_t *vat_builtin_main_init (vlib_main_t *vm) __attribute__ ((weak));
+clib_error_t *
+vat_builtin_main_init (vlib_main_t *vm)
+{
+ return 0;
+}
+
+static uword
+vl_api_clnt_process (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *f)
+{
+ vlib_global_main_t *vgm = vlib_get_global_main ();
+ int private_segment_rotor = 0, i, rv;
+ vl_socket_args_for_process_t *a;
+ vl_shmem_hdr_t *shm;
+ svm_queue_t *q;
+ clib_error_t *e;
+ api_main_t *am = vlibapi_get_main ();
+ f64 dead_client_scan_time;
+ f64 sleep_time, start_time;
+ f64 vector_rate;
+ clib_error_t *error;
+ uword event_type;
+ uword *event_data = 0;
+ f64 now;
+
+ if ((error = vl_sock_api_init (vm)))
+ {
+ clib_error_report (error);
+ clib_warning ("socksvr_api_init failed, quitting...");
+ return 0;
+ }
+
+ if ((rv = vlib_api_init ()) < 0)
+ {
+ clib_warning ("vlib_api_init returned %d, quitting...", rv);
+ return 0;
+ }
+
+ shm = am->shmem_hdr;
+ q = shm->vl_input_queue;
+
+ e = vlib_call_init_exit_functions (vm, &vgm->api_init_function_registrations,
+ 1 /* call_once */, 1 /* is_global */);
+ if (e)
+ clib_error_report (e);
+
+ e = vat_builtin_main_init (vm);
+ if (e)
+ clib_error_report (e);
+
+ sleep_time = 10.0;
+ dead_client_scan_time = vlib_time_now (vm) + 10.0;
+
+ /*
+ * Send plugin message range messages for each plugin we loaded
+ */
+ for (i = 0; i < vec_len (am->msg_ranges); i++)
+ {
+ vl_api_msg_range_t *rp = am->msg_ranges + i;
+ send_one_plugin_msg_ids_msg (rp->name, rp->first_msg_id,
+ rp->last_msg_id);
+ }
+
+ /*
+ * Save the api message table snapshot, if configured
+ */
+ if (am->save_msg_table_filename)
+ vl_api_save_msg_table ();
+
+ /* $$$ pay attention to frame size, control CPU usage */
+ while (1)
+ {
+ /*
+ * There's a reason for checking the queue before
+ * sleeping. If the vlib application crashes, it's entirely
+ * possible for a client to enqueue a connect request
+ * during the process restart interval.
+ *
+ * Unless some force of physics causes the new incarnation
+ * of the application to process the request, the client will
+ * sit and wait for Godot...
+ */
+ vector_rate = (f64) vlib_last_vectors_per_main_loop (vm);
+ start_time = vlib_time_now (vm);
+ while (1)
+ {
+ if (vl_mem_api_handle_rpc (vm, node) ||
+ vl_mem_api_handle_msg_main (vm, node))
+ {
+ vm->api_queue_nonempty = 0;
+ VL_MEM_API_LOG_Q_LEN ("q-underflow: len %d", 0);
+ sleep_time = 20.0;
+ break;
+ }
+
+ /* Allow no more than 10us without a pause */
+ if (vlib_time_now (vm) > start_time + 10e-6)
+ {
+ int index = SLEEP_400_US;
+ if (vector_rate > 40.0)
+ sleep_time = 400e-6;
+ else if (vector_rate > 20.0)
+ {
+ index = SLEEP_200_US;
+ sleep_time = 200e-6;
+ }
+ else if (vector_rate >= 1.0)
+ {
+ index = SLEEP_100_US;
+ sleep_time = 100e-6;
+ }
+ else
+ {
+ index = SLEEP_10_US;
+ sleep_time = 10e-6;
+ }
+ vector_rate_histogram[index] += 1;
+ break;
+ }
+ }
+
+ /*
+ * see if we have any private api shared-memory segments
+ * If so, push required context variables, and process
+ * a message.
+ */
+ if (PREDICT_FALSE (vec_len (am->vlib_private_rps)))
+ {
+ if (private_segment_rotor >= vec_len (am->vlib_private_rps))
+ private_segment_rotor = 0;
+ vl_mem_api_handle_msg_private (vm, node, private_segment_rotor++);
+ }
+
+ vlib_process_wait_for_event_or_clock (vm, sleep_time);
+ vec_reset_length (event_data);
+ event_type = vlib_process_get_events (vm, &event_data);
+ now = vlib_time_now (vm);
+
+ switch (event_type)
+ {
+ case QUEUE_SIGNAL_EVENT:
+ vm->queue_signal_pending = 0;
+ VL_MEM_API_LOG_Q_LEN ("q-awake: len %d", q->cursize);
+
+ break;
+ case SOCKET_READ_EVENT:
+ for (i = 0; i < vec_len (event_data); i++)
+ {
+ vl_api_registration_t *regp;
+
+ a = pool_elt_at_index (socket_main.process_args, event_data[i]);
+ regp = vl_socket_get_registration (a->reg_index);
+ if (regp)
+ {
+ vl_socket_process_api_msg (regp, (i8 *) a->data);
+ a = pool_elt_at_index (socket_main.process_args,
+ event_data[i]);
+ }
+ vec_free (a->data);
+ pool_put (socket_main.process_args, a);
+ }
+ break;
+
+ /* Timeout... */
+ case -1:
+ break;
+
+ default:
+ clib_warning ("unknown event type %d", event_type);
+ break;
+ }
+
+ if (now > dead_client_scan_time)
+ {
+ vl_mem_api_dead_client_scan (am, shm, now);
+ dead_client_scan_time = vlib_time_now (vm) + 10.0;
+ }
+ }
+
+ return 0;
+}
+
+VLIB_REGISTER_NODE (vl_api_clnt_node) = {
+ .function = vl_api_clnt_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "api-rx-from-ring",
+ .state = VLIB_NODE_STATE_DISABLED,
+ .process_log2_n_stack_bytes = 18,
+};
+
+void
+vl_mem_api_enable_disable (vlib_main_t *vm, int enable)
+{
+ vlib_node_set_state (
+ vm, vl_api_clnt_node.index,
+ (enable ? VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED));
+}
+
+static uword
+api_rx_from_node (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ uword n_packets = frame->n_vectors;
+ uword n_left_from;
+ u32 *from;
+ static u8 *long_msg;
+
+ vec_validate (long_msg, 4095);
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ void *msg;
+ uword msg_len;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ from += 1;
+ n_left_from -= 1;
+
+ msg = b0->data + b0->current_data;
+ msg_len = b0->current_length;
+ if (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ ASSERT (long_msg != 0);
+ _vec_len (long_msg) = 0;
+ vec_add (long_msg, msg, msg_len);
+ while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ b0 = vlib_get_buffer (vm, b0->next_buffer);
+ msg = b0->data + b0->current_data;
+ msg_len = b0->current_length;
+ vec_add (long_msg, msg, msg_len);
+ }
+ msg = long_msg;
+ }
+ vl_msg_api_handler_no_trace_no_free (msg);
+ }
+
+ /* Free what we've been given. */
+ vlib_buffer_free (vm, vlib_frame_vector_args (frame), n_packets);
+
+ return n_packets;
+}
+
+VLIB_REGISTER_NODE (api_rx_from_node_node, static) = {
+ .function = api_rx_from_node,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .vector_size = 4,
+ .name = "api-rx-from-node",
+};
+
+static void
+vl_api_rpc_call_t_handler (vl_api_rpc_call_t *mp)
+{
+ vl_api_rpc_call_reply_t *rmp;
+ int (*fp) (void *);
+ i32 rv = 0;
+ vlib_main_t *vm = vlib_get_main ();
+
+ if (mp->function == 0)
+ {
+ rv = -1;
+ clib_warning ("rpc NULL function pointer");
+ }
+
+ else
+ {
+ if (mp->need_barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
+
+ fp = uword_to_pointer (mp->function, int (*) (void *));
+ rv = fp (mp->data);
+
+ if (mp->need_barrier_sync)
+ vlib_worker_thread_barrier_release (vm);
+ }
+
+ if (mp->send_reply)
+ {
+ svm_queue_t *q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q)
+ {
+ rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp));
+ rmp->_vl_msg_id = ntohs (VL_API_RPC_CALL_REPLY);
+ rmp->context = mp->context;
+ rmp->retval = rv;
+ vl_msg_api_send_shmem (q, (u8 *) &rmp);
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Interface util library."""

from time import sleep

from enum import IntEnum
from ipaddress import ip_address
from robot.api import logger

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.L2Util import L2Util
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.parsers.JsonParser import JsonParser
from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.VPPUtil import VPPUtil


class InterfaceStatusFlags(IntEnum):
    """Interface status flags.

    Bit flags, so ADMIN_UP and LINK_UP may be OR-ed together.
    NOTE(review): values appear to mirror the VPP interface API enum —
    keep in sync; confirm against the .api definition.
    """
    IF_STATUS_API_FLAG_ADMIN_UP = 1
    IF_STATUS_API_FLAG_LINK_UP = 2


class MtuProto(IntEnum):
    """MTU protocol.

    Per-protocol MTU selector; _N is the count sentinel, not a protocol.
    NOTE(review): values appear to mirror the VPP API enum — confirm.
    """
    MTU_PROTO_API_L3 = 0
    MTU_PROTO_API_IP4 = 1
    MTU_PROTO_API_IP6 = 2
    MTU_PROTO_API_MPLS = 3
    MTU_PROTO_API_N = 4


class LinkDuplex(IntEnum):
    """Link duplex.

    NOTE(review): values appear to mirror the VPP API enum — confirm.
    """
    LINK_DUPLEX_API_UNKNOWN = 0
    LINK_DUPLEX_API_HALF = 1
    LINK_DUPLEX_API_FULL = 2


class SubInterfaceFlags(IntEnum):
    """Sub-interface flags.

    Power-of-two bit flags; combine with OR when building sub-interfaces.
    NOTE(review): values appear to mirror the VPP API enum — confirm.
    """
    SUB_IF_API_FLAG_NO_TAGS = 1
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_TWO_TAGS = 4
    SUB_IF_API_FLAG_DOT1AD = 8
    SUB_IF_API_FLAG_EXACT_MATCH = 16
    SUB_IF_API_FLAG_DEFAULT = 32
    SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY = 64
    SUB_IF_API_FLAG_INNER_VLAN_ID_ANY = 128
    SUB_IF_API_FLAG_DOT1AH = 256


class RxMode(IntEnum):
    """RX mode.

    NOTE(review): values appear to mirror the VPP API enum — confirm.
    """
    RX_MODE_API_UNKNOWN = 0
    RX_MODE_API_POLLING = 1
    RX_MODE_API_INTERRUPT = 2
    RX_MODE_API_ADAPTIVE = 3
    RX_MODE_API_DEFAULT = 4


class IfType(IntEnum):
    """Interface type.

    NOTE(review): values appear to mirror the VPP API enum — confirm.
    """
    # A hw interface
    IF_API_TYPE_HARDWARE = 0
    # A sub-interface
    IF_API_TYPE_SUB = 1
    IF_API_TYPE_P2P = 2
    IF_API_TYPE_PIPE = 3


class LinkBondLoadBalanceAlgo(IntEnum):
    """Link bonding load balance algorithm.

    NOTE(review): values appear to mirror the VPP bond API enum — confirm.
    """
    BOND_API_LB_ALGO_L2 = 0
    BOND_API_LB_ALGO_L34 = 1
    BOND_API_LB_ALGO_L23 = 2
    BOND_API_LB_ALGO_RR = 3
    BOND_API_LB_ALGO_BC = 4
    BOND_API_LB_ALGO_AB = 5


class LinkBondMode(IntEnum):
    """Link bonding mode.

    NOTE(review): values appear to mirror the VPP bond API enum (note:
    1-based, unlike the other enums here) — confirm.
    """
    BOND_API_MODE_ROUND_ROBIN = 1
    BOND_API_MODE_ACTIVE_BACKUP = 2
    BOND_API_MODE_XOR = 3
    BOND_API_MODE_BROADCAST = 4
    BOND_API_MODE_LACP = 5


class InterfaceUtil(object):
    """General utilities for managing interfaces"""

    __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'

    @staticmethod
    def pci_to_int(pci_str):
        """Convert PCI address from string format (0000:18:0a.0) to
        integer representation (169345024).

        :param pci_str: PCI address in string representation.
        :type pci_str: str
        :returns: Integer representation of PCI address.
        :rtype: int
        """
        pci = list(pci_str.split(':')[0:2])
        pci.extend(pci_str.split(':')[2].split('.'))

        return (int(pci[0], 16) | int(pci[1], 16) << 16 |
                int(pci[2], 16) << 24 | int(pci[3], 16) << 29)

    @staticmethod
    def pci_to_eth(node, pci_str):
        """Convert PCI address to Linux ethernet name.

        Resolves the name by globbing /sys/bus/pci/devices/<pci>/net/ on
        the node.

        :param node: Node to run the lookup command on.
        :param pci_str: PCI address.
        :type node: dict
        :type pci_str: str
        :returns: Ethernet name.
        :rtype: str
        :raises RuntimeError: If the lookup command fails on the node.
        """
        cmd = ('basename /sys/bus/pci/devices/{pci_str}/net/*'.
               format(pci_str=pci_str))
        try:
            stdout, _ = exec_cmd_no_error(node, cmd)
        except RuntimeError:
            raise RuntimeError("Cannot convert {pci_str} to ethernet name!".
                               format(pci_str=pci_str))

        return stdout.strip()

    @staticmethod
    def get_interface_index(node, interface):
        """Get interface sw_if_index from topology file.

        :param node: Node where the interface is.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: str or int
        :returns: SW interface index.
        :rtype: int
        """
        try:
            sw_if_index = int(interface)
        except ValueError:
            sw_if_index = Topology.get_interface_sw_index(node, interface)
            if sw_if_index is None:
                sw_if_index = \
                    Topology.get_interface_sw_index_by_name(node, interface)
        except TypeError as err:
            raise TypeError('Wrong interface format {ifc}: {err}'.format(
                ifc=interface, err=err.message))

        return sw_if_index

    @staticmethod
    def set_interface_state(node, interface, state, if_type='key'):
        """Set interface state on a node.

        Function can be used for DUTs as well as for TGs.

        :param node: Node where the interface is.
        :param interface: Interface key or sw_if_index or name.
        :param state: One of 'up' or 'down'.
        :param if_type: Interface type
        :type node: dict
        :type interface: str or int
        :type state: str
        :type if_type: str
        :returns: Nothing.
        :raises ValueError: If the interface type is unknown.
        :raises ValueError: If the state of interface is unexpected.
        :raises ValueError: If the node has an unknown node type.
        """
        if if_type == 'key':
            if isinstance(interface, basestring):
                sw_if_index = Topology.get_interface_sw_index(node, interface)
                iface_name = Topology.get_interface_name(node, interface)
            else:
                sw_if_index = interface
        elif if_type == 'name':
            iface_key = Topology.get_interface_by_name(node, interface)
            if iface_key is not None:
                sw_if_index = Topology.get_interface_sw_index(node, iface_key)
            iface_name = interface
        else:
            raise ValueError('Unknown if_type: {type}'.format(type=if_type))

        if node['type'] == NodeType.DUT:
            if state == 'up':
                flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
            elif state == 'down':
                flags = 0
            else:
                raise ValueError('Unexpected interface state: {state}'.format(
                    state=state))
            cmd = 'sw_interface_set_flags'
            err_msg = 'Failed to set interface state on host {host}'.format(
                host=node['host'])
            args = dict(
                sw_if_index=int(sw_if_index),
                flags=flags)
            with PapiSocketExecutor(node) as papi_exec:
                papi_exec.add(cmd, **args).get_reply(err_msg)
        elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
            cmd = 'ip link set {ifc} {state}'.format(
                ifc=iface_name, state=state)
            exec_cmd_no_error(node, cmd, sudo=True)
        else:
            raise ValueError('Node {} has unknown NodeType: "{}"'
                             .format(node['host'], node['type']))

    @staticmethod
    def set_interface_ethernet_mtu(node, iface_key, mtu):
        """Set Ethernet MTU for specified interface.

        Function can be used only for TGs.

        :param node: Node where the interface is.
        :param iface_key: Interface key from topology file.
        :param mtu: MTU to set.
        :type node: dict
        :type iface_key: str
        :type mtu: int
        :returns: Nothing.
        :raises ValueError: If the node type is "DUT".
        :raises ValueError: If the node has an unknown node type.
        """
        if node['type'] == NodeType.DUT:
            # Format the host into the message; previously the host was
            # passed as a stray second ValueError argument and was never
            # interpolated into the text.
            raise ValueError(
                'Node {host}: Setting Ethernet MTU for interface '
                'on DUT nodes not supported'.format(host=node['host']))
        elif node['type'] == NodeType.TG:
            iface_name = Topology.get_interface_name(node, iface_key)
            cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
            exec_cmd_no_error(node, cmd, sudo=True)
        else:
            raise ValueError('Node {} has unknown NodeType: "{}"'
                             .format(node['host'], node['type']))

    @staticmethod
    def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
        """Set default Ethernet MTU on all interfaces on node.

        Function can be used only for TGs.

        :param node: Node where to set default MTU.
        :type node: dict
        :returns: Nothing.
        """
        for ifc in node['interfaces']:
            InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)

    @staticmethod
    def vpp_set_interface_mtu(node, interface, mtu=9200):
        """Set Ethernet MTU on interface.

        Failures of the API call are logged and swallowed (best effort).

        :param node: VPP node.
        :param interface: Interface to set the MTU on.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type node: dict
        :type interface: str or int
        :type mtu: int
        """
        # Accept either a topology key (string) or a raw sw_if_index.
        sw_if_index = Topology.get_interface_sw_index(node, interface) \
            if isinstance(interface, basestring) else interface

        err_msg = 'Failed to set interface MTU on host {host}'.format(
            host=node['host'])
        args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
        try:
            with PapiSocketExecutor(node) as papi_exec:
                papi_exec.add(
                    'hw_interface_set_mtu', **args).get_reply(err_msg)
        except AssertionError as err:
            # TODO: Make failure tolerance optional.
            logger.debug("Setting MTU failed. Expected?\n{err}".format(
                err=err))

    @staticmethod
    def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
        """Set Ethernet MTU on all interfaces.

        :param node: VPP node.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type node: dict
        :type mtu: int
        """
        for interface in node['interfaces']:
            InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)

    @staticmethod
    def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
        """Set Ethernet MTU on all interfaces on all DUTs.

        :param nodes: VPP nodes.
        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
        :type nodes: dict
        :type mtu: int
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)

    @staticmethod
    def vpp_node_interfaces_ready_wait(node, retries=15):
        """Wait until all interfaces with admin-up are in link-up state.

        :param node: Node to wait on.
        :param retries: Number of retries to check interface status (optional,
            default 15).
        :type node: dict
        :type retries: int
        :returns: Nothing.
        :raises RuntimeError: If any interface is not in link-up state after
            defined number of retries.
        """
        for _ in xrange(0, retries):
            not_ready = list()
            out = InterfaceUtil.vpp_get_interface_data(node)
            for interface in out:
                # flags == 1 means only ADMIN_UP is set (see
                # InterfaceStatusFlags above): admin-up but link still down.
                if interface.get('flags') == 1:
                    not_ready.append(interface.get('interface_name'))
            if not not_ready:
                break
            else:
                logger.debug('Interfaces still not in link-up state:\n{ifs} '
                             '\nWaiting...'.format(ifs=not_ready))
                sleep(1)
        else:
            # for/else: runs only when retries were exhausted without break.
            # The locals() guard covers retries == 0 (loop body never ran,
            # so not_ready was never assigned).
            err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
                if 'not_ready' in locals() else 'No check executed!'
            raise RuntimeError(err)

    @staticmethod
    def all_vpp_interfaces_ready_wait(nodes, retries=15):
        """Wait until all interfaces with admin-up are in link-up state for all
        nodes in the topology.

        :param nodes: Nodes in the topology.
        :param retries: Number of retries to check interface status (optional,
            default 15).
        :type nodes: dict
        :type retries: int
        :returns: Nothing.
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)

    @staticmethod
    def vpp_get_interface_data(node, interface=None):
        """Get all interface data from a VPP node. If a name or
        sw_interface_index is provided, return only data for the matching
        interface(s).

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :returns: List of dictionaries containing data for each interface, or a
            single dictionary for the specified interface.
        :rtype: list or dict
        :raises TypeError: if the data type of interface is neither basestring
            nor int.
        """
        def process_if_dump(if_dump):
            """Flatten one interface dump reply to plain Python values.

            Stringifies the MAC fields and unwraps enum-like objects to
            their numeric .value.

            :param if_dump: Interface dump.
            :type if_dump: dict
            :returns: Processed interface dump.
            :rtype: dict
            """
            if_dump['l2_address'] = str(if_dump['l2_address'])
            if_dump['b_dmac'] = str(if_dump['b_dmac'])
            if_dump['b_smac'] = str(if_dump['b_smac'])
            if_dump['flags'] = if_dump['flags'].value
            if_dump['type'] = if_dump['type'].value
            if_dump['link_duplex'] = if_dump['link_duplex'].value
            # sub_if_flags may arrive as an enum object or a plain int.
            if_dump['sub_if_flags'] = if_dump['sub_if_flags'].value \
                if hasattr(if_dump['sub_if_flags'], 'value') \
                else int(if_dump['sub_if_flags'])

            return if_dump

        # Choose which dump field to match against; filtering happens
        # client-side below (name_filter is always sent empty).
        if interface is not None:
            if isinstance(interface, basestring):
                param = 'interface_name'
            elif isinstance(interface, int):
                param = 'sw_if_index'
            else:
                raise TypeError('Wrong interface format {ifc}'.format(
                    ifc=interface))
        else:
            param = ''

        cmd = 'sw_interface_dump'
        args = dict(
            name_filter_valid=False,
            name_filter=''
        )
        err_msg = 'Failed to get interface dump on host {host}'.format(
            host=node['host'])
        with PapiSocketExecutor(node) as papi_exec:
            details = papi_exec.add(cmd, **args).get_details(err_msg)
        logger.debug('Received data:\n{d!r}'.format(d=details))

        # Full dump, then client-side match; rstrip('\x00') drops NUL
        # padding from fixed-size API strings before comparison.
        data = list() if interface is None else dict()
        for dump in details:
            if interface is None:
                data.append(process_if_dump(dump))
            elif str(dump.get(param)).rstrip('\x00') == str(interface):
                data = process_if_dump(dump)
                break

        logger.debug('Interface data:\n{if_data}'.format(if_data=data))
        return data

    @staticmethod
    def vpp_get_interface_name(node, sw_if_index):
        """Get interface name for the given SW interface index from actual
        interface dump.

        :param node: VPP node to get interface data from.
        :param sw_if_index: SW interface index of the specific interface.
        :type node: dict
        :type sw_if_index: int
        :returns: Name of the given interface.
        :rtype: str
        """
        data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
        sup_index = data['sup_sw_if_index']
        if sup_index != data['sw_if_index']:
            # Sub-interface: report the name of its parent (super) interface.
            data = InterfaceUtil.vpp_get_interface_data(node, sup_index)

        return data.get('interface_name')

    @staticmethod
    def vpp_get_interface_sw_index(node, interface_name):
        """Get interface sw_if_index for the given interface name from actual
        interface dump.

        :param node: VPP node to get interface data from.
        :param interface_name: Interface name.
        :type node: dict
        :type interface_name: str
        :returns: sw_if_index of the given interface, or None if not found.
        :rtype: int or None
        """
        if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)

        return if_data.get('sw_if_index')

    @staticmethod
    def vpp_get_interface_mac(node, interface):
        """Get MAC address for the given interface from actual interface dump.

        :param node: VPP node to get interface data from.
        :param interface: Numeric index or name string of a specific interface.
        :type node: dict
        :type interface: int or str
        :returns: MAC address.
        :rtype: str
        """
        data = InterfaceUtil.vpp_get_interface_data(node, interface)
        sup_index = data['sup_sw_if_index']
        if sup_index != data['sw_if_index']:
            # Sub-interface: report the MAC of its parent (super) interface.
            data = InterfaceUtil.vpp_get_interface_data(node, sup_index)

        return data.get('l2_address')

    @staticmethod
    def tg_set_interface_driver(node, pci_addr, driver):
        """Set interface driver on the TG node.

        No-op when the device is already bound to the requested driver.

        :param node: Node to set interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :param driver: Driver name.
        :type node: dict
        :type pci_addr: str
        :type driver: str
        :raises RuntimeError: If unbinding from the current driver fails.
        :raises RuntimeError: If binding to the new driver fails.
        """
        current_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
        if current_driver == driver:
            return

        ssh = SSH()
        ssh.connect(node)

        # Unbind from the current driver (if any), then bind to the new one.
        steps = []
        if current_driver is not None:
            steps.append(('unbind', current_driver))
        steps.append(('bind', driver))

        for action, drv in steps:
            cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/{2}"'.format(
                pci_addr, drv, action)
            (ret_code, _, _) = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise RuntimeError("'{0}' failed on '{1}'"
                                   .format(cmd, node['host']))

    @staticmethod
    def tg_get_interface_driver(node, pci_addr):
        """Get interface driver from the TG node.

        :param node: Node to get interface driver on (must be TG node).
        :param pci_addr: PCI address of the interface.
        :type node: dict
        :type pci_addr: str
        :returns: Interface driver or None if not found.
        :rtype: str
        :raises RuntimeError: If PCI rescan or lspci command execution failed.
        """
        # Thin wrapper: driver discovery is implemented in DUTSetup.
        return DUTSetup.get_pci_dev_driver(node, pci_addr)

    @staticmethod
    def tg_set_interfaces_udev_rules(node):
        """Set udev rules for interfaces.

        Create udev rules file in /etc/udev/rules.d where are rules for each
        interface used by TG node, based on MAC interface has specific name.
        So after unbind and bind again to kernel driver interface has same
        name as before. This must be called after TG has set name for each
        port in topology dictionary.
        udev rule example
        SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
        NAME="eth1"

        :param node: Node to set udev rules on (must be TG node).
        :type node: dict
        :raises RuntimeError: If setting of udev rules fails.
        """
        ssh = SSH()
        ssh.connect(node)

        # Start from a clean rules file.
        cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise RuntimeError("'{0}' failed on '{1}'"
                               .format(cmd, node['host']))

        for interface in node['interfaces'].values():
            # Double-escaped quotes: one level is consumed by the remote
            # sh -c, so the rule file ends up with plain double quotes.
            rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
                   '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
                   interface['name'] + '\\"'
            cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
                rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
            (ret_code, _, _) = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise RuntimeError("'{0}' failed on '{1}'"
                                   .format(cmd, node['host']))

        # Best effort: the restart return code is intentionally ignored.
        cmd = '/etc/init.d/udev restart'
        ssh.exec_command_sudo(cmd)

    @staticmethod
    def tg_set_interfaces_default_driver(node):
        """Set interfaces default driver specified in topology yaml file.

        :param node: Node to setup interfaces driver on (must be TG node).
        :type node: dict
        """
        for interface in node['interfaces'].values():
            InterfaceUtil.tg_set_interface_driver(node,
                                                  interface['pci_address'],
                                                  interface['driver'])

    @staticmethod
    def update_vpp_interface_data_on_node(node):
        """Update vpp generated interface data for a given  }
-VLIB_API_INIT_FUNCTION (rpc_api_hookup);
+VLIB_API_INIT_FUNCTION (vlib_apis_hookup);
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/vlibmemory/vlibapi_test.c b/src/vlibmemory/vlibapi_test.c
new file mode 100644
index 00000000000..820096ab80d
--- /dev/null
+++ b/src/vlibmemory/vlibapi_test.c
@@ -0,0 +1,470 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vppinfra/error.h>
+
+#include <vpp/api/types.h>
+#include <vnet/mpls/packet.h>
+#include <vnet/ip/ip_types_api.h>
+
+typedef struct
+{
+ u16 msg_id_base;
+ vat_main_t *vat_main;
+} vlib_test_main_t;
+vlib_test_main_t vlib_test_main;
+
+#define __plugin_msg_base vlib_test_main.msg_id_base
+#include <vlibapi/vat_helper_macros.h>
+
+/* Declare message IDs */
+#include <vlibmemory/vlib.api_enum.h>
+#include <vlibmemory/vlib.api_types.h>
+
+static void
+vl_api_cli_reply_t_handler (vl_api_cli_reply_t *mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ vam->retval = retval;
+ vam->shmem_result = uword_to_pointer (mp->reply_in_shmem, u8 *);
+ vam->result_ready = 1;
+}
+
+static void
+vl_api_cli_inband_reply_t_handler (vl_api_cli_inband_reply_t *mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ vec_reset_length (vam->cmd_reply);
+
+ vam->retval = retval;
+ if (retval == 0)
+ vam->cmd_reply = vl_api_from_api_to_new_vec (mp, &mp->reply);
+ vam->result_ready = 1;
+}
+
+static void
+vl_api_get_node_index_reply_t_handler (vl_api_get_node_index_reply_t *mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ }
+ else
+ {
+ vam->retval = retval;
+ if (retval == 0)
+ errmsg ("node index %d", ntohl (mp->node_index));
+ vam->result_ready = 1;
+ }
+}
+
+static void
+vl_api_get_next_index_reply_t_handler (vl_api_get_next_index_reply_t *mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ }
+ else
+ {
+ vam->retval = retval;
+ if (retval == 0)
+ errmsg ("next node index %d", ntohl (mp->next_index));
+ vam->result_ready = 1;
+ }
+}
+
+static void
+vl_api_add_node_next_reply_t_handler (vl_api_add_node_next_reply_t *mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ }
+ else
+ {
+ vam->retval = retval;
+ if (retval == 0)
+ errmsg ("next index %d", ntohl (mp->next_index));
+ vam->result_ready = 1;
+ }
+}
+
+static void
+vl_api_get_f64_endian_value_reply_t_handler (
+ vl_api_get_f64_endian_value_reply_t *mp)
+{
+ // not yet implemented
+}
+
+static void
+vl_api_get_f64_increment_by_one_reply_t_handler (
+ vl_api_get_f64_increment_by_one_reply_t *mp)
+{
+ // not yet implemented
+}
+
+static int
+api_get_f64_endian_value (vat_main_t *vam)
+{
+ // not yet implemented
+ return -1;
+}
+
+static int
+api_get_f64_increment_by_one (vat_main_t *vam)
+{
+ // not yet implemented
+ return -1;
+}
+
+/*
+ * Pass CLI buffers directly in the CLI_INBAND API message,
+ * instead of an additional shared memory area.
+ */
+static int
+exec_inband (vat_main_t *vam)
+{
+ vl_api_cli_inband_t *mp;
+ unformat_input_t *i = vam->input;
+ int ret;
+
+ if (vec_len (i->buffer) == 0)
+ return -1;
+
+ if (vam->exec_mode == 0 && unformat (i, "mode"))
+ {
+ vam->exec_mode = 1;
+ return 0;
+ }
+ if (vam->exec_mode == 1 && (unformat (i, "exit") || unformat (i, "quit")))
+ {
+ vam->exec_mode = 0;
+ return 0;
+ }
+
+ /*
+ * In order for the CLI command to work, it
+ * must be a vector ending in \n, not a C-string ending
+ * in \n\0.
+ */
+ M2 (CLI_INBAND, mp, vec_len (vam->input->buffer));
+ vl_api_vec_to_api_string (vam->input->buffer, &mp->cmd);
+
+ S (mp);
+ W (ret);
+ /* json responses may or may not include a useful reply... */
+ if (vec_len (vam->cmd_reply))
+ print (vam->ofp, "%v", (char *) (vam->cmd_reply));
+ return ret;
+}
+static int
+api_cli_inband (vat_main_t *vam)
+{
+ return exec_inband (vam);
+}
+
+int
+exec (vat_main_t *vam)
+{
+ return exec_inband (vam);
+}
+
+static int
+api_cli (vat_main_t *vam)
+{
+ return exec (vam);
+}
+
+static int
+api_get_node_index (vat_main_t *vam)
+{
+ unformat_input_t *i = vam->input;
+ vl_api_get_node_index_t *mp;
+ u8 *name = 0;
+ int ret;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "node %s", &name))
+ ;
+ else
+ break;
+ }
+ if (name == 0)
+ {
+ errmsg ("node name required");
+ return -99;
+ }
+ if (vec_len (name) >= ARRAY_LEN (mp->node_name))
+ {
+ errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name));
+ return -99;
+ }
+
+ M (GET_NODE_INDEX, mp);
+ clib_memcpy (mp->node_name, name, vec_len (name));
+ vec_free (name);
+
+ S (mp);
+ W (ret);
+ return ret;
+}
+
+static int
+api_get_next_index (vat_main_t *vam)
+{
+ unformat_input_t *i = vam->input;
+ vl_api_get_next_index_t *mp;
+ u8 *node_name = 0, *next_node_name = 0;
+ int ret;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "node-name %s", &node_name))
+ ;
+ else if (unformat (i, "next-node-name %s", &next_node_name))
+ break;
+ }
+
+ if (node_name == 0)
+ {
+ errmsg ("node name required");
+ return -99;
+ }
+ if (vec_len (node_name) >= ARRAY_LEN (mp->node_name))
+ {
+ errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name));
+ return -99;
+ }
+
+ if (next_node_name == 0)
+ {
+ errmsg ("next node name required");
+ return -99;
+ }
+ if (vec_len (next_node_name) >= ARRAY_LEN (mp->next_name))
+ {
+ errmsg ("next node name too long, max %d", ARRAY_LEN (mp->next_name));
+ return -99;
+ }
+
+ M (GET_NEXT_INDEX, mp);
+ clib_memcpy (mp->node_name, node_name, vec_len (node_name));
+ clib_memcpy (mp->next_name, next_node_name, vec_len (next_node_name));
+ vec_free (node_name);
+ vec_free (next_node_name);
+
+ S (mp);
+ W (ret);
+ return ret;
+}
+
+static int
+api_add_node_next (vat_main_t *vam)
+{
+ unformat_input_t *i = vam->input;
+ vl_api_add_node_next_t *mp;
+ u8 *name = 0;
+ u8 *next = 0;
+ int ret;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "node %s", &name))
+ ;
+ else if (unformat (i, "next %s", &next))
+ ;
+ else
+ break;
+ }
+ if (name == 0)
+ {
+ errmsg ("node name required");
+ return -99;
+ }
+ if (vec_len (name) >= ARRAY_LEN (mp->node_name))
+ {
+ errmsg ("node name too long, max %d", ARRAY_LEN (mp->node_name));
+ return -99;
+ }
+ if (next == 0)
+ {
+ errmsg ("next node required");
+ return -99;
+ }
+ if (vec_len (next) >= ARRAY_LEN (mp->next_name))
+ {
+ errmsg ("next name too long, max %d", ARRAY_LEN (mp->next_name));
+ return -99;
+ }
+
+ M (ADD_NODE_NEXT, mp);
+ clib_memcpy (mp->node_name, name, vec_len (name));
+ clib_memcpy (mp->next_name, next, vec_len (next));
+ vec_free (name);
+ vec_free (next);
+
+ S (mp);
+ W (ret);
+ return ret;
+}
+
+static void
+vl_api_show_threads_reply_t_handler (vl_api_show_threads_reply_t *mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+ int i, count = 0;
+
+ if (retval >= 0)
+ count = ntohl (mp->count);
+
+ for (i = 0; i < count; i++)
+ print (vam->ofp, "\n%-2d %-11s %-11s %-5d %-6d %-4d %-6d",
+ ntohl (mp->thread_data[i].id), mp->thread_data[i].name,
+ mp->thread_data[i].type, ntohl (mp->thread_data[i].pid),
+ ntohl (mp->thread_data[i].cpu_id), ntohl (mp->thread_data[i].core),
+ ntohl (mp->thread_data[i].cpu_socket));
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+}
+
+static int
+api_show_threads (vat_main_t *vam)
+{
+ vl_api_show_threads_t *mp;
+ int ret;
+
+ print (vam->ofp, "\n%-2s %-11s %-11s %-5s %-6s %-4s %-6s", "ID", "Name",
+ "Type", "LWP", "cpu_id", "Core", "Socket");
+
+ M (SHOW_THREADS, mp);
+
+ S (mp);
+ W (ret);
+ return ret;
+}
+
+static void
+vl_api_get_node_graph_reply_t_handler (vl_api_get_node_graph_reply_t *mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+ u8 *pvt_copy, *reply;
+ void *oldheap;
+ vlib_node_t *node;
+ int i;
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ }
+ else
+ {
+ vam->retval = retval;
+ vam->result_ready = 1;
+ }
+
+ /* "Should never happen..." */
+ if (retval != 0)
+ return;
+
+ reply = uword_to_pointer (mp->reply_in_shmem, u8 *);
+ pvt_copy = vec_dup (reply);
+
+ /* Toss the shared-memory original... */
+ oldheap = vl_msg_push_heap ();
+
+ vec_free (reply);
+
+ vl_msg_pop_heap (oldheap);
+
+ if (vam->graph_nodes)
+ {
+ hash_free (vam->graph_node_index_by_name);
+
+ for (i = 0; i < vec_len (vam->graph_nodes[0]); i++)
+ {
+ node = vam->graph_nodes[0][i];
+ vec_free (node->name);
+ vec_free (node->next_nodes);
+ vec_free (node);
+ }
+ vec_free (vam->graph_nodes[0]);
+ vec_free (vam->graph_nodes);
+ }
+
+ vam->graph_node_index_by_name = hash_create_string (0, sizeof (uword));
+ vam->graph_nodes = vlib_node_unserialize (pvt_copy);
+ vec_free (pvt_copy);
+
+ for (i = 0; i < vec_len (vam->graph_nodes[0]); i++)
+ {
+ node = vam->graph_nodes[0][i];
+ hash_set_mem (vam->graph_node_index_by_name, node->name, i);
+ }
+}
+
+static int
+api_get_node_graph (vat_main_t *vam)
+{
+ vl_api_get_node_graph_t *mp;
+ int ret;
+
+ M (GET_NODE_GRAPH, mp);
+
+ /* send it... */
+ S (mp);
+ /* Wait for the reply */
+ W (ret);
+ return ret;
+}
+
+#define VL_API_LOCAL_SETUP_MESSAGE_ID_TABLE local_setup_message_id_table
+static void
+local_setup_message_id_table (vat_main_t *vam)
+{
+ /* Add exec as an alias for cli_inband */
+ hash_set_mem (vam->function_by_name, "exec", api_cli_inband);
+ hash_set_mem (vam->help_by_name, "exec",
+ "usage: exec <vpe-debug-CLI-command>");
+}
+
+#include <vlibmemory/vlib.api_test.c>
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index 68133308b76..2dbde7c5259 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -1520,6 +1520,12 @@ add_vpp_library (vatclient
DEPENDS api_headers
)
+add_vat_test_library(vnet
+ ip/ip_test.c
+ arp/arp_test.c
+ ip6-nd/ip6_nd_test.c
+)
+
##############################################################################
# VAT2 plugins
##############################################################################
diff --git a/src/vnet/arp/arp_test.c b/src/vnet/arp/arp_test.c
index 29eeeb59d0f..b92fa06f9f6 100644
--- a/src/vnet/arp/arp_test.c
+++ b/src/vnet/arp/arp_test.c
@@ -158,8 +158,6 @@ api_proxy_arp_intfc_enable_disable (vat_main_t * vam)
#include <vnet/arp/arp.api_test.c>
-VAT_REGISTER_FEATURE_FUNCTION (vat_arp_plugin_register);
-
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/vnet/ip/ip_test.c b/src/vnet/ip/ip_test.c
index 6bdeca5ff32..81a84c12a3b 100644
--- a/src/vnet/ip/ip_test.c
+++ b/src/vnet/ip/ip_test.c
@@ -1547,8 +1547,6 @@ vl_api_ip_details_t_handler (vl_api_ip_details_t *mp)
#include <vnet/ip/ip.api_test.c>
-VAT_REGISTER_FEATURE_FUNCTION (vat_ip_plugin_register);
-
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/vnet/ip6-nd/ip6_nd_test.c b/src/vnet/ip6-nd/ip6_nd_test.c
index 5ca37029a76..99f869a5a1d 100644
--- a/src/vnet/ip6-nd/ip6_nd_test.c
+++ b/src/vnet/ip6-nd/ip6_nd_test.c
@@ -318,6 +318,12 @@ api_sw_interface_ip6nd_ra_config (vat_main_t * vam)
W (ret);
return ret;
}
+static int
+api_ip6nd_proxy_enable_disable (vat_main_t *vam)
+{
+  /* not yet implemented */
+ return -1;
+}
#include <ip6-nd/ip6_nd.api_test.c>
diff --git a/src/vpp/CMakeLists.txt b/src/vpp/CMakeLists.txt
index e15037cccb7..32f702de36d 100644
--- a/src/vpp/CMakeLists.txt
+++ b/src/vpp/CMakeLists.txt
@@ -73,8 +73,6 @@ if(VPP_API_TEST_BUILTIN)
api/api_main.c
api/plugin.c
api/types.c
- ../vnet/arp/arp_test.c
- ../vnet/ip/ip_test.c
)
add_definitions(-DVPP_API_TEST_BUILTIN=1)
endif()
diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c
index b935c002228..5477ec949ae 100644
--- a/src/vpp/api/api.c
+++ b/src/vpp/api/api.c
@@ -76,20 +76,11 @@
#undef vl_printfun
#include <vlibapi/api_helper_macros.h>
-#define foreach_vpe_api_msg \
-_(CONTROL_PING, control_ping) \
-_(CLI, cli) \
-_(CLI_INBAND, cli_inband) \
-_(GET_NODE_INDEX, get_node_index) \
-_(ADD_NODE_NEXT, add_node_next) \
-_(SHOW_VERSION, show_version) \
-_(SHOW_THREADS, show_threads) \
-_(GET_NODE_GRAPH, get_node_graph) \
-_(GET_NEXT_INDEX, get_next_index) \
-_(LOG_DUMP, log_dump) \
-_(SHOW_VPE_SYSTEM_TIME, show_vpe_system_time) \
-_(GET_F64_ENDIAN_VALUE, get_f64_endian_value) \
-_(GET_F64_INCREMENT_BY_ONE, get_f64_increment_by_one) \
+#define foreach_vpe_api_msg \
+ _ (CONTROL_PING, control_ping) \
+ _ (SHOW_VERSION, show_version) \
+ _ (SHOW_VPE_SYSTEM_TIME, show_vpe_system_time) \
+ _ (LOG_DUMP, log_dump)
#define QUOTE_(x) #x
#define QUOTE(x) QUOTE_(x)
@@ -139,108 +130,6 @@ vl_api_control_ping_t_handler (vl_api_control_ping_t * mp)
}
static void
-shmem_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
-{
- u8 **shmem_vecp = (u8 **) arg;
- u8 *shmem_vec;
- void *oldheap;
- u32 offset;
-
- shmem_vec = *shmem_vecp;
-
- offset = vec_len (shmem_vec);
-
- oldheap = vl_msg_push_heap ();
-
- vec_validate (shmem_vec, offset + buffer_bytes - 1);
-
- clib_memcpy (shmem_vec + offset, buffer, buffer_bytes);
-
- vl_msg_pop_heap (oldheap);
-
- *shmem_vecp = shmem_vec;
-}
-
-
-static void
-vl_api_cli_t_handler (vl_api_cli_t * mp)
-{
- vl_api_cli_reply_t *rp;
- vl_api_registration_t *reg;
- vlib_main_t *vm = vlib_get_main ();
- unformat_input_t input;
- u8 *shmem_vec = 0;
- void *oldheap;
-
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- return;;
-
- rp = vl_msg_api_alloc (sizeof (*rp));
- rp->_vl_msg_id = ntohs (VL_API_CLI_REPLY);
- rp->context = mp->context;
-
- unformat_init_vector (&input, (u8 *) (uword) mp->cmd_in_shmem);
-
- vlib_cli_input (vm, &input, shmem_cli_output, (uword) & shmem_vec);
-
- oldheap = vl_msg_push_heap ();
- vec_add1 (shmem_vec, 0);
- vl_msg_pop_heap (oldheap);
-
- rp->reply_in_shmem = (uword) shmem_vec;
-
- vl_api_send_msg (reg, (u8 *) rp);
-}
-
-static void
-inband_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
-{
- u8 **mem_vecp = (u8 **) arg;
- u8 *mem_vec = *mem_vecp;
- u32 offset = vec_len (mem_vec);
-
- vec_validate (mem_vec, offset + buffer_bytes - 1);
- clib_memcpy (mem_vec + offset, buffer, buffer_bytes);
- *mem_vecp = mem_vec;
-}
-
-static void
-vl_api_cli_inband_t_handler (vl_api_cli_inband_t * mp)
-{
- vl_api_cli_inband_reply_t *rmp;
- int rv = 0;
- vlib_main_t *vm = vlib_get_main ();
- unformat_input_t input;
- u8 *out_vec = 0;
- u8 *cmd_vec = 0;
-
- if (vl_msg_api_get_msg_length (mp) <
- vl_api_string_len (&mp->cmd) + sizeof (*mp))
- {
- rv = -1;
- goto error;
- }
-
- cmd_vec = vl_api_from_api_to_new_vec (mp, &mp->cmd);
-
- unformat_init_string (&input, (char *) cmd_vec,
- vl_api_string_len (&mp->cmd));
- rv = vlib_cli_input (vm, &input, inband_cli_output, (uword) & out_vec);
- unformat_free (&input);
-
-error:
- /* *INDENT-OFF* */
- REPLY_MACRO3(VL_API_CLI_INBAND_REPLY, vec_len (out_vec),
- ({
- vl_api_vec_to_api_string(out_vec, &rmp->reply);
- }));
- /* *INDENT-ON* */
- vec_free (out_vec);
- vec_free (cmd_vec);
-}
-
-static void
vl_api_show_version_t_handler (vl_api_show_version_t * mp)
{
vl_api_show_version_reply_t *rmp;
@@ -264,204 +153,14 @@ vl_api_show_version_t_handler (vl_api_show_version_t * mp)
}
static void
-get_thread_data (vl_api_thread_data_t * td, int index)
+vl_api_show_vpe_system_time_t_handler (vl_api_show_vpe_system_time_t *mp)
{
- vlib_worker_thread_t *w = vlib_worker_threads + index;
- td->id = htonl (index);
- if (w->name)
- strncpy ((char *) td->name, (char *) w->name, ARRAY_LEN (td->name) - 1);
- if (w->registration)
- strncpy ((char *) td->type, (char *) w->registration->name,
- ARRAY_LEN (td->type) - 1);
- td->pid = htonl (w->lwp);
- td->cpu_id = htonl (w->cpu_id);
- td->core = htonl (w->core_id);
- td->cpu_socket = htonl (w->numa_id);
-}
-
-static void
-vl_api_show_threads_t_handler (vl_api_show_threads_t * mp)
-{
- int count = 0;
-
-#if !defined(__powerpc64__)
- vl_api_registration_t *reg;
- vl_api_show_threads_reply_t *rmp;
- vl_api_thread_data_t *td;
- int i, msg_size = 0;
- count = vec_len (vlib_worker_threads);
- if (!count)
- return;
-
- msg_size = sizeof (*rmp) + sizeof (rmp->thread_data[0]) * count;
- reg = vl_api_client_index_to_registration (mp->client_index);
- if (!reg)
- return;
-
- rmp = vl_msg_api_alloc (msg_size);
- clib_memset (rmp, 0, msg_size);
- rmp->_vl_msg_id = htons (VL_API_SHOW_THREADS_REPLY);
- rmp->context = mp->context;
- rmp->count = htonl (count);
- td = rmp->thread_data;
-
- for (i = 0; i < count; i++)
- {
- get_thread_data (&td[i], i);
- }
-
- vl_api_send_msg (reg, (u8 *) rmp);
-#else
-
- /* unimplemented support */
- rv = -9;
- clib_warning ("power pc does not support show threads api");
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_SHOW_THREADS_REPLY,
- ({
- rmp->count = htonl(count);
- }));
- /* *INDENT-ON* */
-#endif
-}
-
-static void
-vl_api_get_node_index_t_handler (vl_api_get_node_index_t * mp)
-{
- vlib_main_t *vm = vlib_get_main ();
- vl_api_get_node_index_reply_t *rmp;
- vlib_node_t *n;
int rv = 0;
- u32 node_index = ~0;
-
- n = vlib_get_node_by_name (vm, mp->node_name);
-
- if (n == 0)
- rv = VNET_API_ERROR_NO_SUCH_NODE;
- else
- node_index = n->index;
-
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_GET_NODE_INDEX_REPLY,
- ({
- rmp->node_index = htonl(node_index);
- }));
- /* *INDENT-ON* */
-}
-
-static void
-vl_api_get_next_index_t_handler (vl_api_get_next_index_t * mp)
-{
- vlib_main_t *vm = vlib_get_main ();
- vl_api_get_next_index_reply_t *rmp;
- vlib_node_t *node, *next_node;
- int rv = 0;
- u32 next_node_index = ~0, next_index = ~0;
- uword *p;
-
- node = vlib_get_node_by_name (vm, mp->node_name);
-
- if (node == 0)
- {
- rv = VNET_API_ERROR_NO_SUCH_NODE;
- goto out;
- }
-
- next_node = vlib_get_node_by_name (vm, mp->next_name);
-
- if (next_node == 0)
- {
- rv = VNET_API_ERROR_NO_SUCH_NODE2;
- goto out;
- }
- else
- next_node_index = next_node->index;
-
- p = hash_get (node->next_slot_by_node, next_node_index);
-
- if (p == 0)
- {
- rv = VNET_API_ERROR_NO_SUCH_ENTRY;
- goto out;
- }
- else
- next_index = p[0];
-
-out:
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_GET_NEXT_INDEX_REPLY,
- ({
- rmp->next_index = htonl(next_index);
- }));
- /* *INDENT-ON* */
-}
-
-static void
-vl_api_add_node_next_t_handler (vl_api_add_node_next_t * mp)
-{
- vlib_main_t *vm = vlib_get_main ();
- vl_api_add_node_next_reply_t *rmp;
- vlib_node_t *n, *next;
- int rv = 0;
- u32 next_index = ~0;
-
- n = vlib_get_node_by_name (vm, mp->node_name);
-
- if (n == 0)
- {
- rv = VNET_API_ERROR_NO_SUCH_NODE;
- goto out;
- }
-
- next = vlib_get_node_by_name (vm, mp->next_name);
-
- if (next == 0)
- rv = VNET_API_ERROR_NO_SUCH_NODE2;
- else
- next_index = vlib_node_add_next (vm, n->index, next->index);
-
-out:
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_ADD_NODE_NEXT_REPLY,
- ({
- rmp->next_index = htonl(next_index);
- }));
- /* *INDENT-ON* */
-}
-
-static void
-vl_api_get_node_graph_t_handler (vl_api_get_node_graph_t * mp)
-{
- int rv = 0;
- u8 *vector = 0;
- vlib_main_t *vm = vlib_get_main ();
- void *oldheap;
- vl_api_get_node_graph_reply_t *rmp;
- static vlib_node_t ***node_dups;
- static vlib_main_t **stat_vms;
-
- oldheap = vl_msg_push_heap ();
-
- /*
- * Keep the number of memcpy ops to a minimum (e.g. 1).
- */
- vec_validate (vector, 16384);
- vec_reset_length (vector);
-
- vlib_node_get_nodes (vm, 0 /* main threads */ ,
- 0 /* include stats */ ,
- 1 /* barrier sync */ ,
- &node_dups, &stat_vms);
- vector = vlib_node_serialize (vm, node_dups, vector, 1 /* include nexts */ ,
- 1 /* include stats */ );
-
- vl_msg_pop_heap (oldheap);
-
+ vl_api_show_vpe_system_time_reply_t *rmp;
/* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_GET_NODE_GRAPH_REPLY,
- ({
- rmp->reply_in_shmem = (uword) vector;
- }));
+ REPLY_MACRO2 (
+ VL_API_SHOW_VPE_SYSTEM_TIME_REPLY,
+ ({ rmp->vpe_system_time = clib_host_to_net_f64 (unix_time_now ()); }));
/* *INDENT-ON* */
}
@@ -530,51 +229,6 @@ vl_api_log_dump_t_handler (vl_api_log_dump_t * mp)
}
-static void
-vl_api_show_vpe_system_time_t_handler (vl_api_show_vpe_system_time_t * mp)
-{
- int rv = 0;
- vl_api_show_vpe_system_time_reply_t *rmp;
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_SHOW_VPE_SYSTEM_TIME_REPLY,
- ({
- rmp->vpe_system_time = clib_host_to_net_f64 (unix_time_now ());
- }));
- /* *INDENT-ON* */
-}
-
-static void
-vl_api_get_f64_endian_value_t_handler (vl_api_get_f64_endian_value_t * mp)
-{
- int rv = 0;
- f64 one = 1.0;
- vl_api_get_f64_endian_value_reply_t *rmp;
- if (1.0 != clib_net_to_host_f64 (mp->f64_one))
- rv = VNET_API_ERROR_API_ENDIAN_FAILED;
-
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_GET_F64_ENDIAN_VALUE_REPLY,
- ({
- rmp->f64_one_result = clib_host_to_net_f64 (one);
- }));
- /* *INDENT-ON* */
-}
-
-static void
-vl_api_get_f64_increment_by_one_t_handler (vl_api_get_f64_increment_by_one_t *
- mp)
-{
- int rv = 0;
- vl_api_get_f64_increment_by_one_reply_t *rmp;
-
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_GET_F64_INCREMENT_BY_ONE_REPLY,
- ({
- rmp->f64_value = clib_host_to_net_f64 (clib_net_to_host_f64(mp->f64_value) + 1.0);
- }));
- /* *INDENT-ON* */
-}
-
#define BOUNCE_HANDLER(nn) \
static void vl_api_##nn##_t_handler ( \
vl_api_##nn##_t *mp) \
@@ -642,7 +296,6 @@ vpe_api_hookup (vlib_main_t * vm)
am->is_mp_safe[VL_API_CONTROL_PING] = 1;
am->is_mp_safe[VL_API_CONTROL_PING_REPLY] = 1;
am->is_mp_safe[VL_API_IP_ROUTE_ADD_DEL] = 1;
- am->is_mp_safe[VL_API_GET_NODE_GRAPH] = 1;
/*
* Set up the (msg_name, crc, message-id) table
diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api
index 9047d0e1aa9..3c4c0214ad5 100644
--- a/src/vpp/api/vpe.api
+++ b/src/vpp/api/vpe.api
@@ -19,7 +19,7 @@
called through a shared memory interface.
*/
-option version = "1.6.1";
+option version = "1.7.0";
import "vpp/api/vpe_types.api";
@@ -77,92 +77,6 @@ define control_ping_reply
u32 vpe_pid;
};
-/** \brief Process a vpe parser cli string request
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param cmd_in_shmem - pointer to cli command string
-*/
-define cli
-{
- u32 client_index;
- u32 context;
- u64 cmd_in_shmem;
-};
-define cli_inband
-{
- u32 client_index;
- u32 context;
- string cmd[];
-};
-
-/** \brief vpe parser cli string response
- @param context - sender context, to match reply w/ request
- @param retval - return code for request
- @param reply_in_shmem - Reply string from cli processing if any
-*/
-define cli_reply
-{
- u32 context;
- i32 retval;
- u64 reply_in_shmem;
-};
-define cli_inband_reply
-{
- u32 context;
- i32 retval;
- string reply[];
-};
-
-/** \brief Get node index using name request
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param node_name[] - name of the node
-*/
-define get_node_index
-{
- u32 client_index;
- u32 context;
- string node_name[64];
-};
-
-/** \brief Get node index using name request
- @param context - sender context, to match reply w/ request
- @param retval - return code for the request
- @param node_index - index of the desired node if found, else ~0
-*/
-define get_node_index_reply
-{
- u32 context;
- i32 retval;
- u32 node_index;
-};
-
-/** \brief Set the next node for a given node request
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param node_name[] - node to add the next node to
- @param next_name[] - node to add as the next node
-*/
-define add_node_next
-{
- u32 client_index;
- u32 context;
- string node_name[64];
- string next_name[64];
-};
-
-/** \brief IP Set the next node for a given node response
- @param context - sender context, to match reply w/ request
- @param retval - return code for the add next node request
- @param next_index - the index of the next node if success, else ~0
-*/
-define add_node_next_reply
-{
- u32 context;
- i32 retval;
- u32 next_index;
-};
-
/** \brief show version
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
@@ -190,99 +104,26 @@ define show_version_reply
string build_directory[256];
};
-
-/** \brief show_threads display the information about vpp
- threads running on system along with their process id,
- cpu id, physical core and cpu socket.
-*/
-define show_threads
-{
- u32 client_index;
- u32 context;
-};
-
-/** \brief thread data
- @param id - thread index
- @param name - thread name i.e. vpp_main or vpp_wk_0
- @param type - thread type i.e. workers or stats
- @param pid - thread Process Id
- @param cpu_id - thread pinned to cpu.
- "CPUs or Logical cores are the number of physical cores times
- the number of threads that can run on each core through
- the use of hyperthreading." (from unix.stackexchange.com)
- @param core - thread pinned to actual physical core.
- @param cpu_socket - thread is running on which cpu socket.
-*/
-typedef thread_data
-{
- u32 id;
- string name[64];
- string type[64];
- u32 pid;
- u32 cpu_id;
- u32 core;
- u32 cpu_socket;
-};
-
-/** \brief show_threads_reply
- @param context - returned sender context, to match reply w/ request
- @param retval - return code
- @param count - number of threads in thread_data array
- @param thread_data - array of thread data
-*/
-define show_threads_reply
-{
- u32 context;
- i32 retval;
- u32 count;
- vl_api_thread_data_t thread_data[count];
-};
-
-define get_node_graph
-{
- u32 client_index;
- u32 context;
-};
-
-/** \brief get_node_graph_reply
- @param context - returned sender context, to match reply w/ request
- @param retval - return code
- @param reply_in_shmem - result from vlib_node_serialize, in shared
- memory. Process with vlib_node_unserialize, remember to switch
- heaps and free the result.
-*/
-
-define get_node_graph_reply
-{
- u32 context;
- i32 retval;
- u64 reply_in_shmem;
-};
-
-/** \brief Query relative index via node names
+/** \brief Show the current system timestamp.
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
- @param node_name - name of node to find relative index from
- @param next_name - next node from node_name to find relative index of
*/
-define get_next_index
+define show_vpe_system_time
{
u32 client_index;
u32 context;
- string node_name[64];
- string next_name[64];
};
-/** \brief Reply for get next node index
+/** \brief Reply for show vpe system time.
@param context - sender context which was passed in the request
@param retval - return value
- @param next_index - index of the next_node
+ @param vpe_system_time - the time in seconds since epoch of the host system.
*/
-define get_next_index_reply
+define show_vpe_system_time_reply
{
u32 context;
i32 retval;
- u32 next_index;
+ vl_api_timestamp_t vpe_system_time;
};
define log_dump {
@@ -299,76 +140,6 @@ define log_details {
string message[256];
};
-/** \brief Show the current system timestamp.
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
-*/
-define show_vpe_system_time
-{
- u32 client_index;
- u32 context;
-};
-
-/** \brief Reply for show vpe system time.
- @param context - sender context which was passed in the request
- @param retval - return value
- @param vpe_system_time - the time in seconds since epoch of the host system.
-*/
-define show_vpe_system_time_reply
-{
- u32 context;
- i32 retval;
- vl_api_timestamp_t vpe_system_time;
-};
-
-/** \brief f64 types are not standardized across the wire. Sense wire format in each direction by sending the f64 value 1.0.
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param f64_one - The constant of 1.0. If you send a different value, expect an rv=VNET_API_ERROR_API_ENDIAN_FAILED.
-*/
-define get_f64_endian_value
-{
- u32 client_index;
- u32 context;
- f64 f64_one [default=1.0];
-};
-
-/** \brief get_f64_endian_value reply message
- @param context - sender context which was passed in the request
- @param retval - return value - VNET_API_ERROR_API_ENDIAN_FAILED if f64_one != 1.0
- @param f64_one_result - The value of 'f64 1.0'
-*/
-define get_f64_endian_value_reply
-{
- u32 context;
- u32 retval;
- f64 f64_one_result;
-};
-
-/** \brief Verify f64 wire format by sending a value and receiving the value + 1.0
- @param client_index - opaque cookie to identify the sender.
- @param context - sender context, to match reply w/ request.
- @param f64_value - The value you want to test. Default: 1.0.
-*/
-define get_f64_increment_by_one
-{
- u32 client_index;
- u32 context;
- f64 f64_value [default=1.0];
-};
-
-/** \brief get_f64_increment_by_one reply
- @param client_index - opaque cookie to identify the sender.
- @param context - sender context, to match reply w/ request.
- @param f64_value - The input f64_value incremented by 1.0.
-*/
-define get_f64_increment_by_one_reply
-{
- u32 context;
- u32 retval;
- f64 f64_value;
-};
-
/*
* Local Variables:
* eval: (c-set-style "gnu")
n class="p">, inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0 ) err_msg = 'Failed to create sub-interface on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) if_key = Topology.add_new_port(node, 'subinterface') Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_if_index @staticmethod def create_gre_tunnel_interface(node, source_ip, destination_ip): """Create GRE tunnel interface on node. :param node: VPP node to add tunnel interface. :param source_ip: Source of the GRE tunnel. :param destination_ip: Destination of the GRE tunnel. :type node: dict :type source_ip: str :type destination_ip: str :returns: Name and index of created GRE tunnel interface. :rtype: tuple :raises RuntimeError: If unable to create GRE tunnel interface. """ cmd = 'gre_tunnel_add_del' tunnel = dict(type=0, instance=Constants.BITWISE_NON_ZERO, src=str(source_ip), dst=str(destination_ip), outer_fib_id=0, session_id=0) args = dict(is_add=1, tunnel=tunnel) err_msg = 'Failed to create GRE tunnel interface on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) if_key = Topology.add_new_port(node, 'gre_tunnel') Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) return ifc_name, sw_if_index @staticmethod def vpp_create_loopback(node, mac=None): """Create loopback interface on VPP node. :param node: Node to create loopback interface on. :param mac: Optional MAC address for loopback interface. :type node: dict :type mac: str :returns: SW interface index. 
:rtype: int :raises RuntimeError: If it is not possible to create loopback on the node. """ cmd = 'create_loopback' args = dict(mac_address=L2Util.mac_to_bin(mac) if mac else 0) err_msg = 'Failed to create loopback interface on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) if_key = Topology.add_new_port(node, 'loopback') Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) if mac: mac = InterfaceUtil.vpp_get_interface_mac(node, ifc_name) Topology.update_interface_mac_address(node, if_key, mac) return sw_if_index @staticmethod def vpp_create_bond_interface(node, mode, load_balance=None, mac=None): """Create bond interface on VPP node. :param node: DUT node from topology. :param mode: Link bonding mode. :param load_balance: Load balance (optional, valid for xor and lacp modes, otherwise ignored). :param mac: MAC address to assign to the bond interface (optional). :type node: dict :type mode: str :type load_balance: str :type mac: str :returns: Interface key (name) in topology. :rtype: str :raises RuntimeError: If it is not possible to create bond interface on the node. 
""" cmd = 'bond_create' args = dict( id=int(Constants.BITWISE_NON_ZERO), use_custom_mac=False if mac is None else True, mac_address=L2Util.mac_to_bin(mac) if mac else None, mode=getattr(LinkBondMode, 'BOND_API_MODE_{md}'.format( md=mode.replace('-', '_').upper())).value, lb=0 if load_balance is None else getattr( LinkBondLoadBalanceAlgo, 'BOND_API_LB_ALGO_{lb}'.format( lb=load_balance.upper())).value, numa_only=False ) err_msg = 'Failed to create bond interface on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) InterfaceUtil.add_eth_interface( node, sw_if_index=sw_if_index, ifc_pfx='eth_bond') if_key = Topology.get_interface_by_sw_index(node, sw_if_index) return if_key @staticmethod def add_eth_interface(node, ifc_name=None, sw_if_index=None, ifc_pfx=None): """Add ethernet interface to current topology. :param node: DUT node from topology. :param ifc_name: Name of the interface. :param sw_if_index: SW interface index. :param ifc_pfx: Interface key prefix. :type node: dict :type ifc_name: str :type sw_if_index: int :type ifc_pfx: str """ if_key = Topology.add_new_port(node, ifc_pfx) if ifc_name and sw_if_index is None: sw_if_index = InterfaceUtil.vpp_get_interface_sw_index( node, ifc_name) Topology.update_interface_sw_if_index(node, if_key, sw_if_index) if sw_if_index and ifc_name is None: ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index) Topology.update_interface_mac_address(node, if_key, ifc_mac) @staticmethod def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None): """Create AVF interface on VPP node. :param node: DUT node from topology. :param vf_pci_addr: PCI address binded to i40evf driver. :param num_rx_queues: Number of RX queues. 
:type node: dict :type vf_pci_addr: str :type num_rx_queues: int :returns: Interface key (name) in topology. :rtype: str :raises RuntimeError: If it is not possible to create AVF interface on the node. """ PapiSocketExecutor.run_cli_cmd( node, 'set logging class avf level debug') cmd = 'avf_create' args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr), enable_elog=0, rxq_num=int(num_rx_queues) if num_rx_queues else 0, rxq_size=0, txq_size=0) err_msg = 'Failed to create AVF interface on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) InterfaceUtil.add_eth_interface( node, sw_if_index=sw_if_index, ifc_pfx='eth_avf') if_key = Topology.get_interface_by_sw_index(node, sw_if_index) return if_key @staticmethod def vpp_create_rdma_interface(node, pci_addr, num_rx_queues=None): """Create RDMA interface on VPP node. :param node: DUT node from topology. :param pci_addr: PCI address binded to rdma-core driver. :param num_rx_queues: Number of RX queues. :type node: dict :type pci_addr: str :type num_rx_queues: int :returns: Interface key (name) in topology. :rtype: str :raises RuntimeError: If it is not possible to create RDMA interface on the node. """ cmd = 'rdma_create' args = dict(name=InterfaceUtil.pci_to_eth(node, pci_addr), host_if=InterfaceUtil.pci_to_eth(node, pci_addr), rxq_num=int(num_rx_queues) if num_rx_queues else 0, rxq_size=0, txq_size=0) err_msg = 'Failed to create RDMA interface on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) InterfaceUtil.add_eth_interface( node, sw_if_index=sw_if_index, ifc_pfx='eth_rdma') if_key = Topology.get_interface_by_sw_index(node, sw_if_index) return if_key @staticmethod def vpp_enslave_physical_interface(node, interface, bond_if): """Enslave physical interface to bond interface on VPP node. 
:param node: DUT node from topology. :param interface: Physical interface key from topology file. :param bond_if: Load balance :type node: dict :type interface: str :type bond_if: str :raises RuntimeError: If it is not possible to enslave physical interface to bond interface on the node. """ cmd = 'bond_enslave' args = dict( sw_if_index=Topology.get_interface_sw_index(node, interface), bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if), is_passive=False, is_long_timeout=False ) err_msg = 'Failed to enslave physical interface {ifc} to bond ' \ 'interface {bond} on host {host}'.format(ifc=interface, bond=bond_if, host=node['host']) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod def vpp_show_bond_data_on_node(node, verbose=False): """Show (detailed) bond information on VPP node. :param node: DUT node from topology. :param verbose: If detailed information is required or not. :type node: dict :type verbose: bool """ cmd = 'sw_interface_bond_dump' err_msg = 'Failed to get bond interface dump on host {host}'.format( host=node['host']) data = ('Bond data on node {host}:\n'.format(host=node['host'])) with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details(err_msg) for bond in details: data += ('{b}\n'.format(b=bond['interface_name'])) data += (' mode: {m}\n'.format( m=bond['mode'].name.replace('BOND_API_MODE_', '').lower())) data += (' load balance: {lb}\n'.format( lb=bond['lb'].name.replace('BOND_API_LB_ALGO_', '').lower())) data += (' number of active slaves: {n}\n'.format( n=bond['active_slaves'])) if verbose: slave_data = InterfaceUtil.vpp_bond_slave_dump( node, Topology.get_interface_by_sw_index( node, bond['sw_if_index'])) for slave in slave_data: if not slave['is_passive']: data += (' {s}\n'.format(s=slave['interface_name'])) data += (' number of slaves: {n}\n'.format(n=bond['slaves'])) if verbose: for slave in slave_data: data += (' 
{s}\n'.format(s=slave['interface_name'])) data += (' interface id: {i}\n'.format(i=bond['id'])) data += (' sw_if_index: {i}\n'.format(i=bond['sw_if_index'])) logger.info(data) @staticmethod def vpp_bond_slave_dump(node, interface): """Get bond interface slave(s) data on VPP node. :param node: DUT node from topology. :param interface: Physical interface key from topology file. :type node: dict :type interface: str :returns: Bond slave interface data. :rtype: dict """ cmd = 'sw_interface_slave_dump' args = dict(sw_if_index=Topology.get_interface_sw_index( node, interface)) err_msg = 'Failed to get slave dump on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) logger.debug('Slave data:\n{slave_data}'.format(slave_data=details)) return details @staticmethod def vpp_show_bond_data_on_all_nodes(nodes, verbose=False): """Show (detailed) bond information on all VPP nodes in DICT__nodes. :param nodes: Nodes in the topology. :param verbose: If detailed information is required or not. :type nodes: dict :type verbose: bool """ for node_data in nodes.values(): if node_data['type'] == NodeType.DUT: InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose) @staticmethod def vpp_enable_input_acl_interface(node, interface, ip_version, table_index): """Enable input acl on interface. :param node: VPP node to setup interface for input acl. :param interface: Interface to setup input acl. :param ip_version: Version of IP protocol. :param table_index: Classify table index. 
:type node: dict :type interface: str or int :type ip_version: str :type table_index: int """ cmd = 'input_acl_set_interface' args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), ip4_table_index=table_index if ip_version == 'ip4' else Constants.BITWISE_NON_ZERO, ip6_table_index=table_index if ip_version == 'ip6' else Constants.BITWISE_NON_ZERO, l2_table_index=table_index if ip_version == 'l2' else Constants.BITWISE_NON_ZERO, is_add=1) err_msg = 'Failed to enable input acl on interface {ifc}'.format( ifc=interface) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod def get_interface_classify_table(node, interface): """Get name of classify table for the given interface. TODO: Move to Classify.py. :param node: VPP node to get data from. :param interface: Name or sw_if_index of a specific interface. :type node: dict :type interface: str or int :returns: Classify table name. :rtype: str """ if isinstance(interface, basestring): sw_if_index = InterfaceUtil.get_sw_if_index(node, interface) else: sw_if_index = interface cmd = 'classify_table_by_interface' args = dict(sw_if_index=sw_if_index) err_msg = 'Failed to get classify table name by interface {ifc}'.format( ifc=interface) with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd, **args).get_reply(err_msg) return reply @staticmethod def get_sw_if_index(node, interface_name): """Get sw_if_index for the given interface from actual interface dump. :param node: VPP node to get interface data from. :param interface_name: Name of the specific interface. :type node: dict :type interface_name: str :returns: sw_if_index of the given interface. :rtype: str """ interface_data = InterfaceUtil.vpp_get_interface_data( node, interface=interface_name) return interface_data.get('sw_if_index') @staticmethod def vxlan_gpe_dump(node, interface_name=None): """Get VxLAN GPE data for the given interface. 
:param node: VPP node to get interface data from. :param interface_name: Name of the specific interface. If None, information about all VxLAN GPE interfaces is returned. :type node: dict :type interface_name: str :returns: Dictionary containing data for the given VxLAN GPE interface or if interface=None, the list of dictionaries with all VxLAN GPE interfaces. :rtype: dict or list """ def process_vxlan_gpe_dump(vxlan_dump): """Process vxlan_gpe dump. :param vxlan_dump: Vxlan_gpe nterface dump. :type vxlan_dump: dict :returns: Processed vxlan_gpe interface dump. :rtype: dict """ if vxlan_dump['is_ipv6']: vxlan_dump['local'] = \ ip_address(unicode(vxlan_dump['local'])) vxlan_dump['remote'] = \ ip_address(unicode(vxlan_dump['remote'])) else: vxlan_dump['local'] = \ ip_address(unicode(vxlan_dump['local'][0:4])) vxlan_dump['remote'] = \ ip_address(unicode(vxlan_dump['remote'][0:4])) return vxlan_dump if interface_name is not None: sw_if_index = InterfaceUtil.get_interface_index( node, interface_name) else: sw_if_index = int(Constants.BITWISE_NON_ZERO) cmd = 'vxlan_gpe_tunnel_dump' args = dict(sw_if_index=sw_if_index) err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format( host=node['host']) with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) data = list() if interface_name is None else dict() for dump in details: if interface_name is None: data.append(process_vxlan_gpe_dump(dump)) elif dump['sw_if_index'] == sw_if_index: data = process_vxlan_gpe_dump(dump) break logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format( vxlan_gpe_data=data)) return data @staticmethod def assign_interface_to_fib_table(node, interface, table_id, ipv6=False): """Assign VPP interface to specific VRF/FIB table. :param node: VPP node where the FIB and interface are located. :param interface: Interface to be assigned to FIB. :param table_id: VRF table ID. :param ipv6: Assign to IPv6 table. Default False. 
:type node: dict :type interface: str or int :type table_id: int :type ipv6: bool """ cmd = 'sw_interface_set_table' args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), is_ipv6=ipv6, vrf_id=int(table_id)) err_msg = 'Failed to assign interface {ifc} to FIB table'.format( ifc=interface) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod def set_linux_interface_mac(node, interface, mac, namespace=None, vf_id=None): """Set MAC address for interface in linux. :param node: Node where to execute command. :param interface: Interface in namespace. :param mac: MAC to be assigned to interface. :param namespace: Execute command in namespace. Optional :param vf_id: Virtual Function id. Optional :type node: dict :type interface: str :type mac: str :type namespace: str :type vf_id: int """ mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \ if vf_id is not None else 'address {mac}'.format(mac=mac) ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else '' cmd = ('{ns} ip link set {interface} {mac}'. format(ns=ns_str, interface=interface, mac=mac_str)) exec_cmd_no_error(node, cmd, sudo=True) @staticmethod def set_linux_interface_trust_on(node, interface, namespace=None, vf_id=None): """Set trust on (promisc) for interface in linux. :param node: Node where to execute command. :param interface: Interface in namespace. :param namespace: Execute command in namespace. Optional :param vf_id: Virtual Function id. Optional :type node: dict :type interface: str :type namespace: str :type vf_id: int """ trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \ if vf_id is not None else 'trust on' ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else '' cmd = ('{ns} ip link set dev {interface} {trust}'. 
format(ns=ns_str, interface=interface, trust=trust_str)) exec_cmd_no_error(node, cmd, sudo=True) @staticmethod def set_linux_interface_spoof_off(node, interface, namespace=None, vf_id=None): """Set spoof off for interface in linux. :param node: Node where to execute command. :param interface: Interface in namespace. :param namespace: Execute command in namespace. Optional :param vf_id: Virtual Function id. Optional :type node: dict :type interface: str :type namespace: str :type vf_id: int """ spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \ if vf_id is not None else 'spoof off' ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else '' cmd = ('{ns} ip link set dev {interface} {spoof}'. format(ns=ns_str, interface=interface, spoof=spoof_str)) exec_cmd_no_error(node, cmd, sudo=True) @staticmethod def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'): """Init PCI device by creating VIFs and bind them to vfio-pci for AVF driver testing on DUT. :param node: DUT node. :param ifc_key: Interface key from topology file. :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs. :param osi_layer: OSI Layer type to initialize TG with. Default value "L2" sets linux interface spoof off. :type node: dict :type ifc_key: str :type numvfs: int :type osi_layer: str :returns: Virtual Function topology interface keys. :rtype: list :raises RuntimeError: If a reason preventing initialization is found. """ # Read PCI address and driver. 
pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key) pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":") uio_driver = Topology.get_uio_driver(node) kernel_driver = Topology.get_interface_driver(node, ifc_key) if kernel_driver not in ("i40e", "i40evf"): raise RuntimeError( "AVF needs i40e-compatible driver, not {driver} at node {host}" " ifc {ifc}".format( driver=kernel_driver, host=node["host"], ifc=ifc_key)) current_driver = DUTSetup.get_pci_dev_driver( node, pf_pci_addr.replace(':', r'\:')) VPPUtil.stop_vpp_service(node) if current_driver != kernel_driver: # PCI device must be re-bound to kernel driver before creating VFs. DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True) # Stop VPP to prevent deadlock. # Unbind from current driver. DUTSetup.pci_driver_unbind(node, pf_pci_addr) # Bind to kernel driver. DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver) # Initialize PCI VFs. DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs) vf_ifc_keys = [] # Set MAC address and bind each virtual function to uio driver. 
for vf_id in range(numvfs): vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4], pf_mac_addr[5], "{:02x}".format(vf_id)]) pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\ format(pci=pf_pci_addr) InterfaceUtil.set_linux_interface_trust_on(node, pf_dev, vf_id=vf_id) if osi_layer == 'L2': InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev, vf_id=vf_id) InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr, vf_id=vf_id) DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id) DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver) # Add newly created ports into topology file vf_ifc_name = '{pf_if_key}_vif'.format(pf_if_key=ifc_key) vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id) vf_ifc_key = Topology.add_new_port(node, vf_ifc_name) Topology.update_interface_name(node, vf_ifc_key, vf_ifc_name+str(vf_id+1)) Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr) Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr) vf_ifc_keys.append(vf_ifc_key) return vf_ifc_keys @staticmethod def vpp_sw_interface_rx_placement_dump(node): """Dump VPP interface RX placement on node. :param node: Node to run command on. :type node: dict :returns: Thread mapping information as a list of dictionaries. :rtype: list """ cmd = 'sw_interface_rx_placement_dump' err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format( cmd=cmd, host=node['host']) with PapiSocketExecutor(node) as papi_exec: for ifc in node['interfaces'].values(): if ifc['vpp_sw_index'] is not None: papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index']) details = papi_exec.get_details(err_msg) return sorted(details, key=lambda k: k['sw_if_index']) @staticmethod def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id, worker_id): """Set interface RX placement to worker on node. :param node: Node to run command on. :param sw_if_index: VPP SW interface index. :param queue_id: VPP interface queue ID. 
:param worker_id: VPP worker ID (indexing from 0). :type node: dict :type sw_if_index: int :type queue_id: int :type worker_id: int :raises RuntimeError: If failed to run command on host or if no API reply received. """ cmd = 'sw_interface_set_rx_placement' err_msg = "Failed to set interface RX placement to worker on host " \ "{host}!".format(host=node['host']) args = dict( sw_if_index=sw_if_index, queue_id=queue_id, worker_id=worker_id, is_main=False ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod def vpp_round_robin_rx_placement(node, prefix): """Set Round Robin interface RX placement on all worker threads on node. :param node: Topology nodes. :param prefix: Interface name prefix. :type node: dict :type prefix: str """ worker_id = 0 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1 if not worker_cnt: return for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node): for interface in node['interfaces'].values(): if placement['sw_if_index'] == interface['vpp_sw_index'] \ and prefix in interface['name']: InterfaceUtil.vpp_sw_interface_set_rx_placement( node, placement['sw_if_index'], placement['queue_id'], worker_id % worker_cnt) worker_id += 1 @staticmethod def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix): """Set Round Robin interface RX placement on all worker threads on all DUTs. :param nodes: Topology nodes. :param prefix: Interface name prefix. :type nodes: dict :type prefix: str """ for node in nodes.values(): if node['type'] == NodeType.DUT: InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)