/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <svm/svm_fifo_segment.h>
#include <vlibmemory/api.h>
#include <vpp/api/vpe_msg_enum.h>
#include <vnet/session/application_interface.h>
#include <uri/vppcom.h>
#include <vlib/unix/unix.h>
#include <vppinfra/vec_bootstrap.h>

#define vl_typedefs		/* define message structures */
#include <vpp/api/vpe_all_api_h.h>
#undef vl_typedefs

/* declare message handlers for each api */

#define vl_endianfun		/* define message structures */
#include <vpp/api/vpe_all_api_h.h>
#undef vl_endianfun

/* instantiate all the print functions we know about */
#define vl_print(handle, ...)
#define vl_printfun
#include <vpp/api/vpe_all_api_h.h>
#undef vl_printfun

#if (CLIB_DEBUG > 0)
/* Set VPPCOM_DEBUG 2 for connection debug, 3 for read/write debug output */
#define VPPCOM_DEBUG 1
#else
#define VPPCOM_DEBUG 0
#endif

/*
 * VPPCOM Private definitions and functions.
 */

typedef enum
{
  STATE_APP_START,
  STATE_APP_CONN_VPP,
  STATE_APP_ENABLED,
  STATE_APP_ATTACHED,
} app_state_t;

typedef enum
{
  STATE_START,
  STATE_CONNECT,
  STATE_LISTEN,
  STATE_ACCEPT,
  STATE_DISCONNECT,
  STATE_FAILED
} session_state_t;

typedef struct
{
  volatile session_state_t state;

  svm_fifo_t *server_rx_fifo;
  svm_fifo_t *server_tx_fifo;
  u32 sm_seg_index;
  u64 vpp_session_handle;
  unix_shared_memory_queue_t *event_queue;

  /* Socket configuration state */
  u8 is_server;
  u8 is_listen;
  u8 is_cut_thru;
  u8 is_nonblocking;
  u32 vrf;
  u8 is_ip4;
  u8 ip[16];
  u16 port;
  u8 proto;
  u64 client_queue_address;
  u64 options[16];
} session_t;

typedef struct vppcom_cfg_t_
{
  u64 heapsize;
  u64 segment_baseva;
  u32 segment_size;
  u32 add_segment_size;
  u32 preallocated_fifo_pairs;
  u32 rx_fifo_size;
  u32 tx_fifo_size;
  u32 event_queue_size;
  u32 listen_queue_size;
  f64 app_timeout;
  f64 session_timeout;
  f64 accept_timeout;
} vppcom_cfg_t;

typedef struct vppcom_main_t_
{
  u8 init;
  u32 *client_session_index_fifo;
  volatile u32 bind_session_index;
  u32 tx_event_id;
  int main_cpu;

  /* vpe input queue */
  unix_shared_memory_queue_t *vl_input_queue;

  /* API client handle */
  u32 my_client_index;

  /* Session pool */
  clib_spinlock_t sessions_lockp;
  session_t *sessions;

  /* Hash table for disconnect processing */
  uword *session_index_by_vpp_handles;

  /* Select bitmaps */
  clib_bitmap_t *rd_bitmap;
  clib_bitmap_t *wr_bitmap;
  clib_bitmap_t *ex_bitmap;

  /* Our event queue */
  unix_shared_memory_queue_t *app_event_queue;

  /* unique segment name counter */
  u32 unique_segment_index;

  pid_t my_pid;

  /* For deadman timers */
  clib_time_t clib_time;

  /* State of the connection, shared between msg RX thread and main thread */
  volatile app_state_t app_state;

  vppcom_cfg_t cfg;

  /* VNET_API_ERROR_FOO -> "Foo" hash table */
  uword *error_string_by_error_number;
} vppcom_main_t;

vppcom_main_t vppcom_main = {.my_client_index = ~0 };

static const char *
vppcom_app_state_str (app_state_t state)
{
  char *st;

  switch (state)
    {
    case STATE_APP_START:
      st = "STATE_APP_START";
      break;

    case STATE_APP_CONN_VPP:
      st = "STATE_APP_CONN_VPP";
      break;

    case STATE_APP_ENABLED:
      st = "STATE_APP_ENABLED";
      break;

    case STATE_APP_ATTACHED:
      st = "STATE_APP_ATTACHED";
      break;

    default:
      st = "UNKNOWN_APP_STATE";
      break;
    }

  return st;
}

static const char *
vppcom_session_state_str (session_state_t state)
{
  char *st;

  switch (state)
    {
    case STATE_START:
      st = "STATE_START";
      break;

    case STATE_CONNECT:
      st = "STATE_CONNECT";
      break;

    case STATE_LISTEN:
      st = "STATE_LISTEN";
      break;

    case STATE_ACCEPT:
      st = "STATE_ACCEPT";
      break;

    case STATE_DISCONNECT:
      st = "STATE_DISCONNECT";
      break;

    case STATE_FAILED:
      st = "STATE_FAILED";
      break;

    default:
      st = "UNKNOWN_STATE";
      break;
    }

  return st;
}

/*
 * VPPCOM Utility Functions
 */

static inline int
vppcom_session_at_index (u32 session_index, session_t * volatile *sess)
{
  vppcom_main_t *vcm = &vppcom_main;

  /* Assumes that caller has acquired spinlock: vcm->sessions_lockp */
  if (PREDICT_FALSE ((session_index == ~0) ||
                     pool_is_free_index (vcm->sessions, session_index)))
    {
      clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                    vcm->my_pid, session_index);
      return VPPCOM_EBADFD;
    }
  *sess = pool_elt_at_index (vcm->sessions, session_index);
  return VPPCOM_OK;
}

static int
vppcom_connect_to_vpp (char *app_name)
{
  api_main_t *am = &api_main;
  vppcom_main_t *vcm = &vppcom_main;

  if (VPPCOM_DEBUG > 0)
    printf ("\nConnecting to VPP api...");
  if (vl_client_connect_to_vlib ("/vpe-api", app_name, 32) < 0)
    {
      clib_warning ("[%d] connect to vpp (%s) failed!",
                    vcm->my_pid, app_name);
      return VPPCOM_ECONNREFUSED;
    }

  vcm->vl_input_queue = am->shmem_hdr->vl_input_queue;
  vcm->my_client_index = am->my_client_index;
  if (VPPCOM_DEBUG > 0)
    printf (" connected!\n");

  vcm->app_state = STATE_APP_CONN_VPP;
  return VPPCOM_OK;
}

static u8 *
format_api_error (u8 * s, va_list * args)
{
  vppcom_main_t *vcm = &vppcom_main;
  i32 error = va_arg (*args, u32);
  uword *p;

  p = hash_get (vcm->error_string_by_error_number, -error);

  if (p)
    s = format (s, "%s (%d)", p[0], error);
  else
    s = format (s, "%d", error);
  return s;
}

static void
vppcom_init_error_string_table (void)
{
  vppcom_main_t *vcm = &vppcom_main;

  vcm->error_string_by_error_number = hash_create (0, sizeof (uword));

#define _(n,v,s) hash_set (vcm->error_string_by_error_number, -v, s);
  foreach_vnet_api_error;
#undef _

  hash_set (vcm->error_string_by_error_number, 99, "Misc");
}

static inline int
vppcom_wait_for_app_state_change (app_state_t app_state)
{
  vppcom_main_t *vcm = &vppcom_main;
  f64 timeout = clib_time_now (&vcm->clib_time) + vcm->cfg.app_timeout;

  while (clib_time_now (&vcm->clib_time) < timeout)
    {
      if (vcm->app_state == app_state)
        return VPPCOM_OK;
    }

  if (VPPCOM_DEBUG > 0)
    clib_warning ("[%d] timeout waiting for state %s (%d)", vcm->my_pid,
                  vppcom_app_state_str (app_state), app_state);
  return VPPCOM_ETIMEDOUT;
}

static inline int
vppcom_wait_for_session_state_change (u32 session_index,
                                      session_state_t state,
                                      f64 wait_for_time)
{
  vppcom_main_t *vcm = &vppcom_main;
  f64 timeout = clib_time_now (&vcm->clib_time) + wait_for_time;
  session_t *volatile session;
  int rv;

  do
    {
      clib_spinlock_lock (&vcm->sessions_lockp);
      rv = vppcom_session_at_index (session_index, &session);
      if (PREDICT_FALSE (rv))
        {
          clib_spinlock_unlock (&vcm->sessions_lockp);
          return rv;
        }
      if (session->state == state)
        {
          clib_spinlock_unlock (&vcm->sessions_lockp);
          return VPPCOM_OK;
        }
      clib_spinlock_unlock (&vcm->sessions_lockp);
    }
  while (clib_time_now (&vcm->clib_time) < timeout);

  if (VPPCOM_DEBUG > 0)
    clib_warning ("[%d] timeout waiting for state %s (%d)", vcm->my_pid,
                  vppcom_session_state_str (state), state);
  return VPPCOM_ETIMEDOUT;
}

static inline int
vppcom_wait_for_client_session_index (f64 wait_for_time)
{
  vppcom_main_t *vcm = &vppcom_main;
  f64 timeout = clib_time_now (&vcm->clib_time) + wait_for_time;

  do
    {
      if (clib_fifo_elts (vcm->client_session_index_fifo))
        return VPPCOM_OK;
    }
  while (clib_time_now (&vcm->clib_time) < timeout);

  if (wait_for_time == 0)
    return VPPCOM_EAGAIN;

  if (VPPCOM_DEBUG > 0)
    clib_warning ("[%d] timeout waiting for client_session_index",
                  vcm->my_pid);
  return VPPCOM_ETIMEDOUT;
}

/*
 * VPP-API message functions
 */

static void
vppcom_send_session_enable_disable (u8 is_enable)
{
  vppcom_main_t *vcm = &vppcom_main;
  vl_api_session_enable_disable_t *bmp;
  bmp = vl_msg_api_alloc (sizeof (*bmp));
  memset (bmp, 0, sizeof (*bmp));

  bmp->_vl_msg_id = ntohs (VL_API_SESSION_ENABLE_DISABLE);
  bmp->client_index = vcm->my_client_index;
  bmp->context = htonl (0xfeedface);
  bmp->is_enable = is_enable;
  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & bmp);
}

static int
vppcom_app_session_enable (void)
{
  vppcom_main_t *vcm = &vppcom_main;
  int rv;

  if (vcm->app_state != STATE_APP_ENABLED)
    {
      vppcom_send_session_enable_disable (1 /* is_enabled == TRUE */ );
      rv = vppcom_wait_for_app_state_change (STATE_APP_ENABLED);
      if (PREDICT_FALSE (rv))
        {
          if (VPPCOM_DEBUG > 0)
            clib_warning ("[%d] Session enable timed out, rv = %s (%d)",
                          vcm->my_pid, vppcom_retval_str (rv), rv);
          return rv;
        }
    }
  return VPPCOM_OK;
}

static void
  vl_api_session_enable_disable_reply_t_handler
  (vl_api_session_enable_disable_reply_t * mp)
{
  vppcom_main_t *vcm = &vppcom_main;

  if (mp->retval)
    {
      clib_warning ("[%d] session_enable_disable failed: %U", vcm->my_pid,
                    format_api_error, ntohl (mp->retval));
    }
  else
    vcm->app_state = STATE_APP_ENABLED;
}

static void
vppcom_app_send_attach (void)
{
  vppcom_main_t *vcm = &vppcom_main;
  vl_api_application_attach_t *bmp;
  bmp = vl_msg_api_alloc (sizeof (*bmp));
  memset (bmp, 0, sizeof (*bmp));

  bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH);
  bmp->client_index = vcm->my_client_index;
  bmp->context = htonl (0xfeedface);
  bmp->options[APP_OPTIONS_FLAGS] =
    APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT;
  bmp->options[SESSION_OPTIONS_SEGMENT_SIZE] = vcm->cfg.segment_size;
  bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = vcm->cfg.add_segment_size;
  bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = vcm->cfg.rx_fifo_size;
  bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = vcm->cfg.tx_fifo_size;
  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & bmp);
}

static int
vppcom_app_attach (void)
{
  vppcom_main_t *vcm = &vppcom_main;
  int rv;

  vppcom_app_send_attach ();
  rv = vppcom_wait_for_app_state_change (STATE_APP_ATTACHED);
  if (PREDICT_FALSE (rv))
    {
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] application attach timed out, rv = %s (%d)",
                      vcm->my_pid, vppcom_retval_str (rv), rv);
      return rv;
    }
  return VPPCOM_OK;
}

static void
vppcom_app_detach (void)
{
  vppcom_main_t *vcm = &vppcom_main;
  vl_api_application_detach_t *bmp;
  bmp = vl_msg_api_alloc (sizeof (*bmp));
  memset (bmp, 0, sizeof (*bmp));

  bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_DETACH);
  bmp->client_index = vcm->my_client_index;
  bmp->context = htonl (0xfeedface);
  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & bmp);
}

static void
vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t *
                                           mp)
{
  vppcom_main_t *vcm = &vppcom_main;
  static svm_fifo_segment_create_args_t _a;
  svm_fifo_segment_create_args_t *a = &_a;
  int rv;

  memset (a, 0, sizeof (*a));
  if (mp->retval)
    {
      clib_warning ("[%d] attach failed: %U", vcm->my_pid,
                    format_api_error, ntohl (mp->retval));
      return;
    }

  if (mp->segment_name_length == 0)
    {
      clib_warning ("[%d] segment_name_length zero", vcm->my_pid);
      return;
    }

  a->segment_name = (char *) mp->segment_name;
  a->segment_size = mp->segment_size;

  ASSERT (mp->app_event_queue_address);

  /* Attach to the segment vpp created */
  rv = svm_fifo_segment_attach (a);
  vec_reset_length (a->new_segment_indices);
  if (PREDICT_FALSE (rv))
    {
      clib_warning ("[%d] svm_fifo_segment_attach ('%s') failed",
                    vcm->my_pid, mp->segment_name);
      return;
    }

  vcm->app_event_queue =
    uword_to_pointer (mp->app_event_queue_address,
                      unix_shared_memory_queue_t *);

  vcm->app_state = STATE_APP_ATTACHED;
}

static void
vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t *
                                           mp)
{
  vppcom_main_t *vcm = &vppcom_main;

  if (mp->retval)
    clib_warning ("[%d] detach failed: %U", vcm->my_pid, format_api_error,
                  ntohl (mp->retval));

  vcm->app_state = STATE_APP_ENABLED;
}

static void
vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t *
                                           mp)
{
  vppcom_main_t *vcm = &vppcom_main;
  uword *p;

  p = hash_get (vcm->session_index_by_vpp_handles, mp->handle);
  if (p)
    {
      session_t *session = 0;
      int rv;
      clib_spinlock_lock (&vcm->sessions_lockp);
      rv = vppcom_session_at_index (p[0], &session);
      if (PREDICT_FALSE (rv))
        {
          if (VPPCOM_DEBUG > 1)
            clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                          vcm->my_pid, p[0]);
        }
      hash_unset (vcm->session_index_by_vpp_handles, mp->handle);
      session->state = STATE_DISCONNECT;
      clib_spinlock_unlock (&vcm->sessions_lockp);
    }
  else
    {
      if (VPPCOM_DEBUG > 1)
        clib_warning ("[%d] couldn't find session key %llx", vcm->my_pid,
                      mp->handle);
    }

  if (mp->retval)
    clib_warning ("[%d] disconnect_session failed: %U", vcm->my_pid,
                  format_api_error, ntohl (mp->retval));
}

static void
vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp)
{
  vppcom_main_t *vcm = &vppcom_main;
  static svm_fifo_segment_create_args_t _a;
  svm_fifo_segment_create_args_t *a = &_a;
  int rv;

  memset (a, 0, sizeof (*a));
  a->segment_name = (char *) mp->segment_name;
  a->segment_size = mp->segment_size;
  /* Attach to the segment vpp created */
  rv = svm_fifo_segment_attach (a);
  vec_reset_length (a->new_segment_indices);
  if (PREDICT_FALSE (rv))
    {
      clib_warning ("[%d] svm_fifo_segment_attach ('%s') failed",
                    vcm->my_pid, mp->segment_name);
      return;
    }
  if (VPPCOM_DEBUG > 1)
    clib_warning ("[%d] mapped new segment '%s' size %d", vcm->my_pid,
                  mp->segment_name, mp->segment_size);
}

static void
vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp)
{
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session = 0;
  vl_api_disconnect_session_reply_t *rmp;
  uword *p;
  int rv = 0;

  p = hash_get (vcm->session_index_by_vpp_handles, mp->handle);
  if (p)
    {
      int rval;
      clib_spinlock_lock (&vcm->sessions_lockp);
      rval = vppcom_session_at_index (p[0], &session);
      if (PREDICT_FALSE (rval))
        {
          if (VPPCOM_DEBUG > 1)
            clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                          vcm->my_pid, p[0]);
        }
      else
        pool_put (vcm->sessions, session);
      clib_spinlock_unlock (&vcm->sessions_lockp);
      hash_unset (vcm->session_index_by_vpp_handles, mp->handle);
    }
  else
    {
      clib_warning ("[%d] couldn't find session key %llx", vcm->my_pid,
                    mp->handle);
      rv = -11;
    }

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  memset (rmp, 0, sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY);
  rmp->retval = htonl (rv);
  rmp->handle = mp->handle;
  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
}

static void
vl_api_reset_session_t_handler (vl_api_reset_session_t * mp)
{
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session = 0;
  vl_api_reset_session_reply_t *rmp;
  uword *p;
  int rv = 0;

  p = hash_get (vcm->session_index_by_vpp_handles, mp->handle);
  if (p)
    {
      int rval;
      clib_spinlock_lock (&vcm->sessions_lockp);
      rval = vppcom_session_at_index (p[0], &session);
      if (PREDICT_FALSE (rval))
        {
          if (VPPCOM_DEBUG > 1)
            clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                          vcm->my_pid, p[0]);
        }
      else
        pool_put (vcm->sessions, session);
      clib_spinlock_unlock (&vcm->sessions_lockp);
      hash_unset (vcm->session_index_by_vpp_handles, mp->handle);
    }
  else
    {
      clib_warning ("[%d] couldn't find session key %llx", vcm->my_pid,
                    mp->handle);
      rv = -11;
    }

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  memset (rmp, 0, sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_RESET_SESSION_REPLY);
  rmp->retval = htonl (rv);
  rmp->handle = mp->handle;
  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
}

static void
vl_api_connect_sock_reply_t_handler (vl_api_connect_sock_reply_t * mp)
{
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session;
  u32 session_index;
  svm_fifo_t *rx_fifo, *tx_fifo;
  u8 is_cut_thru = 0;
  int rv;

  if (mp->retval)
    {
      clib_warning ("[%d] connect failed: %U", vcm->my_pid, format_api_error,
                    ntohl (mp->retval));
      return;
    }

  session_index = ntohl (mp->app_connect);
  if (VPPCOM_DEBUG > 1)
    clib_warning ("[%d] app_connect = %d 0x%08x", vcm->my_pid,
                  session_index, session_index);

  clib_spinlock_lock (&vcm->sessions_lockp);
  if (pool_is_free_index (vcm->sessions, session_index))
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 1)
        clib_warning ("[%d] invalid session, sid %d is closed!",
                      vcm->my_pid, session_index);
      return;
    }

  /* We've been redirected */
  if (mp->segment_name_length > 0)
    {
      static svm_fifo_segment_create_args_t _a;
      svm_fifo_segment_create_args_t *a = &_a;

      is_cut_thru = 1;
      memset (a, 0, sizeof (*a));
      a->segment_name = (char *) mp->segment_name;
      if (VPPCOM_DEBUG > 1)
        clib_warning ("[%d] cut-thru segment: %s", vcm->my_pid,
                      a->segment_name);
      rv = svm_fifo_segment_attach (a);
      vec_reset_length (a->new_segment_indices);
      if (PREDICT_FALSE (rv))
        {
          clib_spinlock_unlock (&vcm->sessions_lockp);
          clib_warning ("[%d] svm_fifo_segment_attach ('%s') failed",
                        vcm->my_pid, a->segment_name);
          return;
        }
    }

  /*
   * Setup session
   */
  if (VPPCOM_DEBUG > 1)
    clib_warning ("[%d] client sid %d", vcm->my_pid, session_index);

  session = pool_elt_at_index (vcm->sessions, session_index);
  session->is_cut_thru = is_cut_thru;
  session->event_queue = uword_to_pointer (mp->vpp_event_queue_address,
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module exists to provide setup utilities for the framework on topology
nodes. All tasks that need to run before the actual tests start are
supposed to end up here.
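
Typical usage from a suite setup (a sketch; ``nodes`` is assumed to be the
topology dictionary produced by the topology module):

    from resources.libraries.python.SetupFramework import SetupFramework
    SetupFramework.setup_framework(nodes)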
"""

from shlex import split
from subprocess import Popen, PIPE, call
from multiprocessing import Pool
from tempfile import NamedTemporaryFile
from os.path import basename
from os import environ

from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.ssh import SSH
from resources.libraries.python.constants import Constants as con
from resources.libraries.python.topology import NodeType

__all__ = ["SetupFramework"]


def pack_framework_dir():
    """Pack the testing WS into temp file, return its name."""

    directory = environ.get("TMPDIR")
    if directory is not None:
        tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-",
                                     dir=directory)
    else:
        tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-")
    file_name = tmpfile.name
    tmpfile.close()
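    # Closing the temp file deletes it; only its unique name is kept, for
    # tar below to create the real archive at that path.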

    proc = Popen(
        split("tar --exclude-vcs --exclude=./tmp --exclude=*.deb -zcf {0} .".
              format(file_name)), stdout=PIPE, stderr=PIPE)
    (stdout, stderr) = proc.communicate()

    logger.debug(stdout)
    logger.debug(stderr)

    if proc.returncode != 0:
        raise Exception("Could not pack testing framework: {0}".
                        format(stderr))

    return file_name


def copy_tarball_to_node(tarball, node):
    """Copy tarball file from local host to remote node.

    :param tarball: Path to tarball to upload.
    :param node: Dictionary created from topology.
    :type tarball: str
    :type node: dict
    :returns: nothing
    """
    logger.console('Copying tarball to {0}'.format(node['host']))
    ssh = SSH()
    ssh.connect(node)

    ssh.scp(tarball, "/tmp/")


def extract_tarball_at_node(tarball, node):
    """Extract tarball at given node.

    Extracts tarball using tar on given node to specific CSIT location.

    :param tarball: Path to the tarball on the remote node to extract.
    :param node: Dictionary created from topology.
    :type tarball: str
    :type node: dict
    :returns: nothing
    """
    logger.console('Extracting tarball to {0} on {1}'.format(
        con.REMOTE_FW_DIR, node['host']))
    ssh = SSH()
    ssh.connect(node)

    # Wipe any previous framework directory, unpack fresh, then delete the
    # remote tarball to save space.
    cmd = 'sudo rm -rf {1}; mkdir {1}; tar -zxf {0} -C {1}; ' \
        'rm -f {0}'.format(tarball, con.REMOTE_FW_DIR)
    (ret_code, _, stderr) = ssh.exec_command(cmd, timeout=30)
    if ret_code != 0:
        logger.error('Unpack error: {0}'.format(stderr))
        raise Exception('Failed to unpack {0} at node {1}'.format(
            tarball, node['host']))


def create_env_directory_at_node(node):
    """Create fresh virtualenv to a directory, install pip requirements."""
    logger.console('Extracting virtualenv, installing requirements.txt '
                   'on {0}'.format(node['host']))
    ssh = SSH()
    ssh.connect(node)
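    # --system-site-packages reuses distro-provided packages;
    # --never-download keeps virtualenv from fetching anything off the net.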
    (ret_code, stdout, stderr) = ssh.exec_command(
        'cd {0} && rm -rf env && '
        'virtualenv --system-site-packages --never-download env && '
        '. env/bin/activate && '
        'pip install -r requirements.txt'
        .format(con.REMOTE_FW_DIR), timeout=100)
    if ret_code != 0:
        logger.error('Virtualenv creation error: {0}'.format(stdout + stderr))
        raise Exception('Virtualenv setup failed')
    else:
        logger.console('Virtualenv created on {0}'.format(node['host']))


# pylint: disable=broad-except
def setup_node(args):
    """Run all set-up methods for a node.

    This function is used as a map_async parameter. It receives a tuple with
    all parameters as passed to the map_async function.

    :param args: All parameters needed to setup one node.
    :type args: tuple
    :returns: True - success, False - error
    :rtype: bool
    """
    tarball, remote_tarball, node = args
    try:
        copy_tarball_to_node(tarball, node)
        extract_tarball_at_node(remote_tarball, node)
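        # Only the traffic generator (TG) node needs the virtualenv.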
        if node['type'] == NodeType.TG:
            create_env_directory_at_node(node)
    except Exception as exc:
        logger.error("Node setup failed, error:'{0}'".format(exc.message))
        return False
    else:
        logger.console('Setup of node {0} done'.format(node['host']))
        return True


def delete_local_tarball(tarball):
    """Delete local tarball to prevent disk pollution.

    :param tarball: Path to the tarball to delete.
    :type tarball: str
    :returns: nothing
    """
    call(split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball)))


class SetupFramework(object):
    """Setup suite run on topology nodes.

    Many VAT/CLI based tests need the scripts at remote hosts before executing
    them. This class packs the whole testing directory and copies it over
    to all nodes in the topology under /tmp/.
    """

    @staticmethod
    def setup_framework(nodes):
        """Pack the whole directory and extract in temp on each node."""

        tarball = pack_framework_dir()
        msg = 'Framework packed to {0}'.format(tarball)
        logger.console(msg)
        logger.trace(msg)
        remote_tarball = "/tmp/{0}".format(basename(tarball))

        # Turn off logging since we use multiprocessing
        log_level = BuiltIn().set_log_level('NONE')
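        # One (tarball, remote_tarball, node) tuple per node, matching the
        # argument tuple unpacked in setup_node().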
        params = ((tarball, remote_tarball, node) for node in nodes.values())
        pool = Pool(processes=len(nodes))
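        # Dispatch setup_node() for every tuple; per-node results
        # (True/False) are collected via result.get() below.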
        result = pool.map_async(setup_node, params)
        pool.close()
        pool.join()

        # Turn on logging
        BuiltIn().set_log_level(log_level)

        logger.info(
            'Executed node setups in parallel, waiting for processes to end')
        result.wait()

        logger.info('Results: {0}'.format(result.get()))

        logger.trace('Test framework copied to all topology nodes')
        delete_local_tarball(tarball)
        logger.console('All nodes are ready')
                                                  my_pid, session_index);
  session->vrf = ep->vrf;
  session->is_ip4 = ep->is_ip4;
  memset (session->ip, 0, sizeof (session->ip));
  clib_memcpy (session->ip, ep->ip, sizeof (session->ip));
  session->port = ep->port;

  clib_spinlock_unlock (&vcm->sessions_lockp);
  return VPPCOM_OK;
}

int
vppcom_session_listen (uint32_t session_index, uint32_t q_len)
{
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session = 0;
  int rv;

  clib_spinlock_lock (&vcm->sessions_lockp);
  rv = vppcom_session_at_index (session_index, &session);
  if (PREDICT_FALSE (rv))
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                      vcm->my_pid, session_index);
      return rv;
    }

  if (VPPCOM_DEBUG > 0)
    clib_warning ("[%d] sid %d", vcm->my_pid, session_index);

  ASSERT (vcm->bind_session_index == ~0);
  vcm->bind_session_index = session_index;
  vppcom_send_bind_sock (session);
  clib_spinlock_unlock (&vcm->sessions_lockp);
  rv = vppcom_wait_for_session_state_change (session_index, STATE_LISTEN,
                                             vcm->cfg.session_timeout);
  if (PREDICT_FALSE (rv))
    {
      vcm->bind_session_index = ~0;
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] server listen timed out, rv = %s (%d)",
                      vcm->my_pid, vppcom_retval_str (rv), rv);
      return rv;
    }

  clib_spinlock_lock (&vcm->sessions_lockp);
  rv = vppcom_session_at_index (session_index, &session);
  if (PREDICT_FALSE (rv))
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                      vcm->my_pid, session_index);
      return rv;
    }
  session->is_listen = 1;
  clib_spinlock_unlock (&vcm->sessions_lockp);
  clib_fifo_validate (vcm->client_session_index_fifo, q_len);

  return VPPCOM_OK;
}

int
vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
                       double wait_for_time)
{
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session = 0;
  u32 client_session_index;
  int rv;
  f64 wait_for;

  clib_spinlock_lock (&vcm->sessions_lockp);
  rv = vppcom_session_at_index (listen_session_index, &session);
  if (PREDICT_FALSE (rv))
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                      vcm->my_pid, listen_session_index);
      return rv;
    }

  if (session->state != STATE_LISTEN)
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] session not in listen state, state = %s",
                      vcm->my_pid,
                      vppcom_session_state_str (session->state));
      return VPPCOM_EBADFD;
    }

  wait_for = session->is_nonblocking ? 0 :
    (wait_for_time < 0) ? vcm->cfg.accept_timeout : wait_for_time;

  if (VPPCOM_DEBUG > 0)
    clib_warning ("[%d] sid %d, state %s (%d)", vcm->my_pid,
                  listen_session_index,
                  vppcom_session_state_str (session->state),
                  session->state);
  clib_spinlock_unlock (&vcm->sessions_lockp);

  while (1)
    {
      rv = vppcom_wait_for_client_session_index (wait_for);
      if (rv)
        {
          if ((VPPCOM_DEBUG > 0))
            clib_warning ("[%d] sid %d, accept timed out, rv = %s (%d)",
                          vcm->my_pid, listen_session_index,
                          vppcom_retval_str (rv), rv);
          if ((wait_for == 0) || (wait_for_time > 0))
            return rv;
        }
      else
        break;
    }

  clib_fifo_sub1 (vcm->client_session_index_fifo, client_session_index);

  session = 0;
  clib_spinlock_lock (&vcm->sessions_lockp);
  rv = vppcom_session_at_index (client_session_index, &session);
  ASSERT (rv == VPPCOM_OK);
  ASSERT (session->is_server);

  if (VPPCOM_DEBUG > 0)
    clib_warning ("[%d] Got a request: client sid %d", vcm->my_pid,
                  client_session_index);

  ep->vrf = session->vrf;
  ep->is_cut_thru = session->is_cut_thru;
  ep->is_ip4 = session->is_ip4;
  ep->port = session->port;
  memset (ep->ip, 0, sizeof (ip6_address_t));
  clib_memcpy (ep->ip, session->ip, sizeof (ip6_address_t));
  session->state = STATE_LISTEN;
  clib_spinlock_unlock (&vcm->sessions_lockp);
  return (int) client_session_index;
}

int
vppcom_session_connect (uint32_t session_index, vppcom_endpt_t * server_ep)
{
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session = 0;
  int rv;
  ip46_address_t *ip46;

  clib_spinlock_lock (&vcm->sessions_lockp);
  rv = vppcom_session_at_index (session_index, &session);
  if (PREDICT_FALSE (rv))
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                      vcm->my_pid, session_index);
      return rv;
    }

  if (session->state == STATE_CONNECT)
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] session, sid (%d) already connected!",
                      vcm->my_pid, session_index);
      return VPPCOM_OK;
    }

  session->vrf = server_ep->vrf;
  session->is_ip4 = server_ep->is_ip4;
  ip46 = (ip46_address_t *) session->ip;
  *ip46 = to_ip46 (!server_ep->is_ip4, server_ep->ip);
  session->port = server_ep->port;

  if (VPPCOM_DEBUG > 0)
    {
      u8 *ip_str = format (0, "%U", format_ip46_address,
                           &session->ip, session->is_ip4);
      clib_warning ("[%d] connect sid %d to %s server port %d",
                    vcm->my_pid, session_index, ip_str,
                    clib_net_to_host_u16 (session->port));
      vec_free (ip_str);
    }

  vppcom_send_connect_sock (session, session_index);
  clib_spinlock_unlock (&vcm->sessions_lockp);
  rv = vppcom_wait_for_session_state_change (session_index, STATE_CONNECT,
                                             vcm->cfg.session_timeout);
  if (PREDICT_FALSE (rv))
    {
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] connect timed out, rv = %s (%d)",
                      vcm->my_pid, vppcom_retval_str (rv), rv);
      return rv;
    }
  return VPPCOM_OK;
}

int
vppcom_session_read (uint32_t session_index, void *buf, int n)
{
  session_fifo_event_t _e, *e = &_e;
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session = 0;
  svm_fifo_t *rx_fifo;
  int n_read = 0;
  int rv;
  char *fifo_str;

  ASSERT (buf);

  clib_spinlock_lock (&vcm->sessions_lockp);
  rv = vppcom_session_at_index (session_index, &session);
  if (PREDICT_FALSE (rv))
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                      vcm->my_pid, session_index);
      return rv;
    }

  if (session->is_cut_thru)
    {
      rx_fifo = session->is_server ? session->server_rx_fifo :
        session->server_tx_fifo;
      fifo_str = session->is_server ? "server_rx_fifo" : "server_tx_fifo";
      clib_spinlock_unlock (&vcm->sessions_lockp);

      n_read = svm_fifo_dequeue_nowait (rx_fifo, n, buf);

      if (n_read <= 0)
        return VPPCOM_EAGAIN;
    }
  else
    {
      rv = unix_shared_memory_queue_sub (session->event_queue, (u8 *) e,
                                         1 /* nowait */ );
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (rv < 0)
        return VPPCOM_EAGAIN;

      switch (e->event_type)
        {
        case FIFO_EVENT_APP_RX:
          rx_fifo = e->fifo;
          fifo_str = "app_rx_fifo";
          n_read = svm_fifo_dequeue_nowait (rx_fifo, n, buf);
          break;

        case FIFO_EVENT_DISCONNECT:
          return VPPCOM_ECONNRESET;

        default:
          if (VPPCOM_DEBUG > 0)
            clib_warning ("[%d] unknown event type %d", vcm->my_pid,
                          e->event_type);
          return VPPCOM_EAGAIN;
        }
    }

  if (VPPCOM_DEBUG > 2)
    clib_warning ("[%d] sid %d, read %d bytes from %s (%p)", vcm->my_pid,
                  session_index, n_read, fifo_str, rx_fifo);
  return n_read;
}

static inline int
vppcom_session_read_ready (session_t * session, u32 session_index)
{
  session_fifo_event_t _e, *e = &_e;
  vppcom_main_t *vcm = &vppcom_main;
  svm_fifo_t *rx_fifo;
  int rv;
  int ready = 0;

  /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
  if (session->is_cut_thru)
    {
      rx_fifo = session->is_server ? session->server_rx_fifo :
        session->server_tx_fifo;
      ready = svm_fifo_max_dequeue (rx_fifo);
    }
  else if (session->is_listen)
    ready = clib_fifo_elts (vcm->client_session_index_fifo);
  else
    {
      rv = unix_shared_memory_queue_sub (vcm->app_event_queue, (u8 *) e,
                                         1 /* nowait */ );
      if (rv >= 0)
        {
          switch (e->event_type)
            {
            case FIFO_EVENT_APP_RX:
              rx_fifo = e->fifo;
              ready = svm_fifo_max_dequeue (rx_fifo);
              break;

            case FIFO_EVENT_DISCONNECT:
              return VPPCOM_ECONNRESET;

            default:
              clib_warning ("[%d] unknown event type %d", vcm->my_pid,
                            e->event_type);
            }
        }
    }

  if (VPPCOM_DEBUG > 2)
    clib_warning ("[%d] sid %d, peek %s (%p), ready = %d", vcm->my_pid,
                  session_index,
                  session->is_server ? "server_rx_fifo" : "server_tx_fifo",
                  rx_fifo, ready);
  return ready;
}

int
vppcom_session_write (uint32_t session_index, void *buf, int n)
{
  vppcom_main_t *vcm = &vppcom_main;
  session_t *session = 0;
  svm_fifo_t *tx_fifo;
  unix_shared_memory_queue_t *q;
  session_fifo_event_t evt;
  int rv;
  char *fifo_str;
  u8 is_nonblocking;

  ASSERT (buf);

  clib_spinlock_lock (&vcm->sessions_lockp);
  rv = vppcom_session_at_index (session_index, &session);
  if (PREDICT_FALSE (rv))
    {
      clib_spinlock_unlock (&vcm->sessions_lockp);
      if (VPPCOM_DEBUG > 0)
        clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                      vcm->my_pid, session_index);
      return rv;
    }

  tx_fifo = ((!session->is_cut_thru || session->is_server) ?
             session->server_tx_fifo : session->server_rx_fifo);
  fifo_str = ((!session->is_cut_thru || session->is_server) ?
              "server_tx_fifo" : "server_rx_fifo");
  is_nonblocking = session->is_nonblocking;
  clib_spinlock_unlock (&vcm->sessions_lockp);

  do
    {
      rv = svm_fifo_enqueue_nowait (tx_fifo, n, buf);
    }
  while (!is_nonblocking && (rv <= 0));

  /* If event wasn't set, add one */
  if ((rv > 0) && svm_fifo_set_event (tx_fifo))
    {
      int rval;

      /* Fabricate TX event, send to vpp */
      evt.fifo = tx_fifo;
      evt.event_type = FIFO_EVENT_APP_TX;
      evt.event_id = vcm->tx_event_id++;

      clib_spinlock_lock (&vcm->sessions_lockp);
      rval = vppcom_session_at_index (session_index, &session);
      if (PREDICT_FALSE (rval))
        {
          clib_spinlock_unlock (&vcm->sessions_lockp);
          if (VPPCOM_DEBUG > 1)
            clib_warning ("[%d] invalid session, sid (%d) has been closed!",
                          vcm->my_pid, session_index);
          return rval;
        }
      q = session->event_queue;
      clib_spinlock_unlock (&vcm->sessions_lockp);
      ASSERT (q);
      unix_shared_memory_queue_add (q, (u8 *) & evt,
                                    0 /* do wait for mutex */ );
    }

  if (VPPCOM_DEBUG > 2)
    clib_warning ("[%d] sid %d, wrote %d bytes to %s (%p)", vcm->my_pid,
                  session_index, rv, fifo_str, tx_fifo);
  return rv;
}

static inline int
vppcom_session_write_ready (session_t * session, u32 session_index)
{
  vppcom_main_t *vcm = &vppcom_main;
  svm_fifo_t *tx_fifo;
  int rv;

  /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
  tx_fifo = ((!session->is_cut_thru || session->is_server) ?
             session->server_tx_fifo : session->server_rx_fifo);

  rv = svm_fifo_max_enqueue (tx_fifo);
  if (VPPCOM_DEBUG > 2)
    clib_warning ("[%d] sid %d, peek %s (%p), ready = %d", vcm->my_pid,
                  session_index,
                  session->is_server ? "server_tx_fifo" : "server_rx_fifo",
                  tx_fifo, rv);
  return rv;
}

int
vppcom_select (unsigned long n_bits, unsigned long *read_map,
               unsigned long *write_map, unsigned long *except_map,
               double time_to_wait)
{
  vppcom_main_t *vcm = &vppcom_main;
  u32 session_index;
  session_t *session = 0;
  int rv, bits_set = 0;
  f64 timeout = clib_time_now (&vcm->clib_time) + time_to_wait;
  u32 minbits = clib_max (n_bits, BITS (uword));

  ASSERT (sizeof (clib_bitmap_t) == sizeof (long int));

  if (read_map)
    {
      clib_bitmap_validate (vcm->rd_bitmap, minbits);
      clib_memcpy (vcm->rd_bitmap, read_map, vec_len (vcm->rd_bitmap));
      memset (read_map, 0, vec_len (vcm->rd_bitmap));
    }
  if (write_map)
    {
      clib_bitmap_validate (vcm->wr_bitmap, minbits);
      clib_memcpy (vcm->wr_bitmap, write_map, vec_len (vcm->wr_bitmap));
      memset (write_map, 0, vec_len (vcm->wr_bitmap));
    }
  if (except_map)
    {
      clib_bitmap_validate (vcm->ex_bitmap, minbits);
      clib_memcpy (vcm->ex_bitmap, except_map, vec_len (vcm->ex_bitmap));
      memset (except_map, 0, vec_len (vcm->ex_bitmap));
    }

  do
    {
      /* *INDENT-OFF* */
      clib_bitmap_foreach (session_index, vcm->rd_bitmap,
        ({
          clib_spinlock_lock (&vcm->sessions_lockp);
          rv = vppcom_session_at_index (session_index, &session);
          if (rv < 0)
            {
              clib_spinlock_unlock (&vcm->sessions_lockp);
              if (VPPCOM_DEBUG > 1)
                clib_warning ("[%d] session %d specified in "
                              "read_map is closed.", vcm->my_pid,
                              session_index);
              bits_set = VPPCOM_EBADFD;
              goto select_done;
            }

          rv = vppcom_session_read_ready (session, session_index);
          clib_spinlock_unlock (&vcm->sessions_lockp);
          if (vcm->ex_bitmap &&
              clib_bitmap_get (vcm->ex_bitmap, session_index) && (rv < 0))
            {
              // TBD: clib_warning
              clib_bitmap_set_no_check (except_map, session_index, 1);
              bits_set++;
            }
          else if (rv > 0)
            {
              // TBD: clib_warning
              clib_bitmap_set_no_check (read_map, session_index, 1);
              bits_set++;
            }
        }));

      clib_bitmap_foreach (session_index, vcm->wr_bitmap,
        ({
          clib_spinlock_lock (&vcm->sessions_lockp);
          rv = vppcom_session_at_index (session_index, &session);
          if (rv < 0)
            {
              clib_spinlock_unlock (&vcm->sessions_lockp);
              if (VPPCOM_DEBUG > 0)
                clib_warning ("[%d] session %d specified in "
                              "write_map is closed.", vcm->my_pid,
                              session_index);
              bits_set = VPPCOM_EBADFD;
              goto select_done;
            }

          rv = vppcom_session_write_ready (session, session_index);
          clib_spinlock_unlock (&vcm->sessions_lockp);
          if (rv > 0)
            {
              // TBD: clib_warning
              clib_bitmap_set_no_check (write_map, session_index, 1);
              bits_set++;
            }
        }));

      clib_bitmap_foreach (session_index, vcm->ex_bitmap,
        ({
          clib_spinlock_lock (&vcm->sessions_lockp);
          rv = vppcom_session_at_index (session_index, &session);
          if (rv < 0)
            {
              clib_spinlock_unlock (&vcm->sessions_lockp);
              if (VPPCOM_DEBUG > 1)
                clib_warning ("[%d] session %d specified in "
                              "except_map is closed.", vcm->my_pid,
                              session_index);
              bits_set = VPPCOM_EBADFD;
              goto select_done;
            }

          rv = vppcom_session_read_ready (session, session_index);
          clib_spinlock_unlock (&vcm->sessions_lockp);
          if (rv < 0)
            {
              // TBD: clib_warning
              clib_bitmap_set_no_check (except_map, session_index, 1);
              bits_set++;
            }
        }));
      /* *INDENT-ON* */
    }
  while (clib_time_now (&vcm->clib_time) < timeout);

select_done:
  return (bits_set);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
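
/*
 * Usage sketch (illustration only, not part of the original file): a
 * minimal blocking client built on the calls above. The session index and
 * server address are assumed to come from the app/session creation code in
 * the portion of vppcom not shown in this fragment.
 *
 *   u32 server_addr = ...;     // server IPv4 address, network byte order
 *   vppcom_endpt_t server = { .is_ip4 = 1, .ip = (u8 *) &server_addr,
 *                             .port = clib_host_to_net_u16 (1234) };
 *   char buf[1024];
 *   int n, session_index = ...;
 *
 *   if (vppcom_session_connect (session_index, &server) == VPPCOM_OK)
 *     {
 *       vppcom_session_write (session_index, "hello", 5);
 *       do
 *         n = vppcom_session_read (session_index, buf, sizeof (buf));
 *       while (n == VPPCOM_EAGAIN);
 *     }
 */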