path: root/resources/tools/scripts/topo_installation.py
blob: 5c91abbd0f0c8fd9d26de7dd9126db2381c50ad1
#!/usr/bin/env python

# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script provides copy and installation of VPP build deb packages.
   As destinations are used all DUT nodes from the topology file."""
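
# Example invocation (a sketch; the topology file and package names are
# illustrative, not shipped with this script):
#
#   python topo_installation.py -t topologies/available/testbed.yaml \
#       -d /tmp/vpp_install_dir \
#       -p vpp_16.09_amd64.deb vpp-lib_16.09_amd64.deb
#
# Cancelling a previous installation on the same testbed:
#
#   python topo_installation.py -t topologies/available/testbed.yaml \
#       -d /tmp/vpp_install_dir -c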

import sys
import argparse
from yaml import load

from resources.libraries.python.ssh import SSH


def ssh_no_error(ssh, cmd, sudo=False):
    """Execute a command over ssh channel, and log and exit if the command
    fails.

    :param ssh: SSH() object connected to a node.
    :param cmd: Command line to execute on remote node.
    :type ssh: SSH() object
    :type cmd: str
    :return: stdout from the SSH command.
    :rtype: str
    """

    if sudo:
        ret, stdo, stde = ssh.exec_command_sudo(cmd, timeout=60)
    else:
        ret, stdo, stde = ssh.exec_command(cmd, timeout=60)

    if ret != 0:
        print 'Command execution failed: "{}"'.format(cmd)
        print 'stdout: {0}'.format(stdo)
        print 'stderr: {0}'.format(stde)
        raise RuntimeError('Unexpected ssh command failure')

    return stdo


def ssh_ignore_error(ssh, cmd, sudo=False):
    """Execute a command over ssh channel, ignore errors.

    :param ssh: SSH() object connected to a node.
    :param cmd: Command line to execute on remote node.
    :type ssh: SSH() object
    :type cmd: str
    :return: stdout from the SSH command.
    :rtype: str
    """

    if sudo:
        ret, stdo, stde = ssh.exec_command_sudo(cmd)
    else:
        ret, stdo, stde = ssh.exec_command(cmd)

    if ret != 0:
        print 'Command execution failed: "{}"'.format(cmd)
        print 'stdout: {0}'.format(stdo)
        print 'stderr: {0}'.format(stde)

    return stdo
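
# A usage sketch for the two helpers above ('node' stands for one node dict
# loaded from the topology YAML; the command itself is illustrative):
#
#   ssh = SSH()
#   ssh.connect(node)
#   out = ssh_no_error(ssh, 'dpkg -l | grep vpp', sudo=True)  # raises on failure
#   out = ssh_ignore_error(ssh, 'rm -r /tmp/gone')            # logs and continues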


def main():
    """Copy and installation of VPP packages."""

    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--topo", required=True,
                        help="Topology file")
    parser.add_argument("-d", "--directory", required=True,
                        help="Installation directory")
    parser.add_argument("-p", "--packages", required=False, nargs='+',
                        help="Packages paths to copy")
    parser.add_argument("-c", "--cancel", help="Cancel installation",
                        action="store_true")
    parser.add_argument("-hc", "--honeycomb", help="Include Honeycomb package.",
                        required=False, default=False)

    args = parser.parse_args()
    topology_file = args.topo
    packages = args.packages
    install_dir = args.directory
    cancel_installation = args.cancel
    honeycomb = args.honeycomb

    with open(topology_file) as work_file:
        topology = load(work_file.read())['nodes']
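
    # A minimal sketch of the expected topology YAML (values are illustrative;
    # real topology files live under the CSIT "topologies" directory):
    #
    #   nodes:
    #     DUT1:
    #       type: DUT
    #       host: 10.0.0.1
    #       port: 22
    #       username: user
    #       password: pass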

    def fix_interrupted(package):
        """If there are interrupted installations, clean them up."""

        cmd = "dpkg -l | grep {0}".format(package)
        ret, _, _ = ssh.exec_command(cmd)
        if ret == 0:
            # Try to fix interrupted installations
            cmd = 'dpkg --configure -a'
            stdout = ssh_no_error(ssh, cmd, sudo=True)
            print "###TI {}".format(stdout)
            # Try to remove installed packages
            cmd = 'apt-get purge -y "{0}.*"'.format(package)
            stdout = ssh_no_error(ssh, cmd, sudo=True)
            print "###TI {}".format(stdout)

    ssh = SSH()
    for node in topology:
        if topology[node]['type'] == "DUT":
            print "###TI host: {}".format(topology[node]['host'])
            ssh.connect(topology[node])

            if cancel_installation:
                # Remove installation directory on DUT
                cmd = "rm -r {}".format(install_dir)
                stdout = ssh_ignore_error(ssh, cmd)
                print "###TI {}".format(stdout)

                if honeycomb:
                    fix_interrupted("honeycomb")
                    # remove HC logs
                    cmd = "rm -rf /var/log/honeycomb"
                    stdout = ssh_ignore_error(ssh, cmd, sudo=True)
                    print "###TI {}".format(stdout)
                fix_interrupted("vpp")

            else:
                # Create installation directory on DUT
                cmd = "rm -r {0}; mkdir {0}".format(install_dir)
                stdout = ssh_no_error(ssh, cmd)
                print "###TI {}".format(stdout)

                use_custom_hc_debs = False
                if honeycomb:
                    cmd = "ls ~/honeycomb | grep .deb"
                    stdout = ssh_ignore_error(ssh, cmd)
                    # If custom Honeycomb packages exist on the DUT, use them
                    use_custom_hc_debs = "honeycomb" in stdout

                if use_custom_hc_debs:
                    cmd = "cp ~/honeycomb/*.deb {0}".format(install_dir)
                    stdout = ssh_no_error(ssh, cmd)
                    print "###TI {}".format(stdout)
                else:
                    # Copy packages from local path to installation dir
                    for deb in packages:
                        print "###TI scp: {}".format(deb)
                        ssh.scp(local_path=deb, remote_path=install_dir)

                if honeycomb:
                    fix_interrupted("honeycomb")
                fix_interrupted("vpp")

                # Installation of deb packages
                cmd = "dpkg -i --force-all {}/*.deb".format(install_dir)
                stdout = ssh_no_error(ssh, cmd, sudo=True)
                print "###TI {}".format(stdout)

if __name__ == "__main__":
    sys.exit(main())
an>&vlib_global_main.elog_main, e); ed->data[0] = rv; ed->data[1] = s->bytes_sent; ed->data[2] = s->bytes_to_send; } /* Poke the session layer */ if (svm_fifo_set_event (txf)) { /* Fabricate TX event, send to vpp */ evt.fifo = txf; evt.event_type = FIFO_EVENT_APP_TX; if (unix_shared_memory_queue_add (tm->vpp_event_queue[txf->master_thread_index], (u8 *) & evt, 0 /* do wait for mutex */ )) clib_warning ("could not enqueue event"); } } } static void receive_test_chunk (tclient_main_t * tm, session_t * s) { svm_fifo_t *rx_fifo = s->server_rx_fifo; int n_read, test_bytes = 0; u32 my_thread_index = vlib_get_thread_index (); /* Allow enqueuing of new event */ // svm_fifo_unset_event (rx_fifo); if (test_bytes) { n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf[my_thread_index]), tm->rx_buf[my_thread_index]); } else { n_read = svm_fifo_max_dequeue (rx_fifo); svm_fifo_dequeue_drop (rx_fifo, n_read); } if (n_read > 0) { if (TCP_BUILTIN_CLIENT_DBG) { /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "rx-deq: %d bytes", .format_args = "i4", }; /* *INDENT-ON* */ struct { u32 data[1]; } *ed; ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->data[0] = n_read; } if (test_bytes) { int i; for (i = 0; i < n_read; i++) { if (tm->rx_buf[my_thread_index][i] != ((s->bytes_received + i) & 0xff)) { clib_warning ("read %d error at byte %lld, 0x%x not 0x%x", n_read, s->bytes_received + i, tm->rx_buf[my_thread_index][i], ((s->bytes_received + i) & 0xff)); } } } s->bytes_to_receive -= n_read; s->bytes_received += n_read; } } static uword builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { tclient_main_t *tm = &tclient_main; int my_thread_index = vlib_get_thread_index (); session_t *sp; int i; int delete_session; u32 *connection_indices; u32 *connections_this_batch; u32 nconnections_this_batch; connection_indices = tm->connection_index_by_thread[my_thread_index]; connections_this_batch = tm->connections_this_batch_by_thread[my_thread_index]; if ((tm->run_test == 0) || ((vec_len (connection_indices) == 0) && vec_len (connections_this_batch) == 0)) return 0; /* Grab another pile of connections */ if (PREDICT_FALSE (vec_len (connections_this_batch) == 0)) { nconnections_this_batch = clib_min (tm->connections_per_batch, vec_len (connection_indices)); ASSERT (nconnections_this_batch > 0); vec_validate (connections_this_batch, nconnections_this_batch - 1); clib_memcpy (connections_this_batch, connection_indices + vec_len (connection_indices) - nconnections_this_batch, nconnections_this_batch * sizeof (u32)); _vec_len (connection_indices) -= nconnections_this_batch; } if (PREDICT_FALSE (tm->prev_conns != tm->connections_per_batch && tm->prev_conns == vec_len (connections_this_batch))) { tm->repeats++; tm->prev_conns = vec_len (connections_this_batch); if (tm->repeats == 500000) { clib_warning ("stuck clients"); } } else { tm->prev_conns = vec_len (connections_this_batch); tm->repeats = 0; } for (i = 0; i < vec_len (connections_this_batch); i++) { delete_session = 1; sp = pool_elt_at_index (tm->sessions, connections_this_batch[i]); if (sp->bytes_to_send > 0) { send_test_chunk (tm, sp); delete_session = 0; } if (sp->bytes_to_receive > 0) { receive_test_chunk (tm, sp); delete_session = 0; } if (PREDICT_FALSE (delete_session == 1)) { u32 index, thread_index; stream_session_t *s; __sync_fetch_and_add (&tm->tx_total, sp->bytes_sent); __sync_fetch_and_add (&tm->rx_total, sp->bytes_received); session_parse_handle (sp->vpp_session_handle, &index, &thread_index); s = 
session_get_if_valid (index, thread_index); if (s) { vnet_disconnect_args_t _a, *a = &_a; a->handle = session_handle (s); a->app_index = tm->app_index; vnet_disconnect_session (a); vec_delete (connections_this_batch, 1, i); i--; __sync_fetch_and_add (&tm->ready_connections, -1); } else clib_warning ("session AWOL?"); /* Kick the debug CLI process */ if (tm->ready_connections == 0) { signal_evt_to_cli (2); } } } tm->connection_index_by_thread[my_thread_index] = connection_indices; tm->connections_this_batch_by_thread[my_thread_index] = connections_this_batch; return 0; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (builtin_client_node) = { .function = builtin_client_node_fn, .name = "builtin-tcp-client", .type = VLIB_NODE_TYPE_INPUT, .state = VLIB_NODE_STATE_DISABLED, }; /* *INDENT-ON* */ static int create_api_loopback (tclient_main_t * tm) { api_main_t *am = &api_main; vl_shmem_hdr_t *shmem_hdr; shmem_hdr = am->shmem_hdr; tm->vl_input_queue = shmem_hdr->vl_input_queue; tm->my_client_index = vl_api_memclnt_create_internal ("tcp_test_client", tm->vl_input_queue); return 0; } static int tcp_test_clients_init (vlib_main_t * vm) { tclient_main_t *tm = &tclient_main; vlib_thread_main_t *vtm = vlib_get_thread_main (); u32 num_threads; int i; if (create_api_loopback (tm)) return -1; num_threads = 1 /* main thread */ + vtm->n_threads; /* Init test data. Big buffer */ vec_validate (tm->connect_test_data, 1024 * 1024 - 1); for (i = 0; i < vec_len (tm->connect_test_data); i++) tm->connect_test_data[i] = i & 0xff; vec_validate (tm->rx_buf, num_threads - 1); for (i = 0; i < num_threads; i++) vec_validate (tm->rx_buf[i], vec_len (tm->connect_test_data) - 1); tm->is_init = 1; vec_validate (tm->connection_index_by_thread, vtm->n_vlib_mains); vec_validate (tm->connections_this_batch_by_thread, vtm->n_vlib_mains); vec_validate (tm->vpp_event_queue, vtm->n_vlib_mains); return 0; } static int builtin_session_connected_callback (u32 app_index, u32 api_context, stream_session_t * s, u8 is_fail) { tclient_main_t *tm = &tclient_main; session_t *session; u32 session_index; u8 thread_index = vlib_get_thread_index (); if (is_fail) { clib_warning ("connection %d failed!", api_context); signal_evt_to_cli (-1); return 0; } ASSERT (s->thread_index == thread_index); if (!tm->vpp_event_queue[thread_index]) tm->vpp_event_queue[thread_index] = session_manager_get_vpp_event_queue (thread_index); /* * Setup session */ clib_spinlock_lock_if_init (&tm->sessions_lock); pool_get (tm->sessions, session); clib_spinlock_unlock_if_init (&tm->sessions_lock); memset (session, 0, sizeof (*session)); session_index = session - tm->sessions; session->bytes_to_send = tm->bytes_to_send; session->bytes_to_receive = tm->no_return ? 0ULL : tm->bytes_to_send; session->server_rx_fifo = s->server_rx_fifo; session->server_rx_fifo->client_session_index = session_index; session->server_tx_fifo = s->server_tx_fifo; session->server_tx_fifo->client_session_index = session_index; session->vpp_session_handle = session_handle (s); vec_add1 (tm->connection_index_by_thread[thread_index], session_index); __sync_fetch_and_add (&tm->ready_connections, 1); if (tm->ready_connections == tm->expected_connections) { tm->run_test = 1; /* Signal the CLI process that the action is starting... 
*/ signal_evt_to_cli (1); } return 0; } static void builtin_session_reset_callback (stream_session_t * s) { if (s->session_state == SESSION_STATE_READY) clib_warning ("Reset active connection %U", format_stream_session, s, 2); stream_session_cleanup (s); return; } static int builtin_session_create_callback (stream_session_t * s) { return 0; } static void builtin_session_disconnect_callback (stream_session_t * s) { tclient_main_t *tm = &tclient_main; vnet_disconnect_args_t _a, *a = &_a; a->handle = session_handle (s); a->app_index = tm->app_index; vnet_disconnect_session (a); return; } static int builtin_server_rx_callback (stream_session_t * s) { return 0; } /* *INDENT-OFF* */ static session_cb_vft_t builtin_clients = { .session_reset_callback = builtin_session_reset_callback, .session_connected_callback = builtin_session_connected_callback, .session_accept_callback = builtin_session_create_callback, .session_disconnect_callback = builtin_session_disconnect_callback, .builtin_server_rx_callback = builtin_server_rx_callback }; /* *INDENT-ON* */ static clib_error_t * attach_builtin_test_clients_app (u8 * appns_id, u64 appns_flags, u64 appns_secret) { tclient_main_t *tm = &tclient_main; vnet_app_attach_args_t _a, *a = &_a; u8 segment_name[128]; u32 segment_name_length, prealloc_fifos; u64 options[16]; clib_error_t *error = 0; segment_name_length = ARRAY_LEN (segment_name); memset (a, 0, sizeof (*a)); memset (options, 0, sizeof (options)); a->api_client_index = tm->my_client_index; a->segment_name = segment_name; a->segment_name_length = segment_name_length; a->session_cb_vft = &builtin_clients; prealloc_fifos = tm->prealloc_fifos ? tm->expected_connections : 1; options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; options[SESSION_OPTIONS_SEGMENT_SIZE] = (2ULL << 32); options[SESSION_OPTIONS_RX_FIFO_SIZE] = tm->fifo_size; options[SESSION_OPTIONS_TX_FIFO_SIZE] = tm->fifo_size; options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = tm->private_segment_count; options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE] = tm->private_segment_size; options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = prealloc_fifos; options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN; if (appns_id) { options[APP_OPTIONS_FLAGS] |= appns_flags; options[APP_OPTIONS_NAMESPACE_SECRET] = appns_secret; } a->options = options; a->namespace_id = appns_id; if ((error = vnet_application_attach (a))) return error; tm->app_index = a->app_index; return 0; } static void * tclient_thread_fn (void *arg) { return 0; } /** Start a transmit thread */ int start_tx_pthread (tclient_main_t * tm) { if (tm->client_thread_handle == 0) { int rv = pthread_create (&tm->client_thread_handle, NULL /*attr */ , tclient_thread_fn, 0); if (rv) { tm->client_thread_handle = 0; return -1; } } return 0; } clib_error_t * clients_connect (vlib_main_t * vm, u8 * uri, u32 n_clients) { tclient_main_t *tm = &tclient_main; vnet_connect_args_t _a, *a = &_a; clib_error_t *error = 0; int i; for (i = 0; i < n_clients; i++) { memset (a, 0, sizeof (*a)); a->uri = (char *) uri; a->api_context = i; a->app_index = tm->app_index; a->mp = 0; if ((error = vnet_connect_uri (a))) return error; /* Crude pacing for call setups */ if ((i % 4) == 0) vlib_process_suspend (vm, 10e-6); ASSERT (i + 1 >= tm->ready_connections); while (i + 1 - tm->ready_connections > 1000) { vlib_process_suspend (vm, 100e-6); } } return 0; } static clib_error_t * test_tcp_clients_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { tclient_main_t *tm = &tclient_main; vlib_thread_main_t *thread_main = 
vlib_get_thread_main (); uword *event_data = 0, event_type; u8 *default_connect_uri = (u8 *) "tcp://6.0.1.1/1234", *uri, *appns_id; u64 tmp, total_bytes, appns_flags = 0, appns_secret = 0; f64 test_timeout = 20.0, syn_timeout = 20.0, delta; f64 time_before_connects; u32 n_clients = 1; int preallocate_sessions = 0; char *transfer_type; clib_error_t *error = 0; int i; tm->bytes_to_send = 8192; tm->no_return = 0; tm->fifo_size = 64 << 10; tm->connections_per_batch = 1000; tm->private_segment_count = 0; tm->private_segment_size = 0; tm->vlib_main = vm; if (thread_main->n_vlib_mains > 1) clib_spinlock_init (&tm->sessions_lock); vec_free (tm->connect_uri); while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "nclients %d", &n_clients)) ; else if (unformat (input, "mbytes %lld", &tmp)) tm->bytes_to_send = tmp << 20; else if (unformat (input, "gbytes %lld", &tmp)) tm->bytes_to_send = tmp << 30; else if (unformat (input, "bytes %lld", &tm->bytes_to_send)) ; else if (unformat (input, "uri %s", &tm->connect_uri)) ; else if (unformat (input, "test-timeout %f", &test_timeout)) ; else if (unformat (input, "syn-timeout %f", &syn_timeout)) ; else if (unformat (input, "no-return")) tm->no_return = 1; else if (unformat (input, "fifo-size %d", &tm->fifo_size)) tm->fifo_size <<= 10; else if (unformat (input, "private-segment-count %d", &tm->private_segment_count)) ; else if (unformat (input, "private-segment-size %U", unformat_memory_size, &tmp)) { if (tmp >= 0x100000000ULL) return clib_error_return (0, "private segment size %lld (%llu) too large", tmp, tmp); tm->private_segment_size = tmp; } else if (unformat (input, "preallocate-fifos")) tm->prealloc_fifos = 1; else if (unformat (input, "preallocate-sessions")) preallocate_sessions = 1; else if (unformat (input, "client-batch %d", &tm->connections_per_batch)) ; else if (unformat (input, "appns %_%v%_", &appns_id)) ; else if (unformat (input, "all-scope")) appns_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE | APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE); else if (unformat (input, "local-scope")) appns_flags = APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE; else if (unformat (input, "global-scope")) appns_flags = APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE; else if (unformat (input, "secret %lu", &appns_secret)) ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } /* Store cli process node index for signalling */ tm->cli_node_index = vlib_get_current_process (vm)->node_runtime.node_index; if (tm->is_init == 0) { if (tcp_test_clients_init (vm)) return clib_error_return (0, "failed init"); } tm->ready_connections = 0; tm->expected_connections = n_clients; tm->rx_total = 0; tm->tx_total = 0; uri = default_connect_uri; if (tm->connect_uri) uri = tm->connect_uri; #if TCP_BUILTIN_CLIENT_PTHREAD start_tx_pthread (); #endif vlib_worker_thread_barrier_sync (vm); vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. 
*/ ); vlib_worker_thread_barrier_release (vm); if (tm->test_client_attached == 0) { if ((error = attach_builtin_test_clients_app (appns_id, appns_flags, appns_secret))) { vec_free (appns_id); clib_error_report (error); return error; } vec_free (appns_id); } tm->test_client_attached = 1; /* Turn on the builtin client input nodes */ for (i = 0; i < thread_main->n_vlib_mains; i++) vlib_node_set_state (vlib_mains[i], builtin_client_node.index, VLIB_NODE_STATE_POLLING); if (preallocate_sessions) { session_t *sp __attribute__ ((unused)); for (i = 0; i < n_clients; i++) pool_get (tm->sessions, sp); for (i = 0; i < n_clients; i++) pool_put_index (tm->sessions, i); } /* Fire off connect requests */ time_before_connects = vlib_time_now (vm); if ((error = clients_connect (vm, uri, n_clients))) return error; /* Park until the sessions come up, or ten seconds elapse... */ vlib_process_wait_for_event_or_clock (vm, syn_timeout); event_type = vlib_process_get_events (vm, &event_data); switch (event_type) { case ~0: vlib_cli_output (vm, "Timeout with only %d sessions active...", tm->ready_connections); goto cleanup; case 1: delta = vlib_time_now (vm) - time_before_connects; if (delta != 0.0) { vlib_cli_output (vm, "%d three-way handshakes in %.2f seconds, %.2f/sec", n_clients, delta, ((f64) n_clients) / delta); } tm->test_start_time = vlib_time_now (tm->vlib_main); vlib_cli_output (vm, "Test started at %.6f", tm->test_start_time); break; default: vlib_cli_output (vm, "unexpected event(1): %d", event_type); goto cleanup; } /* Now wait for the sessions to finish... */ vlib_process_wait_for_event_or_clock (vm, test_timeout); event_type = vlib_process_get_events (vm, &event_data); switch (event_type) { case ~0: vlib_cli_output (vm, "Timeout with %d sessions still active...", tm->ready_connections); goto cleanup; case 2: tm->test_end_time = vlib_time_now (vm); vlib_cli_output (vm, "Test finished at %.6f", tm->test_end_time); break; default: vlib_cli_output (vm, "unexpected event(2): %d", event_type); goto cleanup; } delta = tm->test_end_time - tm->test_start_time; if (delta != 0.0) { total_bytes = (tm->no_return ? tm->tx_total : tm->rx_total); transfer_type = tm->no_return ? 
"half-duplex" : "full-duplex"; vlib_cli_output (vm, "%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds", total_bytes, total_bytes / (1ULL << 20), total_bytes / (1ULL << 30), delta); vlib_cli_output (vm, "%.2f bytes/second %s", ((f64) total_bytes) / (delta), transfer_type); vlib_cli_output (vm, "%.4f gbit/second %s", (((f64) total_bytes * 8.0) / delta / 1e9), transfer_type); } else vlib_cli_output (vm, "zero delta-t?"); cleanup: tm->run_test = 0; for (i = 0; i < vec_len (tm->connection_index_by_thread); i++) { vec_reset_length (tm->connection_index_by_thread[i]); vec_reset_length (tm->connections_this_batch_by_thread[i]); } pool_free (tm->sessions); /* Detach the application, so we can use different fifo sizes next time */ if (tm->test_client_attached) { vnet_app_detach_args_t _da, *da = &_da; int rv; da->app_index = tm->app_index; rv = vnet_application_detach (da); if (rv) vlib_cli_output (vm, "WARNING: app detach failed..."); tm->test_client_attached = 0; tm->app_index = ~0; } return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (test_clients_command, static) = { .path = "test tcp clients", .short_help = "test tcp clients [nclients %d] [[m|g]bytes <bytes>] " "[test-timeout <time>][syn-timeout <time>][no-return][fifo-size <size>]" "[private-segment-count <count>][private-segment-size <bytes>[m|g]]" "[preallocate-fifos][preallocate-sessions][client-batch <batch-size>]" "[uri <tcp://ip/port>]", .function = test_tcp_clients_command_fn, .is_mp_safe = 1, }; /* *INDENT-ON* */ clib_error_t * tcp_test_clients_main_init (vlib_main_t * vm) { tclient_main_t *tm = &tclient_main; tm->is_init = 0; return 0; } VLIB_INIT_FUNCTION (tcp_test_clients_main_init); /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */