aboutsummaryrefslogtreecommitdiffstats
path: root/tests/vpp/perf/ip4/2n1l-10ge2p1x710-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr.robot
blob: 3e03e63d40c895e8f78110f22e30ef5c4f0360d6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/shared/default.robot
|
| Force Tags | 2_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDR
| ... | NIC_Intel-X710 | ETH | IP4FWD | FEATURE | ACL | ACL_STATELESS
| ... | OACL | ACL50 | 10K_FLOWS | DRV_VFIO_PCI
| ... | RXQ_SIZE_0 | TXQ_SIZE_0
| ... | ethip4udp-ip4base-oacl50sl-10kflows
|
| Suite Setup | Setup suite topology interfaces | performance
| Suite Teardown | Tear down suite | performance
| Test Setup | Setup test | performance
| Test Teardown | Tear down test | performance | acl
|
| Test Template | Local Template
|
| Documentation | *RFC2544: Packet throughput IPv4 test cases with ACL*
|
| ... | *[Top] Network Topologies:* TG-DUT1-TG 2-node circular topology\
| ... | with single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4-UDP for IPv4 routing.
| ... | *[Cfg] DUT configuration:* DUT1 is configured with IPv4 routing.\
| ... | Required ACL rules are applied to output paths of both DUT1 interfaces.\
| ... | DUT1 is tested with ${nic_name}.\
| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop\
| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\
| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\
| ... | of packets transmitted. NDR and PDR are discovered for different\
| ... | Ethernet L2 frame sizes using MLRsearch library.\
| ... | Test packets are generated by TG on links to DUT1. TG traffic profile\
| ... | contains two L3 flow-groups (flow-group per direction, ${flows_per_dir}\
| ... | flows per flow-group) with all packets containing Ethernet header, IPv4\
| ... | header with UDP header and static payload. MAC addresses are\
| ... | matching MAC addresses of the TG node interfaces.
| ... | *[Ref] Applicable standard specifications:* RFC2544.

*** Variables ***
# NIC / driver configuration; rxq and txq size 0 selects the driver default.
| @{plugins_to_enable}= | dpdk_plugin.so | perfmon_plugin.so | acl_plugin.so
| ${crypto_type}= | ${None}
| ${nic_name}= | Intel-X710
| ${nic_driver}= | vfio-pci
| ${nic_rxq_size}= | 0
| ${nic_txq_size}= | 0
| ${nic_pfs}= | 2
| ${nic_vfs}= | 0
| ${osi_layer}= | L3
| ${overhead}= | ${0}
# ACL test setup: stateless (permit) ACL applied on output paths,
# 50 non-hitting ACEs evaluated before the matching rule, 10k flows/direction.
| ${acl_action}= | permit
| ${acl_apply_type}= | output
| ${no_hit_aces_number}= | 50
| ${flows_per_dir}= | 10k
| ${ip_nr}= | ${10}
# starting points for non-hitting ACLs (addresses/ports outside the
# 10.10.10.0/24 and 20.20.20.0/24 traffic streams below)
| ${src_ip_start}= | 30.30.30.1
| ${dst_ip_start}= | 40.40.40.1
| ${ip_step}= | ${1}
| ${sport_start}= | ${1000}
| ${dport_start}= | ${1000}
| ${port_step}= | ${1}
| ${trex_stream1_subnet}= | 10.10.10.0/24
| ${trex_stream2_subnet}= | 20.20.20.0/24
# Traffic profile: UDP, 10 users x 1000 ports, concurrent streams
| ${traffic_profile}= | trex-stl-2n-ethip4udp-10u1000p-conc

*** Keywords ***
| Local Template
| |
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config.
| | ... | Each DUT uses ${phy_cores} physical core(s) for worker threads.
| | ... | [Ver] Measure NDR and PDR values using MLRsearch algorithm.
| |
| | ... | *Arguments:*
| | ... | - frame_size - Framesize in Bytes in integer or string (IMIX_v4_1).
| | ... | Type: integer, string
| | ... | - phy_cores - Number of physical cores. Type: integer
| | ... | - rxq - Number of RX queues, default value: ${None}. Type: integer
| |
| | [Arguments] | ${frame_size} | ${phy_cores} | ${rxq}=${None}
| |
| | Set Test Variable | \${frame_size}
| |
| | Given Set Max Rate And Jumbo
| | And Add worker threads to all DUTs | ${phy_cores} | ${rxq}
| | And Pre-initialize layer driver | ${nic_driver}
| | And Apply startup configuration on all VPP DUTs
| | When Initialize layer driver | ${nic_driver}
| | And Initialize layer interface
| | And Initialize IPv4 routing with IPv4 ACLs on DUT1 in circular topology
| | ... | ${ip_nr}
| | Then Find NDR and PDR intervals using optimized search

*** Test Cases ***
# Data-driven cases: each row passes its arguments to the Local Template
# keyword selected by the Test Template setting above.
| 64B-1c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 64B | 1C
| | frame_size=${64} | phy_cores=${1}

| 64B-2c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 64B | 2C
| | frame_size=${64} | phy_cores=${2}

| 64B-4c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 64B | 4C
| | frame_size=${64} | phy_cores=${4}

| 1518B-1c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 1518B | 1C
| | frame_size=${1518} | phy_cores=${1}

| 1518B-2c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 1518B | 2C
| | frame_size=${1518} | phy_cores=${2}

| 1518B-4c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 1518B | 4C
| | frame_size=${1518} | phy_cores=${4}

| 9000B-1c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 9000B | 1C
| | frame_size=${9000} | phy_cores=${1}

| 9000B-2c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 9000B | 2C
| | frame_size=${9000} | phy_cores=${2}

| 9000B-4c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | 9000B | 4C
| | frame_size=${9000} | phy_cores=${4}

| IMIX-1c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | IMIX | 1C
| | frame_size=IMIX_v4_1 | phy_cores=${1}

| IMIX-2c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | IMIX | 2C
| | frame_size=IMIX_v4_1 | phy_cores=${2}

| IMIX-4c-ethip4udp-ip4base-oacl50sl-10kflows-ndrpdr
| | [Tags] | IMIX | 4C
| | frame_size=IMIX_v4_1 | phy_cores=${4}
Q_WAIT, 0)) {
      /* NOTE(review): the enclosing function begins before this chunk; this is
       * the tail of the async RX thread loop (presumably vac_rx_thread_fn) —
       * confirm against the full file.  Dispatch on the message id of each
       * message pulled from the shared-memory input queue. */
      VL_MSG_API_UNPOISON((void *)msg);
      u16 id = ntohs(*((u16 *)msg));
      switch (id) {
      case VL_API_RX_THREAD_EXIT:
	vl_msg_api_free((void *) msg);
	/* signal waiting threads that this thread is about to terminate */
	pthread_mutex_lock(&pm->queue_lock);
	rx_thread_done = true;
	pthread_cond_signal(&pm->terminate_cv);
	pthread_mutex_unlock(&pm->queue_lock);
	pthread_exit(0);
	return 0;  /* unreachable: pthread_exit() does not return */
	break;
      case VL_API_MEMCLNT_RX_THREAD_SUSPEND:
	vl_msg_api_free((void * )msg);
	/* Suspend thread and signal reader */
	pthread_mutex_lock(&pm->queue_lock);
	pthread_cond_signal(&pm->suspend_cv);
	/* Wait for the resume signal */
	pthread_cond_wait (&pm->resume_cv, &pm->queue_lock);
	pthread_mutex_unlock(&pm->queue_lock);
	break;
      case VL_API_MEMCLNT_READ_TIMEOUT:
	clib_warning("Received read timeout in async thread\n");
	vl_msg_api_free((void *) msg);
	break;
      case VL_API_MEMCLNT_KEEPALIVE:
	/* Answer VPP's liveness ping directly from this thread. */
	mp = (void *)msg;
	rmp = vl_msg_api_alloc (sizeof (*rmp));
	clib_memset (rmp, 0, sizeof (*rmp));
	rmp->_vl_msg_id = ntohs(VL_API_MEMCLNT_KEEPALIVE_REPLY);
	rmp->context = mp->context;
	shmem_hdr = am->shmem_hdr;
	vl_msg_api_send_shmem(shmem_hdr->vl_input_queue, (u8 *)&rmp);
	vl_msg_api_free((void *) msg);
	break;
      default:
	/* Everything else goes to the registered client callback. */
	vac_api_handler((void *)msg);
      }
    }
}

/*
 * Helper thread implementing read timeouts for vac_read().
 * Sleeps until poked via set_timeout(); then waits up to read_timeout
 * seconds for a cancel (unset_timeout()).  On expiry it injects a
 * VL_API_MEMCLNT_READ_TIMEOUT message into our own input queue so the
 * blocked reader wakes up.
 */
static void *
vac_timeout_thread_fn (void *arg)
{
  vl_api_memclnt_read_timeout_t *ep;
  vac_main_t *pm = &vac_main;
  api_main_t *am = vlibapi_get_main();
  struct timespec ts;
  struct timeval tv;
  int rv;

  while (pm->timeout_loop)
    {
      /* Wait for poke */
      pthread_mutex_lock(&pm->timeout_lock);
      while (!timeout_in_progress)
        pthread_cond_wait (&pm->timeout_cv, &pm->timeout_lock);

      /* Starting timer */
      gettimeofday(&tv, NULL);
      ts.tv_sec = tv.tv_sec + read_timeout;
      ts.tv_nsec = 0;  /* sub-second part of "now" is discarded, so the
                        * actual wait may be up to 1s shorter than requested */

      if (!timeout_cancelled) {
        rv = pthread_cond_timedwait (&pm->timeout_cancel_cv,
                                     &pm->timeout_lock, &ts);
        if (rv == ETIMEDOUT && !timeout_thread_cancelled) {
          /* Timer expired: post a read-timeout message to our own queue. */
          ep = vl_msg_api_alloc (sizeof (*ep));
          ep->_vl_msg_id = ntohs(VL_API_MEMCLNT_READ_TIMEOUT);
          vl_msg_api_send_shmem(am->vl_input_queue, (u8 *)&ep);
        }
      }
      pthread_mutex_unlock(&pm->timeout_lock);
    }
  pthread_exit(0);
}

/*
 * Ask the async RX thread to park itself so the caller can read from the
 * input queue directly.  Blocks until the RX thread signals suspend_cv.
 * No-op when no RX thread exists or it is already suspended.
 */
void
vac_rx_suspend (void)
{
  api_main_t *am = vlibapi_get_main();
  vac_main_t *pm = &vac_main;
  vl_api_memclnt_rx_thread_suspend_t *ep;

  if (!pm->rx_thread_handle) return;
  pthread_mutex_lock(&pm->queue_lock);
  if (rx_is_running)
    {
      ep = vl_msg_api_alloc (sizeof (*ep));
      ep->_vl_msg_id = ntohs(VL_API_MEMCLNT_RX_THREAD_SUSPEND);
      vl_msg_api_send_shmem(am->vl_input_queue, (u8 *)&ep);
      /* Wait for RX thread to tell us it has suspended */
      pthread_cond_wait(&pm->suspend_cv, &pm->queue_lock);
      rx_is_running = false;
    }
  pthread_mutex_unlock(&pm->queue_lock);
}

/*
 * Wake a suspended RX thread.  Safe to call when the thread is already
 * running (early goto) or when no RX thread exists.
 */
void
vac_rx_resume (void)
{
  vac_main_t *pm = &vac_main;
  if (!pm->rx_thread_handle) return;
  pthread_mutex_lock(&pm->queue_lock);
  if (rx_is_running) goto unlock;
  pthread_cond_signal(&pm->resume_cv);
  rx_is_running = true;
 unlock:
  pthread_mutex_unlock(&pm->queue_lock);
}

/* Accessor for the shared message-name+CRC -> index hash table. */
static uword *
vac_msg_table_get_hash (void)
{
  api_main_t *am = vlibapi_get_main();
  return (am->msg_index_by_name_and_crc);
}

/* Number of entries in the API message table. */
int
vac_msg_table_size(void)
{
  api_main_t *am = vlibapi_get_main();
  return hash_elts(am->msg_index_by_name_and_crc);
}

/*
 * Connect to VPP's binary API over shared memory.
 *
 * name          - client name registered with VPP.
 * chroot_prefix - optional shared-memory root path (may be NULL).
 * cb            - if non-NULL, an async RX thread is started and cb is
 *                 invoked for every received message; if NULL the client
 *                 must poll with vac_read().
 * rx_qlen       - receive queue length passed to vl_client_connect().
 *
 * Returns 0 on success, non-zero/-1 on failure.
 */
int
vac_connect (char * name, char * chroot_prefix, vac_callback_t cb,
    int rx_qlen)
{
  rx_thread_done = false;
  int rv = 0;
  vac_main_t *pm = &vac_main;

  assert (clib_mem_get_heap ());

  init();
  if (chroot_prefix != NULL)
    vl_set_memory_root_path (chroot_prefix);

  if ((rv = vl_client_api_map("/vpe-api"))) {
    clib_warning ("vl_client_api_map returned %d", rv);
    return rv;
  }

  if (vl_client_connect(name, 0, rx_qlen) < 0) {
    vl_client_api_unmap();
    return (-1);
  }

  if (cb) {
    /* Start the rx queue thread */
    rv = pthread_create(&pm->rx_thread_handle, NULL, vac_rx_thread_fn, 0);
    if (rv) {
      clib_warning("pthread_create returned %d", rv);
      vl_client_api_unmap();
      return (-1);
    }
    vac_callback = cb;
    rx_is_running = true;
  }

  /* Start read timeout thread */
  rv = pthread_create(&pm->timeout_thread_handle, NULL,
                      vac_timeout_thread_fn, 0);
  if (rv) {
    clib_warning("pthread_create returned %d", rv);
    vl_client_api_unmap();
    return (-1);
  }

  pm->connected_to_vlib = 1;

  return (0);
}

/* Arm the timeout thread: set the deadline and wake it via timeout_cv. */
static void
set_timeout (unsigned short timeout)
{
  vac_main_t *pm = &vac_main;
  pthread_mutex_lock(&pm->timeout_lock);
  read_timeout = timeout;
  timeout_in_progress = true;
  timeout_cancelled = false;
  pthread_cond_signal(&pm->timeout_cv);
  pthread_mutex_unlock(&pm->timeout_lock);
}

/* Disarm a pending timeout: signal the cancel condvar so the timeout
 * thread stops its timed wait. */
static void
unset_timeout (void)
{
  vac_main_t *pm = &vac_main;
  pthread_mutex_lock(&pm->timeout_lock);
  timeout_in_progress = false;
  timeout_cancelled = true;
  pthread_cond_signal(&pm->timeout_cancel_cv);
  pthread_mutex_unlock(&pm->timeout_lock);
}

/*
 * Tear down the API connection: stop the RX thread (5s grace, then
 * pthread_cancel), stop the timeout thread, disconnect and unmap the
 * shared-memory segment.  Returns 0; no-op if not connected.
 */
int
vac_disconnect (void)
{
  api_main_t *am = vlibapi_get_main();
  vac_main_t *pm = &vac_main;
  uword junk;
  int rv = 0;

  if (!pm->connected_to_vlib) return 0;

  if (pm->rx_thread_handle) {
    vl_api_rx_thread_exit_t *ep;
    ep = vl_msg_api_alloc (sizeof (*ep));
    ep->_vl_msg_id = ntohs(VL_API_RX_THREAD_EXIT);
    vl_msg_api_send_shmem(am->vl_input_queue, (u8 *)&ep);

    /* wait (with timeout) until RX thread has finished */
    struct timespec ts;
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts.tv_sec = tv.tv_sec + 5;
    ts.tv_nsec = 0;
    pthread_mutex_lock(&pm->queue_lock);
    if (rx_thread_done == false)
      rv = pthread_cond_timedwait(&pm->terminate_cv, &pm->queue_lock, &ts);
    pthread_mutex_unlock(&pm->queue_lock);
    /* now join so we wait until thread has -really- finished */
    if (rv == ETIMEDOUT)
      pthread_cancel(pm->rx_thread_handle);
    else
      pthread_join(pm->rx_thread_handle, (void **) &junk);
  }
  if (pm->timeout_thread_handle) {
    /* cancel, wake then join the timeout thread */
    pm->timeout_loop = 0;
    timeout_thread_cancelled = true;
    set_timeout(0);
    pthread_join(pm->timeout_thread_handle, (void **) &junk);
  }

  vl_client_disconnect();
  vl_client_api_unmap();
  //vac_callback = 0;
  cleanup();

  return (0);
}

/*
 * Blocking read of the next API message.
 *
 * p       - out: pointer to the received message (ownership transfers to
 *           the caller, who must free it via the API free routine).
 * l       - out: message data length in bytes; 0 on failure.
 * timeout - seconds; if non-zero, the timeout thread is armed and the read
 *           returns VAC_TIMEOUT on expiry (also returned for the other
 *           control-message error paths below).
 *
 * Keepalive pings from VPP are answered internally and never surface to
 * the caller.  Returns 0 on success, svm_queue_sub's negative codes on
 * queue failure, VAC_NOT_CONNECTED / VAC_SHM_NOT_READY / VAC_TIMEOUT
 * otherwise.
 */
int
vac_read (char **p, int *l, u16 timeout)
{
  svm_queue_t *q;
  api_main_t *am = vlibapi_get_main();
  vac_main_t *pm = &vac_main;
  vl_api_memclnt_keepalive_t *mp;
  vl_api_memclnt_keepalive_reply_t *rmp;
  uword msg;
  msgbuf_t *msgbuf;
  int rv;
  vl_shmem_hdr_t *shmem_hdr;

  /* svm_queue_sub(below) returns {-1, -2} */
  if (!pm->connected_to_vlib) return VAC_NOT_CONNECTED;

  *l = 0;

  /* svm_queue_sub(below) returns {-1, -2} */
  if (am->our_pid == 0) return (VAC_SHM_NOT_READY);

  /* Poke timeout thread */
  if (timeout)
    set_timeout(timeout);

  q = am->vl_input_queue;

 again:
  rv = svm_queue_sub(q, (u8 *)&msg, SVM_Q_WAIT, 0);

  if (rv == 0) {
    VL_MSG_API_UNPOISON((void *)msg);
    u16 msg_id = ntohs(*((u16 *)msg));
    switch (msg_id) {
    case VL_API_RX_THREAD_EXIT:
      vl_msg_api_free((void *) msg);
      goto error;
    case VL_API_MEMCLNT_RX_THREAD_SUSPEND:
      goto error;
    case VL_API_MEMCLNT_READ_TIMEOUT:
      goto error;
    case VL_API_MEMCLNT_KEEPALIVE:
      /* Handle an alive-check ping from vpp. */
      mp = (void *)msg;
      rmp = vl_msg_api_alloc (sizeof (*rmp));
      clib_memset (rmp, 0, sizeof (*rmp));
      rmp->_vl_msg_id = ntohs(VL_API_MEMCLNT_KEEPALIVE_REPLY);
      rmp->context = mp->context;
      shmem_hdr = am->shmem_hdr;
      vl_msg_api_send_shmem(shmem_hdr->vl_input_queue, (u8 *)&rmp);
      vl_msg_api_free((void *) msg);
      /*
       * Python code is blissfully unaware of these pings, so
       * act as if it never happened...
       */
      goto again;
    default:
      /* Recover the msgbuf header that precedes the message payload. */
      msgbuf = (msgbuf_t *)(((u8 *)msg) - offsetof(msgbuf_t, data));
      *l = ntohl(msgbuf->data_len);
      if (*l == 0) {
        fprintf(stderr, "Unregistered API message: %d\n", msg_id);
        goto error;
      }
    }
    *p = (char *)msg;
  } else {
    fprintf(stderr, "Read failed with %d\n", rv);
  }
  /* Let timeout notification thread know we're done */
  if (timeout)
    unset_timeout();

  return (rv);

 error:
  if (timeout)
    unset_timeout();
  vl_msg_api_free((void *) msg);
  /* Client might forget to resume RX thread on failure */
  vac_rx_resume ();
  return VAC_TIMEOUT;
}

/*
 * XXX: Makes the assumption that client_index is the first member
 */
typedef struct _vl_api_header {
  u16 _vl_msg_id;
  u32 client_index;
} __attribute__ ((packed)) vl_api_header_t;

/* Our client index as assigned by VPP at connect time. */
static u32
vac_client_index (void)
{
  return (vlibapi_get_main()->my_client_index);
}

/*
 * Send an API message of l bytes starting at p.  The message is copied
 * into a freshly allocated API buffer, stamped with our client index and
 * enqueued on VPP's input queue.  Returns svm_queue_add's result
 * (0 on success) or VAC_NOT_CONNECTED / -1 on failure.
 */
int
vac_write (char *p, int l)
{
  int rv = -1;
  api_main_t *am = vlibapi_get_main();
  vl_api_header_t *mp = vl_msg_api_alloc(l);
  svm_queue_t *q;
  vac_main_t *pm = &vac_main;

  if (!pm->connected_to_vlib) return VAC_NOT_CONNECTED;
  if (!mp) return (-1);

  memcpy(mp, p, l);
  mp->client_index = vac_client_index();
  q = am->shmem_hdr->vl_input_queue;
  rv = svm_queue_add(q, (u8 *)&mp, 0);
  if (rv != 0) {
    fprintf(stderr, "vpe_api_write fails: %d\n", rv);
    /* Clear message */
    vac_free(mp);
  }
  return (rv);
}

/* Look up a message index by its "name_crc" string. */
int
vac_get_msg_index (char * name)
{
  return vl_msg_api_get_msg_index ((u8 *)name);
}

/* Largest message index present in the API message table. */
int
vac_msg_table_max_index(void)
{
  int max = 0;
  hash_pair_t *hp;
  uword *h = vac_msg_table_get_hash();
  hash_foreach_pair (hp, h,
  ({
    if (hp->value[0] > max)
      max = hp->value[0];
  }));

  return max;
}

/* Register an error callback with the clib error subsystem. */
void
vac_set_error_handler (vac_error_callback_t cb)
{
  assert (clib_mem_get_heap ());

  if (cb) clib_error_register_handler (cb, 0);
}

/*
 * Required if application doesn't use a VPP heap.
 * size == 0 selects the 1 GiB default heap; idempotent.
 */
void
vac_mem_init (size_t size)
{
  if (mem_initialized)
    return;
  if (size == 0)
    clib_mem_init (0, 1 << 30); // default
  else
    clib_mem_init (0, size);
  mem_initialized = true;
}