author     Florin Coras <fcoras@cisco.com>       2018-05-04 15:46:57 -0700
committer  Dave Barach <openvpp@barachs.net>     2018-05-09 11:34:06 +0000
commit     8e43d04ca4f4496aaefc4f5e2b6e1c0951624099 (patch)
tree       3addc5766002d5224dde2c7fa4efe018480830e4 /src/vnet/session-apps/echo_client.c
parent     ee7f0bd9e7ce4106d3b9511b0efede4326bded51 (diff)
session: cleanup session tx function
- rework the function to declutter and avoid building more than one tx frame
- add dual loop although benefits in my tests seem to be minimal
- improve tcp/udp echo external apps. They have slightly better throughput than internal echo apps.
- udp bugfixes

Change-Id: Iea4a245b1b1bb407a7f403dedcce2664a49f774b
Signed-off-by: Florin Coras <fcoras@cisco.com>
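For context, the no-copy transmit path added by this change follows a common fifo pattern: measure the free space in the session's tx fifo, advance the enqueue pointer without copying any payload, and post a tx event to vpp's event queue only if one is not already pending. The sketch below is a simplified illustration of that stream-side pattern using the same helpers that appear in the diff (svm_fifo_max_enqueue, svm_fifo_enqueue_nocopy, svm_fifo_set_event, svm_queue_add); `s` and `bytes_this_chunk` are assumed to be the session and chunk size from send_data_chunk(), and this is not the exact committed code.

    /* Sketch only: no-copy stream enqueue for session `s`. */
    svm_fifo_t *f = s->data.tx_fifo;
    int rv = clib_min (svm_fifo_max_enqueue (f), bytes_this_chunk);

    /* Payload bytes are assumed to already sit in the fifo, so only the
     * enqueue pointer is moved; nothing is copied. */
    svm_fifo_enqueue_nocopy (f, rv);

    /* Notify vpp only if no tx event is already outstanding for this fifo. */
    if (svm_fifo_set_event (f))
      {
        session_fifo_event_t evt;
        evt.fifo = f;
        evt.event_type = FIFO_EVENT_APP_TX;
        svm_queue_add (s->data.vpp_evt_q, (u8 *) & evt, 0 /* nowait */);
      }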
Diffstat (limited to 'src/vnet/session-apps/echo_client.c')
-rw-r--r--  src/vnet/session-apps/echo_client.c  58
1 file changed, 54 insertions(+), 4 deletions(-)
diff --git a/src/vnet/session-apps/echo_client.c b/src/vnet/session-apps/echo_client.c
index d47b0d97337..3c1904cb55e 100644
--- a/src/vnet/session-apps/echo_client.c
+++ b/src/vnet/session-apps/echo_client.c
@@ -56,11 +56,60 @@ send_data_chunk (echo_client_main_t * ecm, session_t * s)
s->bytes_to_send);
if (!ecm->is_dgram)
- rv = app_send_stream (&s->data, test_data + test_buf_offset,
- bytes_this_chunk, 0);
+ {
+ if (ecm->no_copy)
+ {
+ svm_fifo_t *f = s->data.tx_fifo;
+ rv = clib_min (svm_fifo_max_enqueue (f), bytes_this_chunk);
+ svm_fifo_enqueue_nocopy (f, rv);
+ if (svm_fifo_set_event (f))
+ {
+ session_fifo_event_t evt;
+ evt.fifo = f;
+ evt.event_type = FIFO_EVENT_APP_TX;
+ svm_queue_add (s->data.vpp_evt_q, (u8 *) & evt, 0);
+ }
+ }
+ else
+ rv = app_send_stream (&s->data, test_data + test_buf_offset,
+ bytes_this_chunk, 0);
+ }
else
- rv = app_send_dgram (&s->data, test_data + test_buf_offset,
- bytes_this_chunk, 0);
+ {
+ if (ecm->no_copy)
+ {
+ session_dgram_hdr_t hdr;
+ svm_fifo_t *f = s->data.tx_fifo;
+ app_session_transport_t *at = &s->data.transport;
+ u32 max_enqueue = svm_fifo_max_enqueue (f);
+
+ if (max_enqueue <= sizeof (session_dgram_hdr_t))
+ return;
+
+ max_enqueue -= sizeof (session_dgram_hdr_t);
+ rv = clib_min (max_enqueue, bytes_this_chunk);
+
+ hdr.data_length = rv;
+ hdr.data_offset = 0;
+ clib_memcpy (&hdr.rmt_ip, &at->rmt_ip, sizeof (ip46_address_t));
+ hdr.is_ip4 = at->is_ip4;
+ hdr.rmt_port = at->rmt_port;
+ clib_memcpy (&hdr.lcl_ip, &at->lcl_ip, sizeof (ip46_address_t));
+ hdr.lcl_port = at->lcl_port;
+ svm_fifo_enqueue_nowait (f, sizeof (hdr), (u8 *) & hdr);
+ svm_fifo_enqueue_nocopy (f, rv);
+ if (svm_fifo_set_event (f))
+ {
+ session_fifo_event_t evt;
+ evt.fifo = f;
+ evt.event_type = FIFO_EVENT_APP_TX;
+ svm_queue_add (s->data.vpp_evt_q, (u8 *) & evt, 0);
+ }
+ }
+ else
+ rv = app_send_dgram (&s->data, test_data + test_buf_offset,
+ bytes_this_chunk, 0);
+ }
/* If we managed to enqueue data... */
if (rv > 0)
@@ -591,6 +640,7 @@ echo_clients_command_fn (vlib_main_t * vm,
ecm->test_failed = 0;
ecm->vlib_main = vm;
ecm->tls_engine = TLS_ENGINE_OPENSSL;
+ ecm->no_copy = 0;
if (thread_main->n_vlib_mains > 1)
clib_spinlock_init (&ecm->sessions_lock);
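For the datagram branch in the first hunk above, the change first reserves room for a session_dgram_hdr_t, fills it from the session's transport endpoint, then enqueues the header by copy and the payload by pointer advance only. The annotated restatement below mirrors the diff's logic with the same helpers and fields; it is intended purely as a reading aid for the uncommented hunk, not as separate code.

    svm_fifo_t *f = s->data.tx_fifo;
    app_session_transport_t *at = &s->data.transport;
    u32 max_enqueue = svm_fifo_max_enqueue (f);
    session_dgram_hdr_t hdr;
    int rv;

    /* Need space for at least the header plus some payload. */
    if (max_enqueue <= sizeof (session_dgram_hdr_t))
      return;
    max_enqueue -= sizeof (session_dgram_hdr_t);
    rv = clib_min (max_enqueue, bytes_this_chunk);

    /* Per-datagram header: payload length/offset plus the remote and local
     * endpoints the session layer needs to build the udp packet. */
    hdr.data_length = rv;
    hdr.data_offset = 0;
    clib_memcpy (&hdr.rmt_ip, &at->rmt_ip, sizeof (ip46_address_t));
    hdr.is_ip4 = at->is_ip4;
    hdr.rmt_port = at->rmt_port;
    clib_memcpy (&hdr.lcl_ip, &at->lcl_ip, sizeof (ip46_address_t));
    hdr.lcl_port = at->lcl_port;

    /* Header is copied into the fifo; payload space is only reserved. */
    svm_fifo_enqueue_nowait (f, sizeof (hdr), (u8 *) & hdr);
    svm_fifo_enqueue_nocopy (f, rv);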