-rw-r--r-- | src/tests/vnet/session/tcp_echo.c         | 49
-rw-r--r-- | src/tests/vnet/session/udp_echo.c         |  5
-rw-r--r-- | src/vcl/vppcom.c                          | 81
-rw-r--r-- | src/vnet/session-apps/echo_client.c       |  6
-rw-r--r-- | src/vnet/session-apps/proxy.c             |  6
-rw-r--r-- | src/vnet/session/application_interface.h  | 12
-rw-r--r-- | src/vnet/session/application_worker.c     |  4
-rw-r--r-- | src/vnet/session/session.c                |  7
-rw-r--r-- | src/vnet/session/session_node.c           |  6
-rw-r--r-- | src/vnet/session/session_types.h          |  2
-rw-r--r-- | test/test_vcl.py                          | 33
11 files changed, 75 insertions, 136 deletions
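
In short, this patch makes session IO events carry the owning session's index instead of a pointer to the event fifo: the session_event_t union member svm_fifo_t *fifo becomes u32 session_index (see the src/vnet/session/session_types.h hunk), app_send_io_evt_to_vpp() and session_send_io_evt_to_thread_custom() take a session index, and every RX/TX event producer and handler is updated to read e->session_index rather than dereferencing e->fifo.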
diff --git a/src/tests/vnet/session/tcp_echo.c b/src/tests/vnet/session/tcp_echo.c
index 435a414d097..3531a5307a5 100644
--- a/src/tests/vnet/session/tcp_echo.c
+++ b/src/tests/vnet/session/tcp_echo.c
@@ -532,15 +532,6 @@ recv_data_chunk (echo_main_t * em, echo_session_t * s, u8 * rx_buf)
   while (n_to_read > 0);
 }
 
-void
-client_handle_rx (echo_main_t * em, session_event_t * e, u8 * rx_buf)
-{
-  echo_session_t *s;
-
-  s = pool_elt_at_index (em->sessions, e->fifo->client_session_index);
-  recv_data_chunk (em, s, rx_buf);
-}
-
 static void
 send_data_chunk (echo_main_t * em, echo_session_t * s)
 {
@@ -596,42 +587,6 @@ client_thread_fn (void *arg)
   pthread_exit (0);
 }
 
-/*
- * Rx thread that handles all connections.
- *
- * Not used.
- */
-void *
-client_rx_thread_fn (void *arg)
-{
-  session_event_t _e, *e = &_e;
-  echo_main_t *em = &echo_main;
-  static u8 *rx_buf = 0;
-  svm_msg_q_msg_t msg;
-
-  vec_validate (rx_buf, 1 << 20);
-
-  while (!em->time_to_stop && em->state != STATE_READY)
-    ;
-
-  while (!em->time_to_stop)
-    {
-      svm_msg_q_sub (em->our_event_queue, &msg, SVM_Q_WAIT, 0);
-      e = svm_msg_q_msg_data (em->our_event_queue, &msg);
-      switch (e->event_type)
-        {
-        case FIFO_EVENT_APP_RX:
-          client_handle_rx (em, e, rx_buf);
-          break;
-        default:
-          clib_warning ("unknown event type %d", e->event_type);
-          break;
-        }
-      svm_msg_q_free_msg (em->our_event_queue, &msg);
-    }
-  pthread_exit (0);
-}
-
 void
 client_send_connect (echo_main_t * em)
 {
@@ -1101,7 +1056,7 @@ server_handle_rx (echo_main_t * em, session_event_t * e)
   u32 offset, to_dequeue;
   echo_session_t *s;
 
-  s = pool_elt_at_index (em->sessions, e->fifo->client_session_index);
+  s = pool_elt_at_index (em->sessions, e->session_index);
 
   /* Clear event only once. Otherwise, if we do it in the loop by calling
    * app_recv_stream, we may end up with a lot of unhandled rx events on the
@@ -1161,7 +1116,7 @@ server_handle_mq (echo_main_t * em)
       e = svm_msg_q_msg_data (em->our_event_queue, &msg);
       switch (e->event_type)
         {
-        case FIFO_EVENT_APP_RX:
+        case SESSION_IO_EVT_RX:
          server_handle_rx (em, e);
          break;
        default:
diff --git a/src/tests/vnet/session/udp_echo.c b/src/tests/vnet/session/udp_echo.c
index 9fda73d1307..4fd6c8635a8 100644
--- a/src/tests/vnet/session/udp_echo.c
+++ b/src/tests/vnet/session/udp_echo.c
@@ -1014,7 +1014,8 @@ server_handle_fifo_event_rx (udp_echo_main_t * utm, u32 session_index)
 
          /* If event wasn't set, add one */
          if (svm_fifo_set_event (tx_fifo))
-           app_send_io_evt_to_vpp (session->vpp_evt_q, tx_fifo,
+           app_send_io_evt_to_vpp (session->vpp_evt_q,
+                                   tx_fifo->master_session_index,
                                    SESSION_IO_EVT_TX, SVM_Q_WAIT);
        }
     }
@@ -1043,7 +1044,7 @@ server_handle_event_queue (udp_echo_main_t * utm)
       switch (e->event_type)
        {
        case SESSION_IO_EVT_RX:
-         server_handle_fifo_event_rx (utm, e->fifo->client_session_index);
+         server_handle_fifo_event_rx (utm, e->session_index);
          break;
 
        default:
diff --git a/src/vcl/vppcom.c b/src/vcl/vppcom.c
index 3abde98288a..fa37a1da68f 100644
--- a/src/vcl/vppcom.c
+++ b/src/vcl/vppcom.c
@@ -649,7 +649,7 @@ vcl_handle_mq_event (vcl_worker_t * wrk, session_event_t * e)
     {
     case SESSION_IO_EVT_RX:
     case SESSION_IO_EVT_TX:
-      session = vcl_session_get (wrk, e->fifo->client_session_index);
+      session = vcl_session_get (wrk, e->session_index);
       if (!session || !(session->session_state & STATE_OPEN))
        break;
       vec_add1 (wrk->unhandled_evts_vector, *e);
@@ -1491,8 +1491,7 @@ vppcom_session_connect (uint32_t session_handle, vppcom_endpt_t * server_ep)
 static u8
 vcl_is_rx_evt_for_session (session_event_t * e, u32 sid, u8 is_ct)
 {
-  return (e->event_type == SESSION_IO_EVT_RX
-         && e->fifo->client_session_index == sid);
+  return (e->event_type == SESSION_IO_EVT_RX && e->session_index == sid);
 }
 
 static inline int
@@ -1529,20 +1528,19 @@ vppcom_session_read_internal (uint32_t session_handle, void *buf, int n,
   rx_fifo = is_ct ? s->ct_rx_fifo : s->rx_fifo;
   s->has_rx_evt = 0;
 
-  if (is_ct)
-    svm_fifo_unset_event (s->rx_fifo);
-
   if (svm_fifo_is_empty (rx_fifo))
     {
-      svm_fifo_unset_event (rx_fifo);
       if (is_nonblocking)
-       return VPPCOM_EWOULDBLOCK;
+       {
+         svm_fifo_unset_event (s->rx_fifo);
+         return VPPCOM_EWOULDBLOCK;
+       }
       while (svm_fifo_is_empty (rx_fifo))
        {
          if (vcl_session_is_closing (s))
            return vcl_session_closing_error (s);
-         svm_fifo_unset_event (rx_fifo);
+         svm_fifo_unset_event (s->rx_fifo);
          svm_msg_q_lock (mq);
          if (svm_msg_q_is_empty (mq))
            svm_msg_q_wait (mq);
@@ -1551,10 +1549,7 @@ vppcom_session_read_internal (uint32_t session_handle, void *buf, int n,
          e = svm_msg_q_msg_data (mq, &msg);
          svm_msg_q_unlock (mq);
          if (!vcl_is_rx_evt_for_session (e, s->session_index, is_ct))
-           {
-             clib_warning ("THIS ONE type %u", e->event_type);
-             vcl_handle_mq_event (wrk, e);
-           }
+           vcl_handle_mq_event (wrk, e);
          svm_msg_q_free_msg (mq, &msg);
        }
     }
@@ -1565,7 +1560,7 @@ vppcom_session_read_internal (uint32_t session_handle, void *buf, int n,
     n_read = app_recv_stream_raw (rx_fifo, buf, n, 0, peek);
 
   if (svm_fifo_is_empty (rx_fifo))
-    svm_fifo_unset_event (rx_fifo);
+    svm_fifo_unset_event (s->rx_fifo);
 
   VDBG (2, "vpp handle 0x%llx, sid %u: read %d bytes from (%p)", s->vpp_handle,
        session_handle, n_read, rx_fifo);
@@ -1676,8 +1671,7 @@ vppcom_data_segment_copy (void *buf, vppcom_data_segments_t ds, u32 max_bytes)
 static u8
 vcl_is_tx_evt_for_session (session_event_t * e, u32 sid, u8 is_ct)
 {
-  return (e->event_type == SESSION_IO_EVT_TX
-         && e->fifo->client_session_index == sid);
+  return (e->event_type == SESSION_IO_EVT_TX && e->session_index == sid);
 }
 
 static inline int
@@ -1758,7 +1752,8 @@ vppcom_session_write_inline (uint32_t session_handle, void *buf, size_t n,
                                   !is_ct /* do_evt */ , SVM_Q_WAIT);
 
   if (is_ct && svm_fifo_set_event (s->tx_fifo))
-    app_send_io_evt_to_vpp (s->vpp_evt_q, s->tx_fifo, et, SVM_Q_WAIT);
+    app_send_io_evt_to_vpp (s->vpp_evt_q, s->tx_fifo->master_session_index,
+                           et, SVM_Q_WAIT);
 
   ASSERT (n_write > 0);
 
@@ -1782,13 +1777,22 @@ vppcom_session_write_msg (uint32_t session_handle, void *buf, size_t n)
                                       1 /* is_flush */ );
 }
 
-#define vcl_fifo_rx_evt_valid_or_break(_fifo)                  \
-if (PREDICT_FALSE (svm_fifo_is_empty (_fifo)))                 \
-  {                                                            \
-    svm_fifo_unset_event (_fifo);                              \
-    if (svm_fifo_is_empty (_fifo))                             \
-      break;                                                   \
-  }                                                            \
+#define vcl_fifo_rx_evt_valid_or_break(_s)                     \
+if (PREDICT_FALSE (svm_fifo_is_empty (_s->rx_fifo)))           \
+  {                                                            \
+    if (!vcl_session_is_ct (_s))                               \
+      {                                                                \
+       svm_fifo_unset_event (_s->rx_fifo);                     \
+       if (svm_fifo_is_empty (_s->rx_fifo))                    \
+         break;                                                \
+      }                                                                \
+    else if (svm_fifo_is_empty (_s->ct_rx_fifo))               \
+      {                                                                \
+       svm_fifo_unset_event (_s->ct_rx_fifo);                  \
+       if (svm_fifo_is_empty (_s->ct_rx_fifo))                 \
+         break;                                                \
+      }                                                                \
+  }                                                            \
 
 static void
 vcl_select_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
@@ -1803,10 +1807,10 @@ vcl_select_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
   switch (e->event_type)
     {
-    case FIFO_EVENT_APP_RX:
-      vcl_fifo_rx_evt_valid_or_break (e->fifo);
-      sid = e->fifo->client_session_index;
+    case SESSION_IO_EVT_RX:
+      sid = e->session_index;
       session = vcl_session_get (wrk, sid);
+      vcl_fifo_rx_evt_valid_or_break (session);
       if (!session)
        break;
       if (sid < n_bits && read_map)
        {
@@ -1816,7 +1820,7 @@ vcl_select_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
        }
       break;
     case FIFO_EVENT_APP_TX:
-      sid = e->fifo->client_session_index;
+      sid = e->session_index;
       session = vcl_session_get (wrk, sid);
       if (!session)
        break;
@@ -1937,8 +1941,10 @@ vppcom_select_condvar (vcl_worker_t * wrk, int n_bits, u32 * bits_set)
 {
   time_to_wait = (time_to_wait == -1) ? 1e6 : time_to_wait;
-  return vcl_select_handle_mq (wrk, wrk->app_event_queue, n_bits, read_map,
-                              write_map, except_map, time_to_wait, bits_set);
+  vcl_select_handle_mq (wrk, wrk->app_event_queue, n_bits, read_map,
+                       write_map, except_map, (bits_set ? 0 : time_to_wait),
+                       bits_set);
+  return *bits_set;
 }
 
 static int
@@ -2069,7 +2075,7 @@ vep_verify_epoll_chain (vcl_worker_t * wrk, u32 vep_idx)
   vppcom_epoll_t *vep;
   u32 sid = vep_idx;
 
-  if (VPPCOM_DEBUG <= 1)
+  if (VPPCOM_DEBUG <= 2)
     return;
 
   /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
@@ -2354,11 +2360,10 @@ vcl_epoll_wait_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
   switch (e->event_type)
     {
     case SESSION_IO_EVT_RX:
-      ASSERT (e->fifo->client_thread_index == vcl_get_worker_index ());
-      vcl_fifo_rx_evt_valid_or_break (e->fifo);
-      sid = e->fifo->client_session_index;
+      sid = e->session_index;
       if (!(session = vcl_session_get (wrk, sid)))
        break;
+      vcl_fifo_rx_evt_valid_or_break (session);
       session_events = session->vep.ev.events;
       if (!(EPOLLIN & session->vep.ev.events) || session->has_rx_evt)
        break;
@@ -2368,7 +2373,7 @@ vcl_epoll_wait_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
       session->has_rx_evt = 1;
       break;
     case SESSION_IO_EVT_TX:
-      sid = e->fifo->client_session_index;
+      sid = e->session_index;
       if (!(session = vcl_session_get (wrk, sid)))
        break;
       session_events = session->vep.ev.events;
@@ -2513,8 +2518,9 @@ vppcom_epoll_wait_condvar (vcl_worker_t * wrk, struct epoll_event *events,
                           int maxevents, u32 n_evts, double wait_for_time)
 {
   wait_for_time = (wait_for_time == -1) ? (double) 1e6 : wait_for_time;
-  return vcl_epoll_wait_handle_mq (wrk, wrk->app_event_queue, events,
-                                  maxevents, wait_for_time, &n_evts);
+  vcl_epoll_wait_handle_mq (wrk, wrk->app_event_queue, events, maxevents,
+                           (n_evts ? 0 : wait_for_time), &n_evts);
+  return n_evts;
 }
 
 static int
@@ -2583,7 +2589,6 @@ vppcom_epoll_wait (uint32_t vep_handle, struct epoll_event *events,
              break;
            }
        }
-      vec_delete (wrk->unhandled_evts_vector, i, 0);
     }
 
diff --git a/src/vnet/session-apps/echo_client.c b/src/vnet/session-apps/echo_client.c
index 4d8089cde1b..c15798dedee 100644
--- a/src/vnet/session-apps/echo_client.c
+++ b/src/vnet/session-apps/echo_client.c
@@ -62,7 +62,8 @@ send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
          svm_fifo_t *f = s->data.tx_fifo;
          rv = clib_min (svm_fifo_max_enqueue (f), bytes_this_chunk);
          svm_fifo_enqueue_nocopy (f, rv);
-         session_send_io_evt_to_thread_custom (f, s->thread_index,
+         session_send_io_evt_to_thread_custom (&f->master_session_index,
+                                               s->thread_index,
                                                SESSION_IO_EVT_TX);
        }
       else
@@ -95,7 +96,8 @@ send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
              hdr.lcl_port = at->lcl_port;
              svm_fifo_enqueue_nowait (f, sizeof (hdr), (u8 *) & hdr);
              svm_fifo_enqueue_nocopy (f, rv);
-             session_send_io_evt_to_thread_custom (f, s->thread_index,
+             session_send_io_evt_to_thread_custom (&f->master_session_index,
+                                                   s->thread_index,
                                                    SESSION_IO_EVT_TX);
            }
          else
diff --git a/src/vnet/session-apps/proxy.c b/src/vnet/session-apps/proxy.c
index 1ee5f5a741c..2e03ccc9f91 100644
--- a/src/vnet/session-apps/proxy.c
+++ b/src/vnet/session-apps/proxy.c
@@ -212,7 +212,8 @@ proxy_rx_callback (session_t * s)
       if (svm_fifo_set_event (active_open_tx_fifo))
        {
          u32 ao_thread_index = active_open_tx_fifo->master_thread_index;
-         if (session_send_io_evt_to_thread_custom (active_open_tx_fifo,
+         u32 ao_session_index = active_open_tx_fifo->master_session_index;
+         if (session_send_io_evt_to_thread_custom (&ao_session_index,
                                                    ao_thread_index,
                                                    SESSION_IO_EVT_TX))
            clib_warning ("failed to enqueue tx evt");
@@ -356,7 +357,8 @@ active_open_rx_callback (session_t * s)
   if (svm_fifo_set_event (proxy_tx_fifo))
     {
       u8 thread_index = proxy_tx_fifo->master_thread_index;
-      return session_send_io_evt_to_thread_custom (proxy_tx_fifo,
+      u32 session_index = proxy_tx_fifo->master_session_index;
+      return session_send_io_evt_to_thread_custom (&session_index,
                                                    thread_index,
                                                    SESSION_IO_EVT_TX);
     }
diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h
index d4dfeec54dc..56d034e18ed 100644
--- a/src/vnet/session/application_interface.h
+++ b/src/vnet/session/application_interface.h
@@ -407,7 +407,7 @@ app_send_ctrl_evt_to_vpp (svm_msg_q_t * mq, app_session_evt_t * app_evt)
  * @return 0 if success, negative integer otherwise
  */
 static inline int
-app_send_io_evt_to_vpp (svm_msg_q_t * mq, svm_fifo_t * f, u8 evt_type,
+app_send_io_evt_to_vpp (svm_msg_q_t * mq, u32 session_index, u8 evt_type,
                        u8 noblock)
 {
   session_event_t *evt;
@@ -429,7 +429,7 @@ app_send_io_evt_to_vpp (svm_msg_q_t * mq, svm_fifo_t * f, u8 evt_type,
          return -2;
        }
       evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
-      evt->fifo = f;
+      evt->session_index = session_index;
       evt->event_type = evt_type;
       svm_msg_q_add_and_unlock (mq, &msg);
       return 0;
@@ -441,7 +441,7 @@ app_send_io_evt_to_vpp (svm_msg_q_t * mq, svm_fifo_t * f, u8 evt_type,
        svm_msg_q_wait (mq);
       msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
       evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
-      evt->fifo = f;
+      evt->session_index = session_index;
       evt->event_type = evt_type;
       if (svm_msg_q_is_full (mq))
        svm_msg_q_wait (mq);
@@ -478,7 +478,8 @@ app_send_dgram_raw (svm_fifo_t * f, app_session_transport_t * at,
   if ((rv = svm_fifo_enqueue_nowait (f, actual_write, data)) > 0)
     {
       if (do_evt && svm_fifo_set_event (f))
-       app_send_io_evt_to_vpp (vpp_evt_q, f, evt_type, noblock);
+       app_send_io_evt_to_vpp (vpp_evt_q, f->master_session_index, evt_type,
+                               noblock);
     }
   ASSERT (rv);
   return rv;
@@ -501,7 +502,8 @@ app_send_stream_raw (svm_fifo_t * f, svm_msg_q_t * vpp_evt_q, u8 * data,
   if ((rv = svm_fifo_enqueue_nowait (f, len, data)) > 0)
     {
      if (do_evt && svm_fifo_set_event (f))
-       app_send_io_evt_to_vpp (vpp_evt_q, f, evt_type, noblock);
+       app_send_io_evt_to_vpp (vpp_evt_q, f->master_session_index, evt_type,
+                               noblock);
     }
   return rv;
 }
diff --git a/src/vnet/session/application_worker.c b/src/vnet/session/application_worker.c
index 7c888882093..85a6fede429 100644
--- a/src/vnet/session/application_worker.c
+++ b/src/vnet/session/application_worker.c
@@ -562,7 +562,7 @@ app_send_io_evt_rx (app_worker_t * app_wrk, session_t * s, u8 lock)
   ASSERT (!svm_msg_q_msg_is_invalid (&msg));
 
   evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
-  evt->fifo = s->rx_fifo;
+  evt->session_index = s->rx_fifo->client_session_index;
   evt->event_type = SESSION_IO_EVT_RX;
 
   (void) svm_fifo_set_event (s->rx_fifo);
@@ -599,7 +599,7 @@ app_send_io_evt_tx (app_worker_t * app_wrk, session_t * s, u8 lock)
 
   evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
   evt->event_type = SESSION_IO_EVT_TX;
-  evt->fifo = s->tx_fifo;
+  evt->session_index = s->tx_fifo->client_session_index;
 
   return app_enqueue_evt (mq, &msg, lock);
 }
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index 6e24d562f98..0a294dc6be1 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -66,7 +66,7 @@ session_send_evt_to_thread (void *data, void *args, u32 thread_index,
     case SESSION_IO_EVT_TX:
     case SESSION_IO_EVT_TX_FLUSH:
     case SESSION_IO_EVT_BUILTIN_RX:
-      evt->fifo = data;
+      evt->session_index = *(u32 *) data;
       break;
     case SESSION_IO_EVT_BUILTIN_TX:
     case SESSION_CTRL_EVT_CLOSE:
@@ -85,7 +85,8 @@ session_send_evt_to_thread (void *data, void *args, u32 thread_index,
 int
 session_send_io_evt_to_thread (svm_fifo_t * f, session_evt_type_t evt_type)
 {
-  return session_send_evt_to_thread (f, 0, f->master_thread_index, evt_type);
+  return session_send_evt_to_thread (&f->master_session_index, 0,
+                                    f->master_thread_index, evt_type);
 }
 
 int
@@ -560,7 +561,7 @@ session_main_flush_enqueue_events (u8 transport_proto, u32 thread_index)
          continue;
        }
 
-      if (svm_fifo_is_empty (s->rx_fifo))
+      if (svm_fifo_has_event (s->rx_fifo) || svm_fifo_is_empty (s->rx_fifo))
        continue;
 
       if (PREDICT_FALSE (session_enqueue_notify_inline (s)))
diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c
index db5123b8b2d..7cbd0d9ab04 100644
--- a/src/vnet/session/session_node.c
+++ b/src/vnet/session/session_node.c
@@ -821,7 +821,7 @@ session_tx_fifo_dequeue_internal (vlib_main_t * vm,
   session_t *s = wrk->ctx.s;
   application_t *app;
 
-  if (PREDICT_FALSE (s->session_state == SESSION_STATE_CLOSED))
+  if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
     return 0;
   app = application_get (s->t_app_index);
   svm_fifo_unset_event (s->tx_fifo);
@@ -831,7 +831,7 @@ session_tx_fifo_dequeue_internal (vlib_main_t * vm,
 always_inline session_t *
 session_event_get_session (session_event_t * e, u8 thread_index)
 {
-  return session_get_if_valid (e->fifo->master_session_index, thread_index);
+  return session_get_if_valid (e->session_index, thread_index);
 }
 
 static void
@@ -1103,7 +1103,7 @@ session_node_cmp_event (session_event_t * e, svm_fifo_t * f)
     case SESSION_IO_EVT_RX:
     case SESSION_IO_EVT_TX:
     case SESSION_IO_EVT_BUILTIN_RX:
-      if (e->fifo == f)
+      if (e->session_index == f->master_session_index)
        return 1;
       break;
     case SESSION_CTRL_EVT_CLOSE:
diff --git a/src/vnet/session/session_types.h b/src/vnet/session/session_types.h
index 9e51d69db42..3b6ab3dce88 100644
--- a/src/vnet/session/session_types.h
+++ b/src/vnet/session/session_types.h
@@ -326,7 +326,7 @@ typedef struct
   u8 postponed;
   union
   {
-    svm_fifo_t *fifo;
+    u32 session_index;
     session_handle_t session_handle;
     session_rpc_args_t rpc_args;
     struct
diff --git a/test/test_vcl.py b/test/test_vcl.py
index 9a8662d2275..b60e0152051 100644
--- a/test/test_vcl.py
+++ b/test/test_vcl.py
@@ -371,37 +371,6 @@ class VCLCutThruTestCase(VCLTestCase):
                               self.client_bi_dir_nsock_test_args)
 
 
-class LDPThruHostStackEcho(VCLTestCase):
-    """ LDP Thru Host Stack Echo """
-
-    @classmethod
-    def setUpClass(cls):
-        super(LDPThruHostStackEcho, cls).setUpClass()
-
-    @classmethod
-    def tearDownClass(cls):
-        super(LDPThruHostStackEcho, cls).tearDownClass()
-
-    def setUp(self):
-        super(LDPThruHostStackEcho, self).setUp()
-
-        self.thru_host_stack_setup()
-        self.client_echo_test_args = ["-E", self.echo_phrase, "-X",
-                                      self.loop0.local_ip4,
-                                      self.server_port]
-
-    def tearDown(self):
-        self.thru_host_stack_tear_down()
-        super(LDPThruHostStackEcho, self).tearDown()
-
-    def test_ldp_thru_host_stack_echo(self):
-        """ run LDP thru host stack echo test """
-
-        self.thru_host_stack_test("sock_test_server", self.server_args,
-                                  "sock_test_client",
-                                  self.client_echo_test_args)
-
-
 class VCLThruHostStackEcho(VCLTestCase):
     """ VCL Thru Host Stack Echo """
 
@@ -494,6 +463,7 @@ class VCLThruHostStackBidirNsock(VCLTestCase):
                              self.server_port]
 
     def tearDown(self):
+        self.logger.debug(self.vapi.cli("show session verbose 2"))
         self.thru_host_stack_tear_down()
         super(VCLThruHostStackBidirNsock, self).tearDown()
 
@@ -537,6 +507,7 @@ class LDPThruHostStackBidirNsock(VCLTestCase):
                              self.server_port]
 
     def tearDown(self):
+        self.logger.debug(self.vapi.cli("show session verbose 2"))
         self.thru_host_stack_tear_down()
         super(LDPThruHostStackBidirNsock, self).tearDown()
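
For application code built against this API, posting an IO event now takes the session index rather than the fifo pointer. A minimal sketch, mirroring what the patched echo apps and VCL do (the fifo, vpp_evt_q and master_session_index fields are taken from the hunks above; the session variable s is assumed):

    /* Sketch only: tell vpp that data was enqueued on a session's tx fifo,
     * using the index-based app_send_io_evt_to_vpp() signature above. */
    if (svm_fifo_set_event (s->tx_fifo))
      app_send_io_evt_to_vpp (s->vpp_evt_q, s->tx_fifo->master_session_index,
                              SESSION_IO_EVT_TX, SVM_Q_WAIT);

On the vpp side the handler resolves the event back to a session with session_get_if_valid (e->session_index, thread_index), so the fifo no longer needs to be dereferenced from the event at all.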