| author | Florin Coras <fcoras@cisco.com> | 2018-02-08 15:10:09 -0800 |
|---|---|---|
| committer | Florin Coras <fcoras@cisco.com> | 2018-02-14 00:54:43 -0800 |
| commit | f8f516a8b0ccab2f5d9796f90419bf2661c750af (patch) | |
| tree | f02f6c01ed1bf33aeb4ebb5714af470e537f87c2 /src/vnet/session/segment_manager.c | |
| parent | 7758bf68a03a32f17c07154172157f5bdf30e684 (diff) | |
session: support local sessions and deprecate redirects
Memfd-backed shared memory segments can only be negotiated over sockets.
For such scenarios, the existing redirect mechanism that establishes
cut-through sessions no longer works, as the two peer applications
do not share such a socket.
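
The constraint above is an OS-level one. As an illustrative aside (plain POSIX/Linux code, not part of this patch, and the helper name is invented), a memfd segment is identified only by a file descriptor, and a descriptor can reach another process only as SCM_RIGHTS ancillary data on an AF_UNIX socket:

```c
/* Illustrative sketch, not VPP code: pass a memfd segment fd to a peer.
 * The fd (e.g. obtained from memfd_create ()) can only cross the process
 * boundary as SCM_RIGHTS ancillary data on a Unix-domain socket. */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_segment_fd (int unix_sock, int segment_fd)
{
  char b = 'S';					/* one dummy payload byte */
  struct iovec iov = { .iov_base = &b, .iov_len = 1 };
  union
  {
    char buf[CMSG_SPACE (sizeof (int))];	/* ancillary data buffer */
    struct cmsghdr align;			/* forces proper alignment */
  } u;
  struct msghdr msg = { 0 };
  struct cmsghdr *cm;

  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = u.buf;
  msg.msg_controllen = sizeof (u.buf);

  cm = CMSG_FIRSTHDR (&msg);
  cm->cmsg_level = SOL_SOCKET;
  cm->cmsg_type = SCM_RIGHTS;			/* kernel duplicates the fd */
  cm->cmsg_len = CMSG_LEN (sizeof (int));
  memcpy (CMSG_DATA (cm), &segment_fd, sizeof (int));

  return sendmsg (unix_sock, &msg, 0) < 0 ? -1 : 0;
}
```
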
This patch adds support for local sessions, as opposed to sessions
backed by a transport connection, in a way that is almost transparent to
the two applications, by reusing the existing binary API messages.
Moreover, all segment allocations are now done entirely through the
segment manager's valloc, so the segment overlaps caused by the
independent allocations previously required for redirects are completely
avoided.
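
The overlap-avoidance argument can be made concrete with a toy allocator (names and layout are hypothetical and only illustrate the idea; this is not VPP's valloc implementation): if every segment base address is handed out by a single authority, two segments can never be mapped over the same range.

```c
/* Toy illustration only (not the VPP valloc API): one central allocator
 * hands out segment base addresses, so independently created segments
 * can never overlap. */
#include <stdint.h>

typedef struct
{
  uint64_t next_base;	/* first unused address in the reserved arena */
  uint64_t end;		/* one past the last usable address */
} va_arena_t;

/* Reserve a non-overlapping, align-aligned range of 'size' bytes.
 * 'align' must be a power of two. Returns 0 when the arena is full. */
static uint64_t
va_arena_alloc (va_arena_t * va, uint64_t size, uint64_t align)
{
  uint64_t base = (va->next_base + align - 1) & ~(align - 1);
  if (base + size > va->end)
    return 0;
  va->next_base = base + size;
  return base;
}
```
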
The one notable characteristic of local session (cut-through from the
apps' perspective) notification messages is that they carry pointers to
two event queues, one for each app peer, instead of one. For
transport-backed sessions one of the queues can be inferred, but for
local sessions it cannot.
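
To make the shape of such a notification concrete, a hypothetical layout is sketched below; the field names are invented for illustration and do not reproduce the actual binary API definition. The point is simply that both peers' event queue addresses must be carried explicitly.

```c
/* Hypothetical layout, for illustration only; these are not the real
 * binary API field names. A cut-through accept notification must carry
 * both peers' event queue addresses, since neither can be derived from
 * a transport connection. */
#include <stdint.h>

typedef struct
{
  uint64_t local_session_handle;	/* identifies the cut-through session */
  uint64_t server_event_queue_address;	/* queue polled by the server app */
  uint64_t client_event_queue_address;	/* queue polled by the client app */
  uint64_t rx_fifo_address;		/* shared-memory fifos for data */
  uint64_t tx_fifo_address;
} local_session_accept_msg_example_t;
```
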
Change-Id: Ia443fb63e2d9d8e43490275062a708f039038175
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'src/vnet/session/segment_manager.c')

| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/vnet/session/segment_manager.c | 53 |

1 file changed, 29 insertions, 24 deletions
```diff
diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c
index f8af3fb45c8..eb2455732e6 100644
--- a/src/vnet/session/segment_manager.c
+++ b/src/vnet/session/segment_manager.c
@@ -27,8 +27,9 @@ static u32 segment_name_counter = 0;
 /**
  * Default fifo and segment size. TODO config.
  */
-u32 default_fifo_size = 1 << 12;
-u32 default_segment_size = 1 << 20;
+static u32 default_fifo_size = 1 << 12;
+static u32 default_segment_size = 1 << 20;
+static u32 default_app_evt_queue_size = 128;
 
 segment_manager_properties_t *
 segment_manager_properties_get (segment_manager_t * sm)
@@ -42,6 +43,7 @@ segment_manager_properties_init (segment_manager_properties_t * props)
   props->add_segment_size = default_segment_size;
   props->rx_fifo_size = default_fifo_size;
   props->tx_fifo_size = default_fifo_size;
+  props->evt_q_size = default_app_evt_queue_size;
   return props;
 }
 
@@ -67,7 +69,7 @@ segment_manager_segment_index (segment_manager_t * sm,
 /**
  * Remove segment without lock
  */
-always_inline void
+void
 segment_manager_del_segment (segment_manager_t * sm,
                              svm_fifo_segment_private_t * fs)
 {
@@ -131,6 +133,7 @@ segment_manager_get_segment_w_lock (segment_manager_t * sm, u32 segment_index)
 void
 segment_manager_segment_reader_unlock (segment_manager_t * sm)
 {
+  ASSERT (sm->segments_rwlock->n_readers > 0);
   clib_rwlock_reader_unlock (&sm->segments_rwlock);
 }
 
@@ -146,7 +149,7 @@ segment_manager_segment_writer_unlock (segment_manager_t * sm)
  * If needed a writer's lock is acquired before allocating a new segment
  * to avoid affecting any of the segments pool readers.
  */
-always_inline int
+int
 segment_manager_add_segment (segment_manager_t * sm, u32 segment_size)
 {
   segment_manager_main_t *smm = &segment_manager_main;
@@ -243,7 +246,7 @@ segment_manager_new ()
  */
 int
 segment_manager_init (segment_manager_t * sm, u32 first_seg_size,
-                      u32 evt_q_size, u32 prealloc_fifo_pairs)
+                      u32 prealloc_fifo_pairs)
 {
   u32 rx_fifo_size, tx_fifo_size, pair_size;
   u32 rx_rounded_data_size, tx_rounded_data_size;
@@ -283,10 +286,11 @@ segment_manager_init (segment_manager_t * sm, u32 first_seg_size,
               return seg_index;
             }
 
+          segment = segment_manager_get_segment (sm, seg_index);
           if (i == 0)
-            sm->event_queue = segment_manager_alloc_queue (sm, evt_q_size);
+            sm->event_queue = segment_manager_alloc_queue (segment,
+                                                           props->evt_q_size);
 
-          segment = segment_manager_get_segment (sm, seg_index);
           svm_fifo_segment_preallocate_fifo_pairs (segment,
                                                    props->rx_fifo_size,
                                                    props->tx_fifo_size,
@@ -304,7 +308,9 @@ segment_manager_init (segment_manager_t * sm, u32 first_seg_size,
           clib_warning ("Failed to allocate segment");
           return seg_index;
         }
-      sm->event_queue = segment_manager_alloc_queue (sm, evt_q_size);
+      segment = segment_manager_get_segment (sm, seg_index);
+      sm->event_queue = segment_manager_alloc_queue (segment,
+                                                     props->evt_q_size);
     }
 
   return 0;
@@ -422,10 +428,10 @@ segment_manager_init_del (segment_manager_t * sm)
     }
 }
 
-always_inline int
-segment_try_alloc_fifos (svm_fifo_segment_private_t * fifo_segment,
-                         u32 rx_fifo_size, u32 tx_fifo_size,
-                         svm_fifo_t ** rx_fifo, svm_fifo_t ** tx_fifo)
+int
+segment_manager_try_alloc_fifos (svm_fifo_segment_private_t * fifo_segment,
+                                 u32 rx_fifo_size, u32 tx_fifo_size,
+                                 svm_fifo_t ** rx_fifo, svm_fifo_t ** tx_fifo)
 {
   rx_fifo_size = clib_max (rx_fifo_size, default_fifo_size);
   *rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, rx_fifo_size,
@@ -466,7 +472,7 @@ segment_manager_alloc_session_fifos (segment_manager_t * sm,
                                      svm_fifo_t ** tx_fifo,
                                      u32 * fifo_segment_index)
 {
-  svm_fifo_segment_private_t *fifo_segment;
+  svm_fifo_segment_private_t *fifo_segment = 0;
   int alloc_fail = 1, rv = 0, new_fs_index;
   segment_manager_properties_t *props;
   u8 added_a_segment = 0;
@@ -481,9 +487,10 @@ segment_manager_alloc_session_fifos (segment_manager_t * sm,
   /* *INDENT-OFF* */
   segment_manager_foreach_segment_w_lock (fifo_segment, sm, ({
-    alloc_fail = segment_try_alloc_fifos (fifo_segment, props->rx_fifo_size,
-                                          props->tx_fifo_size, rx_fifo,
-                                          tx_fifo);
+    alloc_fail = segment_manager_try_alloc_fifos (fifo_segment,
+                                                  props->rx_fifo_size,
+                                                  props->tx_fifo_size,
+                                                  rx_fifo, tx_fifo);
     /* Exit with lock held, drop it after notifying app */
     if (!alloc_fail)
       goto alloc_success;
@@ -528,9 +535,10 @@ alloc_check:
           return SESSION_ERROR_SEG_CREATE;
         }
       fifo_segment = segment_manager_get_segment_w_lock (sm, new_fs_index);
-      alloc_fail = segment_try_alloc_fifos (fifo_segment, props->rx_fifo_size,
-                                            props->tx_fifo_size, rx_fifo,
-                                            tx_fifo);
+      alloc_fail = segment_manager_try_alloc_fifos (fifo_segment,
+                                                    props->rx_fifo_size,
+                                                    props->tx_fifo_size,
+                                                    rx_fifo, tx_fifo);
       added_a_segment = 1;
       goto alloc_check;
     }
@@ -588,16 +596,13 @@ segment_manager_dealloc_fifos (u32 segment_index, svm_fifo_t * rx_fifo,
  * Must be called with lock held
  */
 svm_queue_t *
-segment_manager_alloc_queue (segment_manager_t * sm, u32 queue_size)
+segment_manager_alloc_queue (svm_fifo_segment_private_t * segment,
+                             u32 queue_size)
 {
-  svm_fifo_segment_private_t *segment;
   ssvm_shared_header_t *sh;
   svm_queue_t *q;
   void *oldheap;
 
-  ASSERT (!pool_is_free_index (sm->segments, 0));
-
-  segment = segment_manager_get_segment (sm, 0);
   sh = segment->ssvm.sh;
   oldheap = ssvm_push_heap (sh);
```
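
A hedged usage sketch of the reworked interfaces follows; the wrapper function is invented for illustration, but the functions and fields it calls (segment_manager_get_segment_w_lock(), segment_manager_try_alloc_fifos(), segment_manager_alloc_queue(), props->evt_q_size) are the ones touched by this patch. The fifo-allocation helper is now exported, and the queue allocator takes an explicit segment instead of always using segment 0 of the manager.

```c
/* Hedged sketch, not taken from actual callers: exercise the reworked
 * segment manager interfaces from this patch. */
#include <vnet/session/segment_manager.h>

static int
example_alloc_in_first_segment (segment_manager_t * sm,
                                segment_manager_properties_t * props,
                                svm_fifo_t ** rx_fifo, svm_fifo_t ** tx_fifo)
{
  svm_fifo_segment_private_t *seg;
  int alloc_fail;

  /* Take segment 0 under the readers lock; segment_manager_alloc_queue ()
   * expects to be called with the lock held. */
  seg = segment_manager_get_segment_w_lock (sm, 0);

  /* Formerly the static segment_try_alloc_fifos (); now exported so other
   * session-layer code can place fifos into a chosen segment. */
  alloc_fail = segment_manager_try_alloc_fifos (seg, props->rx_fifo_size,
                                                props->tx_fifo_size,
                                                rx_fifo, tx_fifo);

  /* The queue allocator now works on whatever segment it is handed,
   * using the event queue size carried in the properties. */
  if (!alloc_fail && !sm->event_queue)
    sm->event_queue = segment_manager_alloc_queue (seg, props->evt_q_size);

  segment_manager_segment_reader_unlock (sm);
  return alloc_fail ? -1 : 0;
}
```
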