From 7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 19 Dec 2016 23:05:39 +0100 Subject: Reorganize source tree to use single autotools instance Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23 Signed-off-by: Damjan Marion --- src/vlibmemory/memory_shared.c | 852 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 852 insertions(+) create mode 100644 src/vlibmemory/memory_shared.c (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c new file mode 100644 index 00000000..d8d32004 --- /dev/null +++ b/src/vlibmemory/memory_shared.c @@ -0,0 +1,852 @@ +/* + *------------------------------------------------------------------ + * memclnt_shared.c - API message handling, common code for both clients + * and the vlib process itself. + * + * + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define vl_typedefs +#include +#undef vl_typedefs + +static inline void * +vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null) +{ + int i; + msgbuf_t *rv; + ring_alloc_t *ap; + unix_shared_memory_queue_t *q; + void *oldheap; + vl_shmem_hdr_t *shmem_hdr; + api_main_t *am = &api_main; + + shmem_hdr = am->shmem_hdr; + + if (shmem_hdr == 0) + { + clib_warning ("shared memory header NULL"); + return 0; + } + + /* account for the msgbuf_t header */ + nbytes += sizeof (msgbuf_t); + + if (shmem_hdr->vl_rings == 0) + { + clib_warning ("vl_rings NULL"); + ASSERT (0); + abort (); + } + + if (shmem_hdr->client_rings == 0) + { + clib_warning ("client_rings NULL"); + ASSERT (0); + abort (); + } + + ap = pool ? shmem_hdr->vl_rings : shmem_hdr->client_rings; + for (i = 0; i < vec_len (ap); i++) + { + /* Too big? */ + if (nbytes > ap[i].size) + { + continue; + } + + q = ap[i].rp; + if (pool == 0) + { + pthread_mutex_lock (&q->mutex); + } + rv = (msgbuf_t *) (&q->data[0] + q->head * q->elsize); + /* + * Is this item still in use? + */ + if (rv->q) + { + /* yes, loser; try next larger pool */ + ap[i].misses++; + if (pool == 0) + pthread_mutex_unlock (&q->mutex); + continue; + } + /* OK, we have a winner */ + ap[i].hits++; + /* + * Remember the source queue, although we + * don't need to know the queue to free the item. + */ + rv->q = q; + q->head++; + if (q->head == q->maxsize) + q->head = 0; + + if (pool == 0) + pthread_mutex_unlock (&q->mutex); + goto out; + } + + /* + * Request too big, or head element of all size-compatible rings + * still in use. Fall back to shared-memory malloc. 
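+ *
+ * A caller's-eye sketch (message type hypothetical), mirroring
+ * the pattern used by vl_client_connect() below:
+ *
+ *   vl_api_foo_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ *   memset (mp, 0, sizeof (*mp));
+ *   mp->_vl_msg_id = ntohs (VL_API_FOO);
+ *   vl_msg_api_send_shmem (q, (u8 *) &mp);
+ *
+ * Whether mp came from a ring slot or from the shared heap is
+ * invisible to the caller; vl_msg_api_free() handles both cases.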
+ */ + am->ring_misses++; + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + if (may_return_null) + { + rv = clib_mem_alloc_or_null (nbytes); + if (PREDICT_FALSE (rv == 0)) + { + svm_pop_heap (oldheap); + pthread_mutex_unlock (&am->vlib_rp->mutex); + return 0; + } + } + else + rv = clib_mem_alloc (nbytes); + + rv->q = 0; + svm_pop_heap (oldheap); + pthread_mutex_unlock (&am->vlib_rp->mutex); + +out: + rv->data_len = htonl (nbytes - sizeof (msgbuf_t)); + return (rv->data); +} + +void * +vl_msg_api_alloc (int nbytes) +{ + int pool; + api_main_t *am = &api_main; + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + + /* + * Clients use pool-0, vlib proc uses pool 1 + */ + pool = (am->our_pid == shmem_hdr->vl_pid); + return vl_msg_api_alloc_internal (nbytes, pool, 0 /* may_return_null */ ); +} + +void * +vl_msg_api_alloc_or_null (int nbytes) +{ + int pool; + api_main_t *am = &api_main; + vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; + + pool = (am->our_pid == shmem_hdr->vl_pid); + return vl_msg_api_alloc_internal (nbytes, pool, 1 /* may_return_null */ ); +} + +void * +vl_msg_api_alloc_as_if_client (int nbytes) +{ + return vl_msg_api_alloc_internal (nbytes, 0, 0 /* may_return_null */ ); +} + +void * +vl_msg_api_alloc_as_if_client_or_null (int nbytes) +{ + return vl_msg_api_alloc_internal (nbytes, 0, 1 /* may_return_null */ ); +} + +void +vl_msg_api_free (void *a) +{ + msgbuf_t *rv; + void *oldheap; + api_main_t *am = &api_main; + + rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data)); + + /* + * Here's the beauty of the scheme. Only one proc/thread has + * control of a given message buffer. To free a buffer, we just clear the + * queue field, and leave. No locks, no hits, no errors... + */ + if (rv->q) + { + rv->q = 0; + return; + } + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + clib_mem_free (rv); + svm_pop_heap (oldheap); + pthread_mutex_unlock (&am->vlib_rp->mutex); +} + +static void +vl_msg_api_free_nolock (void *a) +{ + msgbuf_t *rv; + void *oldheap; + api_main_t *am = &api_main; + + rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data)); + /* + * Here's the beauty of the scheme. Only one proc/thread has + * control of a given message buffer. To free a buffer, we just clear the + * queue field, and leave. No locks, no hits, no errors... 
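+ *
+ * Sketch of the two cases handled below:
+ *
+ *   rv->q != 0: ring buffer; clearing rv->q marks the slot free
+ *   rv->q == 0: heap buffer; fall through to clib_mem_free()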
+ */ + if (rv->q) + { + rv->q = 0; + return; + } + + oldheap = svm_push_data_heap (am->vlib_rp); + clib_mem_free (rv); + svm_pop_heap (oldheap); +} + +void +vl_set_memory_root_path (char *name) +{ + api_main_t *am = &api_main; + + am->root_path = name; +} + +void +vl_set_memory_uid (int uid) +{ + api_main_t *am = &api_main; + + am->api_uid = uid; +} + +void +vl_set_memory_gid (int gid) +{ + api_main_t *am = &api_main; + + am->api_gid = gid; +} + +void +vl_set_global_memory_baseva (u64 baseva) +{ + api_main_t *am = &api_main; + + am->global_baseva = baseva; +} + +void +vl_set_global_memory_size (u64 size) +{ + api_main_t *am = &api_main; + + am->global_size = size; +} + +void +vl_set_api_memory_size (u64 size) +{ + api_main_t *am = &api_main; + + am->api_size = size; +} + +void +vl_set_global_pvt_heap_size (u64 size) +{ + api_main_t *am = &api_main; + + am->global_pvt_heap_size = size; +} + +void +vl_set_api_pvt_heap_size (u64 size) +{ + api_main_t *am = &api_main; + + am->api_pvt_heap_size = size; +} + +int +vl_map_shmem (char *region_name, int is_vlib) +{ + svm_map_region_args_t _a, *a = &_a; + svm_region_t *vlib_rp, *root_rp; + void *oldheap; + vl_shmem_hdr_t *shmem_hdr = 0; + api_main_t *am = &api_main; + int i; + struct timespec ts, tsrem; + + if (is_vlib == 0) + svm_region_init_chroot (am->root_path); + + memset (a, 0, sizeof (*a)); + + a->name = region_name; + a->size = am->api_size ? am->api_size : (16 << 20); + a->flags = SVM_FLAGS_MHEAP; + a->uid = am->api_uid; + a->gid = am->api_gid; + a->pvt_heap_size = am->api_pvt_heap_size; + + vlib_rp = svm_region_find_or_create (a); + + if (vlib_rp == 0) + return (-2); + + pthread_mutex_lock (&vlib_rp->mutex); + /* Has someone else set up the shared-memory variable table? */ + if (vlib_rp->user_ctx) + { + am->shmem_hdr = (void *) vlib_rp->user_ctx; + am->our_pid = getpid (); + if (is_vlib) + { + unix_shared_memory_queue_t *q; + uword old_msg; + /* + * application restart. Reset cached pids, API message + * rings, list of clients; otherwise, various things + * fail. (e.g. queue non-empty notification) + */ + + /* ghosts keep the region from disappearing properly */ + svm_client_scan_this_region_nolock (vlib_rp); + am->shmem_hdr->application_restarts++; + q = am->shmem_hdr->vl_input_queue; + am->shmem_hdr->vl_pid = getpid (); + q->consumer_pid = am->shmem_hdr->vl_pid; + /* Drain the input queue, freeing msgs */ + for (i = 0; i < 10; i++) + { + if (pthread_mutex_trylock (&q->mutex) == 0) + { + pthread_mutex_unlock (&q->mutex); + goto mutex_ok; + } + ts.tv_sec = 0; + ts.tv_nsec = 10000 * 1000; /* 10 ms */ + while (nanosleep (&ts, &tsrem) < 0) + ts = tsrem; + } + /* Mutex buggered, "fix" it */ + memset (&q->mutex, 0, sizeof (q->mutex)); + clib_warning ("forcibly release main input queue mutex"); + + mutex_ok: + am->vlib_rp = vlib_rp; + while (unix_shared_memory_queue_sub (q, + (u8 *) & old_msg, + 1 /* nowait */ ) + != -2 /* queue underflow */ ) + { + vl_msg_api_free_nolock ((void *) old_msg); + am->shmem_hdr->restart_reclaims++; + } + pthread_mutex_unlock (&vlib_rp->mutex); + root_rp = svm_get_root_rp (); + ASSERT (root_rp); + /* Clean up the root region client list */ + pthread_mutex_lock (&root_rp->mutex); + svm_client_scan_this_region_nolock (root_rp); + pthread_mutex_unlock (&root_rp->mutex); + } + else + { + pthread_mutex_unlock (&vlib_rp->mutex); + } + am->vlib_rp = vlib_rp; + vec_add1 (am->mapped_shmem_regions, vlib_rp); + return 0; + } + /* Clients simply have to wait... 
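+ * (polling loop below: up to 10000 iterations x 10 ms nanosleep,
+ * i.e. roughly 100 seconds, before unmapping and returning -2)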
*/ + if (!is_vlib) + { + pthread_mutex_unlock (&vlib_rp->mutex); + + /* Wait up to 100 seconds... */ + for (i = 0; i < 10000; i++) + { + ts.tv_sec = 0; + ts.tv_nsec = 10000 * 1000; /* 10 ms */ + while (nanosleep (&ts, &tsrem) < 0) + ts = tsrem; + if (vlib_rp->user_ctx) + goto ready; + } + /* Clean up and leave... */ + svm_region_unmap (vlib_rp); + clib_warning ("region init fail"); + return (-2); + + ready: + am->shmem_hdr = (void *) vlib_rp->user_ctx; + am->our_pid = getpid (); + am->vlib_rp = vlib_rp; + vec_add1 (am->mapped_shmem_regions, vlib_rp); + return 0; + } + + /* Nope, it's our problem... */ + + oldheap = svm_push_data_heap (vlib_rp); + + vec_validate (shmem_hdr, 0); + shmem_hdr->version = VL_SHM_VERSION; + + /* vlib main input queue */ + shmem_hdr->vl_input_queue = + unix_shared_memory_queue_init (1024, sizeof (uword), getpid (), + am->vlib_signal); + + /* Set up the msg ring allocator */ +#define _(sz,n) \ + do { \ + ring_alloc_t _rp; \ + _rp.rp = unix_shared_memory_queue_init ((n), (sz), 0, 0); \ + _rp.size = (sz); \ + _rp.nitems = n; \ + _rp.hits = 0; \ + _rp.misses = 0; \ + vec_add1(shmem_hdr->vl_rings, _rp); \ + } while (0); + + foreach_vl_aring_size; +#undef _ + +#define _(sz,n) \ + do { \ + ring_alloc_t _rp; \ + _rp.rp = unix_shared_memory_queue_init ((n), (sz), 0, 0); \ + _rp.size = (sz); \ + _rp.nitems = n; \ + _rp.hits = 0; \ + _rp.misses = 0; \ + vec_add1(shmem_hdr->client_rings, _rp); \ + } while (0); + + foreach_clnt_aring_size; +#undef _ + + am->shmem_hdr = shmem_hdr; + am->vlib_rp = vlib_rp; + am->our_pid = getpid (); + if (is_vlib) + am->shmem_hdr->vl_pid = am->our_pid; + + svm_pop_heap (oldheap); + + /* + * After absolutely everything that a client might see is set up, + * declare the shmem region valid + */ + vlib_rp->user_ctx = shmem_hdr; + + pthread_mutex_unlock (&vlib_rp->mutex); + vec_add1 (am->mapped_shmem_regions, vlib_rp); + return 0; +} + +void +vl_register_mapped_shmem_region (svm_region_t * rp) +{ + api_main_t *am = &api_main; + + vec_add1 (am->mapped_shmem_regions, rp); +} + +void +vl_unmap_shmem (void) +{ + svm_region_t *rp; + int i; + api_main_t *am = &api_main; + + if (!svm_get_root_rp ()) + return; + + for (i = 0; i < vec_len (am->mapped_shmem_regions); i++) + { + rp = am->mapped_shmem_regions[i]; + svm_region_unmap (rp); + } + + vec_free (am->mapped_shmem_regions); + am->shmem_hdr = 0; + + svm_region_exit (); + /* $$$ more careful cleanup, valgrind run... 
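+ * (note the order above: unmap each registered region, clear
+ * shmem_hdr, then svm_region_exit(); the handler vectors freed
+ * below live on the process-private heap, not in shared memory)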
*/ + vec_free (am->msg_handlers); + vec_free (am->msg_endian_handlers); + vec_free (am->msg_print_handlers); +} + +void +vl_msg_api_send_shmem (unix_shared_memory_queue_t * q, u8 * elem) +{ + api_main_t *am = &api_main; + uword *trace = (uword *) elem; + + if (am->tx_trace && am->tx_trace->enabled) + vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]); + + (void) unix_shared_memory_queue_add (q, elem, 0 /* nowait */ ); +} + +void +vl_msg_api_send_shmem_nolock (unix_shared_memory_queue_t * q, u8 * elem) +{ + api_main_t *am = &api_main; + uword *trace = (uword *) elem; + + if (am->tx_trace && am->tx_trace->enabled) + vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]); + + (void) unix_shared_memory_queue_add_nolock (q, elem); +} + +static void +vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) +{ + serialize_main_t _sm, *sm = &_sm; + api_main_t *am = &api_main; + u8 *tblv; + u32 nmsgs; + int i; + u8 *name_and_crc; + u32 msg_index; + + am->my_client_index = mp->index; + am->my_registration = (vl_api_registration_t *) (uword) mp->handle; + + /* Clean out any previous hash table (unlikely) */ + if (am->msg_index_by_name_and_crc) + { + int i; + u8 **keys = 0; + hash_pair_t *hp; + /* *INDENT-OFF* */ + hash_foreach_pair (hp, am->msg_index_by_name_and_crc, + ({ + vec_add1 (keys, (u8 *) hp->key); + })); + /* *INDENT-ON* */ + for (i = 0; i < vec_len (keys); i++) + vec_free (keys[i]); + vec_free (keys); + } + + am->msg_index_by_name_and_crc = hash_create_string (0, sizeof (uword)); + + /* Recreate the vnet-side API message handler table */ + tblv = (u8 *) mp->message_table; + serialize_open_vector (sm, tblv); + unserialize_integer (sm, &nmsgs, sizeof (u32)); + + for (i = 0; i < nmsgs; i++) + { + msg_index = unserialize_likely_small_unsigned_integer (sm); + unserialize_cstring (sm, (char **) &name_and_crc); + hash_set_mem (am->msg_index_by_name_and_crc, name_and_crc, msg_index); + } +} + +u32 +vl_api_get_msg_index (u8 * name_and_crc) +{ + api_main_t *am = &api_main; + uword *p; + + if (am->msg_index_by_name_and_crc) + { + p = hash_get_mem (am->msg_index_by_name_and_crc, name_and_crc); + if (p) + return p[0]; + } + return ~0; +} + +int +vl_client_connect (char *name, int ctx_quota, int input_queue_size) +{ + svm_region_t *svm; + vl_api_memclnt_create_t *mp; + vl_api_memclnt_create_reply_t *rp; + unix_shared_memory_queue_t *vl_input_queue; + vl_shmem_hdr_t *shmem_hdr; + int rv = 0; + void *oldheap; + api_main_t *am = &api_main; + + if (am->my_registration) + { + clib_warning ("client %s already connected...", name); + return -1; + } + + if (am->vlib_rp == 0) + { + clib_warning ("am->vlib_rp NULL"); + return -1; + } + + svm = am->vlib_rp; + shmem_hdr = am->shmem_hdr; + + if (shmem_hdr == 0 || shmem_hdr->vl_input_queue == 0) + { + clib_warning ("shmem_hdr / input queue NULL"); + return -1; + } + + pthread_mutex_lock (&svm->mutex); + oldheap = svm_push_data_heap (svm); + vl_input_queue = + unix_shared_memory_queue_init (input_queue_size, sizeof (uword), + getpid (), 0); + pthread_mutex_unlock (&svm->mutex); + svm_pop_heap (oldheap); + + am->my_client_index = ~0; + am->my_registration = 0; + am->vl_input_queue = vl_input_queue; + + mp = vl_msg_api_alloc (sizeof (vl_api_memclnt_create_t)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE); + mp->ctx_quota = ctx_quota; + mp->input_queue = (uword) vl_input_queue; + strncpy ((char *) mp->name, name, sizeof (mp->name) - 1); + + vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); + + while 
(1) + { + int qstatus; + struct timespec ts, tsrem; + int i; + + /* Wait up to 10 seconds */ + for (i = 0; i < 1000; i++) + { + qstatus = unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, + 1 /* nowait */ ); + if (qstatus == 0) + goto read_one_msg; + ts.tv_sec = 0; + ts.tv_nsec = 10000 * 1000; /* 10 ms */ + while (nanosleep (&ts, &tsrem) < 0) + ts = tsrem; + } + /* Timeout... */ + clib_warning ("memclnt_create_reply timeout"); + return -1; + + read_one_msg: + if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_CREATE_REPLY) + { + clib_warning ("unexpected reply: id %d", ntohs (rp->_vl_msg_id)); + continue; + } + rv = clib_net_to_host_u32 (rp->response); + + vl_msg_api_handler ((void *) rp); + break; + } + return (rv); +} + +static void +vl_api_memclnt_delete_reply_t_handler (vl_api_memclnt_delete_reply_t * mp) +{ + void *oldheap; + api_main_t *am = &api_main; + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + unix_shared_memory_queue_free (am->vl_input_queue); + pthread_mutex_unlock (&am->vlib_rp->mutex); + svm_pop_heap (oldheap); + + am->my_client_index = ~0; + am->my_registration = 0; + am->vl_input_queue = 0; +} + +void +vl_client_disconnect (void) +{ + vl_api_memclnt_delete_t *mp; + vl_api_memclnt_delete_reply_t *rp; + unix_shared_memory_queue_t *vl_input_queue; + vl_shmem_hdr_t *shmem_hdr; + time_t begin; + api_main_t *am = &api_main; + + ASSERT (am->vlib_rp); + shmem_hdr = am->shmem_hdr; + ASSERT (shmem_hdr && shmem_hdr->vl_input_queue); + + vl_input_queue = am->vl_input_queue; + + mp = vl_msg_api_alloc (sizeof (vl_api_memclnt_delete_t)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_DELETE); + mp->index = am->my_client_index; + mp->handle = (uword) am->my_registration; + + vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); + + /* + * Have to be careful here, in case the client is disconnecting + * because e.g. the vlib process died, or is unresponsive. 
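+ *
+ * Strategy used below, in sketch form: send MEMCLNT_DELETE, then
+ * poll the input queue for up to 2 seconds; on timeout, reset the
+ * local state (client index, registration, shmem_hdr) and leave.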
+ */ + + begin = time (0); + while (1) + { + time_t now; + + now = time (0); + + if (now >= (begin + 2)) + { + clib_warning ("peer unresponsive, give up"); + am->my_client_index = ~0; + am->my_registration = 0; + am->shmem_hdr = 0; + break; + } + if (unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, 1) < 0) + continue; + + /* drain the queue */ + if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY) + { + vl_msg_api_handler ((void *) rp); + continue; + } + vl_msg_api_handler ((void *) rp); + break; + } +} + +static inline vl_api_registration_t * +vl_api_client_index_to_registration_internal (u32 handle) +{ + vl_api_registration_t **regpp; + vl_api_registration_t *regp; + api_main_t *am = &api_main; + u32 index; + + index = vl_msg_api_handle_get_index (handle); + if ((am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK) + != vl_msg_api_handle_get_epoch (handle)) + { + vl_msg_api_increment_missing_client_counter (); + return 0; + } + + regpp = am->vl_clients + index; + + if (pool_is_free (am->vl_clients, regpp)) + { + vl_msg_api_increment_missing_client_counter (); + return 0; + } + regp = *regpp; + return (regp); +} + +vl_api_registration_t * +vl_api_client_index_to_registration (u32 index) +{ + return (vl_api_client_index_to_registration_internal (index)); +} + +unix_shared_memory_queue_t * +vl_api_client_index_to_input_queue (u32 index) +{ + vl_api_registration_t *regp; + api_main_t *am = &api_main; + + /* Special case: vlib trying to send itself a message */ + if (index == (u32) ~ 0) + return (am->shmem_hdr->vl_input_queue); + + regp = vl_api_client_index_to_registration_internal (index); + if (!regp) + return 0; + return (regp->vl_input_queue); +} + +#define foreach_api_client_msg \ +_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ +_(MEMCLNT_DELETE_REPLY, memclnt_delete_reply) + +int +vl_client_api_map (char *region_name) +{ + int rv; + + if ((rv = vl_map_shmem (region_name, 0 /* is_vlib */ )) < 0) + { + return rv; + } + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, 0 /* name */, \ + vl_api_##n##_t_handler, \ + 0/* cleanup */, 0/* endian */, 0/* print */, \ + sizeof(vl_api_##n##_t), 1); + foreach_api_client_msg; +#undef _ + return 0; +} + +void +vl_client_api_unmap (void) +{ + vl_unmap_shmem (); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg From 842b9c59cc21b3e2917aaa25069fb15addf976f1 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Mon, 9 Jan 2017 15:54:00 -0500 Subject: Self-service garbage collection for the API message allocator Change-Id: Iadc08eede15fa5978e4010bbece0232aab8b0fee Signed-off-by: Dave Barach --- src/vlibapi/api.h | 3 ++- src/vlibmemory/api.h | 3 +++ src/vlibmemory/memory_shared.c | 21 +++++++++++++++++++++ src/vlibmemory/memory_vlib.c | 7 ++++--- 4 files changed, 30 insertions(+), 4 deletions(-) (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index 970a0ee0..fcb101d7 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -124,6 +124,7 @@ typedef struct u8 *is_mp_safe; struct ring_alloc_ *arings; u32 ring_misses; + u32 garbage_collects; u32 missing_clients; vl_api_trace_t *rx_trace; vl_api_trace_t *tx_trace; @@ -212,7 +213,7 @@ typedef struct msgbuf_ { unix_shared_memory_queue_t *q; u32 data_len; - u32 pad; + u32 gc_mark_timestamp; u8 data[0]; } msgbuf_t; diff --git a/src/vlibmemory/api.h b/src/vlibmemory/api.h index 54a0a001..8e44c20d 100644 --- a/src/vlibmemory/api.h +++ 
b/src/vlibmemory/api.h @@ -86,6 +86,9 @@ typedef struct vl_shmem_hdr_ /* Number of messages reclaimed during application restart */ u32 restart_reclaims; + /* Number of garbage-collected messages */ + u32 garbage_collects; + } vl_shmem_hdr_t; #define VL_SHM_VERSION 2 diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index d8d32004..c41f32f7 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -95,12 +95,31 @@ vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null) */ if (rv->q) { + u32 now = (u32) time (0); + + if (PREDICT_TRUE (rv->gc_mark_timestamp == 0)) + rv->gc_mark_timestamp = now; + else + { + if (now - rv->gc_mark_timestamp > 10) + { + if (CLIB_DEBUG > 0) + clib_warning ("garbage collect pool %d ring %d index %d", + pool, i, q->head); + shmem_hdr->garbage_collects++; + goto collected; + } + } + + /* yes, loser; try next larger pool */ ap[i].misses++; if (pool == 0) pthread_mutex_unlock (&q->mutex); continue; } + collected: + /* OK, we have a winner */ ap[i].hits++; /* @@ -108,6 +127,7 @@ vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null) * don't need to know the queue to free the item. */ rv->q = q; + rv->gc_mark_timestamp = 0; q->head++; if (q->head == q->maxsize) q->head = 0; @@ -201,6 +221,7 @@ vl_msg_api_free (void *a) if (rv->q) { rv->q = 0; + rv->gc_mark_timestamp = 0; return; } diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index 69f35d72..7d21c9dd 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -853,9 +853,10 @@ vl_api_ring_command (vlib_main_t * vm, vlib_cli_output (vm, "%d ring miss fallback allocations\n", am->ring_misses); - vlib_cli_output (vm, "%d application restarts, %d reclaimed msgs\n", - shmem_hdr->application_restarts, - shmem_hdr->restart_reclaims); + vlib_cli_output + (vm, "%d application restarts, %d reclaimed msgs, %d garbage collects\n", + shmem_hdr->application_restarts, + shmem_hdr->restart_reclaims, shmem_hdr->garbage_collects); return 0; } -- cgit 1.2.3-korg From 5c6c4bfd64722a9a2d410a3e58a817721a083702 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 11 Apr 2017 13:12:48 -0400 Subject: move binary-api client-only routines to memory_client.c Change-Id: I0755f731b1b01e6a1a231948d498c625a2c966b7 Signed-off-by: Dave Barach --- src/vlibmemory/memory_client.c | 250 +++++++++++++++++++++++++++++++++++++++-- src/vlibmemory/memory_shared.c | 241 --------------------------------------- 2 files changed, 241 insertions(+), 250 deletions(-) (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/vlibmemory/memory_client.c b/src/vlibmemory/memory_client.c index 234a0a5a..25b06f65 100644 --- a/src/vlibmemory/memory_client.c +++ b/src/vlibmemory/memory_client.c @@ -104,23 +104,234 @@ vl_api_rx_thread_exit_t_handler (vl_api_rx_thread_exit_t * mp) } static void -noop_handler (void *notused) +vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) { + serialize_main_t _sm, *sm = &_sm; + api_main_t *am = &api_main; + u8 *tblv; + u32 nmsgs; + int i; + u8 *name_and_crc; + u32 msg_index; + + am->my_client_index = mp->index; + am->my_registration = (vl_api_registration_t *) (uword) mp->handle; + + /* Clean out any previous hash table (unlikely) */ + if (am->msg_index_by_name_and_crc) + { + int i; + u8 **keys = 0; + hash_pair_t *hp; + /* *INDENT-OFF* */ + hash_foreach_pair (hp, am->msg_index_by_name_and_crc, + ({ + vec_add1 (keys, (u8 *) hp->key); + })); + /* *INDENT-ON* */ + for (i = 0; i < vec_len 
(keys); i++) + vec_free (keys[i]); + vec_free (keys); + } + + am->msg_index_by_name_and_crc = hash_create_string (0, sizeof (uword)); + + /* Recreate the vnet-side API message handler table */ + tblv = (u8 *) mp->message_table; + serialize_open_vector (sm, tblv); + unserialize_integer (sm, &nmsgs, sizeof (u32)); + + for (i = 0; i < nmsgs; i++) + { + msg_index = unserialize_likely_small_unsigned_integer (sm); + unserialize_cstring (sm, (char **) &name_and_crc); + hash_set_mem (am->msg_index_by_name_and_crc, name_and_crc, msg_index); + } } -#define foreach_api_msg \ -_(RX_THREAD_EXIT, rx_thread_exit) +static void +noop_handler (void *notused) +{ +} -static int -connect_to_vlib_internal (char *svm_name, char *client_name, - int rx_queue_size, int want_pthread) +int +vl_client_connect (char *name, int ctx_quota, int input_queue_size) { + svm_region_t *svm; + vl_api_memclnt_create_t *mp; + vl_api_memclnt_create_reply_t *rp; + unix_shared_memory_queue_t *vl_input_queue; + vl_shmem_hdr_t *shmem_hdr; int rv = 0; - memory_client_main_t *mm = &memory_client_main; + void *oldheap; + api_main_t *am = &api_main; - if ((rv = vl_client_api_map (svm_name))) + if (am->my_registration) + { + clib_warning ("client %s already connected...", name); + return -1; + } + + if (am->vlib_rp == 0) + { + clib_warning ("am->vlib_rp NULL"); + return -1; + } + + svm = am->vlib_rp; + shmem_hdr = am->shmem_hdr; + + if (shmem_hdr == 0 || shmem_hdr->vl_input_queue == 0) + { + clib_warning ("shmem_hdr / input queue NULL"); + return -1; + } + + pthread_mutex_lock (&svm->mutex); + oldheap = svm_push_data_heap (svm); + vl_input_queue = + unix_shared_memory_queue_init (input_queue_size, sizeof (uword), + getpid (), 0); + pthread_mutex_unlock (&svm->mutex); + svm_pop_heap (oldheap); + + am->my_client_index = ~0; + am->my_registration = 0; + am->vl_input_queue = vl_input_queue; + + mp = vl_msg_api_alloc (sizeof (vl_api_memclnt_create_t)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE); + mp->ctx_quota = ctx_quota; + mp->input_queue = (uword) vl_input_queue; + strncpy ((char *) mp->name, name, sizeof (mp->name) - 1); + + vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); + + while (1) + { + int qstatus; + struct timespec ts, tsrem; + int i; + + /* Wait up to 10 seconds */ + for (i = 0; i < 1000; i++) + { + qstatus = unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, + 1 /* nowait */ ); + if (qstatus == 0) + goto read_one_msg; + ts.tv_sec = 0; + ts.tv_nsec = 10000 * 1000; /* 10 ms */ + while (nanosleep (&ts, &tsrem) < 0) + ts = tsrem; + } + /* Timeout... 
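+ * (1000 iterations x 10 ms nanosleep, i.e. roughly 10 seconds,
+ * matching the "Wait up to 10 seconds" note above)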
*/ + clib_warning ("memclnt_create_reply timeout"); + return -1; + + read_one_msg: + if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_CREATE_REPLY) + { + clib_warning ("unexpected reply: id %d", ntohs (rp->_vl_msg_id)); + continue; + } + rv = clib_net_to_host_u32 (rp->response); + + vl_msg_api_handler ((void *) rp); + break; + } + return (rv); +} + +static void +vl_api_memclnt_delete_reply_t_handler (vl_api_memclnt_delete_reply_t * mp) +{ + void *oldheap; + api_main_t *am = &api_main; + + pthread_mutex_lock (&am->vlib_rp->mutex); + oldheap = svm_push_data_heap (am->vlib_rp); + unix_shared_memory_queue_free (am->vl_input_queue); + pthread_mutex_unlock (&am->vlib_rp->mutex); + svm_pop_heap (oldheap); + + am->my_client_index = ~0; + am->my_registration = 0; + am->vl_input_queue = 0; +} + +void +vl_client_disconnect (void) +{ + vl_api_memclnt_delete_t *mp; + vl_api_memclnt_delete_reply_t *rp; + unix_shared_memory_queue_t *vl_input_queue; + vl_shmem_hdr_t *shmem_hdr; + time_t begin; + api_main_t *am = &api_main; + + ASSERT (am->vlib_rp); + shmem_hdr = am->shmem_hdr; + ASSERT (shmem_hdr && shmem_hdr->vl_input_queue); + + vl_input_queue = am->vl_input_queue; + + mp = vl_msg_api_alloc (sizeof (vl_api_memclnt_delete_t)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_DELETE); + mp->index = am->my_client_index; + mp->handle = (uword) am->my_registration; + + vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); + + /* + * Have to be careful here, in case the client is disconnecting + * because e.g. the vlib process died, or is unresponsive. + */ + + begin = time (0); + while (1) + { + time_t now; + + now = time (0); + + if (now >= (begin + 2)) + { + clib_warning ("peer unresponsive, give up"); + am->my_client_index = ~0; + am->my_registration = 0; + am->shmem_hdr = 0; + break; + } + if (unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, 1) < 0) + continue; + + /* drain the queue */ + if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY) + { + vl_msg_api_handler ((void *) rp); + continue; + } + vl_msg_api_handler ((void *) rp); + break; + } +} + +#define foreach_api_msg \ +_(RX_THREAD_EXIT, rx_thread_exit) \ +_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ +_(MEMCLNT_DELETE_REPLY, memclnt_delete_reply) + + +int +vl_client_api_map (char *region_name) +{ + int rv; + + if ((rv = vl_map_shmem (region_name, 0 /* is_vlib */ )) < 0) { - clib_warning ("vl_client_api map rv %d", rv); return rv; } @@ -133,6 +344,27 @@ connect_to_vlib_internal (char *svm_name, char *client_name, sizeof(vl_api_##n##_t), 1); foreach_api_msg; #undef _ + return 0; +} + +void +vl_client_api_unmap (void) +{ + vl_unmap_shmem (); +} + +static int +connect_to_vlib_internal (char *svm_name, char *client_name, + int rx_queue_size, int want_pthread) +{ + int rv = 0; + memory_client_main_t *mm = &memory_client_main; + + if ((rv = vl_client_api_map (svm_name))) + { + clib_warning ("vl_client_api map rv %d", rv); + return rv; + } if (vl_client_connect (client_name, 0 /* punt quota */ , rx_queue_size /* input queue */ ) < 0) diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index c41f32f7..6cea5df9 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -559,52 +559,6 @@ vl_msg_api_send_shmem_nolock (unix_shared_memory_queue_t * q, u8 * elem) (void) unix_shared_memory_queue_add_nolock (q, elem); } -static void -vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) -{ - serialize_main_t _sm, *sm = &_sm; - api_main_t *am = 
&api_main; - u8 *tblv; - u32 nmsgs; - int i; - u8 *name_and_crc; - u32 msg_index; - - am->my_client_index = mp->index; - am->my_registration = (vl_api_registration_t *) (uword) mp->handle; - - /* Clean out any previous hash table (unlikely) */ - if (am->msg_index_by_name_and_crc) - { - int i; - u8 **keys = 0; - hash_pair_t *hp; - /* *INDENT-OFF* */ - hash_foreach_pair (hp, am->msg_index_by_name_and_crc, - ({ - vec_add1 (keys, (u8 *) hp->key); - })); - /* *INDENT-ON* */ - for (i = 0; i < vec_len (keys); i++) - vec_free (keys[i]); - vec_free (keys); - } - - am->msg_index_by_name_and_crc = hash_create_string (0, sizeof (uword)); - - /* Recreate the vnet-side API message handler table */ - tblv = (u8 *) mp->message_table; - serialize_open_vector (sm, tblv); - unserialize_integer (sm, &nmsgs, sizeof (u32)); - - for (i = 0; i < nmsgs; i++) - { - msg_index = unserialize_likely_small_unsigned_integer (sm); - unserialize_cstring (sm, (char **) &name_and_crc); - hash_set_mem (am->msg_index_by_name_and_crc, name_and_crc, msg_index); - } -} - u32 vl_api_get_msg_index (u8 * name_and_crc) { @@ -620,171 +574,6 @@ vl_api_get_msg_index (u8 * name_and_crc) return ~0; } -int -vl_client_connect (char *name, int ctx_quota, int input_queue_size) -{ - svm_region_t *svm; - vl_api_memclnt_create_t *mp; - vl_api_memclnt_create_reply_t *rp; - unix_shared_memory_queue_t *vl_input_queue; - vl_shmem_hdr_t *shmem_hdr; - int rv = 0; - void *oldheap; - api_main_t *am = &api_main; - - if (am->my_registration) - { - clib_warning ("client %s already connected...", name); - return -1; - } - - if (am->vlib_rp == 0) - { - clib_warning ("am->vlib_rp NULL"); - return -1; - } - - svm = am->vlib_rp; - shmem_hdr = am->shmem_hdr; - - if (shmem_hdr == 0 || shmem_hdr->vl_input_queue == 0) - { - clib_warning ("shmem_hdr / input queue NULL"); - return -1; - } - - pthread_mutex_lock (&svm->mutex); - oldheap = svm_push_data_heap (svm); - vl_input_queue = - unix_shared_memory_queue_init (input_queue_size, sizeof (uword), - getpid (), 0); - pthread_mutex_unlock (&svm->mutex); - svm_pop_heap (oldheap); - - am->my_client_index = ~0; - am->my_registration = 0; - am->vl_input_queue = vl_input_queue; - - mp = vl_msg_api_alloc (sizeof (vl_api_memclnt_create_t)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE); - mp->ctx_quota = ctx_quota; - mp->input_queue = (uword) vl_input_queue; - strncpy ((char *) mp->name, name, sizeof (mp->name) - 1); - - vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); - - while (1) - { - int qstatus; - struct timespec ts, tsrem; - int i; - - /* Wait up to 10 seconds */ - for (i = 0; i < 1000; i++) - { - qstatus = unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, - 1 /* nowait */ ); - if (qstatus == 0) - goto read_one_msg; - ts.tv_sec = 0; - ts.tv_nsec = 10000 * 1000; /* 10 ms */ - while (nanosleep (&ts, &tsrem) < 0) - ts = tsrem; - } - /* Timeout... 
*/ - clib_warning ("memclnt_create_reply timeout"); - return -1; - - read_one_msg: - if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_CREATE_REPLY) - { - clib_warning ("unexpected reply: id %d", ntohs (rp->_vl_msg_id)); - continue; - } - rv = clib_net_to_host_u32 (rp->response); - - vl_msg_api_handler ((void *) rp); - break; - } - return (rv); -} - -static void -vl_api_memclnt_delete_reply_t_handler (vl_api_memclnt_delete_reply_t * mp) -{ - void *oldheap; - api_main_t *am = &api_main; - - pthread_mutex_lock (&am->vlib_rp->mutex); - oldheap = svm_push_data_heap (am->vlib_rp); - unix_shared_memory_queue_free (am->vl_input_queue); - pthread_mutex_unlock (&am->vlib_rp->mutex); - svm_pop_heap (oldheap); - - am->my_client_index = ~0; - am->my_registration = 0; - am->vl_input_queue = 0; -} - -void -vl_client_disconnect (void) -{ - vl_api_memclnt_delete_t *mp; - vl_api_memclnt_delete_reply_t *rp; - unix_shared_memory_queue_t *vl_input_queue; - vl_shmem_hdr_t *shmem_hdr; - time_t begin; - api_main_t *am = &api_main; - - ASSERT (am->vlib_rp); - shmem_hdr = am->shmem_hdr; - ASSERT (shmem_hdr && shmem_hdr->vl_input_queue); - - vl_input_queue = am->vl_input_queue; - - mp = vl_msg_api_alloc (sizeof (vl_api_memclnt_delete_t)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_DELETE); - mp->index = am->my_client_index; - mp->handle = (uword) am->my_registration; - - vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & mp); - - /* - * Have to be careful here, in case the client is disconnecting - * because e.g. the vlib process died, or is unresponsive. - */ - - begin = time (0); - while (1) - { - time_t now; - - now = time (0); - - if (now >= (begin + 2)) - { - clib_warning ("peer unresponsive, give up"); - am->my_client_index = ~0; - am->my_registration = 0; - am->shmem_hdr = 0; - break; - } - if (unix_shared_memory_queue_sub (vl_input_queue, (u8 *) & rp, 1) < 0) - continue; - - /* drain the queue */ - if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY) - { - vl_msg_api_handler ((void *) rp); - continue; - } - vl_msg_api_handler ((void *) rp); - break; - } -} - static inline vl_api_registration_t * vl_api_client_index_to_registration_internal (u32 handle) { @@ -834,36 +623,6 @@ vl_api_client_index_to_input_queue (u32 index) return (regp->vl_input_queue); } -#define foreach_api_client_msg \ -_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ -_(MEMCLNT_DELETE_REPLY, memclnt_delete_reply) - -int -vl_client_api_map (char *region_name) -{ - int rv; - - if ((rv = vl_map_shmem (region_name, 0 /* is_vlib */ )) < 0) - { - return rv; - } - -#define _(N,n) \ - vl_msg_api_set_handlers(VL_API_##N, 0 /* name */, \ - vl_api_##n##_t_handler, \ - 0/* cleanup */, 0/* endian */, 0/* print */, \ - sizeof(vl_api_##n##_t), 1); - foreach_api_client_msg; -#undef _ - return 0; -} - -void -vl_client_api_unmap (void) -{ - vl_unmap_shmem (); -} - /* * fd.io coding-style-patch-verification: ON * -- cgit 1.2.3-korg From e72be39cd0f498178fd62dfc0a0b0daa2b633f62 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Wed, 26 Apr 2017 13:59:20 -0700 Subject: A sprinkling of const in vlibmemory/api.h and friends Change-Id: I953ebb37eeec7de0c4a6b00258c3c67a83cbc020 Signed-off-by: Neale Ranns --- src/svm/svm.c | 6 +++--- src/svm/svm.h | 10 +++++----- src/svm/svmdb.h | 2 +- src/vlibapi/api.h | 11 ++++++----- src/vlibapi/api_shared.c | 4 ++-- src/vlibmemory/api.h | 20 ++++++++++---------- src/vlibmemory/memory_client.c | 16 +++++++++------- src/vlibmemory/memory_shared.c | 4 ++-- src/vlibmemory/memory_vlib.c | 4 ++-- 
src/vpp/api/api_main.c | 2 +- 10 files changed, 41 insertions(+), 38 deletions(-) (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/svm/svm.c b/src/svm/svm.c index e4ca98e1..97add5a7 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -796,7 +796,7 @@ svm_region_init (void) } void -svm_region_init_chroot (char *root_path) +svm_region_init_chroot (const char *root_path) { svm_map_region_args_t _a, *a = &_a; @@ -813,7 +813,7 @@ svm_region_init_chroot (char *root_path) } void -svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid) +svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid) { svm_map_region_args_t _a, *a = &_a; @@ -1151,7 +1151,7 @@ svm_client_scan_this_region_nolock (svm_region_t * rp) * Scan svm regions for dead clients */ void -svm_client_scan (char *root_path) +svm_client_scan (const char *root_path) { int i, j; svm_main_region_t *mp; diff --git a/src/svm/svm.h b/src/svm/svm.h index 0b87dbcb..06797fa1 100644 --- a/src/svm/svm.h +++ b/src/svm/svm.h @@ -69,8 +69,8 @@ typedef struct svm_region_ typedef struct svm_map_region_args_ { - char *root_path; /* NULL means use the truly global arena */ - char *name; + const char *root_path; /* NULL means use the truly global arena */ + const char *name; u64 baseva; u64 size; u64 pvt_heap_size; @@ -115,12 +115,12 @@ typedef struct void *svm_region_find_or_create (svm_map_region_args_t * a); void svm_region_init (void); -void svm_region_init_chroot (char *root_path); -void svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid); +void svm_region_init_chroot (const char *root_path); +void svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid); void svm_region_init_args (svm_map_region_args_t * a); void svm_region_exit (void); void svm_region_unmap (void *rp_arg); -void svm_client_scan (char *root_path); +void svm_client_scan (const char *root_path); void svm_client_scan_this_region_nolock (svm_region_t * rp); u8 *shm_name_from_svm_map_region_args (svm_map_region_args_t * a); diff --git a/src/svm/svmdb.h b/src/svm/svmdb.h index e02628a0..e35be8aa 100644 --- a/src/svm/svmdb.h +++ b/src/svm/svmdb.h @@ -83,7 +83,7 @@ typedef struct typedef struct { - char *root_path; + const char *root_path; uword size; u32 uid; u32 gid; diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index a62fa644..7538050e 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -127,7 +127,7 @@ typedef struct void (**msg_cleanup_handlers) (void *); void (**msg_endian_handlers) (void *); void (**msg_print_handlers) (void *, void *); - char **msg_names; + const char **msg_names; u8 *message_bounce; u8 *is_mp_safe; struct ring_alloc_ *arings; @@ -195,8 +195,8 @@ typedef struct /* client side message index hash table */ uword *msg_index_by_name_and_crc; - char *region_name; - char *root_path; + const char *region_name; + const char *root_path; /* Replay in progress? 
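   * (presumably: nonzero while a saved API trace is being replayed)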
*/ int replay_in_progress; @@ -276,8 +276,9 @@ void vl_msg_api_register_pd_handler (void *handler, int vl_msg_api_pd_handler (void *mp, int rv); void vl_msg_api_set_first_available_msg_id (u16 first_avail); -u16 vl_msg_api_get_msg_ids (char *name, int n); -void vl_msg_api_add_msg_name_crc (api_main_t * am, char *string, u32 id); +u16 vl_msg_api_get_msg_ids (const char *name, int n); +void vl_msg_api_add_msg_name_crc (api_main_t * am, const char *string, + u32 id); u32 vl_api_get_msg_index (u8 * name_and_crc); /* node_serialize.c prototypes */ diff --git a/src/vlibapi/api_shared.c b/src/vlibapi/api_shared.c index 6774e3dd..0817f38e 100644 --- a/src/vlibapi/api_shared.c +++ b/src/vlibapi/api_shared.c @@ -828,7 +828,7 @@ vl_msg_api_set_first_available_msg_id (u16 first_avail) } u16 -vl_msg_api_get_msg_ids (char *name, int n) +vl_msg_api_get_msg_ids (const char *name, int n) { api_main_t *am = &api_main; u8 *name_copy; @@ -872,7 +872,7 @@ vl_msg_api_get_msg_ids (char *name, int n) } void -vl_msg_api_add_msg_name_crc (api_main_t * am, char *string, u32 id) +vl_msg_api_add_msg_name_crc (api_main_t * am, const char *string, u32 id) { uword *p; diff --git a/src/vlibmemory/api.h b/src/vlibmemory/api.h index 8e44c20d..c195e181 100644 --- a/src/vlibmemory/api.h +++ b/src/vlibmemory/api.h @@ -123,20 +123,20 @@ void *vl_msg_api_alloc_or_null (int nbytes); void *vl_msg_api_alloc_as_if_client (int nbytes); void *vl_msg_api_alloc_as_if_client_or_null (int nbytes); void vl_msg_api_free (void *a); -int vl_map_shmem (char *region_name, int is_vlib); +int vl_map_shmem (const char *region_name, int is_vlib); void vl_register_mapped_shmem_region (svm_region_t * rp); void vl_unmap_shmem (void); void vl_msg_api_send_shmem (unix_shared_memory_queue_t * q, u8 * elem); void vl_msg_api_send_shmem_nolock (unix_shared_memory_queue_t * q, u8 * elem); void vl_msg_api_send (vl_api_registration_t * rp, u8 * elem); -int vl_client_connect (char *name, int ctx_quota, int input_queue_size); +int vl_client_connect (const char *name, int ctx_quota, int input_queue_size); void vl_client_disconnect (void); unix_shared_memory_queue_t *vl_api_client_index_to_input_queue (u32 index); vl_api_registration_t *vl_api_client_index_to_registration (u32 index); -int vl_client_api_map (char *region_name); +int vl_client_api_map (const char *region_name); void vl_client_api_unmap (void); -void vl_set_memory_region_name (char *name); -void vl_set_memory_root_path (char *root_path); +void vl_set_memory_region_name (const char *name); +void vl_set_memory_root_path (const char *root_path); void vl_set_memory_uid (int uid); void vl_set_memory_gid (int gid); void vl_set_global_memory_baseva (u64 baseva); @@ -146,12 +146,12 @@ void vl_set_global_pvt_heap_size (u64 size); void vl_set_api_pvt_heap_size (u64 size); void vl_enable_disable_memory_api (vlib_main_t * vm, int yesno); void vl_client_disconnect_from_vlib (void); -int vl_client_connect_to_vlib (char *svm_name, char *client_name, - int rx_queue_size); -int vl_client_connect_to_vlib_no_rx_pthread (char *svm_name, - char *client_name, +int vl_client_connect_to_vlib (const char *svm_name, + const char *client_name, int rx_queue_size); +int vl_client_connect_to_vlib_no_rx_pthread (const char *svm_name, + const char *client_name, int rx_queue_size); -u16 vl_client_get_first_plugin_msg_id (char *plugin_name); +u16 vl_client_get_first_plugin_msg_id (const char *plugin_name); void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length); diff --git a/src/vlibmemory/memory_client.c 
b/src/vlibmemory/memory_client.c index 25b06f65..d48a4fa1 100644 --- a/src/vlibmemory/memory_client.c +++ b/src/vlibmemory/memory_client.c @@ -155,7 +155,7 @@ noop_handler (void *notused) } int -vl_client_connect (char *name, int ctx_quota, int input_queue_size) +vl_client_connect (const char *name, int ctx_quota, int input_queue_size) { svm_region_t *svm; vl_api_memclnt_create_t *mp; @@ -326,7 +326,7 @@ _(MEMCLNT_DELETE_REPLY, memclnt_delete_reply) int -vl_client_api_map (char *region_name) +vl_client_api_map (const char *region_name) { int rv; @@ -354,7 +354,8 @@ vl_client_api_unmap (void) } static int -connect_to_vlib_internal (char *svm_name, char *client_name, +connect_to_vlib_internal (const char *svm_name, + const char *client_name, int rx_queue_size, int want_pthread) { int rv = 0; @@ -388,15 +389,16 @@ connect_to_vlib_internal (char *svm_name, char *client_name, } int -vl_client_connect_to_vlib (char *svm_name, char *client_name, - int rx_queue_size) +vl_client_connect_to_vlib (const char *svm_name, + const char *client_name, int rx_queue_size) { return connect_to_vlib_internal (svm_name, client_name, rx_queue_size, 1 /* want pthread */ ); } int -vl_client_connect_to_vlib_no_rx_pthread (char *svm_name, char *client_name, +vl_client_connect_to_vlib_no_rx_pthread (const char *svm_name, + const char *client_name, int rx_queue_size) { return connect_to_vlib_internal (svm_name, client_name, rx_queue_size, @@ -437,7 +439,7 @@ static void vl_api_get_first_msg_id_reply_t_handler } u16 -vl_client_get_first_plugin_msg_id (char *plugin_name) +vl_client_get_first_plugin_msg_id (const char *plugin_name) { vl_api_get_first_msg_id_t *mp; api_main_t *am = &api_main; diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index 6cea5df9..aea90330 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -257,7 +257,7 @@ vl_msg_api_free_nolock (void *a) } void -vl_set_memory_root_path (char *name) +vl_set_memory_root_path (const char *name) { api_main_t *am = &api_main; @@ -321,7 +321,7 @@ vl_set_api_pvt_heap_size (u64 size) } int -vl_map_shmem (char *region_name, int is_vlib) +vl_map_shmem (const char *region_name, int is_vlib) { svm_map_region_args_t _a, *a = &_a; svm_region_t *vlib_rp, *root_rp; diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index 43574dea..29a5c2c2 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -361,7 +361,7 @@ _(GET_FIRST_MSG_ID, get_first_msg_id) * vl_api_init */ static int -memory_api_init (char *region_name) +memory_api_init (const char *region_name) { int rv; vl_msg_api_msg_config_t cfg; @@ -1202,7 +1202,7 @@ vlibmemory_init (vlib_main_t * vm) VLIB_INIT_FUNCTION (vlibmemory_init); void -vl_set_memory_region_name (char *name) +vl_set_memory_region_name (const char *name) { api_main_t *am = &api_main; diff --git a/src/vpp/api/api_main.c b/src/vpp/api/api_main.c index d48e4eff..ac09cd15 100644 --- a/src/vpp/api/api_main.c +++ b/src/vpp/api/api_main.c @@ -211,7 +211,7 @@ api_cli_output (void *notused, const char *fmt, ...) 
} u16 -vl_client_get_first_plugin_msg_id (char *plugin_name) +vl_client_get_first_plugin_msg_id (const char *plugin_name) { api_main_t *am = &api_main; vl_api_msg_range_t *rp; -- cgit 1.2.3-korg From 10d8cc6bf92851fcaec4a6b4c6d3554dc1eb2386 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 30 May 2017 09:30:07 -0400 Subject: Improve fifo allocator performance - add option to preallocate fifos in a segment - track active fifos with doubly linked list instead of vector - update udp redirect test code to read fifo pointers from API call instead of digging them up from fifo segment header - input-node based active-open session generator Change-Id: I804b81e99d95f8690d17e12660c6645995e28a9a Signed-off-by: Dave Barach Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/svm/svm_fifo.h | 5 +- src/svm/svm_fifo_segment.c | 146 ++++++++++++-- src/svm/svm_fifo_segment.h | 30 ++- src/svm/test_svm_fifo1.c | 23 ++- src/uri/uri_tcp_test.c | 1 + src/uri/uri_udp_test.c | 39 ++-- src/vlibapi/api.h | 3 + src/vlibmemory/memory_shared.c | 22 ++- src/vlibmemory/memory_vlib.c | 26 +++ src/vnet/session/application.c | 18 +- src/vnet/session/application_interface.h | 2 + src/vnet/session/segment_manager.c | 79 ++++---- src/vnet/session/segment_manager.h | 3 + src/vnet/session/session.c | 38 +++- src/vnet/session/session.h | 8 +- src/vnet/session/session_api.c | 2 +- src/vnet/tcp/builtin_client.c | 330 +++++++++++++++++++------------ src/vnet/tcp/builtin_client.h | 13 +- src/vnet/tcp/builtin_http_server.c | 1 + src/vnet/tcp/builtin_server.c | 9 +- src/vnet/udp/builtin_server.c | 1 + 21 files changed, 534 insertions(+), 265 deletions(-) (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 69369163..9cb93ff4 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -38,7 +38,7 @@ format_function_t format_ooo_list; #define OOO_SEGMENT_INVALID_INDEX ((u32)~0) -typedef struct +typedef struct _svm_fifo { volatile u32 cursize; /**< current fifo size */ u32 nitems; @@ -62,7 +62,8 @@ typedef struct ooo_segment_t *ooo_segments; /**< Pool of ooo segments */ u32 ooos_list_head; /**< Head of out-of-order linked-list */ u32 ooos_newest; /**< Last segment to have been updated */ - + struct _svm_fifo *next; /**< next in freelist/active chain */ + struct _svm_fifo *prev; /**< prev in active chain */ CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 281fae27..eef2168c 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -17,6 +17,71 @@ svm_fifo_segment_main_t svm_fifo_segment_main; +static void +preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, + svm_fifo_segment_create_args_t * a) +{ + u32 rx_fifo_size, tx_fifo_size; + svm_fifo_t *f; + u8 *rx_fifo_space, *tx_fifo_space; + int i; + + /* Parameter check */ + if (a->rx_fifo_size == 0 || a->tx_fifo_size == 0 + || a->preallocated_fifo_pairs == 0) + return; + + /* Calculate space requirements */ + rx_fifo_size = (sizeof (*f) + a->rx_fifo_size) * a->preallocated_fifo_pairs; + tx_fifo_size = (sizeof (*f) + a->tx_fifo_size) * a->preallocated_fifo_pairs; + + /* Allocate rx fifo space. May fail. 
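+ * (sizing sketch: with, say, a->preallocated_fifo_pairs = 16 and
+ * a->rx_fifo_size = 4096, this asks for 16 * (sizeof (svm_fifo_t)
+ * + 4096) bytes, carved below into header+data slots pushed onto
+ * the RX freelist)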
*/ + rx_fifo_space = clib_mem_alloc_aligned_at_offset + (rx_fifo_size, CLIB_CACHE_LINE_BYTES, 0 /* align_offset */ , + 0 /* os_out_of_memory */ ); + + /* Same for TX */ + tx_fifo_space = clib_mem_alloc_aligned_at_offset + (tx_fifo_size, CLIB_CACHE_LINE_BYTES, 0 /* align_offset */ , + 0 /* os_out_of_memory */ ); + + /* Make sure it worked. Clean up if it didn't... */ + if (rx_fifo_space == 0 || tx_fifo_space == 0) + { + if (rx_fifo_space) + clib_mem_free (rx_fifo_space); + else + clib_warning ("rx fifo preallocation failure: size %d npairs %d", + a->rx_fifo_size, a->preallocated_fifo_pairs); + + if (tx_fifo_space) + clib_mem_free (tx_fifo_space); + else + clib_warning ("tx fifo preallocation failure: size %d nfifos %d", + a->tx_fifo_size, a->preallocated_fifo_pairs); + return; + } + + /* Carve rx fifo space */ + f = (svm_fifo_t *) rx_fifo_space; + for (i = 0; i < a->preallocated_fifo_pairs; i++) + { + f->next = fsh->free_fifos[FIFO_SEGMENT_RX_FREELIST]; + fsh->free_fifos[FIFO_SEGMENT_RX_FREELIST] = f; + rx_fifo_space += sizeof (*f) + a->rx_fifo_size; + f = (svm_fifo_t *) rx_fifo_space; + } + /* Carve tx fifo space */ + f = (svm_fifo_t *) tx_fifo_space; + for (i = 0; i < a->preallocated_fifo_pairs; i++) + { + f->next = fsh->free_fifos[FIFO_SEGMENT_TX_FREELIST]; + fsh->free_fifos[FIFO_SEGMENT_TX_FREELIST] = f; + tx_fifo_space += sizeof (*f) + a->tx_fifo_size; + f = (svm_fifo_t *) tx_fifo_space; + } +} + /** (master) create an svm fifo segment */ int svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) @@ -59,9 +124,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) s->h = fsh; fsh->segment_name = format (0, "%s%c", a->segment_name, 0); - /* Avoid vec_add1(...) failure when adding a fifo, etc. */ - vec_validate (fsh->fifos, 64); - _vec_len (fsh->fifos) = 0; + preallocate_fifo_pairs (fsh, a); ssvm_pop_heap (oldheap); @@ -103,6 +166,8 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) s->h = fsh; fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + preallocate_fifo_pairs (fsh, a); + sh->ready = 1; a->new_segment_index = s - sm->segments; return (0); @@ -154,7 +219,8 @@ svm_fifo_segment_delete (svm_fifo_segment_private_t * s) svm_fifo_t * svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, - u32 data_size_in_bytes) + u32 data_size_in_bytes, + svm_fifo_segment_freelist_t list_index) { ssvm_shared_header_t *sh; svm_fifo_segment_header_t *fsh; @@ -167,6 +233,29 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, ssvm_lock (sh, 1, 0); oldheap = ssvm_push_heap (sh); + switch (list_index) + { + case FIFO_SEGMENT_RX_FREELIST: + case FIFO_SEGMENT_TX_FREELIST: + f = fsh->free_fifos[list_index]; + if (f) + { + fsh->free_fifos[list_index] = f->next; + /* (re)initialize the fifo, as in svm_fifo_create */ + memset (f, 0, sizeof (*f)); + f->nitems = data_size_in_bytes; + f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; + goto found; + } + /* FALLTHROUGH */ + case FIFO_SEGMENT_FREELIST_NONE: + break; + + default: + clib_warning ("ignore bogus freelist %d", list_index); + break; + } + /* Note: this can fail, in which case: create another segment */ f = svm_fifo_create (data_size_in_bytes); if (PREDICT_FALSE (f == 0)) @@ -176,37 +265,62 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, return (0); } - vec_add1 (fsh->fifos, f); +found: + /* If rx_freelist add to active fifos list. When cleaning up segment, + * we need a list of active sessions that should be disconnected. 
Since + * both rx and tx fifos keep pointers to the session, it's enough to track + * only one. */ + if (list_index == FIFO_SEGMENT_RX_FREELIST) + { + if (fsh->fifos) + { + fsh->fifos->prev = f; + f->next = fsh->fifos; + } + fsh->fifos = f; + } + ssvm_pop_heap (oldheap); ssvm_unlock (sh); return (f); } void -svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f) +svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, + svm_fifo_segment_freelist_t list_index) { ssvm_shared_header_t *sh; svm_fifo_segment_header_t *fsh; void *oldheap; - int i; sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; ssvm_lock (sh, 1, 0); oldheap = ssvm_push_heap (sh); - for (i = 0; i < vec_len (fsh->fifos); i++) + + switch (list_index) { - if (fsh->fifos[i] == f) - { - vec_delete (fsh->fifos, 1, i); - goto found; - } + case FIFO_SEGMENT_RX_FREELIST: + /* Remove from active list */ + if (f->prev) + f->prev->next = f->next; + if (f->next) + f->next->prev = f->prev; + /* FALLTHROUGH */ + case FIFO_SEGMENT_TX_FREELIST: + /* Add to free list */ + f->next = fsh->free_fifos[list_index]; + fsh->free_fifos[list_index] = f; + /* FALLTHROUGH */ + case FIFO_SEGMENT_FREELIST_NONE: + break; + + default: + clib_warning ("ignore bogus freelist %d", list_index); + break; } - clib_warning ("fifo 0x%llx not found in fifo table...", f); -found: - clib_mem_free (f); ssvm_pop_heap (oldheap); ssvm_unlock (sh); } diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 4218013a..31e14db5 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -19,10 +19,19 @@ #include #include +typedef enum +{ + FIFO_SEGMENT_FREELIST_NONE = -1, + FIFO_SEGMENT_RX_FREELIST = 0, + FIFO_SEGMENT_TX_FREELIST, + FIFO_SEGMENT_N_FREELISTS +} svm_fifo_segment_freelist_t; + typedef struct { - volatile svm_fifo_t **fifos; - u8 *segment_name; + svm_fifo_t *fifos; /**< Linked list of active RX fifos */ + u8 *segment_name; /**< Segment name */ + svm_fifo_t *free_fifos[FIFO_SEGMENT_N_FREELISTS]; /**< Free lists */ } svm_fifo_segment_header_t; typedef struct @@ -49,6 +58,9 @@ typedef struct char *segment_name; u32 segment_size; u32 new_segment_index; + u32 rx_fifo_size; + u32 tx_fifo_size; + u32 preallocated_fifo_pairs; } svm_fifo_segment_create_args_t; static inline svm_fifo_segment_private_t * @@ -61,13 +73,13 @@ svm_fifo_get_segment (u32 segment_index) static inline u8 svm_fifo_segment_has_fifos (svm_fifo_segment_private_t * fifo_segment) { - return vec_len ((svm_fifo_t **) fifo_segment->h->fifos) != 0; + return fifo_segment->h->fifos != 0; } -static inline svm_fifo_t ** -svm_fifo_segment_get_fifos (svm_fifo_segment_private_t * fifo_segment) +static inline svm_fifo_t * +svm_fifo_segment_get_fifo_list (svm_fifo_segment_private_t * fifo_segment) { - return (svm_fifo_t **) fifo_segment->h->fifos; + return fifo_segment->h->fifos; } #define foreach_ssvm_fifo_segment_api_error \ @@ -87,9 +99,11 @@ int svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a); void svm_fifo_segment_delete (svm_fifo_segment_private_t * s); svm_fifo_t *svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, - u32 data_size_in_bytes); + u32 data_size_in_bytes, + svm_fifo_segment_freelist_t index); void svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, - svm_fifo_t * f); + svm_fifo_t * f, + svm_fifo_segment_freelist_t index); void svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds); u32 svm_fifo_segment_index (svm_fifo_segment_private_t * s); diff --git 
a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c index 398dd6d7..63b4a9b7 100644 --- a/src/svm/test_svm_fifo1.c +++ b/src/svm/test_svm_fifo1.c @@ -30,6 +30,9 @@ hello_world (int verbose) a->segment_name = "fifo-test1"; a->segment_size = 256 << 10; + a->rx_fifo_size = 4096; + a->tx_fifo_size = 4096; + a->preallocated_fifo_pairs = 4; rv = svm_fifo_segment_create (a); @@ -38,7 +41,7 @@ hello_world (int verbose) sp = svm_fifo_get_segment (a->new_segment_index); - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); @@ -63,7 +66,7 @@ hello_world (int verbose) else error = clib_error_return (0, "data test FAIL!"); - svm_fifo_segment_free_fifo (sp, f); + svm_fifo_segment_free_fifo (sp, f, FIFO_SEGMENT_RX_FREELIST); return error; } @@ -91,7 +94,7 @@ master (int verbose) sp = svm_fifo_get_segment (a->new_segment_index); - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); @@ -129,7 +132,7 @@ mempig (int verbose) for (i = 0; i < 1000; i++) { - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) break; vec_add1 (flist, f); @@ -139,14 +142,14 @@ mempig (int verbose) for (i = 0; i < vec_len (flist); i++) { f = flist[i]; - svm_fifo_segment_free_fifo (sp, f); + svm_fifo_segment_free_fifo (sp, f, FIFO_SEGMENT_RX_FREELIST); } _vec_len (flist) = 0; for (i = 0; i < 1000; i++) { - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) break; vec_add1 (flist, f); @@ -156,7 +159,7 @@ mempig (int verbose) for (i = 0; i < vec_len (flist); i++) { f = flist[i]; - svm_fifo_segment_free_fifo (sp, f); + svm_fifo_segment_free_fifo (sp, f, FIFO_SEGMENT_RX_FREELIST); } return 0; @@ -185,7 +188,7 @@ offset (int verbose) sp = svm_fifo_get_segment (a->new_segment_index); - f = svm_fifo_segment_alloc_fifo (sp, 200 << 10); + f = svm_fifo_segment_alloc_fifo (sp, 200 << 10, FIFO_SEGMENT_RX_FREELIST); if (f == 0) return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); @@ -226,9 +229,9 @@ slave (int verbose) { svm_fifo_segment_create_args_t _a, *a = &_a; svm_fifo_segment_private_t *sp; - svm_fifo_segment_header_t *fsh; svm_fifo_t *f; ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; int rv; u8 *test_data; u8 *retrieved_data = 0; @@ -248,7 +251,7 @@ slave (int verbose) fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; /* might wanna wait.. 
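  * (a more robust slave might poll, e.g.
  *    while (fsh->fifos == 0) sleep (1);
  * rather than assume the master has already created the fifo)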
*/ - f = (svm_fifo_t *) fsh->fifos[0]; + f = fsh->fifos; /* Lazy bastards united */ test_data = format (0, "Hello world%c", 0); diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index 22f246e5..e201a359 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -193,6 +193,7 @@ application_send_attach (uri_tcp_test_main_t * utm) bmp->context = ntohl (0xfeedface); bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 8fb12ed2..45ad35a4 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -176,6 +176,7 @@ application_send_attach (uri_udp_test_main_t * utm) bmp->context = ntohl (0xfeedface); bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; @@ -522,7 +523,7 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) svm_fifo_segment_private_t *seg; unix_shared_memory_queue_t *client_q; vl_api_connect_uri_reply_t *rmp; - session_t *session; + session_t *session = 0; int rv = 0; /* Create the segment */ @@ -545,17 +546,12 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) pool_get (utm->sessions, session); - /* - * By construction the master's idea of the rx fifo ends up in - * fsh->fifos[0], and the master's idea of the tx fifo ends up in - * fsh->fifos[1]. 
- */ - session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); + session->server_rx_fifo = svm_fifo_segment_alloc_fifo + (utm->seg, 128 * 1024, FIFO_SEGMENT_RX_FREELIST); ASSERT (session->server_rx_fifo); - session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); + session->server_tx_fifo = svm_fifo_segment_alloc_fifo + (utm->seg, 128 * 1024, FIFO_SEGMENT_TX_FREELIST); ASSERT (session->server_tx_fifo); session->server_rx_fifo->master_session_index = session - utm->sessions; @@ -578,6 +574,12 @@ send_reply: rmp->context = mp->context; rmp->retval = ntohl (rv); rmp->segment_name_length = vec_len (a->segment_name); + if (session) + { + rmp->server_rx_fifo = pointer_to_uword (session->server_rx_fifo); + rmp->server_tx_fifo = pointer_to_uword (session->server_tx_fifo); + } + memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); vec_free (a->segment_name); @@ -689,9 +691,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) svm_fifo_segment_create_args_t _a, *a = &_a; u32 segment_index; session_t *session; - ssvm_shared_header_t *sh; svm_fifo_segment_private_t *seg; - svm_fifo_segment_header_t *fsh; int rv; memset (a, 0, sizeof (*a)); @@ -707,22 +707,19 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - segment_index = vec_len (sm->segments) - 1; + segment_index = a->new_segment_index; vec_add2 (utm->seg, seg, 1); - memcpy (seg, sm->segments + segment_index, sizeof (*seg)); - sh = seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - - while (vec_len (fsh->fifos) < 2) - sleep (1); + sleep (1); pool_get (utm->sessions, session); utm->cut_through_session_index = session - utm->sessions; - session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; + session->server_rx_fifo = uword_to_pointer (mp->server_rx_fifo, + svm_fifo_t *); ASSERT (session->server_rx_fifo); - session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; + session->server_tx_fifo = uword_to_pointer (mp->server_tx_fifo, + svm_fifo_t *); ASSERT (session->server_tx_fifo); } diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index 3403e1c6..0e2c2101 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -193,6 +193,9 @@ typedef struct i32 vlib_signal; + /* vlib input queue length */ + u32 vlib_input_queue_length; + /* client side message index hash table */ uword *msg_index_by_name_and_crc; diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index aea90330..41aa1231 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -104,8 +104,17 @@ vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null) if (now - rv->gc_mark_timestamp > 10) { if (CLIB_DEBUG > 0) - clib_warning ("garbage collect pool %d ring %d index %d", - pool, i, q->head); + { + u16 *msg_idp, msg_id; + clib_warning + ("garbage collect pool %d ring %d index %d", pool, i, + q->head); + msg_idp = (u16 *) (rv->data); + msg_id = clib_net_to_host_u16 (*msg_idp); + if (msg_id < vec_len (api_main.msg_names)) + clib_warning ("msg id %d name %s", (u32) msg_id, + api_main.msg_names[msg_id]); + } shmem_hdr->garbage_collects++; goto collected; } @@ -330,6 +339,7 @@ vl_map_shmem (const char *region_name, int is_vlib) api_main_t *am = &api_main; int i; struct timespec ts, tsrem; + u32 vlib_input_queue_length; if (is_vlib == 0) svm_region_init_chroot (am->root_path); @@ -449,9 +459,13 @@ vl_map_shmem (const char *region_name, int is_vlib) shmem_hdr->version = VL_SHM_VERSION; /* vlib main input 
queue */ + vlib_input_queue_length = 1024; + if (am->vlib_input_queue_length) + vlib_input_queue_length = am->vlib_input_queue_length; + shmem_hdr->vl_input_queue = - unix_shared_memory_queue_init (1024, sizeof (uword), getpid (), - am->vlib_signal); + unix_shared_memory_queue_init (vlib_input_queue_length, sizeof (uword), + getpid (), am->vlib_signal); /* Set up the msg ring allocator */ #define _(sz,n) \ diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index e5d88732..004a9974 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -1917,6 +1917,32 @@ api_config_fn (vlib_main_t * vm, unformat_input_t * input) VLIB_CONFIG_FUNCTION (api_config_fn, "api-trace"); +static clib_error_t * +api_queue_config_fn (vlib_main_t * vm, unformat_input_t * input) +{ + api_main_t *am = &api_main; + u32 nitems; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "length %d", &nitems) || + (unformat (input, "len %d", &nitems))) + { + if (nitems >= 1024) + am->vlib_input_queue_length = nitems; + else + clib_warning ("vlib input queue length %d too small, ignored", + nitems); + } + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_CONFIG_FUNCTION (api_queue_config_fn, "api-queue"); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index ccf9837f..c679b1f5 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -160,6 +160,7 @@ application_init (application_t * app, u32 api_client_index, u64 * options, props->rx_fifo_size = options[SESSION_OPTIONS_RX_FIFO_SIZE]; props->tx_fifo_size = options[SESSION_OPTIONS_TX_FIFO_SIZE]; props->add_segment = props->add_segment_size != 0; + props->preallocated_fifo_pairs = options[APP_OPTIONS_PREALLOC_FIFO_PAIRS]; props->use_private_segment = options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_BUILTIN_APP; @@ -395,7 +396,7 @@ application_format_connects (application_t * app, int verbose) vlib_main_t *vm = vlib_get_main (); segment_manager_t *sm; u8 *app_name, *s = 0; - int i, j; + int j; /* Header */ if (app == 0) @@ -419,22 +420,16 @@ application_format_connects (application_t * app, int verbose) for (j = 0; j < vec_len (sm->segment_indices); j++) { svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; + svm_fifo_t *fifo; u8 *str; fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); - fifos = svm_fifo_segment_get_fifos (fifo_segment); - for (i = 0; i < vec_len (fifos); i++) + fifo = svm_fifo_segment_get_fifo_list (fifo_segment); + while (fifo) { - svm_fifo_t *fifo; u32 session_index, thread_index; stream_session_t *session; - /* There are 2 fifos/session. Avoid printing twice. 
*/ - if (i % 2) - continue; - - fifo = fifos[i]; session_index = fifo->master_session_index; thread_index = fifo->master_thread_index; @@ -448,9 +443,10 @@ application_format_connects (application_t * app, int verbose) s = format (s, "%-40s%-20s", str, app_name); vlib_cli_output (vm, "%v", s); - vec_reset_length (s); vec_free (str); + + fifo = fifo->next; } vec_free (s); } diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 7d924c14..4d6f9def 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -119,10 +119,12 @@ typedef enum { APP_EVT_QUEUE_SIZE, APP_OPTIONS_FLAGS, + APP_OPTIONS_PREALLOC_FIFO_PAIRS, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, SESSION_OPTIONS_RX_FIFO_SIZE, SESSION_OPTIONS_TX_FIFO_SIZE, + SESSION_OPTIONS_PREALLOCATED_FIFO_PAIRS, SESSION_OPTIONS_ACCEPT_COOKIE, SESSION_OPTIONS_N_OPTIONS } app_attach_options_index_t; diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index b13df21c..caf8eaa3 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -58,6 +58,9 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, ca->segment_name = (char *) segment_name; ca->segment_size = segment_size; + ca->rx_fifo_size = sm->properties->rx_fifo_size; + ca->tx_fifo_size = sm->properties->tx_fifo_size; + ca->preallocated_fifo_pairs = sm->properties->preallocated_fifo_pairs; rv = svm_fifo_segment_create (ca); if (rv) @@ -104,7 +107,8 @@ session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size) } static void -segment_manager_alloc_process_private_segment () + segment_manager_alloc_process_private_segment + (segment_manager_properties_t * props) { svm_fifo_segment_create_args_t _a, *a = &_a; @@ -115,6 +119,9 @@ segment_manager_alloc_process_private_segment () a->segment_name = "process-private-segment"; a->segment_size = ~0; a->new_segment_index = ~0; + a->rx_fifo_size = props->rx_fifo_size; + a->tx_fifo_size = props->tx_fifo_size; + a->preallocated_fifo_pairs = props->preallocated_fifo_pairs; if (svm_fifo_segment_create_process_private (a)) clib_warning ("Failed to create process private segment"); @@ -151,7 +158,7 @@ segment_manager_init (segment_manager_t * sm, else { if (private_segment_index == ~0) - segment_manager_alloc_process_private_segment (); + segment_manager_alloc_process_private_segment (properties); ASSERT (private_segment_index != ~0); vec_add1 (sm->segment_indices, private_segment_index); } @@ -170,74 +177,46 @@ segment_manager_init (segment_manager_t * sm, void segment_manager_del (segment_manager_t * sm) { - u32 *deleted_sessions = 0; - u32 *deleted_thread_indices = 0; - int i, j; + int j; /* Across all fifo segments used by the server */ for (j = 0; j < vec_len (sm->segment_indices); j++) { svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; + svm_fifo_t *fifo; + /* Vector of fifos allocated in the segment */ fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); - fifos = svm_fifo_segment_get_fifos (fifo_segment); + fifo = svm_fifo_segment_get_fifo_list (fifo_segment); /* * Remove any residual sessions from the session lookup table * Don't bother deleting the individual fifos, we're going to * throw away the fifo segment in a minute. 
*/ - for (i = 0; i < vec_len (fifos); i++) + while (fifo) { - svm_fifo_t *fifo; u32 session_index, thread_index; stream_session_t *session; - fifo = fifos[i]; session_index = fifo->master_session_index; thread_index = fifo->master_thread_index; session = stream_session_get (session_index, thread_index); - /* Add to the deleted_sessions vector (once!) */ - if (!session->is_deleted) - { - session->is_deleted = 1; - vec_add1 (deleted_sessions, session_index); - vec_add1 (deleted_thread_indices, thread_index); - } - } - - for (i = 0; i < vec_len (deleted_sessions); i++) - { - stream_session_t *session; - session = stream_session_get (deleted_sessions[i], - deleted_thread_indices[i]); - /* Instead of directly removing the session call disconnect */ session_send_session_evt_to_thread (stream_session_handle (session), FIFO_EVENT_DISCONNECT, - deleted_thread_indices[i]); - - /* - stream_session_table_del (smm, session); - pool_put(smm->sessions[deleted_thread_indices[i]], session); - */ + thread_index); + fifo = fifo->next; } - vec_reset_length (deleted_sessions); - vec_reset_length (deleted_thread_indices); - - /* Instead of removing the segment, test when removing the session if - * the segment can be removed + /* Instead of removing the segment, test when cleaning up disconnected + * sessions if the segment can be removed. */ - /* svm_fifo_segment_delete (fifo_segment); */ } clib_spinlock_free (&sm->lockp); - vec_free (deleted_sessions); - vec_free (deleted_thread_indices); pool_put (segment_managers, sm); } @@ -281,20 +260,27 @@ again: *fifo_segment_index = sm->segment_indices[i]; fifo_segment = svm_fifo_get_segment (*fifo_segment_index); + /* FC: cleanup, make sure sm->properties->xxx_fifo_size always set */ fifo_size = sm->properties->rx_fifo_size; fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + *server_rx_fifo = + svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size, + FIFO_SEGMENT_RX_FREELIST); + /* FC: cleanup, make sure sm->properties->xxx_fifo_size always set */ fifo_size = sm->properties->tx_fifo_size; fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + *server_tx_fifo = + svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size, + FIFO_SEGMENT_TX_FREELIST); if (*server_rx_fifo == 0) { /* This would be very odd, but handle it... 
*/ if (*server_tx_fifo != 0) { - svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo, + FIFO_SEGMENT_TX_FREELIST); *server_tx_fifo = 0; } continue; @@ -303,7 +289,8 @@ again: { if (*server_rx_fifo != 0) { - svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo, + FIFO_SEGMENT_RX_FREELIST); *server_rx_fifo = 0; } continue; @@ -365,8 +352,10 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, return; fifo_segment = svm_fifo_get_segment (svm_segment_index); - svm_fifo_segment_free_fifo (fifo_segment, rx_fifo); - svm_fifo_segment_free_fifo (fifo_segment, tx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, rx_fifo, + FIFO_SEGMENT_RX_FREELIST); + svm_fifo_segment_free_fifo (fifo_segment, tx_fifo, + FIFO_SEGMENT_TX_FREELIST); /* Remove segment only if it holds no fifos and not the first */ if (sm->segment_indices[0] != svm_segment_index diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h index 2710bb54..d4b73208 100644 --- a/src/vnet/session/segment_manager.h +++ b/src/vnet/session/segment_manager.h @@ -28,6 +28,9 @@ typedef struct _segment_manager_properties u32 rx_fifo_size; u32 tx_fifo_size; + /** Preallocated pool sizes */ + u32 preallocated_fifo_pairs; + /** Configured additional segment size */ u32 add_segment_size; diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index c5aaf2e2..02b0cced 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -1048,19 +1048,21 @@ session_vpp_event_queue_allocate (session_manager_main_t * smm, { api_main_t *am = &api_main; void *oldheap; + u32 event_queue_length = 2048; if (smm->vpp_event_queues[thread_index] == 0) { /* Allocate event fifo in the /vpe-api shared-memory segment */ oldheap = svm_push_data_heap (am->vlib_rp); + if (smm->configured_event_queue_length) + event_queue_length = smm->configured_event_queue_length; + smm->vpp_event_queues[thread_index] = - unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , - sizeof (session_fifo_event_t), - 0 /* consumer pid */ , - 0 - /* (do not) send signal when queue non-empty */ - ); + unix_shared_memory_queue_init + (event_queue_length, + sizeof (session_fifo_event_t), 0 /* consumer pid */ , + 0 /* (do not) send signal when queue non-empty */ ); svm_pop_heap (oldheap); } @@ -1187,6 +1189,30 @@ session_manager_main_init (vlib_main_t * vm) } VLIB_INIT_FUNCTION (session_manager_main_init) + static clib_error_t *session_config_fn (vlib_main_t * vm, + unformat_input_t * input) +{ + session_manager_main_t *smm = &session_manager_main; + u32 nitems; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "event-queue-length %d", &nitems)) + { + if (nitems >= 2048) + smm->configured_event_queue_length = nitems; + else + clib_warning ("event queue length %d too small, ignored", nitems); + } + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_CONFIG_FUNCTION (session_config_fn, "session"); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index d60cca29..a8728649 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -125,14 +125,11 @@ typedef struct _stream_session_t u8 thread_index; - /** used during unbind processing */ - u8 is_deleted; - /** To avoid n**2 "one event per frame" check 
*/ u8 enqueue_epoch; /** Pad to a multiple of 8 octets */ - u8 align_pad[2]; + u8 align_pad[4]; /** svm segment index where fifos were allocated */ u32 svm_segment_index; @@ -205,6 +202,9 @@ struct _session_manager_main /** vpp fifo event queue */ unix_shared_memory_queue_t **vpp_event_queues; + /** vpp fifo event queue configured length */ + u32 configured_event_queue_length; + /** Unique segment name counter */ u32 unique_segment_name_counter; diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 8c073a08..98d6946a 100755 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -419,7 +419,7 @@ done: REPLY_MACRO (VL_API_UNBIND_URI_REPLY); } -static void +void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { vl_api_connect_uri_reply_t *rmp; diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index aaefa7eb..768f0c3c 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -44,8 +44,6 @@ #undef vl_printfun #define TCP_BUILTIN_CLIENT_DBG (1) -#define TCP_BUILTIN_CLIENT_VPP_THREAD (0) -#define TCP_BUILTIN_CLIENT_PTHREAD (!TCP_BUILTIN_CLIENT_VPP_THREAD) static void send_test_chunk (tclient_main_t * tm, session_t * s) @@ -156,131 +154,76 @@ receive_test_chunk (tclient_main_t * tm, session_t * s) } } -#if TCP_BUILTIN_CLIENT_VPP_THREAD -#define THREAD_PROTOTYPE static void -#else -#define THREAD_PROTOTYPE static void * -#endif - -THREAD_PROTOTYPE -tclient_thread_fn (void *arg) +static uword +builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) { tclient_main_t *tm = &tclient_main; + int my_thread_index = vlib_get_thread_index (); vl_api_disconnect_session_t *dmp; session_t *sp; - struct timespec ts, tsrem; int i; - int try_tx, try_rx; - u32 *session_indices = 0; - clib_time_t ttime; - f64 before, after; - u64 rx_total; + int delete_session; + u32 *connection_indices; - clib_time_init (&ttime); + connection_indices = tm->connection_index_by_thread[my_thread_index]; - /* stats thread wants no signals. 
*/ - { - sigset_t s; - sigfillset (&s); - pthread_sigmask (SIG_SETMASK, &s, 0); - } + if (tm->run_test == 0 || vec_len (connection_indices) == 0) + return 0; - clib_per_cpu_mheaps[vlib_get_thread_index ()] = clib_per_cpu_mheaps[0]; + for (i = 0; i < vec_len (connection_indices); i++) + { + delete_session = 1; - vec_validate (session_indices, 0); - vec_reset_length (session_indices); + sp = pool_elt_at_index (tm->sessions, connection_indices[i]); - while (1) - { - /* Wait until we're told to get busy */ - while (tm->run_test == 0 - || (tm->ready_connections != tm->expected_connections)) + if (sp->bytes_to_send > 0) { - ts.tv_sec = 0; - ts.tv_nsec = 100000000; - while (nanosleep (&ts, &tsrem) < 0) - ts = tsrem; + send_test_chunk (tm, sp); + delete_session = 0; } - tm->run_test = 0; - rx_total = 0; - - clib_warning ("Start test..."); - - before = clib_time_now (&ttime); - - do + if (sp->bytes_to_receive > 0) { - do - { - try_tx = try_rx = 0; - - /* *INDENT-OFF* */ - pool_foreach (sp, tm->sessions, - ({ - if (sp->bytes_to_send > 0) - { - send_test_chunk (tm, sp); - try_tx = 1; - } - })); - pool_foreach (sp, tm->sessions, - ({ - if (sp->bytes_to_receive > 0) - { - receive_test_chunk (tm, sp); - try_rx = 1; - } - else - { - /* Session is complete */ - vec_add1 (session_indices, sp - tm->sessions); - } - })); - /* Terminate any completed sessions */ - if (PREDICT_FALSE (_vec_len(session_indices) != 0)) - { - for (i = 0; i < _vec_len (session_indices); i++) - { - sp = pool_elt_at_index (tm->sessions, session_indices[i]); - rx_total += sp->bytes_received; - dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); - memset (dmp, 0, sizeof (*dmp)); - dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); - dmp->client_index = tm->my_client_index; - dmp->handle = sp->vpp_session_handle; - vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); - pool_put (tm->sessions, sp); - } - _vec_len(session_indices) = 0; - } - /* *INDENT-ON* */ - } - while (try_tx || try_rx); + receive_test_chunk (tm, sp); + delete_session = 0; } - while (0); - after = clib_time_now (&ttime); - - clib_warning ("Test complete %lld bytes in %.2f secs", - rx_total, (after - before)); - if ((after - before) != 0.0) + if (PREDICT_FALSE (delete_session == 1)) { - clib_warning ("%.2f bytes/second full-duplex", - ((f64) rx_total) / (after - before)); - clib_warning ("%.4f gbit/second full-duplex", - (((f64) rx_total * 8.0) / (after - before)) / 1e9); + __sync_fetch_and_add (&tm->rx_total, sp->bytes_received); + dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); + memset (dmp, 0, sizeof (*dmp)); + dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); + dmp->client_index = tm->my_client_index; + dmp->handle = sp->vpp_session_handle; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); + vec_delete (connection_indices, 1, i); + tm->connection_index_by_thread[my_thread_index] = + connection_indices; + __sync_fetch_and_add (&tm->ready_connections, -1); + + /* Kick the debug CLI process */ + if (tm->ready_connections == 0) + { + tm->test_end_time = vlib_time_now (vm); + vlib_process_signal_event (vm, tm->cli_node_index, + 2, 0 /* data */ ); + } } - - if (pool_elts (tm->sessions)) - clib_warning ("BUG: %d active sessions remain...", - pool_elts (tm->sessions)); } - while (0); - /* NOTREACHED */ -#if TCP_BUILTIN_CLIENT_PTHREAD return 0; -#endif } +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (builtin_client_node) = +{ + .function = builtin_client_node_fn, + .name = "builtin-tcp-client", + .type = VLIB_NODE_TYPE_INPUT, + .state = 
VLIB_NODE_STATE_DISABLED, +}; +/* *INDENT-ON* */ + + /* So we don't get "no handler for... " msgs */ static void vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) @@ -299,6 +242,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session_t *session; u32 session_index; i32 retval = /* clib_net_to_host_u32 ( */ mp->retval /*) */ ; + int i; if (retval < 0) { @@ -332,7 +276,29 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) /* Add it to the session lookup table */ hash_set (tm->session_index_by_vpp_handles, mp->handle, session_index); - tm->ready_connections++; + if (tm->ready_connections == tm->expected_connections - 1) + { + vlib_thread_main_t *thread_main = vlib_get_thread_main (); + int thread_index; + + thread_index = 0; + for (i = 0; i < pool_elts (tm->sessions); i++) + { + vec_add1 (tm->connection_index_by_thread[thread_index], i); + thread_index++; + if (thread_index == thread_main->n_vlib_mains) + thread_index = 0; + } + } + __sync_fetch_and_add (&tm->ready_connections, 1); + if (tm->ready_connections == tm->expected_connections) + { + tm->run_test = 1; + tm->test_start_time = vlib_time_now (tm->vlib_main); + /* Signal the CLI process that the action is starting... */ + vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, + 1, 0 /* data */ ); + } } static int @@ -414,6 +380,7 @@ static int tcp_test_clients_init (vlib_main_t * vm) { tclient_main_t *tm = &tclient_main; + vlib_thread_main_t *thread_main = vlib_get_thread_main (); int i; tclient_api_hookup (vm); @@ -429,6 +396,46 @@ tcp_test_clients_init (vlib_main_t * vm) vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); tm->is_init = 1; + tm->vlib_main = vm; + + vec_validate (tm->connection_index_by_thread, thread_main->n_vlib_mains); + return 0; +} + +static int +builtin_session_connected_callback (u32 app_index, u32 api_context, + stream_session_t * s, u8 is_fail) +{ + vl_api_connect_uri_reply_t _m, *mp = &_m; + unix_shared_memory_queue_t *q; + application_t *app; + unix_shared_memory_queue_t *vpp_queue; + + app = application_get (app_index); + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return -1; + + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); + mp->context = api_context; + if (!is_fail) + { + vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); + mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo); + mp->server_tx_fifo = pointer_to_uword (s->server_tx_fifo); + mp->handle = stream_session_handle (s); + mp->vpp_event_queue_address = pointer_to_uword (vpp_queue); + mp->retval = 0; + s->session_state = SESSION_STATE_READY; + } + else + { + mp->retval = clib_host_to_net_u32 (VNET_API_ERROR_SESSION_CONNECT_FAIL); + } + + vl_api_connect_uri_reply_t_handler (mp); return 0; } @@ -461,7 +468,7 @@ builtin_server_rx_callback (stream_session_t * s) static session_cb_vft_t builtin_clients = { .session_reset_callback = builtin_session_reset_callback, - .session_connected_callback = send_session_connected_callback, + .session_connected_callback = builtin_session_connected_callback, .session_accept_callback = builtin_session_create_callback, .session_disconnect_callback = builtin_session_disconnect_callback, .builtin_server_rx_callback = builtin_server_rx_callback @@ -502,11 +509,16 @@ test_tcp_clients_command_fn (vlib_main_t * vm, vlib_cli_command_t * cmd) { tclient_main_t *tm = &tclient_main; + vlib_thread_main_t *thread_main = vlib_get_thread_main 
(); + uword *event_data = 0; + uword event_type; u8 *connect_uri = (u8 *) "tcp://6.0.1.1/1234"; u8 *uri; u32 n_clients = 1; int i; u64 tmp; + f64 cli_timeout = 20.0; + f64 delta; tm->bytes_to_send = 8192; vec_free (tm->connect_uri); @@ -523,6 +535,8 @@ test_tcp_clients_command_fn (vlib_main_t * vm, ; else if (unformat (input, "uri %s", &tm->connect_uri)) ; + else if (unformat (input, "cli-timeout %f", &cli_timeout)) + ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); @@ -536,6 +550,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->ready_connections = 0; tm->expected_connections = n_clients; + tm->rx_total = 0; uri = connect_uri; if (tm->connect_uri) @@ -556,40 +571,99 @@ test_tcp_clients_command_fn (vlib_main_t * vm, } #endif vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); - attach_builtin_test_clients (); + if (tm->test_client_attached == 0) + attach_builtin_test_clients (); + tm->test_client_attached = 1; + + /* Turn on the builtin client input nodes */ + for (i = 0; i < thread_main->n_vlib_mains; i++) + vlib_node_set_state (vlib_mains[i], builtin_client_node.index, + VLIB_NODE_STATE_POLLING); - /* Fire off connect requests, in something approaching a normal manner */ + tm->cli_node_index = vlib_get_current_process (vm)->node_runtime.node_index; + + /* Fire off connect requests */ for (i = 0; i < n_clients; i++) { - vl_api_connect_uri_t *cmp; - cmp = vl_msg_api_alloc_as_if_client (sizeof (*cmp)); + vl_api_connect_uri_t _cmp, *cmp = &_cmp; + void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * cmp); + memset (cmp, 0, sizeof (*cmp)); cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); cmp->client_index = tm->my_client_index; cmp->context = ntohl (0xfeedface); memcpy (cmp->uri, uri, strlen ((char *) uri) + 1); - vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & cmp); + + vl_api_connect_uri_t_handler (cmp); + /* Crude pacing for call setups, 100k/sec */ + vlib_process_suspend (vm, 10e-6); + } + + /* Park until the sessions come up, or ten seconds elapse... */ + vlib_process_wait_for_event_or_clock (vm, 10.0 /* timeout, seconds */ ); + event_type = vlib_process_get_events (vm, &event_data); + + switch (event_type) + { + case ~0: + vlib_cli_output (vm, "Timeout with only %d sessions active...", + tm->ready_connections); + goto cleanup; + + case 1: + vlib_cli_output (vm, "Test started at %.6f", tm->test_start_time); + break; + + default: + vlib_cli_output (vm, "unexpected event(1): %d", event_type); + goto cleanup; } - tm->run_test = 1; + /* Now wait for the sessions to finish... 
*/ + vlib_process_wait_for_event_or_clock (vm, cli_timeout); + event_type = vlib_process_get_events (vm, &event_data); + + switch (event_type) + { + case ~0: + vlib_cli_output (vm, "Timeout with %d sessions still active...", + tm->ready_connections); + goto cleanup; + + case 2: + vlib_cli_output (vm, "Test finished at %.6f", tm->test_end_time); + break; + + default: + vlib_cli_output (vm, "unexpected event(2): %d", event_type); + goto cleanup; + } + + delta = tm->test_end_time - tm->test_start_time; + + if (delta != 0.0) + { + vlib_cli_output (vm, + "%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds", + tm->rx_total, tm->rx_total / (1ULL << 20), + tm->rx_total / (1ULL << 30), delta); + vlib_cli_output (vm, "%.2f bytes/second full-duplex", + ((f64) tm->rx_total) / (delta)); + vlib_cli_output (vm, "%.4f gbit/second full-duplex", + (((f64) tm->rx_total * 8.0) / delta / 1e9)); + } + else + vlib_cli_output (vm, "zero delta-t?"); + +cleanup: + pool_free (tm->sessions); + for (i = 0; i < vec_len (tm->connection_index_by_thread); i++) + vec_reset_length (tm->connection_index_by_thread[i]); return 0; } -/* *INDENT-OFF* */ -#if TCP_BUILTIN_CLIENT_VPP_THREAD -VLIB_REGISTER_THREAD (builtin_client_reg, static) = -{ - .name = "tcp-builtin-client", - .function = tclient_thread_fn, - .fixed_count = 1, - .count = 1, - .no_data_structure_clone = 1, -}; -#endif -/* *INDENT-ON* */ - /* *INDENT-OFF* */ VLIB_CLI_COMMAND (test_clients_command, static) = { diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 57d112e6..d5d79e53 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -83,14 +83,18 @@ typedef struct pid_t my_pid; - /* For deadman timers */ - clib_time_t clib_time; + f64 test_start_time; + f64 test_end_time; - /* Connection counts */ u32 expected_connections; + u32 **connection_index_by_thread; volatile u32 ready_connections; + volatile u32 finished_connections; - /* Signal variables */ + volatile u64 rx_total; + u32 cli_node_index; + + /* Signal variable */ volatile int run_test; /* Bytes to send */ @@ -107,6 +111,7 @@ typedef struct u8 test_return_packets; u8 is_init; + u8 test_client_attached; u32 node_index; diff --git a/src/vnet/tcp/builtin_http_server.c b/src/vnet/tcp/builtin_http_server.c index 763a46e9..8b4801cd 100644 --- a/src/vnet/tcp/builtin_http_server.c +++ b/src/vnet/tcp/builtin_http_server.c @@ -513,6 +513,7 @@ server_attach () a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 8 << 10; a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 32 << 10; a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 64fc4a71..4f0e211c 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -62,7 +62,6 @@ int builtin_session_accept_callback (stream_session_t * s) { builtin_server_main_t *bsm = &builtin_server_main; - clib_warning ("called..."); bsm->vpp_queue[s->thread_index] = session_manager_get_vpp_event_queue (s->thread_index); @@ -76,7 +75,6 @@ builtin_session_disconnect_callback (stream_session_t * s) { builtin_server_main_t *bsm = &builtin_server_main; vnet_disconnect_args_t _a, *a = &_a; - clib_warning ("called..."); a->handle = stream_session_handle (s); a->app_index = bsm->app_index; @@ -280,10 +278,11 @@ server_attach () a->api_client_index = bsm->my_client_index; a->session_cb_vft = 
&builtin_session_cb_vft; a->options = options; - a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; - a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 1 << 16; - a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 1 << 16; + a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 512 << 20; + a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10; + a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10; a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 8192; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index 18684d54..7dd03670 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -111,6 +111,7 @@ attach_builtin_uri_server () options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 1024; a->options = options; -- cgit 1.2.3-korg From 5beec81360146536086f1996869b4ee32ca37ddc Mon Sep 17 00:00:00 2001 From: Jan Srnicek Date: Fri, 24 Mar 2017 10:18:11 +0100 Subject: jvpp: make shm_prefix configurable (VPP-591) svm.c - set the default map region root path only if a root path is not already present. memory_shared.c - added an option for tests to send the memory region name and the root path in one variable; when they arrive that way, the name and root path are separated here and set in the map region structure so the find function can locate the region properly. jvpp-registry.c - added a shmPrefix parameter so a specific shared memory prefix can be passed when starting python tests (see framework.py), and removed the run-as-root restriction. JVppRegistryImpl - added an option to specify shmPrefix. VppJNIConnection - added an option to specify shmPrefix. Change-Id: I3f89f867fb9b20eef00fbd497cb0e41b25d6eab7 Signed-off-by: Jan Srnicek Signed-off-by: Matej Perina --- src/svm/svm.c | 3 +- src/vlibmemory/memory_shared.c | 19 ++++++++++-- .../io/fd/vpp/jvpp/JVppRegistryImpl.java | 7 +++++ .../io/fd/vpp/jvpp/VppJNIConnection.java | 23 +++++++++++---- src/vpp-api/java/jvpp-registry/jvpp_registry.c | 34 ++++++++++++---------- 5 files changed, 62 insertions(+), 24 deletions(-) (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/svm/svm.c b/src/svm/svm.c index 600fa744..0442ecb2 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -862,7 +862,8 @@ svm_region_find_or_create (svm_map_region_args_t * a) ASSERT (mp); /* Map the named region from the correct chroot environment */ - a->root_path = (char *) mp->root_path; + if (a->root_path == NULL) + a->root_path = (char *) mp->root_path; /* * See if this region is already known. If it is, we're diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index 41aa1231..9bab6573 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -341,12 +341,25 @@ vl_map_shmem (const char *region_name, int is_vlib) struct timespec ts, tsrem; u32 vlib_input_queue_length; + memset (a, 0, sizeof (*a)); + + if (strstr (region_name, "-vpe-api")) + { + char root_path[strlen (region_name)]; + strncpy (root_path, region_name, strlen (region_name) - 8); + a->root_path = root_path; + am->root_path = root_path; + } + if (is_vlib == 0) svm_region_init_chroot (am->root_path); - memset (a, 0, sizeof (*a)); - - a->name = region_name; + if (a->root_path != NULL) + { + a->name = "/vpe-api"; + } + else + a->name = region_name; a->size = am->api_size ?
am->api_size : (16 << 20); a->flags = SVM_FLAGS_MHEAP; a->uid = am->api_uid; diff --git a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java index 98ef1c15..6e938ae3 100644 --- a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java +++ b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java @@ -48,6 +48,13 @@ public final class JVppRegistryImpl implements JVppRegistry, ControlPingCallback pingCalls = new HashMap<>(); } + public JVppRegistryImpl(final String clientName, final String shmPrefix) throws IOException { + connection = new VppJNIConnection(clientName, shmPrefix); + connection.connect(); + pluginRegistry = new ConcurrentHashMap<>(); + pingCalls = new HashMap<>(); + } + @Override public VppConnection getConnection() { return connection; diff --git a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java index 320c1283..53eaa790 100644 --- a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java +++ b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java @@ -17,8 +17,11 @@ package io.fd.vpp.jvpp; import static io.fd.vpp.jvpp.NativeLibraryLoader.loadLibrary; +import static java.lang.String.format; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -30,13 +33,14 @@ import java.util.logging.Logger; */ public final class VppJNIConnection implements VppConnection { private static final Logger LOG = Logger.getLogger(VppJNIConnection.class.getName()); + private static final String DEFAULT_SHM_PREFIX = "/vpe-api"; static { final String libName = "libjvpp_registry.so"; try { loadLibrary(libName, VppJNIConnection.class); } catch (IOException e) { - LOG.log(Level.SEVERE, String.format("Can't find vpp jni library: %s", libName), e); + LOG.log(Level.SEVERE, format("Can't find vpp jni library: %s", libName), e); throw new ExceptionInInitializerError(e); } } @@ -44,6 +48,7 @@ public final class VppJNIConnection implements VppConnection { private ConnectionInfo connectionInfo; private final String clientName; + private final String shmPrefix; private volatile boolean disconnected = false; /** @@ -54,6 +59,12 @@ public final class VppJNIConnection implements VppConnection { */ public VppJNIConnection(final String clientName) { this.clientName = Objects.requireNonNull(clientName, "Null clientName"); + this.shmPrefix = DEFAULT_SHM_PREFIX; + } + + public VppJNIConnection(final String clientName, final String shmPrefix) { + this.clientName = Objects.requireNonNull(clientName, "Null clientName"); + this.shmPrefix = Objects.requireNonNull(shmPrefix, "Null shmPrefix"); } /** @@ -73,16 +84,18 @@ public final class VppJNIConnection implements VppConnection { @Override public void connect() throws IOException { - _connect(); + _connect(shmPrefix); } - private void _connect() throws IOException { + private void _connect(final String shmPrefix) throws IOException { + Objects.requireNonNull(shmPrefix, "Shared memory prefix must be defined"); + synchronized (VppJNIConnection.class) { if (connections.containsKey(clientName)) { throw new IOException("Client " + clientName + " already connected"); } - connectionInfo = clientConnect(clientName); + connectionInfo = clientConnect(shmPrefix, clientName); if (connectionInfo.status != 0) { throw new IOException("Connection returned error " + 
connectionInfo.status); } @@ -130,7 +143,7 @@ public final class VppJNIConnection implements VppConnection { } } - private static native ConnectionInfo clientConnect(String clientName); + private static native ConnectionInfo clientConnect(String shmPrefix, String clientName); private static native void clientDisconnect(); diff --git a/src/vpp-api/java/jvpp-registry/jvpp_registry.c b/src/vpp-api/java/jvpp-registry/jvpp_registry.c index 66adfea0..1e2c0176 100644 --- a/src/vpp-api/java/jvpp-registry/jvpp_registry.c +++ b/src/vpp-api/java/jvpp-registry/jvpp_registry.c @@ -243,14 +243,13 @@ static int send_initial_control_ping() { return rv; } -static int connect_to_vpe(char *name) { +static int connect_to_vpe(char *shm_prefix, char *name) { jvpp_main_t * jm = &jvpp_main; api_main_t * am = &api_main; jvpp_registry_main_t * rm = &jvpp_registry_main; - if (vl_client_connect_to_vlib("/vpe-api", name, 32) < 0) + if (vl_client_connect_to_vlib(shm_prefix, name, 32) < 0) return -1; - jm->my_client_index = am->my_client_index; jm->vl_input_queue = am->shmem_hdr->vl_input_queue; @@ -268,9 +267,15 @@ static int connect_to_vpe(char *name) { } JNIEXPORT jobject JNICALL Java_io_fd_vpp_jvpp_VppJNIConnection_clientConnect( - JNIEnv *env, jclass obj, jstring clientName) { + JNIEnv *env, jclass obj, jstring shmPrefix, jstring clientName) { + /* + * TODO introducing memory prefix as variable can be used in hc2vpp + * to be able to run without root privileges + * https://jira.fd.io/browse/HC2VPP-176 + */ int rv; const char *client_name; + const char *shm_prefix; void vl_msg_reply_handler_hookup(void); jvpp_main_t * jm = &jvpp_main; jvpp_registry_main_t * rm = &jvpp_registry_main; @@ -280,15 +285,6 @@ JNIEXPORT jobject JNICALL Java_io_fd_vpp_jvpp_VppJNIConnection_clientConnect( jmethodID connectionInfoConstructor = (*env)->GetMethodID(env, connectionInfoClass, "", "(JII)V"); - /* - * Bail out now if we're not running as root - */ - if (geteuid() != 0) { - return (*env)->NewObject(env, connectionInfoClass, - connectionInfoConstructor, 0, 0, - VNET_API_ERROR_NOT_RUNNING_AS_ROOT); - } - if (rm->is_connected) { return (*env)->NewObject(env, connectionInfoClass, connectionInfoConstructor, 0, 0, @@ -296,17 +292,25 @@ JNIEXPORT jobject JNICALL Java_io_fd_vpp_jvpp_VppJNIConnection_clientConnect( } client_name = (*env)->GetStringUTFChars(env, clientName, 0); + shm_prefix = (*env)->GetStringUTFChars(env, shmPrefix, 0); + if (!client_name) { return (*env)->NewObject(env, connectionInfoClass, - connectionInfoConstructor, 0, 0, VNET_API_ERROR_INVALID_VALUE); + connectionInfoConstructor, 0, 0, VNET_API_ERROR_INVALID_VALUE, shmPrefix); + } + + if (!shm_prefix) { + return (*env)->NewObject(env, connectionInfoClass, + connectionInfoConstructor, 0, 0, VNET_API_ERROR_INVALID_VALUE, shmPrefix); } - rv = connect_to_vpe((char *) client_name); + rv = connect_to_vpe((char *) shm_prefix, (char *) client_name); if (rv < 0) clib_warning("connection failed, rv %d", rv); (*env)->ReleaseStringUTFChars(env, clientName, client_name); + (*env)->ReleaseStringUTFChars(env, shmPrefix, shm_prefix); return (*env)->NewObject(env, connectionInfoClass, connectionInfoConstructor, (jlong) pointer_to_uword (jm->vl_input_queue), -- cgit 1.2.3-korg From 3cdc25ffbaa572639f99e197172c568e4324bc03 Mon Sep 17 00:00:00 2001 From: Ole Troan Date: Thu, 17 Aug 2017 11:07:33 +0200 Subject: API: More gracefully fail when opening shared memory segment fails. API clients would fail with an ASSERT (and core dump) whenever the API shared memory segment could not be opened. 
This returns an error value to the client's connect instead. Change-Id: Id122a3a090b24b139c382ae09f341bde61fd2540 Signed-off-by: Ole Troan --- src/svm/svm.c | 13 ++++++++----- src/svm/svm_common.h | 2 +- src/vlibmemory/memory_shared.c | 8 ++++++-- 3 files changed, 15 insertions(+), 8 deletions(-) (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/svm/svm.c b/src/svm/svm.c index 0442ecb2..663324e0 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -733,7 +733,7 @@ svm_mutex_cleanup (void) } } -static void +static int svm_region_init_internal (svm_map_region_args_t * a) { svm_region_t *rp; @@ -742,7 +742,7 @@ svm_region_init_internal (svm_map_region_args_t * a) /* guard against klutz calls */ if (root_rp) - return; + return -1; root_rp_refcount++; @@ -757,7 +757,8 @@ svm_region_init_internal (svm_map_region_args_t * a) a->baseva += randomize_baseva; rp = svm_map_region (a); - ASSERT (rp); + if (!rp) + return -1; region_lock (rp, 3); @@ -778,6 +779,8 @@ svm_region_init_internal (svm_map_region_args_t * a) } region_unlock (rp); root_rp = rp; + + return 0; } void @@ -797,7 +800,7 @@ svm_region_init (void) svm_region_init_internal (a); } -void +int svm_region_init_chroot (const char *root_path) { svm_map_region_args_t _a, *a = &_a; @@ -811,7 +814,7 @@ svm_region_init_chroot (const char *root_path) a->uid = 0; a->gid = 0; - svm_region_init_internal (a); + return svm_region_init_internal (a); } void diff --git a/src/svm/svm_common.h b/src/svm/svm_common.h index 1f184432..1f6d83c0 100644 --- a/src/svm/svm_common.h +++ b/src/svm/svm_common.h @@ -110,7 +110,7 @@ typedef struct void *svm_region_find_or_create (svm_map_region_args_t * a); void svm_region_init (void); -void svm_region_init_chroot (const char *root_path); +int svm_region_init_chroot (const char *root_path); void svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid); void svm_region_init_args (svm_map_region_args_t * a); void svm_region_exit (void); diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index 9bab6573..fbdabd06 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -337,7 +337,7 @@ vl_map_shmem (const char *region_name, int is_vlib) void *oldheap; vl_shmem_hdr_t *shmem_hdr = 0; api_main_t *am = &api_main; - int i; + int i, rv; struct timespec ts, tsrem; u32 vlib_input_queue_length; @@ -352,7 +352,11 @@ vl_map_shmem (const char *region_name, int is_vlib) } if (is_vlib == 0) - svm_region_init_chroot (am->root_path); + { + rv = svm_region_init_chroot (am->root_path); + if (rv) + return rv; + } if (a->root_path != NULL) { -- cgit 1.2.3-korg From cfc997ef3da9f406afe5caad99fc98a53aab7a77 Mon Sep 17 00:00:00 2001 From: Dave Wallace Date: Tue, 22 Aug 2017 18:32:34 -0400 Subject: Fix vl_map_shmem() root_path dangling reference. 
Change-Id: I90c9d8e151cacf50a99ce76b7a589079303196e8 Signed-off-by: Dave Wallace --- src/vlibmemory/memory_shared.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'src/vlibmemory/memory_shared.c') diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index fbdabd06..8c646908 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -340,15 +340,18 @@ vl_map_shmem (const char *region_name, int is_vlib) int i, rv; struct timespec ts, tsrem; u32 vlib_input_queue_length; + char *vpe_api_region_suffix = "-vpe-api"; memset (a, 0, sizeof (*a)); - if (strstr (region_name, "-vpe-api")) + if (strstr (region_name, vpe_api_region_suffix)) { - char root_path[strlen (region_name)]; - strncpy (root_path, region_name, strlen (region_name) - 8); - a->root_path = root_path; - am->root_path = root_path; + u8 *root_path = format (0, "%s", region_name); + _vec_len (root_path) = (vec_len (root_path) - + strlen (vpe_api_region_suffix)); + vec_terminate_c_string (root_path); + a->root_path = (const char *) root_path; + am->root_path = (const char *) root_path; } if (is_vlib == 0) -- cgit 1.2.3-korg
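The fifo-segment change at the top of this log replaces the per-segment vector of fifo pointers with a doubly-linked list of active RX fifos (both fifos of a pair point at the same session, so tracking one is enough) plus one free list per fifo type. What follows is a minimal, self-contained model of that alloc/free discipline; the demo_* names are illustrative rather than the svm API, and the segment heap and locking that the real code wraps around these operations are omitted:

#include <stdlib.h>

typedef enum
{
  DEMO_RX_FREELIST = 0,
  DEMO_TX_FREELIST,
  DEMO_N_FREELISTS
} demo_freelist_t;

typedef struct demo_fifo
{
  struct demo_fifo *next, *prev;
} demo_fifo_t;

typedef struct
{
  demo_fifo_t *active;                  /* list of RX fifos in use */
  demo_fifo_t *free[DEMO_N_FREELISTS];  /* per-type free lists */
} demo_segment_t;

static demo_fifo_t *
demo_alloc (demo_segment_t * s, demo_freelist_t type)
{
  demo_fifo_t *f = s->free[type];
  if (f)
    s->free[type] = f->next;    /* reuse a preallocated fifo */
  else
    f = calloc (1, sizeof (*f));        /* fall back to the allocator */
  if (f == 0)
    return 0;
  f->next = f->prev = 0;
  if (type == DEMO_RX_FREELIST)
    {
      /* push onto the active list: exactly one entry per fifo pair */
      f->next = s->active;
      if (s->active)
        s->active->prev = f;
      s->active = f;
    }
  return f;
}

static void
demo_free (demo_segment_t * s, demo_fifo_t * f, demo_freelist_t type)
{
  if (type == DEMO_RX_FREELIST)
    {
      /* unlink from the active list, including the head case */
      if (f->prev)
        f->prev->next = f->next;
      else
        s->active = f->next;
      if (f->next)
        f->next->prev = f->prev;
    }
  /* return the fifo to its free list; memory is never handed back */
  f->prev = 0;
  f->next = s->free[type];
  s->free[type] = f;
}

Freeing is therefore O(1) and allocation normally touches only the preallocated pool, which is why the patches add a->preallocated_fifo_pairs and the APP_OPTIONS_PREALLOC_FIFO_PAIRS option to size it.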
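Taken together, the last three commits define how an API client names and maps the shared-memory segment: a region name ending in "-vpe-api" carries its own SVM root path (so unprivileged test frameworks can pick a private prefix), vl_map_shmem() copies that prefix without leaving a dangling reference, and a mapping failure now comes back as an error instead of an ASSERT. Below is a minimal C client sketch of the resulting contract, using the same vl_client_connect_to_vlib() entry point that jvpp_registry.c calls above; the prefix and client name are made-up examples, and the include reflects the usual client-side header:

#include <vlibmemory/api.h>

static int
demo_connect (void)
{
  /* "/my-test-prefix-vpe-api" ends in "-vpe-api", so vl_map_shmem()
   * strips the 8-character suffix and uses "/my-test-prefix" as the
   * SVM root path; the region itself is then mapped as "/vpe-api". */
  if (vl_client_connect_to_vlib ("/my-test-prefix-vpe-api",
                                 "demo-client", 32 /* rx queue size */ ) < 0)
    {
      /* after the graceful-failure commit, a segment that cannot be
       * opened reports an error here rather than asserting */
      clib_warning ("shared memory API connect failed");
      return -1;
    }
  /* ... issue binary API calls, then ... */
  vl_client_disconnect_from_vlib ();
  return 0;
}

The Java path reaches the same code: VppJNIConnection.clientConnect() passes its shmPrefix argument through connect_to_vpe() straight to vl_client_connect_to_vlib().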