From 7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 19 Dec 2016 23:05:39 +0100 Subject: Reorganize source tree to use single autotools instance Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23 Signed-off-by: Damjan Marion --- src/svm/dir.dox | 21 + src/svm/persist.c | 258 +++++++++++ src/svm/ssvm.c | 178 ++++++++ src/svm/ssvm.h | 155 +++++++ src/svm/svm.c | 1237 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/svm/svm.h | 207 +++++++++ src/svm/svm_test.c | 79 ++++ src/svm/svmdb.c | 671 ++++++++++++++++++++++++++++ src/svm/svmdb.h | 135 ++++++ src/svm/svmdbtool.c | 537 ++++++++++++++++++++++ src/svm/svmtool.c | 528 ++++++++++++++++++++++ 11 files changed, 4006 insertions(+) create mode 100644 src/svm/dir.dox create mode 100644 src/svm/persist.c create mode 100644 src/svm/ssvm.c create mode 100644 src/svm/ssvm.h create mode 100644 src/svm/svm.c create mode 100644 src/svm/svm.h create mode 100644 src/svm/svm_test.c create mode 100644 src/svm/svmdb.c create mode 100644 src/svm/svmdb.h create mode 100644 src/svm/svmdbtool.c create mode 100644 src/svm/svmtool.c diff --git a/src/svm/dir.dox b/src/svm/dir.dox new file mode 100644 index 00000000..83246979 --- /dev/null +++ b/src/svm/dir.dox @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Copyright (c) 2016 Comcast Cable Communications Management, LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* Doxygen directory documentation */ +/** +@dir +@brief Shared virtual memory allocation library. +*/ diff --git a/src/svm/persist.c b/src/svm/persist.c new file mode 100644 index 00000000..023c596b --- /dev/null +++ b/src/svm/persist.c @@ -0,0 +1,258 @@ +/* + *------------------------------------------------------------------ + * persist.c - persistent data structure storage test / demo code + * + * Copyright (c) 2013 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct +{ + svmdb_client_t *c; +} persist_main_t; + +persist_main_t persist_main; + +typedef struct +{ + u8 *string1; + u8 *string2; +} demo_struct2_t; + +typedef struct +{ + demo_struct2_t *demo2; + u8 *name; +} demo_struct1_t; + +/* + * Data structures in persistent shared memory, all the time + */ +clib_error_t * +persist_malloc (persist_main_t * pm) +{ + demo_struct2_t *demo2; + demo_struct1_t *demo1; + time_t starttime = time (0); + char *datestring = ctime (&starttime); + void *oldheap; + + /* Get back the root pointer */ + demo1 = svmdb_local_get_variable_reference + (pm->c, SVMDB_NAMESPACE_VEC, "demo1_location"); + + /* It doesnt exist create our data structures */ + if (demo1 == 0) + { + /* If you want MP / thread safety, lock the region... */ + pthread_mutex_lock (&pm->c->db_rp->mutex); + + /* Switch to the shared memory region heap */ + oldheap = svm_push_data_heap (pm->c->db_rp); + + /* Allocate the top-level structure as a single element vector */ + vec_validate (demo1, 0); + + /* Allocate the next-level structure as a plain old memory obj */ + demo2 = clib_mem_alloc (sizeof (*demo2)); + + demo1->demo2 = demo2; + demo1->name = format (0, "My name is Ishmael%c", 0); + demo2->string1 = format (0, "Here is string1%c", 0); + demo2->string2 = format (0, "Born at %s%c", datestring, 0); + + /* Back to the process-private heap */ + svm_pop_heap (oldheap); + pthread_mutex_unlock (&pm->c->db_rp->mutex); + + /* + * Set the root pointer. Note: this guy switches heaps, locks, etc. + * We allocated demo1 as a vector to make this "just work..." 
+ */ + svmdb_local_set_vec_variable (pm->c, "demo1_location", + demo1, sizeof (demo1)); + + } + else + { + /* retrieve and print data from shared memory */ + demo2 = demo1->demo2; + fformat (stdout, "name: %s\n", demo1->name); + fformat (stdout, "demo2 location: %llx\n", demo2); + fformat (stdout, "string1: %s\n", demo2->string1); + fformat (stdout, "string2: %s\n", demo2->string2); + } + return 0; +} + +void +unserialize_demo1 (serialize_main_t * sm, va_list * args) +{ + demo_struct1_t **result = va_arg (*args, demo_struct1_t **); + demo_struct1_t *demo1; + demo_struct2_t *demo2; + + /* Allocate data structures in process private memory */ + demo1 = clib_mem_alloc (sizeof (*demo1)); + demo2 = clib_mem_alloc (sizeof (*demo2)); + demo1->demo2 = demo2; + + /* retrieve data from shared memory checkpoint */ + unserialize_cstring (sm, (char **) &demo1->name); + unserialize_cstring (sm, (char **) &demo2->string1); + unserialize_cstring (sm, (char **) &demo2->string2); + *result = demo1; +} + +void +serialize_demo1 (serialize_main_t * sm, va_list * args) +{ + demo_struct1_t *demo1 = va_arg (*args, demo_struct1_t *); + demo_struct2_t *demo2 = demo1->demo2; + + serialize_cstring (sm, (char *) demo1->name); + serialize_cstring (sm, (char *) demo2->string1); + serialize_cstring (sm, (char *) demo2->string2); +} + +/* Serialize / unserialize variant */ +clib_error_t * +persist_serialize (persist_main_t * pm) +{ + u8 *checkpoint; + serialize_main_t sm; + + demo_struct2_t *demo2; + demo_struct1_t *demo1; + time_t starttime = time (0); + char *datestring = ctime (&starttime); + + /* Get back the root pointer */ + checkpoint = svmdb_local_get_vec_variable (pm->c, "demo1_checkpoint", + sizeof (u8)); + + /* It doesnt exist create our data structures */ + if (checkpoint == 0) + { + /* Allocate data structures in process-private memory */ + demo1 = clib_mem_alloc (sizeof (*demo2)); + vec_validate (demo1, 0); + demo2 = clib_mem_alloc (sizeof (*demo2)); + + demo1->demo2 = demo2; + demo1->name = format (0, "My name is Ishmael%c", 0); + demo2->string1 = format (0, "Here is string1%c", 0); + demo2->string2 = format (0, "Born at %s%c", datestring, 0); + + /* Create checkpoint */ + serialize_open_vector (&sm, checkpoint); + serialize (&sm, serialize_demo1, demo1); + checkpoint = serialize_close_vector (&sm); + + /* Copy checkpoint into shared memory */ + svmdb_local_set_vec_variable (pm->c, "demo1_checkpoint", + checkpoint, sizeof (u8)); + /* Toss the process-private-memory original.. */ + vec_free (checkpoint); + } + else + { + /* Open the checkpoint */ + unserialize_open_data (&sm, checkpoint, vec_len (checkpoint)); + unserialize (&sm, unserialize_demo1, &demo1); + + /* Toss the process-private-memory checkpoint copy */ + vec_free (checkpoint); + + /* Off we go... 
*/ + demo2 = demo1->demo2; + fformat (stdout, "name: %s\n", demo1->name); + fformat (stdout, "demo2 location: %llx\n", demo2); + fformat (stdout, "string1: %s\n", demo2->string1); + fformat (stdout, "string2: %s\n", demo2->string2); + } + return 0; +} + + +int +main (int argc, char **argv) +{ + unformat_input_t _input, *input = &_input; + persist_main_t *pm = &persist_main; + clib_error_t *error = 0; + + /* Make a 4mb database arena, chroot so it's truly private */ + pm->c = svmdb_map_chroot_size ("/ptest", 4 << 20); + + ASSERT (pm->c); + + unformat_init_command_line (input, argv); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "malloc")) + error = persist_malloc (pm); + else if (unformat (input, "serialize")) + error = persist_serialize (pm); + else + { + error = clib_error_return (0, "Unknown flavor '%U'", + format_unformat_error, input); + break; + } + } + + svmdb_unmap (pm->c); + + if (error) + { + clib_error_report (error); + exit (1); + } + return 0; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/ssvm.c b/src/svm/ssvm.c new file mode 100644 index 00000000..6f409eb6 --- /dev/null +++ b/src/svm/ssvm.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ssvm.h" + +int +ssvm_master_init (ssvm_private_t * ssvm, u32 master_index) +{ + int ssvm_fd; + u8 *ssvm_filename; + u8 junk = 0; + int flags; + ssvm_shared_header_t *sh; + u64 ticks = clib_cpu_time_now (); + u64 randomize_baseva; + void *oldheap; + + if (ssvm->ssvm_size == 0) + return SSVM_API_ERROR_NO_SIZE; + + ssvm_filename = format (0, "/dev/shm/%s%c", ssvm->name, 0); + + unlink ((char *) ssvm_filename); + + vec_free (ssvm_filename); + + ssvm_fd = shm_open ((char *) ssvm->name, O_RDWR | O_CREAT | O_EXCL, 0777); + + if (ssvm_fd < 0) + { + clib_unix_warning ("create segment '%s'", ssvm->name); + return SSVM_API_ERROR_CREATE_FAILURE; + } + + if (lseek (ssvm_fd, ssvm->ssvm_size, SEEK_SET) < 0) + { + clib_unix_warning ("lseek"); + close (ssvm_fd); + return SSVM_API_ERROR_SET_SIZE; + } + + if (write (ssvm_fd, &junk, 1) != 1) + { + clib_unix_warning ("set ssvm size"); + close (ssvm_fd); + return SSVM_API_ERROR_SET_SIZE; + } + + flags = MAP_SHARED; + if (ssvm->requested_va) + flags |= MAP_FIXED; + + randomize_baseva = (ticks & 15) * MMAP_PAGESIZE; + + if (ssvm->requested_va) + ssvm->requested_va += randomize_baseva; + + sh = ssvm->sh = + (ssvm_shared_header_t *) mmap ((void *) ssvm->requested_va, + ssvm->ssvm_size, PROT_READ | PROT_WRITE, + flags, ssvm_fd, 0); + + if (ssvm->sh == MAP_FAILED) + { + clib_unix_warning ("mmap"); + close (ssvm_fd); + return SSVM_API_ERROR_MMAP; + } + + close (ssvm_fd); + + ssvm->my_pid = getpid (); + sh->master_pid = ssvm->my_pid; + sh->ssvm_size = ssvm->ssvm_size; + sh->heap = mheap_alloc_with_flags + (((u8 *) sh) + MMAP_PAGESIZE, ssvm->ssvm_size - MMAP_PAGESIZE, + MHEAP_FLAG_DISABLE_VM | MHEAP_FLAG_THREAD_SAFE); + + sh->ssvm_va = pointer_to_uword (sh); + sh->master_index = master_index; + + oldheap = ssvm_push_heap (sh); + sh->name = format (0, "%s%c", ssvm->name, 0); + ssvm_pop_heap (oldheap); + + ssvm->i_am_master = 1; + + /* The application has to set set sh->ready... 
*/ + return 0; +} + +int +ssvm_slave_init (ssvm_private_t * ssvm, int timeout_in_seconds) +{ + struct stat stat; + int ssvm_fd = -1; + ssvm_shared_header_t *sh; + + ssvm->i_am_master = 0; + + while (timeout_in_seconds-- > 0) + { + if (ssvm_fd < 0) + ssvm_fd = shm_open ((char *) ssvm->name, O_RDWR, 0777); + if (ssvm_fd < 0) + { + sleep (1); + continue; + } + if (fstat (ssvm_fd, &stat) < 0) + { + sleep (1); + continue; + } + + if (stat.st_size > 0) + goto map_it; + } + clib_warning ("slave timeout"); + return SSVM_API_ERROR_SLAVE_TIMEOUT; + +map_it: + sh = (void *) mmap (0, MMAP_PAGESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, + ssvm_fd, 0); + if (sh == MAP_FAILED) + { + clib_unix_warning ("slave research mmap"); + close (ssvm_fd); + return SSVM_API_ERROR_MMAP; + } + + while (timeout_in_seconds-- > 0) + { + if (sh->ready) + goto re_map_it; + } + close (ssvm_fd); + munmap (sh, MMAP_PAGESIZE); + clib_warning ("slave timeout 2"); + return SSVM_API_ERROR_SLAVE_TIMEOUT; + +re_map_it: + ssvm->requested_va = (u64) sh->ssvm_va; + ssvm->ssvm_size = sh->ssvm_size; + munmap (sh, MMAP_PAGESIZE); + + sh = ssvm->sh = (void *) mmap ((void *) ssvm->requested_va, ssvm->ssvm_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, ssvm_fd, 0); + + if (sh == MAP_FAILED) + { + clib_unix_warning ("slave final mmap"); + close (ssvm_fd); + return SSVM_API_ERROR_MMAP; + } + sh->slave_pid = getpid (); + return 0; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/ssvm.h b/src/svm/ssvm.h new file mode 100644 index 00000000..9e61b9a0 --- /dev/null +++ b/src/svm/ssvm.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __included_ssvm_h__ +#define __included_ssvm_h__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MMAP_PAGESIZE (4<<10) +#define SSVM_N_OPAQUE 7 + +typedef struct +{ + /* Spin-lock */ + volatile u32 lock; + volatile u32 owner_pid; + int recursion_count; + u32 tag; /* for debugging */ + + /* The allocation arena */ + void *heap; + + /* Segment must be mapped at this address, or no supper */ + u64 ssvm_va; + /* The actual mmap size */ + u64 ssvm_size; + u32 master_pid; + u32 slave_pid; + u8 *name; + void *opaque[SSVM_N_OPAQUE]; + + /* Set when the master application thinks it's time to make the donuts */ + volatile u32 ready; + + /* Needed to make unique MAC addresses, etc. 
*/ + u32 master_index; +} ssvm_shared_header_t; + +typedef struct +{ + ssvm_shared_header_t *sh; + u64 ssvm_size; + u32 my_pid; + u32 vlib_hw_if_index; + u8 *name; + uword requested_va; + int i_am_master; + u32 per_interface_next_index; + u32 *rx_queue; +} ssvm_private_t; + +always_inline void +ssvm_lock (ssvm_shared_header_t * h, u32 my_pid, u32 tag) +{ + if (h->owner_pid == my_pid) + { + h->recursion_count++; + return; + } + + while (__sync_lock_test_and_set (&h->lock, 1)) + ; + + h->owner_pid = my_pid; + h->recursion_count = 1; + h->tag = tag; +} + +always_inline void +ssvm_unlock (ssvm_shared_header_t * h) +{ + if (--h->recursion_count == 0) + { + h->owner_pid = 0; + h->tag = 0; + CLIB_MEMORY_BARRIER (); + h->lock = 0; + } +} + +static inline void * +ssvm_push_heap (ssvm_shared_header_t * sh) +{ + u8 *oldheap; + oldheap = clib_mem_set_heap (sh->heap); + return ((void *) oldheap); +} + +static inline void +ssvm_pop_heap (void *oldheap) +{ + clib_mem_set_heap (oldheap); +} + +#define foreach_ssvm_api_error \ +_(NO_NAME, "No shared segment name", -10) \ +_(NO_SIZE, "Size not set (master)", -11) \ +_(CREATE_FAILURE, "Create failed", -12) \ +_(SET_SIZE, "Set size failed", -13) \ +_(MMAP, "mmap failed", -14) \ +_(SLAVE_TIMEOUT, "Slave map timeout", -15) + +typedef enum +{ +#define _(n,s,c) SSVM_API_ERROR_##n = c, + foreach_ssvm_api_error +#undef _ +} ssvm_api_error_enum_t; + +#define SSVM_API_ERROR_NO_NAME (-10) + +int ssvm_master_init (ssvm_private_t * ssvm, u32 master_index); +int ssvm_slave_init (ssvm_private_t * ssvm, int timeout_in_seconds); + +#endif /* __included_ssvm_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svm.c b/src/svm/svm.c new file mode 100644 index 00000000..e4ca98e1 --- /dev/null +++ b/src/svm/svm.c @@ -0,0 +1,1237 @@ +/* + *------------------------------------------------------------------ + * svm.c - shared VM allocation, mmap(...MAP_FIXED...) + * library + * + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "svm.h" + +static svm_region_t *root_rp; +static int root_rp_refcount; + +#define MAXLOCK 2 +static pthread_mutex_t *mutexes_held[MAXLOCK]; +static int nheld; + +svm_region_t * +svm_get_root_rp (void) +{ + return root_rp; +} + +#define MUTEX_DEBUG + +static void +region_lock (svm_region_t * rp, int tag) +{ + pthread_mutex_lock (&rp->mutex); +#ifdef MUTEX_DEBUG + rp->mutex_owner_pid = getpid (); + rp->mutex_owner_tag = tag; +#endif + ASSERT (nheld < MAXLOCK); + /* + * Keep score of held mutexes so we can try to exit + * cleanly if the world comes to an end at the worst possible + * moment + */ + mutexes_held[nheld++] = &rp->mutex; +} + +static void +region_unlock (svm_region_t * rp) +{ + int i, j; +#ifdef MUTEX_DEBUG + rp->mutex_owner_pid = 0; + rp->mutex_owner_tag = 0; +#endif + + for (i = nheld - 1; i >= 0; i--) + { + if (mutexes_held[i] == &rp->mutex) + { + for (j = i; j < MAXLOCK - 1; j++) + mutexes_held[j] = mutexes_held[j + 1]; + nheld--; + goto found; + } + } + ASSERT (0); + +found: + CLIB_MEMORY_BARRIER (); + pthread_mutex_unlock (&rp->mutex); +} + + +static u8 * +format_svm_flags (u8 * s, va_list * args) +{ + uword f = va_arg (*args, uword); + + if (f & SVM_FLAGS_MHEAP) + s = format (s, "MHEAP "); + if (f & SVM_FLAGS_FILE) + s = format (s, "FILE "); + if (f & SVM_FLAGS_NODATA) + s = format (s, "NODATA "); + if (f & SVM_FLAGS_NEED_DATA_INIT) + s = format (s, "INIT "); + + return (s); +} + +static u8 * +format_svm_size (u8 * s, va_list * args) +{ + uword size = va_arg (*args, uword); + + if (size >= (1 << 20)) + { + s = format (s, "(%d mb)", size >> 20); + } + else if (size >= (1 << 10)) + { + s = format (s, "(%d kb)", size >> 10); + } + else + { + s = format (s, "(%d bytes)", size); + } + return (s); +} + +u8 * +format_svm_region (u8 * s, va_list * args) +{ + svm_region_t *rp = va_arg (*args, svm_region_t *); + int verbose = va_arg (*args, int); + int i; + uword lo, hi; + + s = format (s, "%s: base va 0x%x size 0x%x %U\n", + rp->region_name, rp->virtual_base, + rp->virtual_size, format_svm_size, rp->virtual_size); + s = format (s, " user_ctx 0x%x, bitmap_size %d\n", + rp->user_ctx, rp->bitmap_size); + + if (verbose) + { + s = format (s, " flags: 0x%x %U\n", rp->flags, + format_svm_flags, rp->flags); + s = format (s, + " region_heap 0x%x data_base 0x%x data_heap 0x%x\n", + rp->region_heap, rp->data_base, rp->data_heap); + } + + s = format (s, " %d clients, pids: ", vec_len (rp->client_pids)); + + for (i = 0; i < vec_len (rp->client_pids); i++) + s = format (s, "%d ", rp->client_pids[i]); + + s = format (s, "\n"); + + if (verbose) + { + lo = hi = ~0; + + s = format (s, " VM in use: "); + + for (i = 0; i < rp->bitmap_size; i++) + { + if (clib_bitmap_get_no_check (rp->bitmap, i) != 0) + { + if (lo == ~0) + { + hi = lo = rp->virtual_base + i * MMAP_PAGESIZE; + } + else + { + hi = rp->virtual_base + i * MMAP_PAGESIZE; + } + } + else + { + if (lo != ~0) + { + hi = rp->virtual_base + i * MMAP_PAGESIZE - 1; + s = format (s, " 0x%x - 0x%x (%dk)\n", lo, hi, + (hi - lo) >> 10); + lo = hi = ~0; + } + } + } + s = format (s, " rgn heap stats: %U", format_mheap, + rp->region_heap, 0); + if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap) + { + s = format (s, "\n data heap stats: %U", 
format_mheap, + rp->data_heap, 1); + } + s = format (s, "\n"); + } + + return (s); +} + +/* + * rnd_pagesize + * Round to a pagesize multiple, presumably 4k works + */ +static u64 +rnd_pagesize (u64 size) +{ + u64 rv; + + rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1); + return (rv); +} + +/* + * svm_data_region_setup + */ +static int +svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp) +{ + int fd; + u8 junk = 0; + uword map_size; + + map_size = rp->virtual_size - (MMAP_PAGESIZE + + (a->pvt_heap_size ? a->pvt_heap_size : + SVM_PVT_MHEAP_SIZE)); + + if (a->flags & SVM_FLAGS_FILE) + { + struct stat statb; + + fd = open (a->backing_file, O_RDWR | O_CREAT, 0777); + + if (fd < 0) + { + clib_unix_warning ("open"); + return -1; + } + + if (fstat (fd, &statb) < 0) + { + clib_unix_warning ("fstat"); + close (fd); + return -2; + } + + if (statb.st_mode & S_IFREG) + { + if (statb.st_size == 0) + { + if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1) + { + clib_unix_warning ("seek region size"); + close (fd); + return -3; + } + if (write (fd, &junk, 1) != 1) + { + clib_unix_warning ("set region size"); + close (fd); + return -3; + } + } + else + { + map_size = rnd_pagesize (statb.st_size); + } + } + else + { + map_size = a->backing_mmap_size; + } + + ASSERT (map_size <= rp->virtual_size - + (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE)); + + if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) + { + clib_unix_warning ("mmap"); + close (fd); + return -3; + } + close (fd); + rp->backing_file = (char *) format (0, "%s\0", a->backing_file); + rp->flags |= SVM_FLAGS_FILE; + } + + if (a->flags & SVM_FLAGS_MHEAP) + { + rp->data_heap = + mheap_alloc_with_flags ((void *) (rp->data_base), map_size, + MHEAP_FLAG_DISABLE_VM); + rp->flags |= SVM_FLAGS_MHEAP; + } + return 0; +} + +static int +svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp) +{ + int fd; + u8 junk = 0; + uword map_size; + struct stat statb; + + map_size = rp->virtual_size - + (MMAP_PAGESIZE + + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)); + + if (a->flags & SVM_FLAGS_FILE) + { + + fd = open (a->backing_file, O_RDWR, 0777); + + if (fd < 0) + { + clib_unix_warning ("open"); + return -1; + } + + if (fstat (fd, &statb) < 0) + { + clib_unix_warning ("fstat"); + close (fd); + return -2; + } + + if (statb.st_mode & S_IFREG) + { + if (statb.st_size == 0) + { + if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1) + { + clib_unix_warning ("seek region size"); + close (fd); + return -3; + } + if (write (fd, &junk, 1) != 1) + { + clib_unix_warning ("set region size"); + close (fd); + return -3; + } + } + else + { + map_size = rnd_pagesize (statb.st_size); + } + } + else + { + map_size = a->backing_mmap_size; + } + + ASSERT (map_size <= rp->virtual_size + - (MMAP_PAGESIZE + + + (a->pvt_heap_size ? 
a->pvt_heap_size : SVM_PVT_MHEAP_SIZE))); + + if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) + { + clib_unix_warning ("mmap"); + close (fd); + return -3; + } + close (fd); + } + return 0; +} + +u8 * +shm_name_from_svm_map_region_args (svm_map_region_args_t * a) +{ + u8 *path; + u8 *shm_name; + u8 *split_point; + u8 *mkdir_arg = 0; + int root_path_offset = 0; + int name_offset = 0; + + if (a->root_path) + { + /* Tolerate present or absent slashes */ + if (a->root_path[0] == '/') + root_path_offset++; + + /* create the root_path under /dev/shm + iterate through path creating directories */ + + path = format (0, "/dev/shm/%s%c", &a->root_path[root_path_offset], 0); + split_point = path + 1; + vec_add1 (mkdir_arg, '-'); + + while (*split_point) + { + while (*split_point && *split_point != '/') + { + vec_add1 (mkdir_arg, *split_point); + split_point++; + } + vec_add1 (mkdir_arg, 0); + + /* ready to descend another level */ + mkdir_arg[vec_len (mkdir_arg) - 1] = '-'; + split_point++; + } + vec_free (mkdir_arg); + vec_free (path); + + if (a->name[0] == '/') + name_offset = 1; + + shm_name = format (0, "/%s-%s%c", a->root_path, + &a->name[name_offset], 0); + } + else + shm_name = format (0, "%s%c", a->name, 0); + return (shm_name); +} + +/* + * svm_map_region + */ +void * +svm_map_region (svm_map_region_args_t * a) +{ + int svm_fd; + svm_region_t *rp; + pthread_mutexattr_t attr; + pthread_condattr_t cattr; + int deadman = 0; + u8 junk = 0; + void *oldheap; + int overhead_space; + int rv; + uword data_base; + int nbits, words, bit; + int pid_holding_region_lock; + u8 *shm_name; + int dead_region_recovery = 0; + int time_left; + struct stat stat; + struct timespec ts, tsrem; + + if (CLIB_DEBUG > 1) + clib_warning ("[%d] map region %s", getpid (), a->name); + + ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size); + ASSERT (a->name); + + shm_name = shm_name_from_svm_map_region_args (a); + + svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777); + + if (svm_fd >= 0) + { + if (fchmod (svm_fd, 0770) < 0) + clib_unix_warning ("segment chmod"); + /* This turns out to fail harmlessly if the client starts first */ + if (fchown (svm_fd, a->uid, a->gid) < 0) + clib_unix_warning ("segment chown [ok if client starts first]"); + + vec_free (shm_name); + + if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1) + { + clib_warning ("seek region size"); + close (svm_fd); + return (0); + } + if (write (svm_fd, &junk, 1) != 1) + { + clib_warning ("set region size"); + close (svm_fd); + return (0); + } + + rp = mmap ((void *) a->baseva, a->size, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); + + if (rp == (svm_region_t *) MAP_FAILED) + { + clib_unix_warning ("mmap create"); + close (svm_fd); + return (0); + } + close (svm_fd); + memset (rp, 0, sizeof (*rp)); + + if (pthread_mutexattr_init (&attr)) + clib_unix_warning ("mutexattr_init"); + + if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED)) + clib_unix_warning ("mutexattr_setpshared"); + + if (pthread_mutex_init (&rp->mutex, &attr)) + clib_unix_warning ("mutex_init"); + + if (pthread_mutexattr_destroy (&attr)) + clib_unix_warning ("mutexattr_destroy"); + + if (pthread_condattr_init (&cattr)) + clib_unix_warning ("condattr_init"); + + if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED)) + clib_unix_warning ("condattr_setpshared"); + + if (pthread_cond_init (&rp->condvar, &cattr)) + clib_unix_warning ("cond_init"); + + if (pthread_condattr_destroy 
(&cattr)) + clib_unix_warning ("condattr_destroy"); + + region_lock (rp, 1); + + rp->virtual_base = a->baseva; + rp->virtual_size = a->size; + + rp->region_heap = + mheap_alloc_with_flags ((void *) (a->baseva + MMAP_PAGESIZE), + (a->pvt_heap_size != 0) ? + a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, + MHEAP_FLAG_DISABLE_VM); + oldheap = svm_push_pvt_heap (rp); + + rp->region_name = (char *) format (0, "%s%c", a->name, 0); + vec_add1 (rp->client_pids, getpid ()); + + nbits = rp->virtual_size / MMAP_PAGESIZE; + + ASSERT (nbits > 0); + rp->bitmap_size = nbits; + words = (nbits + BITS (uword) - 1) / BITS (uword); + vec_validate (rp->bitmap, words - 1); + + overhead_space = MMAP_PAGESIZE /* header */ + + ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE); + + bit = 0; + data_base = (uword) rp->virtual_base; + + if (a->flags & SVM_FLAGS_NODATA) + rp->flags |= SVM_FLAGS_NEED_DATA_INIT; + + do + { + clib_bitmap_set_no_check (rp->bitmap, bit, 1); + bit++; + overhead_space -= MMAP_PAGESIZE; + data_base += MMAP_PAGESIZE; + } + while (overhead_space > 0); + + rp->data_base = (void *) data_base; + + /* + * Note: although the POSIX spec guarantees that only one + * process enters this block, we have to play games + * to hold off clients until e.g. the mutex is ready + */ + rp->version = SVM_VERSION; + + /* setup the data portion of the region */ + + rv = svm_data_region_create (a, rp); + if (rv) + { + clib_warning ("data_region_create: %d", rv); + } + + region_unlock (rp); + + svm_pop_heap (oldheap); + + return ((void *) rp); + } + else + { + svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777); + + vec_free (shm_name); + + if (svm_fd < 0) + { + perror ("svm_region_map(mmap open)"); + return (0); + } + + time_left = 20; + while (1) + { + if (0 != fstat (svm_fd, &stat)) + { + clib_warning ("fstat failed: %d", errno); + close (svm_fd); + return (0); + } + if (stat.st_size > 0) + { + break; + } + if (0 == time_left) + { + clib_warning ("waiting for resize of shm file timed out"); + close (svm_fd); + return (0); + } + ts.tv_sec = 0; + ts.tv_nsec = 100000000; + while (nanosleep (&ts, &tsrem) < 0) + ts = tsrem; + time_left--; + } + + rp = mmap (0, MMAP_PAGESIZE, + PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0); + + if (rp == (svm_region_t *) MAP_FAILED) + { + close (svm_fd); + clib_warning ("mmap"); + return (0); + } + /* + * We lost the footrace to create this region; make sure + * the winner has crossed the finish line. + */ + while (rp->version == 0 && deadman++ < 5) + { + sleep (1); + } + + /* + * -ed? 
+ */ + if (rp->version == 0) + { + clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION); + close (svm_fd); + munmap (rp, a->size); + return (0); + } + /* Remap now that the region has been placed */ + a->baseva = rp->virtual_base; + a->size = rp->virtual_size; + munmap (rp, MMAP_PAGESIZE); + + rp = (void *) mmap ((void *) a->baseva, a->size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, svm_fd, 0); + if ((uword) rp == (uword) MAP_FAILED) + { + clib_unix_warning ("mmap"); + close (svm_fd); + return (0); + } + + if ((uword) rp != rp->virtual_base) + { + clib_warning ("mmap botch"); + } + + /* + * Try to fix the region mutex if it is held by + * a dead process + */ + pid_holding_region_lock = rp->mutex_owner_pid; + if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0) + { + clib_warning + ("region %s mutex held by dead pid %d, tag %d, force unlock", + rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag); + /* owner pid is nonexistent */ + rp->mutex.__data.__owner = 0; + rp->mutex.__data.__lock = 0; + dead_region_recovery = 1; + } + + if (dead_region_recovery) + clib_warning ("recovery: attempt to re-lock region"); + + region_lock (rp, 2); + oldheap = svm_push_pvt_heap (rp); + vec_add1 (rp->client_pids, getpid ()); + + if (dead_region_recovery) + clib_warning ("recovery: attempt svm_data_region_map"); + + rv = svm_data_region_map (a, rp); + if (rv) + { + clib_warning ("data_region_map: %d", rv); + } + + if (dead_region_recovery) + clib_warning ("unlock and continue"); + + region_unlock (rp); + + svm_pop_heap (oldheap); + + return ((void *) rp); + + } + return 0; /* NOTREACHED */ +} + +static void +svm_mutex_cleanup (void) +{ + int i; + for (i = 0; i < nheld; i++) + { + pthread_mutex_unlock (mutexes_held[i]); + } +} + +static void +svm_region_init_internal (svm_map_region_args_t * a) +{ + svm_region_t *rp; + u64 ticks = clib_cpu_time_now (); + uword randomize_baseva; + + /* guard against klutz calls */ + if (root_rp) + return; + + root_rp_refcount++; + + atexit (svm_mutex_cleanup); + + /* Randomize the shared-VM base at init time */ + if (MMAP_PAGESIZE <= (4 << 10)) + randomize_baseva = (ticks & 15) * MMAP_PAGESIZE; + else + randomize_baseva = (ticks & 3) * MMAP_PAGESIZE; + + a->baseva += randomize_baseva; + + rp = svm_map_region (a); + ASSERT (rp); + + region_lock (rp, 3); + + /* Set up the main region data structures */ + if (rp->flags & SVM_FLAGS_NEED_DATA_INIT) + { + svm_main_region_t *mp = 0; + void *oldheap; + + rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT); + + oldheap = svm_push_pvt_heap (rp); + vec_validate (mp, 0); + mp->name_hash = hash_create_string (0, sizeof (uword)); + mp->root_path = a->root_path ? 
format (0, "%s%c", a->root_path, 0) : 0; + rp->data_base = mp; + svm_pop_heap (oldheap); + } + region_unlock (rp); + root_rp = rp; +} + +void +svm_region_init (void) +{ + svm_map_region_args_t _a, *a = &_a; + + memset (a, 0, sizeof (*a)); + a->root_path = 0; + a->name = SVM_GLOBAL_REGION_NAME; + a->baseva = SVM_GLOBAL_REGION_BASEVA; + a->size = SVM_GLOBAL_REGION_SIZE; + a->flags = SVM_FLAGS_NODATA; + a->uid = 0; + a->gid = 0; + + svm_region_init_internal (a); +} + +void +svm_region_init_chroot (char *root_path) +{ + svm_map_region_args_t _a, *a = &_a; + + memset (a, 0, sizeof (*a)); + a->root_path = root_path; + a->name = SVM_GLOBAL_REGION_NAME; + a->baseva = SVM_GLOBAL_REGION_BASEVA; + a->size = SVM_GLOBAL_REGION_SIZE; + a->flags = SVM_FLAGS_NODATA; + a->uid = 0; + a->gid = 0; + + svm_region_init_internal (a); +} + +void +svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid) +{ + svm_map_region_args_t _a, *a = &_a; + + memset (a, 0, sizeof (*a)); + a->root_path = root_path; + a->name = SVM_GLOBAL_REGION_NAME; + a->baseva = SVM_GLOBAL_REGION_BASEVA; + a->size = SVM_GLOBAL_REGION_SIZE; + a->flags = SVM_FLAGS_NODATA; + a->uid = uid; + a->gid = gid; + + svm_region_init_internal (a); +} + +void +svm_region_init_args (svm_map_region_args_t * a) +{ + svm_region_init_internal (a); +} + +void * +svm_region_find_or_create (svm_map_region_args_t * a) +{ + svm_main_region_t *mp; + svm_region_t *rp; + uword need_nbits; + int index, i; + void *oldheap; + uword *p; + u8 *name; + svm_subregion_t *subp; + + ASSERT (root_rp); + + a->size += MMAP_PAGESIZE + + ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE); + a->size = rnd_pagesize (a->size); + + region_lock (root_rp, 4); + oldheap = svm_push_pvt_heap (root_rp); + mp = root_rp->data_base; + + ASSERT (mp); + + /* Map the named region from the correct chroot environment */ + a->root_path = (char *) mp->root_path; + + /* + * See if this region is already known. If it is, we're + * almost done... + */ + p = hash_get_mem (mp->name_hash, a->name); + + if (p) + { + rp = svm_map_region (a); + region_unlock (root_rp); + svm_pop_heap (oldheap); + return rp; + } + + /* Create the region. */ + ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size); + + need_nbits = a->size / MMAP_PAGESIZE; + + index = 1; /* $$$ fixme, figure out how many bit to really skip */ + + /* + * Scan the virtual space allocation bitmap, looking for a large + * enough chunk + */ + do + { + if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0) + { + for (i = 0; i < (need_nbits - 1); i++) + { + if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1) + { + index = index + i; + goto next; + } + } + break; + } + index++; + next:; + } + while (index < root_rp->bitmap_size); + + /* Completely out of VM? */ + if (index >= root_rp->bitmap_size) + { + clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)", + root_rp->region_name, a->size, a->size); + svm_pop_heap (oldheap); + region_unlock (root_rp); + return 0; + } + + /* + * Mark virtual space allocated + */ +#if CLIB_DEBUG > 1 + clib_warning ("set %d bits at index %d", need_nbits, index); +#endif + + for (i = 0; i < need_nbits; i++) + { + clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1); + } + + /* Place this region where it goes... 
*/ + a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE; + + rp = svm_map_region (a); + + pool_get (mp->subregions, subp); + name = format (0, "%s%c", a->name, 0); + subp->subregion_name = name; + + hash_set_mem (mp->name_hash, name, subp - mp->subregions); + + svm_pop_heap (oldheap); + + region_unlock (root_rp); + + return (rp); +} + +/* + * svm_region_unmap + * + * Let go of the indicated region. If the calling process + * is the last customer, throw it away completely. + * The root region mutex guarantees atomicity with respect to + * a new region client showing up at the wrong moment. + */ +void +svm_region_unmap (void *rp_arg) +{ + int i, mypid = getpid (); + int nclients_left; + void *oldheap; + uword virtual_base, virtual_size; + svm_region_t *rp = rp_arg; + char *name; + + /* + * If we take a signal while holding one or more shared-memory + * mutexes, we may end up back here from an otherwise + * benign exit handler. Bail out to avoid a recursive + * mutex screw-up. + */ + if (nheld) + return; + + ASSERT (rp); + ASSERT (root_rp); + + if (CLIB_DEBUG > 1) + clib_warning ("[%d] unmap region %s", getpid (), rp->region_name); + + region_lock (root_rp, 5); + region_lock (rp, 6); + + oldheap = svm_push_pvt_heap (rp); /* nb vec_delete() in the loop */ + + /* Remove the caller from the list of mappers */ + for (i = 0; i < vec_len (rp->client_pids); i++) + { + if (rp->client_pids[i] == mypid) + { + vec_delete (rp->client_pids, 1, i); + goto found; + } + } + clib_warning ("pid %d AWOL", mypid); + +found: + + svm_pop_heap (oldheap); + + nclients_left = vec_len (rp->client_pids); + virtual_base = rp->virtual_base; + virtual_size = rp->virtual_size; + + if (nclients_left == 0) + { + int index, nbits, i; + svm_main_region_t *mp; + uword *p; + svm_subregion_t *subp; + + /* Kill the region, last guy on his way out */ + + oldheap = svm_push_pvt_heap (root_rp); + name = vec_dup (rp->region_name); + + virtual_base = rp->virtual_base; + virtual_size = rp->virtual_size; + + /* Figure out which bits to clear in the root region bitmap */ + index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE; + + nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE; + +#if CLIB_DEBUG > 1 + clib_warning ("clear %d bits at index %d", nbits, index); +#endif + /* Give back the allocated VM */ + for (i = 0; i < nbits; i++) + { + clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0); + } + + mp = root_rp->data_base; + + p = hash_get_mem (mp->name_hash, name); + + /* Better never happen ... */ + if (p == NULL) + { + region_unlock (rp); + region_unlock (root_rp); + svm_pop_heap (oldheap); + clib_warning ("Region name '%s' not found?", name); + return; + } + + /* Remove from the root region subregion pool */ + subp = mp->subregions + p[0]; + pool_put (mp->subregions, subp); + + hash_unset_mem (mp->name_hash, name); + + vec_free (name); + + region_unlock (rp); + shm_unlink (rp->region_name); + munmap ((void *) virtual_base, virtual_size); + region_unlock (root_rp); + svm_pop_heap (oldheap); + return; + } + + region_unlock (rp); + region_unlock (root_rp); + + munmap ((void *) virtual_base, virtual_size); +} + +/* + * svm_region_exit + * There is no clean way to unlink the + * root region when all clients go away, + * so remove the pid entry and call it a day. + */ +void +svm_region_exit () +{ + void *oldheap; + int i, mypid = getpid (); + uword virtual_base, virtual_size; + + /* It felt so nice we did it twice... 
*/ + if (root_rp == 0) + return; + + if (--root_rp_refcount > 0) + return; + + /* + * If we take a signal while holding one or more shared-memory + * mutexes, we may end up back here from an otherwise + * benign exit handler. Bail out to avoid a recursive + * mutex screw-up. + */ + if (nheld) + return; + + region_lock (root_rp, 7); + oldheap = svm_push_pvt_heap (root_rp); + + virtual_base = root_rp->virtual_base; + virtual_size = root_rp->virtual_size; + + for (i = 0; i < vec_len (root_rp->client_pids); i++) + { + if (root_rp->client_pids[i] == mypid) + { + vec_delete (root_rp->client_pids, 1, i); + goto found; + } + } + clib_warning ("pid %d AWOL", mypid); + +found: + + region_unlock (root_rp); + svm_pop_heap (oldheap); + + root_rp = 0; + munmap ((void *) virtual_base, virtual_size); +} + +void +svm_client_scan_this_region_nolock (svm_region_t * rp) +{ + int j; + int mypid = getpid (); + void *oldheap; + + for (j = 0; j < vec_len (rp->client_pids); j++) + { + if (mypid == rp->client_pids[j]) + continue; + if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0)) + { + clib_warning ("%s: cleanup ghost pid %d", + rp->region_name, rp->client_pids[j]); + /* nb: client vec in rp->region_heap */ + oldheap = svm_push_pvt_heap (rp); + vec_delete (rp->client_pids, 1, j); + j--; + svm_pop_heap (oldheap); + } + } +} + + +/* + * Scan svm regions for dead clients + */ +void +svm_client_scan (char *root_path) +{ + int i, j; + svm_main_region_t *mp; + svm_map_region_args_t *a = 0; + svm_region_t *root_rp; + svm_region_t *rp; + svm_subregion_t *subp; + u8 *name = 0; + u8 **svm_names = 0; + void *oldheap; + int mypid = getpid (); + + vec_validate (a, 0); + + svm_region_init_chroot (root_path); + + root_rp = svm_get_root_rp (); + + pthread_mutex_lock (&root_rp->mutex); + + mp = root_rp->data_base; + + for (j = 0; j < vec_len (root_rp->client_pids); j++) + { + if (mypid == root_rp->client_pids[j]) + continue; + if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0)) + { + clib_warning ("%s: cleanup ghost pid %d", + root_rp->region_name, root_rp->client_pids[j]); + /* nb: client vec in root_rp->region_heap */ + oldheap = svm_push_pvt_heap (root_rp); + vec_delete (root_rp->client_pids, 1, j); + j--; + svm_pop_heap (oldheap); + } + } + + /* + * Snapshoot names, can't hold root rp mutex across + * find_or_create. + */ + /* *INDENT-OFF* */ + pool_foreach (subp, mp->subregions, ({ + name = vec_dup (subp->subregion_name); + vec_add1(svm_names, name); + })); + /* *INDENT-ON* */ + + pthread_mutex_unlock (&root_rp->mutex); + + for (i = 0; i < vec_len (svm_names); i++) + { + vec_validate (a, 0); + a->root_path = root_path; + a->name = (char *) svm_names[i]; + rp = svm_region_find_or_create (a); + if (rp) + { + pthread_mutex_lock (&rp->mutex); + + svm_client_scan_this_region_nolock (rp); + + pthread_mutex_unlock (&rp->mutex); + svm_region_unmap (rp); + vec_free (svm_names[i]); + } + vec_free (a); + } + vec_free (svm_names); + + svm_region_exit (); + + vec_free (a); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svm.h b/src/svm/svm.h new file mode 100644 index 00000000..0b87dbcb --- /dev/null +++ b/src/svm/svm.h @@ -0,0 +1,207 @@ +/* + *------------------------------------------------------------------ + * svm.h - shared VM allocation, mmap(...MAP_FIXED...) + * brain police + * + * Copyright (c) 2009 Cisco and/or its affiliates. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#ifndef __included_svm_h__ +#define __included_svm_h__ + +#include +#include +#include + +#define MMAP_PAGESIZE (clib_mem_get_page_size()) + +#define SVM_VERSION ((1<<16) | 1) /* set to declare region ready. */ + +#define SVM_FLAGS_MHEAP (1<<0) /* region contains an mheap */ +#define SVM_FLAGS_FILE (1<<1) /* region backed by one or more files */ +#define SVM_FLAGS_NODATA (1<<2) /* region will be further subdivided */ +#define SVM_FLAGS_NEED_DATA_INIT (1<<3) + +#define SVM_PVT_MHEAP_SIZE (128<<10) /* region's private mheap (128k) */ + +typedef struct svm_region_ +{ + volatile uword version; + pthread_mutex_t mutex; + pthread_cond_t condvar; + int mutex_owner_pid; /* in case of trouble */ + int mutex_owner_tag; + uword flags; + uword virtual_base; /* base of the region object */ + uword virtual_size; + void *region_heap; + void *data_base; /* data portion base address */ + void *data_heap; /* data heap, if any */ + volatile void *user_ctx; /* user context pointer */ + /* stuff allocated in the region's heap */ + uword bitmap_size; /* nbits in virtual alloc bitmap */ + uword *bitmap; /* the bitmap */ + char *region_name; + char *backing_file; + char **filenames; + uword *client_pids; + /* pad */ + + /* next page: + * (64K) clib heap for the region itself + * + * data_base -> whatever is in this region + */ + +} svm_region_t; + +typedef struct svm_map_region_args_ +{ + char *root_path; /* NULL means use the truly global arena */ + char *name; + u64 baseva; + u64 size; + u64 pvt_heap_size; + uword flags; + char *backing_file; + uword backing_mmap_size; + /* uid, gid to own the svm region(s) */ + int uid; + int gid; +} svm_map_region_args_t; + + +/* + * Memory shared across all router instances. Packet buffers, etc + * Base should be "out of the way," and size should be big enough to + * cover everything we plan to put here. + */ +#define SVM_GLOBAL_REGION_BASEVA 0x30000000 +#define SVM_GLOBAL_REGION_SIZE (64<<20) +#define SVM_GLOBAL_REGION_NAME "/global_vm" + +/* + * Memory shared across individual router instances. 
+ */ +#define SVM_OVERLAY_REGION_BASEVA \ + (SVM_GLOBAL_REGION_BASEVA + SVM_GLOBAL_REGION_SIZE) +#define SVM_OVERLAY_REGION_SIZE (1<<20) +#define SVM_OVERLAY_REGION_BASENAME "/overlay_vm" + +typedef struct +{ + u8 *subregion_name; +} svm_subregion_t; + +typedef struct +{ + svm_subregion_t *subregions; /* subregion pool */ + uword *name_hash; + u8 *root_path; +} svm_main_region_t; + + +void *svm_region_find_or_create (svm_map_region_args_t * a); +void svm_region_init (void); +void svm_region_init_chroot (char *root_path); +void svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid); +void svm_region_init_args (svm_map_region_args_t * a); +void svm_region_exit (void); +void svm_region_unmap (void *rp_arg); +void svm_client_scan (char *root_path); +void svm_client_scan_this_region_nolock (svm_region_t * rp); +u8 *shm_name_from_svm_map_region_args (svm_map_region_args_t * a); + +static inline void * +svm_mem_alloc (svm_region_t * rp, uword size) +{ + u8 *oldheap; + ASSERT (rp->flags & SVM_FLAGS_MHEAP); + u8 *rv; + + pthread_mutex_lock (&rp->mutex); + oldheap = clib_mem_set_heap (rp->data_heap); + rv = clib_mem_alloc (size); + clib_mem_set_heap (oldheap); + pthread_mutex_unlock (&rp->mutex); + return (rv); +} + +static inline void * +svm_mem_alloc_aligned_at_offset (svm_region_t * rp, + uword size, uword align, uword offset) +{ + u8 *oldheap; + ASSERT (rp->flags & SVM_FLAGS_MHEAP); + u8 *rv; + + pthread_mutex_lock (&rp->mutex); + oldheap = clib_mem_set_heap (rp->data_heap); + rv = clib_mem_alloc_aligned_at_offset (size, align, offset, + 1 /* yes, call os_out_of_memory */ ); + clib_mem_set_heap (oldheap); + pthread_mutex_unlock (&rp->mutex); + return (rv); +} + +static inline void +svm_mem_free (svm_region_t * rp, void *ptr) +{ + u8 *oldheap; + ASSERT (rp->flags & SVM_FLAGS_MHEAP); + + pthread_mutex_lock (&rp->mutex); + oldheap = clib_mem_set_heap (rp->data_heap); + clib_mem_free (ptr); + clib_mem_set_heap (oldheap); + pthread_mutex_unlock (&rp->mutex); + +} + +static inline void * +svm_push_pvt_heap (svm_region_t * rp) +{ + u8 *oldheap; + oldheap = clib_mem_set_heap (rp->region_heap); + return ((void *) oldheap); +} + +static inline void * +svm_push_data_heap (svm_region_t * rp) +{ + u8 *oldheap; + oldheap = clib_mem_set_heap (rp->data_heap); + return ((void *) oldheap); +} + +static inline void +svm_pop_heap (void *oldheap) +{ + clib_mem_set_heap (oldheap); +} + +u8 *format_svm_region (u8 * s, va_list * args); + +svm_region_t *svm_get_root_rp (void); + +#endif /* __included_svm_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svm_test.c b/src/svm/svm_test.c new file mode 100644 index 00000000..ab0b9e24 --- /dev/null +++ b/src/svm/svm_test.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* + *------------------------------------------------------------------ + * svm_test.c -- brain police + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "svm.h" + + +int +main (int argc, char **argv) +{ + svm_region_t *root_rp, *rp; + svm_map_region_args_t *a = 0; + + vec_validate (a, 0); + + root_rp = svm_region_init (); + + ASSERT (root_rp); + + a->name = "/qvnet"; + a->size = (4 << 10); + + rp = svm_region_find_or_create (root_rp, a); + + ASSERT (rp); + + *((u32 *) rp->data_base) = 0xdeadbeef; + svm_region_unmap (root_rp, rp); + + fformat (stdout, "exiting...\n"); + + exit (0); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svmdb.c b/src/svm/svmdb.c new file mode 100644 index 00000000..03dfe7c3 --- /dev/null +++ b/src/svm/svmdb.c @@ -0,0 +1,671 @@ +/* + *------------------------------------------------------------------ + * svmdb.c -- simple shared memory database + * + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "svmdb.h" + +static void local_set_variable_nolock (svmdb_client_t * client, + svmdb_namespace_t namespace, + u8 * var, u8 * val, u32 elsize); + +always_inline void +region_lock (svm_region_t * rp, int tag) +{ + pthread_mutex_lock (&rp->mutex); +#ifdef MUTEX_DEBUG + rp->mutex_owner_pid = getpid (); + rp->mutex_owner_tag = tag; +#endif +} + +always_inline void +region_unlock (svm_region_t * rp) +{ +#ifdef MUTEX_DEBUG + rp->mutex_owner_pid = 0; + rp->mutex_owner_tag = 0; +#endif + pthread_mutex_unlock (&rp->mutex); +} + +svmdb_client_t * +svmdb_map (svmdb_map_args_t * dba) +{ + svmdb_client_t *client = 0; + svm_map_region_args_t *a = 0; + svm_region_t *db_rp; + void *oldheap; + svmdb_shm_hdr_t *hp = 0; + + vec_validate (client, 0); + vec_validate (a, 0); + + svm_region_init_chroot_uid_gid (dba->root_path, dba->uid, dba->gid); + + a->root_path = dba->root_path; + a->name = "/db"; + a->size = dba->size ? dba->size : SVMDB_DEFAULT_SIZE; + a->flags = SVM_FLAGS_MHEAP; + a->uid = dba->uid; + a->gid = dba->gid; + + db_rp = client->db_rp = svm_region_find_or_create (a); + + ASSERT (db_rp); + + vec_free (a); + + region_lock (client->db_rp, 10); + /* Has someone else set up the shared-memory variable table? 
*/ + if (db_rp->user_ctx) + { + client->shm = (void *) db_rp->user_ctx; + client->pid = getpid (); + region_unlock (client->db_rp); + ASSERT (client->shm->version == SVMDB_SHM_VERSION); + return (client); + } + /* Nope, it's our problem... */ + + /* Add a bogus client (pid=0) so the svm won't be deallocated */ + oldheap = svm_push_pvt_heap (db_rp); + vec_add1 (client->db_rp->client_pids, 0); + svm_pop_heap (oldheap); + + oldheap = svm_push_data_heap (db_rp); + + vec_validate (hp, 0); + hp->version = SVMDB_SHM_VERSION; + hp->namespaces[SVMDB_NAMESPACE_STRING] + = hash_create_string (0, sizeof (uword)); + hp->namespaces[SVMDB_NAMESPACE_VEC] + = hash_create_string (0, sizeof (uword)); + + db_rp->user_ctx = hp; + client->shm = hp; + + svm_pop_heap (oldheap); + region_unlock (client->db_rp); + client->pid = getpid (); + + return (client); +} + +void +svmdb_unmap (svmdb_client_t * client) +{ + ASSERT (client); + + if (!svm_get_root_rp ()) + return; + + svm_region_unmap ((void *) client->db_rp); + svm_region_exit (); + vec_free (client); +} + +static void +notify_value (svmdb_value_t * v, svmdb_action_t a) +{ + int i; + int rv; + union sigval sv; + u32 value; + u32 *dead_registrations = 0; + + svmdb_notify_t *np; + + for (i = 0; i < vec_len (v->notifications); i++) + { + np = vec_elt_at_index (v->notifications, i); + if (np->action == a) + { + value = (np->action << 28) | (np->opaque); + sv.sival_ptr = (void *) (uword) value; + do + { + rv = 0; + if (sigqueue (np->pid, np->signum, sv) == 0) + break; + rv = errno; + } + while (rv == EAGAIN); + if (rv == 0) + continue; + vec_add1 (dead_registrations, i); + } + } + + for (i = 0; i < vec_len (dead_registrations); i++) + { + np = vec_elt_at_index (v->notifications, dead_registrations[i]); + clib_warning ("dead reg pid %d sig %d action %d opaque %x", + np->pid, np->signum, np->action, np->opaque); + vec_delete (v->notifications, 1, dead_registrations[i]); + } + vec_free (dead_registrations); +} + +int +svmdb_local_add_del_notification (svmdb_client_t * client, + svmdb_notification_args_t * a) +{ + uword *h; + void *oldheap; + hash_pair_t *hp; + svmdb_shm_hdr_t *shm; + u8 *dummy_value = 0; + svmdb_value_t *value; + svmdb_notify_t *np; + int i; + int rv = 0; + + ASSERT (a->elsize); + + region_lock (client->db_rp, 18); + shm = client->shm; + oldheap = svm_push_data_heap (client->db_rp); + + h = shm->namespaces[a->nspace]; + + hp = hash_get_pair_mem (h, a->var); + if (hp == 0) + { + local_set_variable_nolock (client, a->nspace, (u8 *) a->var, + dummy_value, a->elsize); + /* might have moved */ + h = shm->namespaces[a->nspace]; + hp = hash_get_pair_mem (h, a->var); + ASSERT (hp); + } + + value = pool_elt_at_index (shm->values, hp->value[0]); + + for (i = 0; i < vec_len (value->notifications); i++) + { + np = vec_elt_at_index (value->notifications, i); + if ((np->pid == client->pid) + && (np->signum == a->signum) + && (np->action == a->action) && (np->opaque == a->opaque)) + { + if (a->add_del == 0 /* delete */ ) + { + vec_delete (value->notifications, 1, i); + goto out; + } + else + { /* add */ + clib_warning + ("%s: ignore dup reg pid %d signum %d action %d opaque %x", + a->var, client->pid, a->signum, a->action, a->opaque); + rv = -2; + goto out; + } + } + } + if (a->add_del == 0) + { + rv = -3; + goto out; + } + + vec_add2 (value->notifications, np, 1); + np->pid = client->pid; + np->signum = a->signum; + np->action = a->action; + np->opaque = a->opaque; + +out: + svm_pop_heap (oldheap); + region_unlock (client->db_rp); + return rv; +} + + +static void 
+local_unset_variable_nolock (svmdb_client_t * client, + svmdb_namespace_t namespace, char *var) +{ + uword *h; + svmdb_value_t *oldvalue; + hash_pair_t *hp; + + h = client->shm->namespaces[namespace]; + hp = hash_get_pair_mem (h, var); + if (hp) + { + oldvalue = pool_elt_at_index (client->shm->values, hp->value[0]); + if (vec_len (oldvalue->notifications)) + notify_value (oldvalue, SVMDB_ACTION_UNSET); + /* zero length value means unset */ + _vec_len (oldvalue->value) = 0; + } + client->shm->namespaces[namespace] = h; +} + +void +svmdb_local_unset_string_variable (svmdb_client_t * client, char *var) +{ + void *oldheap; + + region_lock (client->db_rp, 11); + oldheap = svm_push_data_heap (client->db_rp); + local_unset_variable_nolock (client, SVMDB_NAMESPACE_STRING, var); + svm_pop_heap (oldheap); + region_unlock (client->db_rp); +} + +static void +local_set_variable_nolock (svmdb_client_t * client, + svmdb_namespace_t namespace, + u8 * var, u8 * val, u32 elsize) +{ + uword *h; + hash_pair_t *hp; + u8 *name; + svmdb_shm_hdr_t *shm; + + shm = client->shm; + h = shm->namespaces[namespace]; + hp = hash_get_pair_mem (h, var); + if (hp) + { + svmdb_value_t *oldvalue; + oldvalue = pool_elt_at_index (client->shm->values, hp->value[0]); + vec_alloc (oldvalue->value, vec_len (val) * elsize); + clib_memcpy (oldvalue->value, val, vec_len (val) * elsize); + _vec_len (oldvalue->value) = vec_len (val); + notify_value (oldvalue, SVMDB_ACTION_SET); + } + else + { + svmdb_value_t *newvalue; + pool_get (shm->values, newvalue); + memset (newvalue, 0, sizeof (*newvalue)); + newvalue->elsize = elsize; + vec_alloc (newvalue->value, vec_len (val) * elsize); + clib_memcpy (newvalue->value, val, vec_len (val) * elsize); + _vec_len (newvalue->value) = vec_len (val); + name = format (0, "%s%c", var, 0); + hash_set_mem (h, name, newvalue - shm->values); + } + shm->namespaces[namespace] = h; +} + +void +svmdb_local_set_string_variable (svmdb_client_t * client, + char *var, char *val) +{ + void *oldheap; + + region_lock (client->db_rp, 12); + oldheap = svm_push_data_heap (client->db_rp); + + local_unset_variable_nolock (client, SVMDB_NAMESPACE_STRING, var); + + local_set_variable_nolock (client, SVMDB_NAMESPACE_STRING, + (u8 *) var, (u8 *) val, 1 /* elsize */ ); + svm_pop_heap (oldheap); + region_unlock (client->db_rp); +} + +static u8 * +local_get_variable_nolock (svmdb_client_t * client, + svmdb_namespace_t namespace, u8 * var) +{ + uword *h; + uword *p; + svmdb_shm_hdr_t *shm; + svmdb_value_t *oldvalue; + + shm = client->shm; + h = shm->namespaces[namespace]; + p = hash_get_mem (h, var); + if (p) + { + oldvalue = pool_elt_at_index (shm->values, p[0]); + notify_value (oldvalue, SVMDB_ACTION_GET); + return (oldvalue->value); + } + return 0; +} + +void * +svmdb_local_get_variable_reference (svmdb_client_t * client, + svmdb_namespace_t namespace, char *var) +{ + u8 *rv; + + region_lock (client->db_rp, 19); + rv = local_get_variable_nolock (client, namespace, (u8 *) var); + region_unlock (client->db_rp); + return (void *) rv; +} + +char * +svmdb_local_get_string_variable (svmdb_client_t * client, char *var) +{ + u8 *rv = 0; + + region_lock (client->db_rp, 13); + rv = local_get_variable_nolock (client, SVMDB_NAMESPACE_STRING, (u8 *) var); + + if (rv && vec_len (rv)) + { + rv = format (0, "%s", rv); + vec_add1 (rv, 0); + } + region_unlock (client->db_rp); + return ((char *) rv); +} + +void +svmdb_local_dump_strings (svmdb_client_t * client) +{ + uword *h; + u8 *key; + u32 value; + svmdb_shm_hdr_t *shm = client->shm; + + 
region_lock (client->db_rp, 14); + + h = client->shm->namespaces[SVMDB_NAMESPACE_STRING]; + + /* *INDENT-OFF* */ + hash_foreach_mem(key, value, h, + ({ + svmdb_value_t *v = pool_elt_at_index (shm->values, value); + + fformat(stdout, "%s: %s\n", key, + vec_len(v->value) ? v->value : (u8 *)"(nil)"); + })); + /* *INDENT-ON* */ + region_unlock (client->db_rp); +} + +int +svmdb_local_serialize_strings (svmdb_client_t * client, char *filename) +{ + uword *h; + u8 *key; + u32 value; + svmdb_shm_hdr_t *shm = client->shm; + serialize_main_t _sm, *sm = &_sm; + clib_error_t *error = 0; + u8 *sanitized_name = 0; + int fd = 0; + + if (strstr (filename, "..") || index (filename, '/')) + { + error = clib_error_return (0, "Illegal characters in filename '%s'", + filename); + goto out; + } + + sanitized_name = format (0, "/tmp/%s%c", filename, 0); + + fd = creat ((char *) sanitized_name, 0644); + + if (fd < 0) + { + error = clib_error_return_unix (0, "Create '%s'", sanitized_name); + goto out; + } + + serialize_open_unix_file_descriptor (sm, fd); + + region_lock (client->db_rp, 20); + + h = client->shm->namespaces[SVMDB_NAMESPACE_STRING]; + + serialize_likely_small_unsigned_integer (sm, hash_elts (h)); + + /* *INDENT-OFF* */ + hash_foreach_mem(key, value, h, + ({ + svmdb_value_t *v = pool_elt_at_index (shm->values, value); + + /* Omit names with nil values */ + if (vec_len(v->value)) + { + serialize_cstring (sm, (char *)key); + serialize_cstring (sm, (char *)v->value); + } + })); + /* *INDENT-ON* */ + region_unlock (client->db_rp); + + serialize_close (sm); + +out: + if (fd > 0 && close (fd) < 0) + error = clib_error_return_unix (0, "close fd %d", fd); + + if (error) + { + clib_error_report (error); + return -1; + } + return 0; +} + +int +svmdb_local_unserialize_strings (svmdb_client_t * client, char *filename) +{ + serialize_main_t _sm, *sm = &_sm; + void *oldheap; + clib_error_t *error = 0; + u8 *key, *value; + int fd = 0; + u32 nelts; + int i; + + fd = open (filename, O_RDONLY); + + if (fd < 0) + { + error = clib_error_return_unix (0, "Failed to open '%s'", filename); + goto out; + } + + unserialize_open_unix_file_descriptor (sm, fd); + + region_lock (client->db_rp, 21); + oldheap = svm_push_data_heap (client->db_rp); + + nelts = unserialize_likely_small_unsigned_integer (sm); + + for (i = 0; i < nelts; i++) + { + unserialize_cstring (sm, (char **) &key); + unserialize_cstring (sm, (char **) &value); + local_set_variable_nolock (client, SVMDB_NAMESPACE_STRING, + key, value, 1 /* elsize */ ); + vec_free (key); + vec_free (value); + } + svm_pop_heap (oldheap); + region_unlock (client->db_rp); + + serialize_close (sm); + +out: + if (fd > 0 && close (fd) < 0) + error = clib_error_return_unix (0, "close fd %d", fd); + + if (error) + { + clib_error_report (error); + return -1; + } + return 0; +} + +void +svmdb_local_unset_vec_variable (svmdb_client_t * client, char *var) +{ + void *oldheap; + + region_lock (client->db_rp, 15); + oldheap = svm_push_data_heap (client->db_rp); + local_unset_variable_nolock (client, SVMDB_NAMESPACE_VEC, var); + svm_pop_heap (oldheap); + region_unlock (client->db_rp); +} + +void +svmdb_local_set_vec_variable (svmdb_client_t * client, + char *var, void *val_arg, u32 elsize) +{ + u8 *val = (u8 *) val_arg; + void *oldheap; + + region_lock (client->db_rp, 16); + oldheap = svm_push_data_heap (client->db_rp); + + local_unset_variable_nolock (client, SVMDB_NAMESPACE_VEC, var); + local_set_variable_nolock (client, SVMDB_NAMESPACE_VEC, (u8 *) var, + val, elsize); + + svm_pop_heap 
(oldheap); + region_unlock (client->db_rp); +} + +void * +svmdb_local_get_vec_variable (svmdb_client_t * client, char *var, u32 elsize) +{ + u8 *rv = 0; + u8 *copy = 0; + + region_lock (client->db_rp, 17); + + rv = local_get_variable_nolock (client, SVMDB_NAMESPACE_VEC, (u8 *) var); + + if (rv && vec_len (rv)) + { + /* Make a copy in process-local memory */ + vec_alloc (copy, vec_len (rv) * elsize); + clib_memcpy (copy, rv, vec_len (rv) * elsize); + _vec_len (copy) = vec_len (rv); + region_unlock (client->db_rp); + return (copy); + } + region_unlock (client->db_rp); + return (0); +} + +void +svmdb_local_dump_vecs (svmdb_client_t * client) +{ + uword *h; + u8 *key; + u32 value; + svmdb_shm_hdr_t *shm; + + region_lock (client->db_rp, 17); + shm = client->shm; + + h = client->shm->namespaces[SVMDB_NAMESPACE_VEC]; + + /* *INDENT-OFF* */ + hash_foreach_mem(key, value, h, + ({ + svmdb_value_t *v = pool_elt_at_index (shm->values, value); + (void) fformat(stdout, "%s:\n %U (%.2f)\n", key, + format_hex_bytes, v->value, + vec_len(v->value)*v->elsize, ((f64 *)(v->value))[0]); + })); + /* *INDENT-ON* */ + + region_unlock (client->db_rp); +} + +void * +svmdb_local_find_or_add_vec_variable (svmdb_client_t * client, + char *var, u32 nbytes) +{ + void *oldheap; + u8 *rv = 0; + + region_lock (client->db_rp, 18); + oldheap = svm_push_data_heap (client->db_rp); + + rv = local_get_variable_nolock (client, SVMDB_NAMESPACE_VEC, (u8 *) var); + + if (rv) + { + goto out; + } + else + { + uword *h; + u8 *name; + svmdb_shm_hdr_t *shm; + svmdb_value_t *newvalue; + + shm = client->shm; + h = shm->namespaces[SVMDB_NAMESPACE_VEC]; + + pool_get (shm->values, newvalue); + memset (newvalue, 0, sizeof (*newvalue)); + newvalue->elsize = 1; + vec_alloc (newvalue->value, nbytes); + _vec_len (newvalue->value) = nbytes; + name = format (0, "%s%c", var, 0); + hash_set_mem (h, name, newvalue - shm->values); + shm->namespaces[SVMDB_NAMESPACE_VEC] = h; + rv = newvalue->value; + } + +out: + svm_pop_heap (oldheap); + region_unlock (client->db_rp); + return (rv); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svmdb.h b/src/svm/svmdb.h new file mode 100644 index 00000000..e02628a0 --- /dev/null +++ b/src/svm/svmdb.h @@ -0,0 +1,135 @@ +/* + *------------------------------------------------------------------ + * svmdb.h - shared VM database + * + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
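The vec namespace above stores binary values with a per-element size rather than C strings, and svmdb_local_get_vec_variable always hands the caller a process-local copy so no shared-memory pointers escape the lock. A hedged sketch of publishing and reading back a single f64, mirroring svmdbtool's vlib_vector_rate usage (the value is illustrative):

#include "svmdb.h"

static void
vec_variable_demo (svmdb_client_t * client)
{
  f64 rate = 1234.5;            /* illustrative value */
  f64 *tv = 0;
  f64 *copy;

  /* Publish a 1-element f64 vector; elsize is the per-element byte count */
  vec_add1 (tv, rate);
  svmdb_local_set_vec_variable (client, "vlib_vector_rate",
                                tv, sizeof (tv[0]));
  vec_free (tv);

  /* Read back a private copy sized vec_len * elsize; free it when done */
  copy = svmdb_local_get_vec_variable (client, "vlib_vector_rate",
                                       sizeof (copy[0]));
  if (copy)
    {
      fformat (stdout, "vector rate %.2f\n", copy[0]);
      vec_free (copy);
    }
}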
+ *------------------------------------------------------------------ + */ + +#ifndef __included_svmdb_h__ +#define __included_svmdb_h__ + +#include "svm.h" + +typedef enum +{ + SVMDB_ACTION_ILLEGAL = 0, + SVMDB_ACTION_GET, /* not clear why anyone would care */ + SVMDB_ACTION_SET, + SVMDB_ACTION_UNSET, +} svmdb_action_t; + +typedef struct +{ + int pid; + int signum; + u32 action:4; + u32 opaque:28; +} svmdb_notify_t; + +typedef struct +{ + u8 *value; + svmdb_notify_t *notifications; + u32 elsize; +} svmdb_value_t; + +typedef enum +{ + SVMDB_NAMESPACE_STRING = 0, + SVMDB_NAMESPACE_VEC, + SVMDB_N_NAMESPACES, +} svmdb_namespace_t; + +typedef struct +{ + uword version; + /* pool of values */ + svmdb_value_t *values; + uword *namespaces[SVMDB_N_NAMESPACES]; +} svmdb_shm_hdr_t; + +#define SVMDB_SHM_VERSION 2 + +typedef struct +{ + int flags; + int pid; + svm_region_t *db_rp; + svmdb_shm_hdr_t *shm; +} svmdb_client_t; + +typedef struct +{ + int add_del; + svmdb_namespace_t nspace; + char *var; + u32 elsize; + int signum; + u32 action:4; + u32 opaque:28; +} svmdb_notification_args_t; + +typedef struct +{ + char *root_path; + uword size; + u32 uid; + u32 gid; +} svmdb_map_args_t; + +/* + * Must be a reasonable number, several mb smaller than + * SVM_GLOBAL_REGION_SIZE, or no donut for you... + */ +#define SVMDB_DEFAULT_SIZE (4<<20) + +svmdb_client_t *svmdb_map (svmdb_map_args_t *); + +void svmdb_unmap (svmdb_client_t * client); +void svmdb_local_unset_string_variable (svmdb_client_t * client, char *var); +void svmdb_local_set_string_variable (svmdb_client_t * client, + char *var, char *val); +char *svmdb_local_get_string_variable (svmdb_client_t * client, char *var); +void *svmdb_local_get_variable_reference (svmdb_client_t * client, + svmdb_namespace_t ns, char *var); + +void svmdb_local_dump_strings (svmdb_client_t * client); + +void svmdb_local_unset_vec_variable (svmdb_client_t * client, char *var); +void svmdb_local_set_vec_variable (svmdb_client_t * client, + char *var, void *val, u32 elsize); +void *svmdb_local_get_vec_variable (svmdb_client_t * client, char *var, + u32 elsize); +void svmdb_local_dump_vecs (svmdb_client_t * client); + +int svmdb_local_add_del_notification (svmdb_client_t * client, + svmdb_notification_args_t * args); + +void *svmdb_local_find_or_add_vec_variable (svmdb_client_t * client, + char *var, u32 nbytes); + +int svmdb_local_serialize_strings (svmdb_client_t * client, char *filename); +int svmdb_local_unserialize_strings (svmdb_client_t * client, char *filename); + + +#endif /* __included_svmdb_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svmdbtool.c b/src/svm/svmdbtool.c new file mode 100644 index 00000000..a0af15fc --- /dev/null +++ b/src/svm/svmdbtool.c @@ -0,0 +1,537 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
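The header declarations above pack a registration's 4-bit action and 28-bit opaque cookie into one u32; notify_value() in svmdb.c carries the same packing in the sigval delivered by sigqueue(), so an SA_SIGINFO handler can recover both with a shift and a mask. A sketch of registering for SET events on a string variable (signal number and cookie are illustrative, not part of the patch):

#include <signal.h>
#include "svmdb.h"

static void
set_notification_handler (int signum, siginfo_t * si, void *unused)
{
  u32 packed = (u32) (uword) si->si_ptr;
  u32 action = packed >> 28;            /* svmdb_action_t */
  u32 opaque = packed & 0x0FFFFFFF;     /* cookie from the registration */

  clib_warning ("sig %d action %d opaque 0x%x", signum, action, opaque);
}

static int
watch_string_variable (svmdb_client_t * client, char *var)
{
  struct sigaction sa;
  svmdb_notification_args_t args;

  memset (&sa, 0, sizeof (sa));
  sa.sa_sigaction = set_notification_handler;
  sa.sa_flags = SA_SIGINFO;
  if (sigaction (SIGUSR2, &sa, 0) < 0)
    return -1;

  memset (&args, 0, sizeof (args));
  args.add_del = 1;                     /* 1 = add, 0 = delete */
  args.nspace = SVMDB_NAMESPACE_STRING;
  args.var = var;
  args.elsize = 1;
  args.signum = SIGUSR2;
  args.action = SVMDB_ACTION_SET;
  args.opaque = 0x123;                  /* illustrative cookie */

  return svmdb_local_add_del_notification (client, &args);
}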
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "svmdb.h" + +typedef struct +{ + svmdb_map_args_t map_args; + int uid, gid; + uword size; +} svmdbtool_main_t; + +svmdbtool_main_t svmdbtool_main; + +static inline svmdb_map_args_t * +map_arg_setup (char *chroot_path) +{ + svmdbtool_main_t *sm = &svmdbtool_main; + svmdb_map_args_t *ma = &sm->map_args; + + memset (ma, 0, sizeof (*ma)); + ma->root_path = chroot_path; + ma->size = sm->size; + ma->uid = sm->uid; + ma->gid = sm->gid; + return ma; +} + +static void +get_string (char *chroot_path, u8 * vbl) +{ + svmdb_client_t *c; + char *rv; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + rv = svmdb_local_get_string_variable (c, (char *) vbl); + + fformat (stdout, "%s\n", rv ? rv : "UNSET"); + vec_free (rv); + svmdb_unmap (c); +} + +static void +set_string (char *chroot_path, u8 * vbl, u8 * value) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + svmdb_local_set_string_variable (c, (char *) vbl, (char *) value); + svmdb_unmap (c); +} + +static void +unset_string (char *chroot_path, u8 * vbl) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + svmdb_local_unset_string_variable (c, (char *) vbl); + svmdb_unmap (c); +} + +static void +dump_strings (char *chroot_path) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + svmdb_local_dump_strings (c); + svmdb_unmap (c); +} + +static void +serialize_strings (char *chroot_path, char *filename) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + (void) svmdb_local_serialize_strings (c, filename); + svmdb_unmap (c); +} + +static void +unserialize_strings (char *chroot_path, char *filename) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + (void) svmdb_local_unserialize_strings (c, filename); + svmdb_unmap (c); +} + +static void +test_vlib_vec_rate (char *chroot_path, f64 vr) +{ + svmdb_client_t *c; + f64 *tv = 0; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + vec_add1 (tv, vr); + + svmdb_local_set_vec_variable (c, "vlib_vector_rate", (char *) tv, + sizeof (*tv)); + svmdb_unmap (c); + + vec_free (tv); +} + + + +static void +test_vec (char *chroot_path, u8 * vbl) +{ + svmdb_client_t *c; + u64 *tv = 0; + int i; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + /* my amp goes to 11 */ + for (i = 0; i < 11; i++) + { + vec_add1 (tv, i); + } + + svmdb_local_set_vec_variable (c, (char *) vbl, (char *) tv, sizeof (tv[0])); + svmdb_unmap (c); + + vec_free (tv); +} + +static void +fake_install (char *chroot_path, u8 * add_value) +{ + svmdb_client_t *c; + u8 *v = 0; + u8 **values = 0; + u8 *oldvalue; + u8 *value; + int nitems = 0, i; + serialize_main_t m; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + oldvalue = svmdb_local_get_vec_variable (c, "installed_sw", 1); + if (oldvalue) + { + unserialize_open_data (&m, oldvalue, vec_len (oldvalue)); + nitems = unserialize_likely_small_unsigned_integer (&m); + for (i = 0; i < nitems; i++) + { + 
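The serialize-strings / unserialize-strings helpers above are thin wrappers around the snapshot routines in svmdb.c: the writer refuses names containing '/' or "..", always creating the file under /tmp, while the restore side replays every (key, value) pair through the normal set path. A sketch of the same round trip from a client program (file name is illustrative):

#include "svmdb.h"

static int
snapshot_and_restore (svmdb_client_t * client)
{
  /* Written as /tmp/strings.snap; '/' and ".." in the name are rejected */
  if (svmdb_local_serialize_strings (client, "strings.snap") < 0)
    return -1;

  /* Restore takes a full path and re-sets each saved string variable */
  if (svmdb_local_unserialize_strings (client, "/tmp/strings.snap") < 0)
    return -1;

  svmdb_local_dump_strings (client);
  return 0;
}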
unserialize_cstring (&m, (char **) &value); + vec_add1 (values, value); + } + vec_free (v); + } + nitems++; + value = format (0, "%s%c", add_value, 0); + + vec_add1 (values, value); + + fformat (stdout, "Resulting installed_sw vector:\n"); + + serialize_open_vector (&m, v); + serialize_likely_small_unsigned_integer (&m, vec_len (values)); + for (i = 0; i < vec_len (values); i++) + { + fformat (stdout, "%s\n", values[i]); + serialize_cstring (&m, (char *) values[i]); + } + + v = serialize_close_vector (&m); + + svmdb_local_set_vec_variable (c, "installed_sw", v, sizeof (v[0])); + svmdb_unmap (c); + + for (i = 0; i < vec_len (values); i++) + vec_free (values[i]); + vec_free (values); +} + +static void +sigaction_handler (int signum, siginfo_t * i, void *notused) +{ + u32 action, opaque; + + action = (u32) (uword) i->si_ptr; + action >>= 28; + opaque = (u32) (uword) i->si_ptr; + opaque &= ~(0xF0000000); + + clib_warning ("signal %d, action %d, opaque %x", signum, action, opaque); +} + +static void +test_reg (char *chroot_path, u8 * vbl) +{ + svmdb_client_t *c; + svmdb_notification_args_t args; + svmdb_notification_args_t *a = &args; + struct sigaction sa; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + memset (&sa, 0, sizeof (sa)); + sa.sa_sigaction = sigaction_handler; + sa.sa_flags = SA_SIGINFO; + if (sigaction (SIGUSR2, &sa, 0) < 0) + { + clib_unix_warning ("sigaction"); + return; + } + + memset (a, 0, sizeof (*a)); + + c = svmdb_map (ma); + + a->add_del = 1 /* add */ ; + a->nspace = SVMDB_NAMESPACE_STRING; + a->var = (char *) vbl; + a->elsize = 1; + a->signum = SIGUSR2; + a->action = SVMDB_ACTION_GET; + a->opaque = 0x0eadbeef; + + svmdb_local_add_del_notification (c, a); + + (void) svmdb_local_get_string_variable (c, (char *) vbl); + + a->add_del = 0; /* del */ + svmdb_local_add_del_notification (c, a); + + + + svmdb_unmap (c); +} + +static void +unset_vec (char *chroot_path, u8 * vbl) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + svmdb_local_unset_vec_variable (c, (char *) vbl); + svmdb_unmap (c); +} + +static void +dump_vecs (char *chroot_path) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + svmdb_local_dump_vecs (c); + svmdb_unmap (c); +} + +static void +crash_test (char *chroot_path) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + clib_warning ("Grab region mutex and crash deliberately!"); + c->db_rp->mutex_owner_pid = getpid (); + c->db_rp->mutex_owner_tag = -13; + pthread_mutex_lock (&c->db_rp->mutex); + + abort (); +} + +static void +map_with_size (char *chroot_path, uword size) +{ + svmdb_client_t *c; + svmdb_map_args_t *ma; + + svmdbtool_main.size = size; + ma = map_arg_setup (chroot_path); + + c = svmdb_map (ma); + + svmdb_unmap (c); +} + +int +main (int argc, char **argv) +{ + unformat_input_t input; + int parsed = 0; + u8 *vbl = 0, *value = 0; + char *chroot_path = 0; + u8 *chroot_path_u8; + u8 *filename; + uword size; + f64 vr; + int uid, gid, rv; + struct passwd _pw, *pw; + struct group _grp, *grp; + char *s, buf[128]; + + svmdbtool_main.uid = geteuid (); + svmdbtool_main.gid = getegid (); + + unformat_init_command_line (&input, argv); + + while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (&input, "get-string %s", &vbl)) + { + get_string (chroot_path, vbl); + vec_free (vbl); + parsed++; + } + else if (unformat (&input, 
"set-string %s %s", &vbl, &value)) + { + set_string (chroot_path, vbl, value); + vec_free (vbl); + vec_free (value); + parsed++; + } + else if (unformat (&input, "unset-string %s", &vbl)) + { + unset_string (chroot_path, vbl); + vec_free (vbl); + parsed++; + } + else if (unformat (&input, "dump-strings")) + { + dump_strings (chroot_path); + parsed++; + } + else if (unformat (&input, "unset-vec %s", &vbl)) + { + unset_vec (chroot_path, vbl); + vec_free (vbl); + parsed++; + } + else if (unformat (&input, "dump-vecs")) + { + dump_vecs (chroot_path); + parsed++; + } + else if (unformat (&input, "test-vec %s", &vbl)) + { + test_vec (chroot_path, vbl); + // vec_free(vbl); + parsed++; + } + else if (unformat (&input, "vlib-vec-rate %f", &vr)) + { + test_vlib_vec_rate (chroot_path, vr); + parsed++; + } + else if (unformat (&input, "test-reg %s", &vbl)) + { + test_reg (chroot_path, vbl); + parsed++; + } + else if (unformat (&input, "crash-test")) + { + crash_test (chroot_path); + } + else if (unformat (&input, "chroot %s", &chroot_path_u8)) + { + chroot_path = (char *) chroot_path_u8; + } + else if (unformat (&input, "fake-install %s", &value)) + { + fake_install (chroot_path, value); + parsed++; + } + else if (unformat (&input, "size %d", &size)) + { + map_with_size (chroot_path, size); + parsed++; + } + else if (unformat (&input, "uid %d", &uid)) + svmdbtool_main.uid = uid; + else if (unformat (&input, "gid %d", &gid)) + svmdbtool_main.gid = gid; + else if (unformat (&input, "uid %s", &s)) + { + /* lookup the username */ + pw = NULL; + rv = getpwnam_r (s, &_pw, buf, sizeof (buf), &pw); + if (rv < 0) + { + fformat (stderr, "cannot fetch username %s", s); + exit (1); + } + if (pw == NULL) + { + fformat (stderr, "username %s does not exist", s); + exit (1); + } + vec_free (s); + svmdbtool_main.uid = pw->pw_uid; + } + else if (unformat (&input, "gid %s", &s)) + { + /* lookup the group name */ + grp = NULL; + rv = getgrnam_r (s, &_grp, buf, sizeof (buf), &grp); + if (rv != 0) + { + fformat (stderr, "cannot fetch group %s", s); + exit (1); + } + if (grp == NULL) + { + fformat (stderr, "group %s does not exist", s); + exit (1); + } + vec_free (s); + svmdbtool_main.gid = grp->gr_gid; + } + else if (unformat (&input, "serialize-strings %s", &filename)) + { + vec_add1 (filename, 0); + serialize_strings (chroot_path, (char *) filename); + parsed++; + } + else if (unformat (&input, "unserialize-strings %s", &filename)) + { + vec_add1 (filename, 0); + unserialize_strings (chroot_path, (char *) filename); + parsed++; + } + else + { + break; + } + } + + unformat_free (&input); + + if (!parsed) + { + fformat (stdout, "%s: get-string | set-string \n", + argv[0]); + fformat (stdout, " unset-string | dump-strings\n"); + fformat (stdout, " test-vec |\n"); + fformat (stdout, " unset-vec | dump-vecs\n"); + fformat (stdout, " chroot [uid ]\n"); + fformat (stdout, " [gid ]\n"); + } + + exit (0); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svmtool.c b/src/svm/svmtool.c new file mode 100644 index 00000000..b3195514 --- /dev/null +++ b/src/svm/svmtool.c @@ -0,0 +1,528 @@ +/* + *------------------------------------------------------------------ + * svmtool.c + * + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "svm.h" + + + +/* + * format_all_svm_regions + * Maps / unmaps regions. Do NOT call from client code! + */ +u8 * +format_all_svm_regions (u8 * s, va_list * args) +{ + int verbose = va_arg (*args, int); + svm_region_t *root_rp = svm_get_root_rp (); + svm_main_region_t *mp; + svm_subregion_t *subp; + svm_region_t *rp; + svm_map_region_args_t *a = 0; + u8 **svm_names = 0; + u8 *name = 0; + int i; + + ASSERT (root_rp); + + pthread_mutex_lock (&root_rp->mutex); + + s = format (s, "%U", format_svm_region, root_rp, verbose); + + mp = root_rp->data_base; + + /* + * Snapshoot names, can't hold root rp mutex across + * find_or_create. + */ + /* *INDENT-OFF* */ + pool_foreach (subp, mp->subregions, ({ + name = vec_dup (subp->subregion_name); + vec_add1(svm_names, name); + })); + /* *INDENT-ON* */ + + pthread_mutex_unlock (&root_rp->mutex); + + for (i = 0; i < vec_len (svm_names); i++) + { + vec_validate (a, 0); + a->name = (char *) svm_names[i]; + rp = svm_region_find_or_create (a); + if (rp) + { + pthread_mutex_lock (&rp->mutex); + s = format (s, "%U", format_svm_region, rp, verbose); + pthread_mutex_unlock (&rp->mutex); + svm_region_unmap (rp); + vec_free (svm_names[i]); + } + vec_free (a); + } + vec_free (svm_names); + return (s); +} + +void +show (char *chroot_path, int verbose) +{ + svm_map_region_args_t *a = 0; + + vec_validate (a, 0); + + svm_region_init_chroot (chroot_path); + + fformat (stdout, "My pid is %d\n", getpid ()); + + fformat (stdout, "%U", format_all_svm_regions, verbose); + + svm_region_exit (); + + vec_free (a); +} + + +static void * +svm_map_region_nolock (svm_map_region_args_t * a) +{ + int svm_fd; + svm_region_t *rp; + int deadman = 0; + u8 *shm_name; + + ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size); + + shm_name = shm_name_from_svm_map_region_args (a); + + svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777); + + if (svm_fd < 0) + { + perror ("svm_region_map(mmap open)"); + return (0); + } + vec_free (shm_name); + + rp = mmap (0, MMAP_PAGESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0); + + if (rp == (svm_region_t *) MAP_FAILED) + { + close (svm_fd); + clib_warning ("mmap"); + return (0); + } + /* + * We lost the footrace to create this region; make sure + * the winner has crossed the finish line. + */ + while (rp->version == 0 && deadman++ < 5) + { + sleep (1); + } + + /* + * -ed? 
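svmtool deliberately attaches the hard way: it maps one page of the segment, waits for the creator to publish virtual_base and virtual_size, then remaps the whole region MAP_FIXED at that address, so it can inspect a region without trusting its contents. Ordinary clients go through svm_region_find_or_create() instead; a hedged sketch (region name and size are assumptions, not from the patch):

#include "svm.h"

static svm_region_t *
attach_demo_region (char *chroot_path)
{
  svm_map_region_args_t *a = 0;
  svm_region_t *rp;

  vec_validate (a, 0);
  svm_region_init_chroot (chroot_path);

  a->name = "demo-region";      /* illustrative name */
  a->size = 1 << 20;
  a->flags = SVM_FLAGS_MHEAP;

  /* Creates the region on first use, otherwise maps the existing one */
  rp = svm_region_find_or_create (a);

  vec_free (a);
  return rp;
}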
+ */ + if (rp->version == 0) + { + clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION); + munmap (rp, MMAP_PAGESIZE); + return (0); + } + /* Remap now that the region has been placed */ + a->baseva = rp->virtual_base; + a->size = rp->virtual_size; + munmap (rp, MMAP_PAGESIZE); + + rp = (void *) mmap ((void *) a->baseva, a->size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, svm_fd, 0); + if ((uword) rp == (uword) MAP_FAILED) + { + clib_unix_warning ("mmap"); + return (0); + } + + if ((uword) rp != rp->virtual_base) + { + clib_warning ("mmap botch"); + } + + if (pthread_mutex_trylock (&rp->mutex)) + { + clib_warning ("rp->mutex LOCKED by pid %d, tag %d, cleared...", + rp->mutex_owner_pid, rp->mutex_owner_tag); + memset (&rp->mutex, 0, sizeof (rp->mutex)); + + } + else + { + clib_warning ("mutex OK...\n"); + pthread_mutex_unlock (&rp->mutex); + } + + return ((void *) rp); +} + +/* + * rnd_pagesize + * Round to a pagesize multiple, presumably 4k works + */ +static u64 +rnd_pagesize (u64 size) +{ + u64 rv; + + rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1); + return (rv); +} + +#define MUTEX_DEBUG + +always_inline void +region_lock (svm_region_t * rp, int tag) +{ + pthread_mutex_lock (&rp->mutex); +#ifdef MUTEX_DEBUG + rp->mutex_owner_pid = getpid (); + rp->mutex_owner_tag = tag; +#endif +} + +always_inline void +region_unlock (svm_region_t * rp) +{ +#ifdef MUTEX_DEBUG + rp->mutex_owner_pid = 0; + rp->mutex_owner_tag = 0; +#endif + pthread_mutex_unlock (&rp->mutex); +} + + +static void * +svm_existing_region_map_nolock (void *root_arg, svm_map_region_args_t * a) +{ + svm_region_t *root_rp = root_arg; + svm_main_region_t *mp; + svm_region_t *rp; + void *oldheap; + uword *p; + + a->size += MMAP_PAGESIZE + + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE); + a->size = rnd_pagesize (a->size); + + region_lock (root_rp, 4); + oldheap = svm_push_pvt_heap (root_rp); + mp = root_rp->data_base; + + ASSERT (mp); + + p = hash_get_mem (mp->name_hash, a->name); + + if (p) + { + rp = svm_map_region_nolock (a); + region_unlock (root_rp); + svm_pop_heap (oldheap); + return rp; + } + return 0; + +} + +static void +trace (char *chroot_path, char *name, int enable_disable) +{ + svm_map_region_args_t *a = 0; + svm_region_t *db_rp; + void *oldheap; + + vec_validate (a, 0); + + svm_region_init_chroot (chroot_path); + + a->name = name; + a->size = 1 << 20; + a->flags = SVM_FLAGS_MHEAP; + + db_rp = svm_region_find_or_create (a); + + ASSERT (db_rp); + + region_lock (db_rp, 20); + + oldheap = svm_push_data_heap (db_rp); + + mheap_trace (db_rp->data_heap, enable_disable); + + svm_pop_heap (oldheap); + region_unlock (db_rp); + + svm_region_unmap ((void *) db_rp); + svm_region_exit (); + vec_free (a); +} + + + +static void +subregion_repair (char *chroot_path) +{ + int i; + svm_main_region_t *mp; + svm_map_region_args_t a; + svm_region_t *root_rp; + svm_region_t *rp; + svm_subregion_t *subp; + u8 *name = 0; + u8 **svm_names = 0; + + svm_region_init_chroot (chroot_path); + root_rp = svm_get_root_rp (); + + pthread_mutex_lock (&root_rp->mutex); + + mp = root_rp->data_base; + + /* + * Snapshoot names, can't hold root rp mutex across + * find_or_create. 
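Both the mapping code above and repair() below recover from a client that died while holding a region mutex: the mutexes are process-shared, and under MUTEX_DEBUG the owner's pid and a call-site tag are stamped into the region, so a failed trylock identifies the stale holder and the mutex is simply zeroed. A condensed sketch of that idiom (error handling elided):

#include <pthread.h>
#include "svm.h"

static void
recover_stale_mutex (svm_region_t * rp)
{
  if (pthread_mutex_trylock (&rp->mutex))
    {
      /* Still held, presumably by a dead process; report and clear it */
      clib_warning ("mutex held by pid %d (tag %d), clearing",
                    rp->mutex_owner_pid, rp->mutex_owner_tag);
      memset (&rp->mutex, 0, sizeof (rp->mutex));
    }
  else
    pthread_mutex_unlock (&rp->mutex);
}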
+ */ + /* *INDENT-OFF* */ + pool_foreach (subp, mp->subregions, ({ + name = vec_dup (subp->subregion_name); + vec_add1(svm_names, name); + })); + /* *INDENT-ON* */ + + pthread_mutex_unlock (&root_rp->mutex); + + for (i = 0; i < vec_len (svm_names); i++) + { + memset (&a, 0, sizeof (a)); + a.root_path = chroot_path; + a.name = (char *) svm_names[i]; + fformat (stdout, "Checking %s region...\n", a.name); + rp = svm_existing_region_map_nolock (root_rp, &a); + if (rp) + { + svm_region_unmap (rp); + vec_free (svm_names[i]); + } + } + vec_free (svm_names); +} + +void +repair (char *chroot_path, int crash_root_region) +{ + svm_region_t *root_rp = 0; + svm_map_region_args_t *a = 0; + void *svm_map_region (svm_map_region_args_t * a); + int svm_fd; + u8 *shm_name; + + fformat (stdout, "our pid: %d\n", getpid ()); + + vec_validate (a, 0); + + a->root_path = chroot_path; + a->name = SVM_GLOBAL_REGION_NAME; + a->baseva = SVM_GLOBAL_REGION_BASEVA; + a->size = SVM_GLOBAL_REGION_SIZE; + a->flags = SVM_FLAGS_NODATA; + + shm_name = shm_name_from_svm_map_region_args (a); + + svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777); + + if (svm_fd < 0) + { + perror ("svm_region_map(mmap open)"); + goto out; + } + + vec_free (shm_name); + + root_rp = mmap (0, MMAP_PAGESIZE, + PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0); + + if (root_rp == (svm_region_t *) MAP_FAILED) + { + close (svm_fd); + clib_warning ("mmap"); + goto out; + } + + /* Remap now that the region has been placed */ + clib_warning ("remap to 0x%x", root_rp->virtual_base); + + a->baseva = root_rp->virtual_base; + a->size = root_rp->virtual_size; + munmap (root_rp, MMAP_PAGESIZE); + + root_rp = (void *) mmap ((void *) a->baseva, a->size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, svm_fd, 0); + if ((uword) root_rp == (uword) MAP_FAILED) + { + clib_unix_warning ("mmap"); + goto out; + } + + close (svm_fd); + + if ((uword) root_rp != root_rp->virtual_base) + { + clib_warning ("mmap botch"); + goto out; + } + + if (pthread_mutex_trylock (&root_rp->mutex)) + { + clib_warning ("root_rp->mutex LOCKED by pid %d, tag %d, cleared...", + root_rp->mutex_owner_pid, root_rp->mutex_owner_tag); + memset (&root_rp->mutex, 0, sizeof (root_rp->mutex)); + goto out; + } + else + { + clib_warning ("root_rp->mutex OK...\n"); + pthread_mutex_unlock (&root_rp->mutex); + } + +out: + vec_free (a); + /* + * Now that the root region is known to be OK, + * fix broken subregions + */ + subregion_repair (chroot_path); + + if (crash_root_region) + { + clib_warning ("Leaving root region locked on purpose..."); + pthread_mutex_lock (&root_rp->mutex); + root_rp->mutex_owner_pid = getpid (); + root_rp->mutex_owner_tag = 99; + } + svm_region_exit (); +} + +int +main (int argc, char **argv) +{ + unformat_input_t input; + int parsed = 0; + char *name; + char *chroot_path = 0; + u8 *chroot_u8; + + unformat_init_command_line (&input, argv); + + while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (&input, "show-verbose")) + { + show (chroot_path, 1); + parsed++; + } + else if (unformat (&input, "show")) + { + show (chroot_path, 0); + parsed++; + } + else if (unformat (&input, "client-scan")) + { + svm_client_scan (chroot_path); + parsed++; + } + else if (unformat (&input, "repair")) + { + repair (chroot_path, 0 /* fix it */ ); + parsed++; + } + else if (unformat (&input, "crash")) + { + repair (chroot_path, 1 /* crash it */ ); + parsed++; + } + else if (unformat (&input, "trace-on %s", &name)) + { + trace (chroot_path, name, 1); + parsed++; + } + else if 
(unformat (&input, "trace-off %s", &name)) + { + trace (chroot_path, name, 0); + parsed++; + } + else if (unformat (&input, "chroot %s", &chroot_u8)) + { + chroot_path = (char *) chroot_u8; + } + else + { + break; + } + } + + unformat_free (&input); + + if (!parsed) + { + fformat (stdout, + "%s: show | show-verbose | client-scan | trace-on \n", + argv[0]); + fformat (stdout, " trace-off \n"); + } + exit (0); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg From 68b0fb0c620c7451ef1a6380c43c39de6614db51 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 28 Feb 2017 15:15:56 -0500 Subject: VPP-598: tcp stack initial commit Change-Id: I49e5ce0aae6e4ff634024387ceaf7dbc432a0351 Signed-off-by: Dave Barach Signed-off-by: Florin Coras --- src/Makefile.am | 1 + src/plugins/ioam/export-common/ioam_export.h | 2 +- src/plugins/ioam/ipfixcollector/ipfixcollector.c | 2 +- src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c | 2 +- src/plugins/snat/in2out.c | 26 +- src/plugins/snat/out2in.c | 24 +- src/scripts/vnet/tcp | 18 +- src/scripts/vnet/udp | 19 + src/scripts/vnet/uri/tcp-setup.sh | 39 + src/scripts/vnet/uri/tcp_server | 4 + src/scripts/vnet/uri/udp | 19 + src/svm.am | 10 +- src/svm/ssvm.c | 16 + src/svm/ssvm.h | 18 +- src/svm/svm_fifo.c | 568 ++++++ src/svm/svm_fifo.h | 157 ++ src/svm/svm_fifo_segment.c | 193 ++ src/svm/svm_fifo_segment.h | 89 + src/svm/test_svm_fifo1.c | 361 ++++ src/uri.am | 22 + src/uri/uri_tcp_test.c | 916 +++++++++ src/uri/uri_udp_test.c | 553 ++++++ src/uri/uri_udp_test2.c | 954 +++++++++ src/uri/uritest.c | 484 +++++ src/vlib/buffer.c | 2 +- src/vlib/buffer.h | 68 + src/vlibmemory/unix_shared_memory_queue.c | 12 +- src/vlibmemory/unix_shared_memory_queue.h | 2 +- src/vnet.am | 66 +- src/vnet/api_errno.h | 21 +- src/vnet/bfd/bfd_udp.c | 4 +- src/vnet/buffer.h | 10 + src/vnet/classify/vnet_classify.c | 4 +- src/vnet/dhcp/dhcp_proxy.h | 2 +- src/vnet/flow/flow_report.h | 2 +- src/vnet/ip/ip.h | 4 +- src/vnet/ip/ip4.h | 42 +- src/vnet/ip/ip4_forward.c | 173 +- src/vnet/ip/ip4_packet.h | 26 +- src/vnet/ip/ip6.h | 44 +- src/vnet/ip/ip6_packet.h | 26 +- src/vnet/ip/punt.c | 2 +- src/vnet/ip/tcp_packet.h | 141 -- src/vnet/ip/udp.h | 315 --- src/vnet/ip/udp_error.def | 21 - src/vnet/ip/udp_format.c | 91 - src/vnet/ip/udp_init.c | 71 - src/vnet/ip/udp_local.c | 645 ------ src/vnet/ip/udp_packet.h | 65 - src/vnet/ip/udp_pg.c | 237 --- src/vnet/ipsec/ikev2.c | 2 +- src/vnet/ipsec/ikev2_cli.c | 2 +- src/vnet/ipsec/ikev2_crypto.c | 2 +- src/vnet/lisp-cp/packets.c | 65 +- src/vnet/lisp-cp/packets.h | 45 - src/vnet/lisp-gpe/interface.c | 2 +- src/vnet/lisp-gpe/lisp_gpe.h | 4 +- src/vnet/lisp-gpe/lisp_gpe_adjacency.c | 2 + src/vnet/session/application.c | 343 ++++ src/vnet/session/application.h | 120 ++ src/vnet/session/application_interface.c | 459 +++++ src/vnet/session/application_interface.h | 136 ++ src/vnet/session/hashes.c | 28 + src/vnet/session/node.c | 435 ++++ src/vnet/session/session.api | 429 ++++ src/vnet/session/session.c | 1286 ++++++++++++ src/vnet/session/session.h | 380 ++++ src/vnet/session/session_api.c | 821 ++++++++ src/vnet/session/session_cli.c | 189 ++ src/vnet/session/transport.c | 64 + src/vnet/session/transport.h | 250 +++ src/vnet/tcp/tcp.c | 708 +++++++ src/vnet/tcp/tcp.h | 624 ++++++ src/vnet/tcp/tcp_error.def | 35 + src/vnet/tcp/tcp_format.c | 136 ++ src/vnet/tcp/tcp_input.c | 2316 ++++++++++++++++++++++ src/vnet/tcp/tcp_newreno.c | 93 + src/vnet/tcp/tcp_output.c | 1412 
+++++++++++++ src/vnet/tcp/tcp_packet.h | 184 ++ src/vnet/tcp/tcp_pg.c | 236 +++ src/vnet/tcp/tcp_syn_filter4.c | 542 +++++ src/vnet/tcp/tcp_timer.h | 29 + src/vnet/udp/builtin_server.c | 239 +++ src/vnet/udp/udp.c | 342 ++++ src/vnet/udp/udp.h | 362 ++++ src/vnet/udp/udp_error.def | 21 + src/vnet/udp/udp_format.c | 91 + src/vnet/udp/udp_input.c | 314 +++ src/vnet/udp/udp_local.c | 666 +++++++ src/vnet/udp/udp_packet.h | 65 + src/vnet/udp/udp_pg.c | 237 +++ src/vnet/vnet_all_api_h.h | 1 + src/vnet/vxlan-gpe/vxlan_gpe.h | 2 +- src/vnet/vxlan/vxlan.h | 2 +- src/vpp/api/vpe.api | 1 + src/vppinfra.am | 5 + src/vppinfra/bihash_16_8.h | 103 + src/vppinfra/bihash_48_8.h | 116 ++ src/vppinfra/tw_timer_16t_1w_2048sl.c | 26 + src/vppinfra/tw_timer_16t_1w_2048sl.h | 46 + 100 files changed, 18737 insertions(+), 1874 deletions(-) create mode 100644 src/scripts/vnet/udp create mode 100755 src/scripts/vnet/uri/tcp-setup.sh create mode 100644 src/scripts/vnet/uri/tcp_server create mode 100644 src/scripts/vnet/uri/udp create mode 100644 src/svm/svm_fifo.c create mode 100644 src/svm/svm_fifo.h create mode 100644 src/svm/svm_fifo_segment.c create mode 100644 src/svm/svm_fifo_segment.h create mode 100644 src/svm/test_svm_fifo1.c create mode 100644 src/uri.am create mode 100644 src/uri/uri_tcp_test.c create mode 100644 src/uri/uri_udp_test.c create mode 100644 src/uri/uri_udp_test2.c create mode 100644 src/uri/uritest.c delete mode 100644 src/vnet/ip/tcp_packet.h delete mode 100644 src/vnet/ip/udp.h delete mode 100644 src/vnet/ip/udp_error.def delete mode 100644 src/vnet/ip/udp_format.c delete mode 100644 src/vnet/ip/udp_init.c delete mode 100644 src/vnet/ip/udp_local.c delete mode 100644 src/vnet/ip/udp_packet.h delete mode 100644 src/vnet/ip/udp_pg.c create mode 100644 src/vnet/session/application.c create mode 100644 src/vnet/session/application.h create mode 100644 src/vnet/session/application_interface.c create mode 100644 src/vnet/session/application_interface.h create mode 100644 src/vnet/session/hashes.c create mode 100644 src/vnet/session/node.c create mode 100644 src/vnet/session/session.api create mode 100644 src/vnet/session/session.c create mode 100644 src/vnet/session/session.h create mode 100644 src/vnet/session/session_api.c create mode 100644 src/vnet/session/session_cli.c create mode 100644 src/vnet/session/transport.c create mode 100644 src/vnet/session/transport.h create mode 100644 src/vnet/tcp/tcp.c create mode 100644 src/vnet/tcp/tcp.h create mode 100644 src/vnet/tcp/tcp_error.def create mode 100644 src/vnet/tcp/tcp_format.c create mode 100644 src/vnet/tcp/tcp_input.c create mode 100644 src/vnet/tcp/tcp_newreno.c create mode 100644 src/vnet/tcp/tcp_output.c create mode 100644 src/vnet/tcp/tcp_packet.h create mode 100644 src/vnet/tcp/tcp_pg.c create mode 100644 src/vnet/tcp/tcp_syn_filter4.c create mode 100644 src/vnet/tcp/tcp_timer.h create mode 100644 src/vnet/udp/builtin_server.c create mode 100644 src/vnet/udp/udp.c create mode 100644 src/vnet/udp/udp.h create mode 100644 src/vnet/udp/udp_error.def create mode 100644 src/vnet/udp/udp_format.c create mode 100644 src/vnet/udp/udp_input.c create mode 100644 src/vnet/udp/udp_local.c create mode 100644 src/vnet/udp/udp_packet.h create mode 100644 src/vnet/udp/udp_pg.c create mode 100644 src/vppinfra/bihash_16_8.h create mode 100644 src/vppinfra/bihash_48_8.h create mode 100644 src/vppinfra/tw_timer_16t_1w_2048sl.c create mode 100644 src/vppinfra/tw_timer_16t_1w_2048sl.h (limited to 'src/svm') diff --git a/src/Makefile.am b/src/Makefile.am 
index 08feb29a..641707ed 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -88,6 +88,7 @@ include vlib-api.am include vnet.am include vpp.am include vpp-api-test.am +include uri.am SUBDIRS += plugins diff --git a/src/plugins/ioam/export-common/ioam_export.h b/src/plugins/ioam/export-common/ioam_export.h index e84dab0b..dd48a93b 100644 --- a/src/plugins/ioam/export-common/ioam_export.h +++ b/src/plugins/ioam/export-common/ioam_export.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/plugins/ioam/ipfixcollector/ipfixcollector.c b/src/plugins/ioam/ipfixcollector/ipfixcollector.c index 4ae47edc..71b934ec 100644 --- a/src/plugins/ioam/ipfixcollector/ipfixcollector.c +++ b/src/plugins/ioam/ipfixcollector/ipfixcollector.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include ipfix_collector_main_t ipfix_collector_main; diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c index b42c357c..f334c983 100644 --- a/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c +++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/plugins/snat/in2out.c b/src/plugins/snat/in2out.c index e30c913c..b4b7793d 100644 --- a/src/plugins/snat/in2out.c +++ b/src/plugins/snat/in2out.c @@ -689,12 +689,12 @@ snat_hairpinning (snat_main_t *sm, ip4_header_t, dst_address); ip0->checksum = ip_csum_fold (sum0); - old_dst_port0 = tcp0->ports.dst; + old_dst_port0 = tcp0->dst; if (PREDICT_TRUE(new_dst_port0 != old_dst_port0)) { if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) { - tcp0->ports.dst = new_dst_port0; + tcp0->dst = new_dst_port0; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, ip4_header_t, dst_address); @@ -872,9 +872,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) { - old_port0 = tcp0->ports.src; - tcp0->ports.src = s0->out2in.port; - new_port0 = tcp0->ports.src; + old_port0 = tcp0->src_port; + tcp0->src_port = s0->out2in.port; + new_port0 = tcp0->src_port; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, @@ -1012,9 +1012,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, if (PREDICT_TRUE(proto1 == SNAT_PROTOCOL_TCP)) { - old_port1 = tcp1->ports.src; - tcp1->ports.src = s1->out2in.port; - new_port1 = tcp1->ports.src; + old_port1 = tcp1->src_port; + tcp1->src_port = s1->out2in.port; + new_port1 = tcp1->src_port; sum1 = tcp1->checksum; sum1 = ip_csum_update (sum1, old_addr1, new_addr1, @@ -1188,9 +1188,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) { - old_port0 = tcp0->ports.src; - tcp0->ports.src = s0->out2in.port; - new_port0 = tcp0->ports.src; + old_port0 = tcp0->src_port; + tcp0->src_port = s0->out2in.port; + new_port0 = tcp0->src_port; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, @@ -1667,8 +1667,8 @@ snat_in2out_fast_static_map_fn (vlib_main_t * vm, { if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) { - old_port0 = tcp0->ports.src; - tcp0->ports.src = new_port0; + old_port0 = tcp0->src_port; + tcp0->src_port = new_port0; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, diff --git a/src/plugins/snat/out2in.c b/src/plugins/snat/out2in.c index 328f5ba4..3bfc0aa3 100644 --- a/src/plugins/snat/out2in.c +++ b/src/plugins/snat/out2in.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include 
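These snat hunks only rename the TCP port fields: ports.src and ports.dst become src_port and dst_port on the new tcp_header_t, and the surrounding incremental-checksum logic is untouched. For reference, a folded-out sketch of that pattern for a source-port rewrite (a sketch, not the snat code itself; field names as used in the hunks):

static inline void
rewrite_tcp_src_port (tcp_header_t * tcp, u16 new_port)
{
  ip_csum_t sum = tcp->checksum;
  u16 old_port = tcp->src_port;

  /* Patch the checksum incrementally instead of recomputing it */
  sum = ip_csum_update (sum, old_port, new_port, tcp_header_t, src_port);
  tcp->checksum = ip_csum_fold (sum);
  tcp->src_port = new_port;
}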
#include #include @@ -602,9 +602,9 @@ snat_out2in_node_fn (vlib_main_t * vm, if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) { - old_port0 = tcp0->ports.dst; - tcp0->ports.dst = s0->in2out.port; - new_port0 = tcp0->ports.dst; + old_port0 = tcp0->dst_port; + tcp0->dst_port = s0->in2out.port; + new_port0 = tcp0->dst_port; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, @@ -737,9 +737,9 @@ snat_out2in_node_fn (vlib_main_t * vm, if (PREDICT_TRUE(proto1 == SNAT_PROTOCOL_TCP)) { - old_port1 = tcp1->ports.dst; - tcp1->ports.dst = s1->in2out.port; - new_port1 = tcp1->ports.dst; + old_port1 = tcp1->dst_port; + tcp1->dst_port = s1->in2out.port; + new_port1 = tcp1->dst_port; sum1 = tcp1->checksum; sum1 = ip_csum_update (sum1, old_addr1, new_addr1, @@ -907,9 +907,9 @@ snat_out2in_node_fn (vlib_main_t * vm, if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) { - old_port0 = tcp0->ports.dst; - tcp0->ports.dst = s0->in2out.port; - new_port0 = tcp0->ports.dst; + old_port0 = tcp0->dst_port; + tcp0->dst_port = s0->in2out.port; + new_port0 = tcp0->dst_port; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, @@ -1369,8 +1369,8 @@ snat_out2in_fast_node_fn (vlib_main_t * vm, { if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) { - old_port0 = tcp0->ports.dst; - tcp0->ports.dst = new_port0; + old_port0 = tcp0->dst_port; + tcp0->dst_port = new_port0; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, diff --git a/src/scripts/vnet/tcp b/src/scripts/vnet/tcp index a2ee8b2d..b9c23c3a 100644 --- a/src/scripts/vnet/tcp +++ b/src/scripts/vnet/tcp @@ -1,16 +1,18 @@ +loop create +set int ip address loop0 192.168.1.1/8 +set int state loop0 up + packet-generator new { name x - limit 1 + limit 2048 node ip4-input - size 64-64 + size 100-100 + interface loop0 no-recycle data { - TCP: 1.2.3.4 -> 5.6.7.8 - TCP: 1234 -> 5678 + TCP: 192.168.1.2 -> 192.168.1.1 + TCP: 32415 -> 80 + SYN incrementing 100 } } - -tr add pg-input 100 -ip route 5.6.7.8/32 via local -ip route 1.2.3.4/32 via local diff --git a/src/scripts/vnet/udp b/src/scripts/vnet/udp new file mode 100644 index 00000000..7dda1eec --- /dev/null +++ b/src/scripts/vnet/udp @@ -0,0 +1,19 @@ +loop create +set int ip address loop0 192.168.1.1/8 +set int state loop0 up + +packet-generator new { + name udp + limit 512 + rate 1e4 + node ip4-input + size 100-100 + interface loop0 + no-recycle + data { + UDP: 192.168.1.2 - 192.168.2.255 -> 192.168.1.1 + UDP: 4321 -> 1234 + length 72 + incrementing 100 + } +} diff --git a/src/scripts/vnet/uri/tcp-setup.sh b/src/scripts/vnet/uri/tcp-setup.sh new file mode 100755 index 00000000..e0b01588 --- /dev/null +++ b/src/scripts/vnet/uri/tcp-setup.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +function topo_setup +{ + ip netns add vppns1 + ip link add veth_vpp1 type veth peer name vpp1 + ip link set dev vpp1 up + ip link set dev veth_vpp1 up netns vppns1 + + ip netns exec vppns1 \ + bash -c " + ip link set dev lo up + ip addr add 6.0.1.2/24 dev veth_vpp1 + " + + ethtool --offload vpp1 rx off tx off + ip netns exec vppns1 ethtool --offload veth_vpp1 rx off tx off + +} + +function topo_clean +{ + ip link del dev veth_vpp1 &> /dev/null + ip netns del vppns1 &> /dev/null +} + +if [ "$1" == "clean" ] ; then + topo_clean + exit 0 +else + topo_setup +fi + +# to test connectivity do: +# sudo ip netns exec vppns1 telnet 6.0.1.1 1234 +# to push traffic to the server +# dd if=/dev/zero bs=1024K count=512 | nc 6.0.1.1 +# to listen for incoming connection from vpp +# nc -l 1234 diff --git 
a/src/scripts/vnet/uri/tcp_server b/src/scripts/vnet/uri/tcp_server new file mode 100644 index 00000000..7f5a86de --- /dev/null +++ b/src/scripts/vnet/uri/tcp_server @@ -0,0 +1,4 @@ +create host-interface name vpp1 +set int state host-vpp1 up +set int ip address host-vpp1 6.0.1.1/24 +trace add af-packet-input 10 diff --git a/src/scripts/vnet/uri/udp b/src/scripts/vnet/uri/udp new file mode 100644 index 00000000..ca13b83c --- /dev/null +++ b/src/scripts/vnet/uri/udp @@ -0,0 +1,19 @@ +loop create +set int ip address loop0 10.0.0.1/32 +set int state loop0 up + +packet-generator new { + name udp + limit 512 + rate 1e4 + node ip4-input + size 100-100 + interface loop0 + no-recycle + data { + UDP: 192.168.1.2 - 192.168.2.255 -> 192.168.1.1 + UDP: 4321 -> 1234 + length 72 + incrementing 100 + } +} diff --git a/src/svm.am b/src/svm.am index 2cd385bd..442eba8e 100644 --- a/src/svm.am +++ b/src/svm.am @@ -13,13 +13,14 @@ bin_PROGRAMS += svmtool svmdbtool -nobase_include_HEADERS += svm/svm.h svm/ssvm.h svm/svmdb.h +nobase_include_HEADERS += svm/svm.h svm/ssvm.h svm/svmdb.h \ + svm/svm_fifo.h svm/svm_fifo_segment.h lib_LTLIBRARIES += libsvm.la libsvmdb.la +libsvm_la_SOURCES = svm/svm.c svm/ssvm.c svm/svm_fifo.c svm/svm_fifo_segment.c libsvm_la_LIBADD = libvppinfra.la -lrt -lpthread libsvm_la_DEPENDENCIES = libvppinfra.la -libsvm_la_SOURCES = svm/svm.c svm/ssvm.c svmtool_SOURCES = svm/svmtool.c svmtool_LDADD = libsvm.la libvppinfra.la -lpthread -lrt @@ -31,4 +32,9 @@ libsvmdb_la_SOURCES = svm/svmdb.c svmdbtool_SOURCES = svm/svmdbtool.c svmdbtool_LDADD = libsvmdb.la libsvm.la libvppinfra.la -lpthread -lrt +noinst_PROGRAMS += test_svm_fifo1 +test_svm_fifo1_SOURCES = svm/test_svm_fifo1.c +test_svm_fifo1_LDADD = libsvm.la libvppinfra.la -lpthread -lrt +test_svm_fifo1_LDFLAGS = -static + # vi:syntax=automake diff --git a/src/svm/ssvm.c b/src/svm/ssvm.c index 6f409eb6..6cda1f27 100644 --- a/src/svm/ssvm.c +++ b/src/svm/ssvm.c @@ -169,6 +169,22 @@ re_map_it: return 0; } +void +ssvm_delete (ssvm_private_t * ssvm) +{ + u8 *fn; + + fn = format (0, "/dev/shm/%s%c", ssvm->name, 0); + + /* Throw away the backing file */ + if (unlink ((char *) fn) < 0) + clib_unix_warning ("unlink segment '%s'", ssvm->name); + + munmap ((void *) ssvm->requested_va, ssvm->ssvm_size); + vec_free (fn); +} + + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/ssvm.h b/src/svm/ssvm.h index 9e61b9a0..bccfc164 100644 --- a/src/svm/ssvm.h +++ b/src/svm/ssvm.h @@ -38,7 +38,10 @@ #include #include -#define MMAP_PAGESIZE (4<<10) +#ifndef MMAP_PAGESIZE +#define MMAP_PAGESIZE (clib_mem_get_page_size()) +#endif + #define SSVM_N_OPAQUE 7 typedef struct @@ -125,12 +128,12 @@ ssvm_pop_heap (void *oldheap) } #define foreach_ssvm_api_error \ -_(NO_NAME, "No shared segment name", -10) \ -_(NO_SIZE, "Size not set (master)", -11) \ -_(CREATE_FAILURE, "Create failed", -12) \ -_(SET_SIZE, "Set size failed", -13) \ -_(MMAP, "mmap failed", -14) \ -_(SLAVE_TIMEOUT, "Slave map timeout", -15) +_(NO_NAME, "No shared segment name", -100) \ +_(NO_SIZE, "Size not set (master)", -101) \ +_(CREATE_FAILURE, "Create failed", -102) \ +_(SET_SIZE, "Set size failed", -103) \ +_(MMAP, "mmap failed", -104) \ +_(SLAVE_TIMEOUT, "Slave map timeout", -105) typedef enum { @@ -143,6 +146,7 @@ typedef enum int ssvm_master_init (ssvm_private_t * ssvm, u32 master_index); int ssvm_slave_init (ssvm_private_t * ssvm, int timeout_in_seconds); +void ssvm_delete (ssvm_private_t * ssvm); #endif /* __included_ssvm_h__ */ diff --git a/src/svm/svm_fifo.c 
b/src/svm/svm_fifo.c new file mode 100644 index 00000000..11f90193 --- /dev/null +++ b/src/svm/svm_fifo.c @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "svm_fifo.h" + +/** create an svm fifo, in the current heap. Fails vs blow up the process */ +svm_fifo_t * +svm_fifo_create (u32 data_size_in_bytes) +{ + svm_fifo_t *f; + pthread_mutexattr_t attr; + pthread_condattr_t cattr; + + f = clib_mem_alloc_aligned_or_null (sizeof (*f) + data_size_in_bytes, + CLIB_CACHE_LINE_BYTES); + if (f == 0) + return 0; + + memset (f, 0, sizeof (*f) + data_size_in_bytes); + f->nitems = data_size_in_bytes; + f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; + + memset (&attr, 0, sizeof (attr)); + memset (&cattr, 0, sizeof (cattr)); + + if (pthread_mutexattr_init (&attr)) + clib_unix_warning ("mutexattr_init"); + if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED)) + clib_unix_warning ("pthread_mutexattr_setpshared"); + if (pthread_mutex_init (&f->mutex, &attr)) + clib_unix_warning ("mutex_init"); + if (pthread_mutexattr_destroy (&attr)) + clib_unix_warning ("mutexattr_destroy"); + if (pthread_condattr_init (&cattr)) + clib_unix_warning ("condattr_init"); + if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED)) + clib_unix_warning ("condattr_setpshared"); + if (pthread_cond_init (&f->condvar, &cattr)) + clib_unix_warning ("cond_init1"); + if (pthread_condattr_destroy (&cattr)) + clib_unix_warning ("cond_init2"); + + return (f); +} + +always_inline ooo_segment_t * +ooo_segment_new (svm_fifo_t * f, u32 start, u32 length) +{ + ooo_segment_t *s; + + pool_get (f->ooo_segments, s); + + s->fifo_position = start; + s->length = length; + + s->prev = s->next = OOO_SEGMENT_INVALID_INDEX; + + return s; +} + +always_inline void +ooo_segment_del (svm_fifo_t * f, u32 index) +{ + ooo_segment_t *cur, *prev = 0, *next = 0; + cur = pool_elt_at_index (f->ooo_segments, index); + + if (cur->next != OOO_SEGMENT_INVALID_INDEX) + { + next = pool_elt_at_index (f->ooo_segments, cur->next); + next->prev = cur->prev; + } + + if (cur->prev != OOO_SEGMENT_INVALID_INDEX) + { + prev = pool_elt_at_index (f->ooo_segments, cur->prev); + prev->next = cur->next; + } + else + { + f->ooos_list_head = cur->next; + } + + pool_put (f->ooo_segments, cur); +} + +/** + * Add segment to fifo's out-of-order segment list. Takes care of merging + * adjacent segments and removing overlapping ones. 
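svm_fifo_create() above allocates the header and data area contiguously from whatever heap is current and returns 0 instead of asserting on failure; the mutex and condvar it initializes are PTHREAD_PROCESS_SHARED, so the usual pattern is to call it with a shared-memory data heap pushed. A hedged sketch, not part of the patch (wrapper name is illustrative):

#include "svm.h"
#include "svm_fifo.h"

static svm_fifo_t *
shared_fifo_create (svm_region_t * rp, u32 data_size)
{
  void *oldheap;
  svm_fifo_t *f;

  pthread_mutex_lock (&rp->mutex);
  oldheap = svm_push_data_heap (rp);

  /* Header + data land in the region, visible to every attached process */
  f = svm_fifo_create (data_size);

  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&rp->mutex);
  return f;
}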
+ */ +static void +ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) +{ + ooo_segment_t *s, *new_s, *prev, *next, *it; + u32 new_index, position, end_offset, s_sof, s_eof, s_index; + + position = (f->tail + offset) % f->nitems; + end_offset = offset + length; + + if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX) + { + s = ooo_segment_new (f, position, length); + f->ooos_list_head = s - f->ooo_segments; + f->ooos_newest = f->ooos_list_head; + return; + } + + /* Find first segment that starts after new segment */ + s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); + while (s->next != OOO_SEGMENT_INVALID_INDEX + && ooo_segment_offset (f, s) <= offset) + s = pool_elt_at_index (f->ooo_segments, s->next); + + s_index = s - f->ooo_segments; + s_sof = ooo_segment_offset (f, s); + s_eof = ooo_segment_end_offset (f, s); + + /* No overlap, add before current segment */ + if (end_offset < s_sof) + { + new_s = ooo_segment_new (f, position, length); + new_index = new_s - f->ooo_segments; + + /* Pool might've moved, get segment again */ + s = pool_elt_at_index (f->ooo_segments, s_index); + + if (s->prev != OOO_SEGMENT_INVALID_INDEX) + { + new_s->prev = s->prev; + + prev = pool_elt_at_index (f->ooo_segments, new_s->prev); + prev->next = new_index; + } + else + { + /* New head */ + f->ooos_list_head = new_index; + } + + new_s->next = s - f->ooo_segments; + s->prev = new_index; + f->ooos_newest = new_index; + return; + } + /* No overlap, add after current segment */ + else if (s_eof < offset) + { + new_s = ooo_segment_new (f, position, length); + new_index = new_s - f->ooo_segments; + + /* Pool might've moved, get segment again */ + s = pool_elt_at_index (f->ooo_segments, s_index); + + if (s->next != OOO_SEGMENT_INVALID_INDEX) + { + new_s->next = s->next; + + next = pool_elt_at_index (f->ooo_segments, new_s->next); + next->prev = new_index; + } + + new_s->prev = s - f->ooo_segments; + s->next = new_index; + f->ooos_newest = new_index; + + return; + } + + /* + * Merge needed + */ + + /* Merge at head */ + if (offset <= s_sof) + { + /* If we have a previous, check if we overlap */ + if (s->prev != OOO_SEGMENT_INVALID_INDEX) + { + prev = pool_elt_at_index (f->ooo_segments, s->prev); + + /* New segment merges prev and current. Remove previous and + * update position of current. */ + if (ooo_segment_end_offset (f, prev) >= offset) + { + s->fifo_position = prev->fifo_position; + s->length = s_eof - ooo_segment_offset (f, prev); + ooo_segment_del (f, s->prev); + } + } + else + { + s->fifo_position = position; + s->length = s_eof - ooo_segment_offset (f, s); + } + + /* The new segment's tail may cover multiple smaller ones */ + if (s_eof < end_offset) + { + /* Remove segments completely covered */ + it = (s->next != OOO_SEGMENT_INVALID_INDEX) ? + pool_elt_at_index (f->ooo_segments, s->next) : 0; + while (it && ooo_segment_end_offset (f, it) < end_offset) + { + next = (it->next != OOO_SEGMENT_INVALID_INDEX) ? + pool_elt_at_index (f->ooo_segments, it->next) : 0; + ooo_segment_del (f, it - f->ooo_segments); + it = next; + } + + /* Update length. Segment's start might have changed. 
*/ + s->length = end_offset - ooo_segment_offset (f, s); + + /* If partial overlap with last, merge */ + if (it && ooo_segment_offset (f, it) < end_offset) + { + s->length += + it->length - (ooo_segment_offset (f, it) - end_offset); + ooo_segment_del (f, it - f->ooo_segments); + } + } + } + /* Last but overlapping previous */ + else if (s_eof <= end_offset) + { + s->length = end_offset - ooo_segment_offset (f, s); + } + /* New segment completely covered by current one */ + else + { + /* Do Nothing */ + } + + /* Most recently updated segment */ + f->ooos_newest = s - f->ooo_segments; +} + +/** + * Removes segments that can now be enqueued because the fifo's tail has + * advanced. Returns the number of bytes added to tail. + */ +static int +ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) +{ + ooo_segment_t *s; + u32 index, bytes = 0, diff; + + s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); + + /* If last tail update overlaps one/multiple ooo segments, remove them */ + diff = (f->nitems + f->tail - s->fifo_position) % f->nitems; + while (0 < diff && diff < n_bytes_enqueued) + { + /* Segment end is beyond the tail. Advance tail and be done */ + if (diff < s->length) + { + f->tail += s->length - diff; + f->tail %= f->nitems; + break; + } + /* If we have next go on */ + else if (s->next != OOO_SEGMENT_INVALID_INDEX) + { + index = s - f->ooo_segments; + s = pool_elt_at_index (f->ooo_segments, s->next); + diff = (f->nitems + f->tail - s->fifo_position) % f->nitems; + ooo_segment_del (f, index); + } + /* End of search */ + else + { + break; + } + } + + /* If tail is adjacent to an ooo segment, 'consume' it */ + if (diff == 0) + { + bytes = ((f->nitems - f->cursize) >= s->length) ? s->length : + f->nitems - f->cursize; + + f->tail += bytes; + f->tail %= f->nitems; + + ooo_segment_del (f, s - f->ooo_segments); + } + + return bytes; +} + +static int +svm_fifo_enqueue_internal (svm_fifo_t * f, + int pid, u32 max_bytes, u8 * copy_from_here) +{ + u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; + u32 cursize, nitems; + + if (PREDICT_FALSE (f->cursize == f->nitems)) + return -2; /* fifo stuffed */ + + /* read cursize, which can only decrease while we're working */ + cursize = f->cursize; + nitems = f->nitems; + + /* Number of bytes we're going to copy */ + total_copy_bytes = (nitems - cursize) < max_bytes ? + (nitems - cursize) : max_bytes; + + if (PREDICT_TRUE (copy_from_here != 0)) + { + /* Number of bytes in first copy segment */ + first_copy_bytes = ((nitems - f->tail) < total_copy_bytes) + ? (nitems - f->tail) : total_copy_bytes; + + clib_memcpy (&f->data[f->tail], copy_from_here, first_copy_bytes); + f->tail += first_copy_bytes; + f->tail = (f->tail == nitems) ? 0 : f->tail; + + /* Number of bytes in second copy segment, if any */ + second_copy_bytes = total_copy_bytes - first_copy_bytes; + if (second_copy_bytes) + { + clib_memcpy (&f->data[f->tail], copy_from_here + first_copy_bytes, + second_copy_bytes); + f->tail += second_copy_bytes; + f->tail = (f->tail == nitems) ? 0 : f->tail; + } + } + else + { + /* Account for a zero-copy enqueue done elsewhere */ + ASSERT (max_bytes <= (nitems - cursize)); + f->tail += max_bytes; + f->tail = f->tail % nitems; + total_copy_bytes = max_bytes; + } + + /* Any out-of-order segments to collect? 
*/ + if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX)) + total_copy_bytes += ooo_segment_try_collect (f, total_copy_bytes); + + /* Atomically increase the queue length */ + __sync_fetch_and_add (&f->cursize, total_copy_bytes); + + return (total_copy_bytes); +} + +int +svm_fifo_enqueue_nowait (svm_fifo_t * f, + int pid, u32 max_bytes, u8 * copy_from_here) +{ + return svm_fifo_enqueue_internal (f, pid, max_bytes, copy_from_here); +} + +/** Enqueue a future segment. + * Two choices: either copies the entire segment, or copies nothing + * Returns 0 of the entire segment was copied + * Returns -1 if none of the segment was copied due to lack of space + */ + +static int +svm_fifo_enqueue_with_offset_internal2 (svm_fifo_t * f, + int pid, + u32 offset, + u32 required_bytes, + u8 * copy_from_here) +{ + u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; + u32 cursize, nitems; + u32 tail_plus_offset; + + ASSERT (offset > 0); + + /* read cursize, which can only decrease while we're working */ + cursize = f->cursize; + nitems = f->nitems; + + /* Will this request fit? */ + if ((required_bytes + offset) > (nitems - cursize)) + return -1; + + ooo_segment_add (f, offset, required_bytes); + + /* Number of bytes we're going to copy */ + total_copy_bytes = required_bytes; + tail_plus_offset = (f->tail + offset) % nitems; + + /* Number of bytes in first copy segment */ + first_copy_bytes = ((nitems - tail_plus_offset) < total_copy_bytes) + ? (nitems - tail_plus_offset) : total_copy_bytes; + + clib_memcpy (&f->data[tail_plus_offset], copy_from_here, first_copy_bytes); + + /* Number of bytes in second copy segment, if any */ + second_copy_bytes = total_copy_bytes - first_copy_bytes; + if (second_copy_bytes) + { + tail_plus_offset += first_copy_bytes; + tail_plus_offset %= nitems; + + ASSERT (tail_plus_offset == 0); + + clib_memcpy (&f->data[tail_plus_offset], + copy_from_here + first_copy_bytes, second_copy_bytes); + } + + return (0); +} + + +int +svm_fifo_enqueue_with_offset (svm_fifo_t * f, + int pid, + u32 offset, + u32 required_bytes, u8 * copy_from_here) +{ + return svm_fifo_enqueue_with_offset_internal2 + (f, pid, offset, required_bytes, copy_from_here); +} + + +static int +svm_fifo_dequeue_internal2 (svm_fifo_t * f, + int pid, u32 max_bytes, u8 * copy_here) +{ + u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; + u32 cursize, nitems; + + if (PREDICT_FALSE (f->cursize == 0)) + return -2; /* nothing in the fifo */ + + /* read cursize, which can only increase while we're working */ + cursize = f->cursize; + nitems = f->nitems; + + /* Number of bytes we're going to copy */ + total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes; + + if (PREDICT_TRUE (copy_here != 0)) + { + /* Number of bytes in first copy segment */ + first_copy_bytes = ((nitems - f->head) < total_copy_bytes) + ? (nitems - f->head) : total_copy_bytes; + clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes); + f->head += first_copy_bytes; + f->head = (f->head == nitems) ? 0 : f->head; + + /* Number of bytes in second copy segment, if any */ + second_copy_bytes = total_copy_bytes - first_copy_bytes; + if (second_copy_bytes) + { + clib_memcpy (copy_here + first_copy_bytes, + &f->data[f->head], second_copy_bytes); + f->head += second_copy_bytes; + f->head = (f->head == nitems) ? 
0 : f->head; + } + } + else + { + /* Account for a zero-copy dequeue done elsewhere */ + ASSERT (max_bytes <= cursize); + f->head += max_bytes; + f->head = f->head % nitems; + cursize -= max_bytes; + total_copy_bytes = max_bytes; + } + + __sync_fetch_and_sub (&f->cursize, total_copy_bytes); + + return (total_copy_bytes); +} + +int +svm_fifo_dequeue_nowait (svm_fifo_t * f, + int pid, u32 max_bytes, u8 * copy_here) +{ + return svm_fifo_dequeue_internal2 (f, pid, max_bytes, copy_here); +} + +int +svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, + u8 * copy_here) +{ + u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; + u32 cursize, nitems; + + if (PREDICT_FALSE (f->cursize == 0)) + return -2; /* nothing in the fifo */ + + /* read cursize, which can only increase while we're working */ + cursize = f->cursize; + nitems = f->nitems; + + /* Number of bytes we're going to copy */ + total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes; + + if (PREDICT_TRUE (copy_here != 0)) + { + /* Number of bytes in first copy segment */ + first_copy_bytes = + ((nitems - f->head) < total_copy_bytes) ? + (nitems - f->head) : total_copy_bytes; + clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes); + + /* Number of bytes in second copy segment, if any */ + second_copy_bytes = total_copy_bytes - first_copy_bytes; + if (second_copy_bytes) + { + clib_memcpy (copy_here + first_copy_bytes, &f->data[0], + second_copy_bytes); + } + } + return total_copy_bytes; +} + +int +svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes) +{ + u32 total_drop_bytes, first_drop_bytes, second_drop_bytes; + u32 cursize, nitems; + + if (PREDICT_FALSE (f->cursize == 0)) + return -2; /* nothing in the fifo */ + + /* read cursize, which can only increase while we're working */ + cursize = f->cursize; + nitems = f->nitems; + + /* Number of bytes we're going to drop */ + total_drop_bytes = (cursize < max_bytes) ? cursize : max_bytes; + + /* Number of bytes in first copy segment */ + first_drop_bytes = + ((nitems - f->head) < total_drop_bytes) ? + (nitems - f->head) : total_drop_bytes; + f->head += first_drop_bytes; + f->head = (f->head == nitems) ? 0 : f->head; + + /* Number of bytes in second drop segment, if any */ + second_drop_bytes = total_drop_bytes - first_drop_bytes; + if (second_drop_bytes) + { + f->head += second_drop_bytes; + f->head = (f->head == nitems) ? 0 : f->head; + } + + __sync_fetch_and_sub (&f->cursize, total_drop_bytes); + + return total_drop_bytes; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h new file mode 100644 index 00000000..70624b74 --- /dev/null +++ b/src/svm/svm_fifo.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef __included_ssvm_fifo_h__ +#define __included_ssvm_fifo_h__ + +#include +#include +#include +#include +#include +#include +#include + +typedef enum +{ + SVM_FIFO_TAG_NOT_HELD = 0, + SVM_FIFO_TAG_DEQUEUE, + SVM_FIFO_TAG_ENQUEUE, +} svm_lock_tag_t; + +/** Out-of-order segment */ +typedef struct +{ + u32 next; /**< Next linked-list element pool index */ + u32 prev; /**< Previous linked-list element pool index */ + + u32 fifo_position; /**< Start of segment, normalized*/ + u32 length; /**< Length of segment */ +} ooo_segment_t; + +#define OOO_SEGMENT_INVALID_INDEX ((u32)~0) + +typedef struct +{ + pthread_mutex_t mutex; /* 8 bytes */ + pthread_cond_t condvar; /* 8 bytes */ + u32 owner_pid; + svm_lock_tag_t tag; + volatile u32 cursize; + u32 nitems; + + /* Backpointers */ + u32 server_session_index; + u32 client_session_index; + u8 server_thread_index; + u8 client_thread_index; + CLIB_CACHE_LINE_ALIGN_MARK (end_shared); + u32 head; + CLIB_CACHE_LINE_ALIGN_MARK (end_consumer); + + /* producer */ + u32 tail; + + ooo_segment_t *ooo_segments; /**< Pool of ooo segments */ + u32 ooos_list_head; /**< Head of out-of-order linked-list */ + u32 ooos_newest; /**< Last segment to have been updated */ + + CLIB_CACHE_LINE_ALIGN_MARK (data); +} svm_fifo_t; + +static inline int +svm_fifo_lock (svm_fifo_t * f, u32 pid, u32 tag, int nowait) +{ + if (PREDICT_TRUE (nowait == 0)) + pthread_mutex_lock (&f->mutex); + else + { + if (pthread_mutex_trylock (&f->mutex)) + return -1; + } + f->owner_pid = pid; + f->tag = tag; + return 0; +} + +static inline void +svm_fifo_unlock (svm_fifo_t * f) +{ + f->owner_pid = 0; + f->tag = 0; + CLIB_MEMORY_BARRIER (); + pthread_mutex_unlock (&f->mutex); +} + +static inline u32 +svm_fifo_max_dequeue (svm_fifo_t * f) +{ + return f->cursize; +} + +static inline u32 +svm_fifo_max_enqueue (svm_fifo_t * f) +{ + return f->nitems - f->cursize; +} + +static inline u8 +svm_fifo_has_ooo_data (svm_fifo_t * f) +{ + return f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX; +} + +svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes); + +int svm_fifo_enqueue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, + u8 * copy_from_here); + +int svm_fifo_enqueue_with_offset (svm_fifo_t * f, int pid, + u32 offset, u32 required_bytes, + u8 * copy_from_here); + +int svm_fifo_dequeue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, + u8 * copy_here); + +int svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, + u8 * copy_here); +int svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes); + +always_inline ooo_segment_t * +svm_fifo_newest_ooo_segment (svm_fifo_t * f) +{ + return f->ooo_segments + f->ooos_newest; +} + +always_inline u32 +ooo_segment_offset (svm_fifo_t * f, ooo_segment_t * s) +{ + return ((f->nitems + s->fifo_position - f->tail) % f->nitems); +} + +always_inline u32 +ooo_segment_end_offset (svm_fifo_t * f, ooo_segment_t * s) +{ + return ((f->nitems + s->fifo_position + s->length - f->tail) % f->nitems); +} + +#endif /* __included_ssvm_fifo_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c new file mode 100644 index 00000000..acabb3bd --- /dev/null +++ b/src/svm/svm_fifo_segment.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +svm_fifo_segment_main_t svm_fifo_segment_main; + +/** (master) create an svm fifo segment */ +int +svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) +{ + int rv; + svm_fifo_segment_private_t *s; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + void *oldheap; + + /* Allocate a fresh segment */ + pool_get (sm->segments, s); + memset (s, 0, sizeof (*s)); + + s->ssvm.ssvm_size = a->segment_size; + s->ssvm.i_am_master = 1; + s->ssvm.my_pid = getpid (); + s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.requested_va = sm->next_baseva; + + rv = ssvm_master_init (&s->ssvm, s - sm->segments); + + if (rv) + { + _vec_len (s) = vec_len (s) - 1; + return (rv); + } + + /* Note; requested_va updated due to seg base addr randomization */ + sm->next_baseva = s->ssvm.requested_va + a->segment_size; + + sh = s->ssvm.sh; + oldheap = ssvm_push_heap (sh); + + /* Set up svm_fifo_segment shared header */ + fsh = clib_mem_alloc (sizeof (*fsh)); + memset (fsh, 0, sizeof (*fsh)); + sh->opaque[0] = fsh; + s->h = fsh; + fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + + /* Avoid vec_add1(...) failure when adding a fifo, etc. */ + vec_validate (fsh->fifos, 64); + _vec_len (fsh->fifos) = 0; + + ssvm_pop_heap (oldheap); + + sh->ready = 1; + a->new_segment_index = s - sm->segments; + return (0); +} + +/** (slave) attach to an svm fifo segment */ +int +svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) +{ + int rv; + svm_fifo_segment_private_t *s; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + + /* Allocate a fresh segment */ + pool_get (sm->segments, s); + + memset (s, 0, sizeof (*s)); + + s->ssvm.ssvm_size = a->segment_size; + s->ssvm.my_pid = getpid (); + s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.requested_va = sm->next_baseva; + + rv = ssvm_slave_init (&s->ssvm, sm->timeout_in_seconds); + + if (rv) + { + _vec_len (s) = vec_len (s) - 1; + return (rv); + } + + /* Fish the segment header */ + sh = s->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + s->h = fsh; + + a->new_segment_index = s - sm->segments; + return (0); +} + +void +svm_fifo_segment_delete (svm_fifo_segment_private_t * s) +{ + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + ssvm_delete (&s->ssvm); + pool_put (sm->segments, s); +} + +svm_fifo_t * +svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, + u32 data_size_in_bytes) +{ + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + svm_fifo_t *f; + void *oldheap; + + sh = s->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + oldheap = ssvm_push_heap (sh); + + /* Note: this can fail, in which case: create another segment */ + f = svm_fifo_create (data_size_in_bytes); + if (f == 0) + { + ssvm_pop_heap (oldheap); + return (0); + } + + vec_add1 (fsh->fifos, f); + + ssvm_pop_heap (oldheap); + return (f); +} + +void +svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f) +{ + ssvm_shared_header_t *sh; + 
svm_fifo_segment_header_t *fsh; + void *oldheap; + int i; + + sh = s->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + oldheap = ssvm_push_heap (sh); + + for (i = 0; i < vec_len (fsh->fifos); i++) + { + if (fsh->fifos[i] == f) + { + vec_delete (fsh->fifos, 1, i); + goto found; + } + } + clib_warning ("fifo 0x%llx not found in fifo table...", f); + +found: + clib_mem_free (f); + ssvm_pop_heap (oldheap); +} + +void +svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds) +{ + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + + sm->next_baseva = baseva; + sm->timeout_in_seconds = timeout_in_seconds; +} + +u32 +svm_fifo_segment_index (svm_fifo_segment_private_t * s) +{ + return s - svm_fifo_segment_main.segments; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h new file mode 100644 index 00000000..793fa7c8 --- /dev/null +++ b/src/svm/svm_fifo_segment.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __included_ssvm_fifo_segment_h__ +#define __included_ssvm_fifo_segment_h__ + +#include "svm_fifo.h" +#include "ssvm.h" + +typedef struct +{ + volatile svm_fifo_t **fifos; + u8 *segment_name; +} svm_fifo_segment_header_t; + +typedef struct +{ + ssvm_private_t ssvm; + svm_fifo_segment_header_t *h; +} svm_fifo_segment_private_t; + +typedef struct +{ + /** pool of segments */ + svm_fifo_segment_private_t *segments; + /* Where to put the next one */ + u64 next_baseva; + u32 timeout_in_seconds; +} svm_fifo_segment_main_t; + +extern svm_fifo_segment_main_t svm_fifo_segment_main; + +typedef struct +{ + char *segment_name; + u32 segment_size; + u32 new_segment_index; +} svm_fifo_segment_create_args_t; + +static inline svm_fifo_segment_private_t * +svm_fifo_get_segment (u32 segment_index) +{ + svm_fifo_segment_main_t *ssm = &svm_fifo_segment_main; + return vec_elt_at_index (ssm->segments, segment_index); +} + +#define foreach_ssvm_fifo_segment_api_error \ +_(OUT_OF_SPACE, "Out of space in segment", -200) + +typedef enum +{ +#define _(n,s,c) SSVM_FIFO_SEGMENT_API_ERROR_##n = c, + foreach_ssvm_fifo_segment_api_error +#undef _ +} ssvm_fifo_segment_api_error_enum_t; + +int svm_fifo_segment_create (svm_fifo_segment_create_args_t * a); +int svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a); +void svm_fifo_segment_delete (svm_fifo_segment_private_t * s); + +svm_fifo_t *svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, + u32 data_size_in_bytes); +void svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, + svm_fifo_t * f); + +void svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds); + +u32 svm_fifo_segment_index (svm_fifo_segment_private_t * s); + +#endif /* __included_ssvm_fifo_segment_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git 
a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c new file mode 100644 index 00000000..355653df --- /dev/null +++ b/src/svm/test_svm_fifo1.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "svm_fifo_segment.h" + +clib_error_t * +hello_world (int verbose) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *sp; + svm_fifo_t *f; + int rv; + u8 *test_data; + u8 *retrieved_data = 0; + clib_error_t *error = 0; + int pid = getpid (); + + memset (a, 0, sizeof (*a)); + + a->segment_name = "fifo-test1"; + a->segment_size = 256 << 10; + + rv = svm_fifo_segment_create (a); + + if (rv) + return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); + + sp = svm_fifo_get_segment (a->new_segment_index); + + f = svm_fifo_segment_alloc_fifo (sp, 4096); + + if (f == 0) + return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); + + test_data = format (0, "Hello world%c", 0); + vec_validate (retrieved_data, vec_len (test_data) - 1); + + while (svm_fifo_max_enqueue (f) >= vec_len (test_data)) + svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + + while (svm_fifo_max_dequeue (f) >= vec_len (test_data)) + svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), + retrieved_data); + + while (svm_fifo_max_enqueue (f) >= vec_len (test_data)) + svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + + while (svm_fifo_max_dequeue (f) >= vec_len (test_data)) + svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), + retrieved_data); + + if (!memcmp (retrieved_data, test_data, vec_len (test_data))) + error = clib_error_return (0, "data test OK, got '%s'", retrieved_data); + else + error = clib_error_return (0, "data test FAIL!"); + + svm_fifo_segment_free_fifo (sp, f); + + return error; +} + +clib_error_t * +master (int verbose) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *sp; + svm_fifo_t *f; + int rv; + u8 *test_data; + u8 *retrieved_data = 0; + int i; + int pid = getpid (); + + memset (a, 0, sizeof (*a)); + + a->segment_name = "fifo-test1"; + a->segment_size = 256 << 10; + + rv = svm_fifo_segment_create (a); + + if (rv) + return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); + + sp = svm_fifo_get_segment (a->new_segment_index); + + f = svm_fifo_segment_alloc_fifo (sp, 4096); + + if (f == 0) + return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); + + test_data = format (0, "Hello world%c", 0); + vec_validate (retrieved_data, vec_len (test_data) - 1); + + for (i = 0; i < 1000; i++) + svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + + return clib_error_return (0, "master (enqueue) done"); +} + +clib_error_t * +mempig (int verbose) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *sp; + svm_fifo_t *f; + svm_fifo_t **flist = 0; + int rv; + int i; + + memset (a, 0, sizeof (*a)); + + a->segment_name = "fifo-test1"; + 
a->segment_size = 256 << 10; + + rv = svm_fifo_segment_create (a); + + if (rv) + return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); + + sp = svm_fifo_get_segment (a->new_segment_index); + + for (i = 0; i < 1000; i++) + { + f = svm_fifo_segment_alloc_fifo (sp, 4096); + if (f == 0) + break; + vec_add1 (flist, f); + } + + fformat (stdout, "Try #1: created %d fifos...\n", vec_len (flist)); + for (i = 0; i < vec_len (flist); i++) + { + f = flist[i]; + svm_fifo_segment_free_fifo (sp, f); + } + + _vec_len (flist) = 0; + + for (i = 0; i < 1000; i++) + { + f = svm_fifo_segment_alloc_fifo (sp, 4096); + if (f == 0) + break; + vec_add1 (flist, f); + } + + fformat (stdout, "Try #2: created %d fifos...\n", vec_len (flist)); + for (i = 0; i < vec_len (flist); i++) + { + f = flist[i]; + svm_fifo_segment_free_fifo (sp, f); + } + + return 0; +} + +clib_error_t * +offset (int verbose) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *sp; + svm_fifo_t *f; + int rv; + u32 *test_data = 0; + u32 *recovered_data = 0; + int i; + int pid = getpid (); + + memset (a, 0, sizeof (*a)); + + a->segment_name = "fifo-test1"; + a->segment_size = 256 << 10; + + rv = svm_fifo_segment_create (a); + + if (rv) + return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); + + sp = svm_fifo_get_segment (a->new_segment_index); + + f = svm_fifo_segment_alloc_fifo (sp, 200 << 10); + + if (f == 0) + return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); + + for (i = 0; i < (3 * 1024); i++) + vec_add1 (test_data, i); + + /* Enqueue the first 1024 u32's */ + svm_fifo_enqueue_nowait (f, pid, 4096 /* bytes to enqueue */ , + (u8 *) test_data); + + /* Enqueue the third 1024 u32's 2048 ahead of the current tail */ + svm_fifo_enqueue_with_offset (f, pid, 4096, 4096, (u8 *) & test_data[2048]); + + /* Enqueue the second 1024 u32's at the current tail */ + svm_fifo_enqueue_nowait (f, pid, 4096 /* bytes to enqueue */ , + (u8 *) & test_data[1024]); + + vec_validate (recovered_data, (3 * 1024) - 1); + + svm_fifo_dequeue_nowait (f, pid, 3 * 4096, (u8 *) recovered_data); + + for (i = 0; i < (3 * 1024); i++) + { + if (recovered_data[i] != test_data[i]) + { + clib_warning ("[%d] expected %d recovered %d", i, + test_data[i], recovered_data[i]); + return clib_error_return (0, "offset test FAILED"); + } + } + + return clib_error_return (0, "offset test OK"); +} + +clib_error_t * +slave (int verbose) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *sp; + svm_fifo_segment_header_t *fsh; + svm_fifo_t *f; + ssvm_shared_header_t *sh; + int rv; + u8 *test_data; + u8 *retrieved_data = 0; + int pid = getpid (); + int i; + + memset (a, 0, sizeof (*a)); + + a->segment_name = "fifo-test1"; + + rv = svm_fifo_segment_attach (a); + + if (rv) + return clib_error_return (0, "svm_fifo_segment_attach returned %d", rv); + + sp = svm_fifo_get_segment (a->new_segment_index); + sh = sp->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + /* might wanna wait.. 
*/ + f = (svm_fifo_t *) fsh->fifos[0]; + + /* Lazy bastards united */ + test_data = format (0, "Hello world%c", 0); + vec_validate (retrieved_data, vec_len (test_data) - 1); + + for (i = 0; i < 1000; i++) + { + svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), + retrieved_data); + if (memcmp (retrieved_data, test_data, vec_len (retrieved_data))) + return clib_error_return (0, "retrieved data incorrect, '%s'", + retrieved_data); + } + + return clib_error_return (0, "slave (dequeue) done"); +} + + +int +test_ssvm_fifo1 (unformat_input_t * input) +{ + clib_error_t *error = 0; + int verbose = 0; + int test_id = 0; + + svm_fifo_segment_init (0x200000000ULL, 20); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "verbose %d", &verbose)) + ; + else if (unformat (input, "verbose")) + verbose = 1; + else if (unformat (input, "master")) + test_id = 1; + else if (unformat (input, "slave")) + test_id = 2; + else if (unformat (input, "mempig")) + test_id = 3; + else if (unformat (input, "offset")) + test_id = 4; + else + { + error = clib_error_create ("unknown input `%U'\n", + format_unformat_error, input); + goto out; + } + } + + switch (test_id) + { + case 0: + error = hello_world (verbose); + break; + + case 1: + error = master (verbose); + break; + + case 2: + error = slave (verbose); + break; + + case 3: + error = mempig (verbose); + break; + + case 4: + error = offset (verbose); + break; + + default: + error = clib_error_return (0, "test id %d unknown", test_id); + break; + } + +out: + if (error) + clib_error_report (error); + + return 0; +} + + + +int +main (int argc, char *argv[]) +{ + unformat_input_t i; + int r; + + unformat_init_command_line (&i, argv); + r = test_ssvm_fifo1 (&i); + unformat_free (&i); + return r; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/uri.am b/src/uri.am new file mode 100644 index 00000000..8cdd77c6 --- /dev/null +++ b/src/uri.am @@ -0,0 +1,22 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +noinst_PROGRAMS += uri_udp_test2 uri_tcp_test + +uri_udp_test2_SOURCES = uri/uri_udp_test2.c +uri_udp_test2_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ + libvppinfra.la -lpthread -lm -lrt + +uri_tcp_test_SOURCES = uri/uri_tcp_test.c +uri_tcp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ + libvppinfra.la -lpthread -lm -lrt diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c new file mode 100644 index 00000000..ed5a37d8 --- /dev/null +++ b/src/uri/uri_tcp_test.c @@ -0,0 +1,916 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../vnet/session/application_interface.h" + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +/* declare message handlers for each api */ + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) +#define vl_printfun +#include +#undef vl_printfun + +/* Satisfy external references when not linking with -lvlib */ +vlib_main_t vlib_global_main; +vlib_main_t **vlib_mains; + +typedef struct +{ + svm_fifo_t * server_rx_fifo; + svm_fifo_t * server_tx_fifo; + + u32 vpp_session_index; + u32 vpp_session_thread; +} session_t; + +typedef enum +{ + STATE_START, + STATE_READY, + STATE_DISCONNECTING, + STATE_FAILED +} connection_state_t; + +typedef struct +{ + /* vpe input queue */ + unix_shared_memory_queue_t *vl_input_queue; + + /* API client handle */ + u32 my_client_index; + + /* The URI we're playing with */ + u8 * uri; + + /* Session pool */ + session_t * sessions; + + /* Hash table for disconnect processing */ + uword * session_index_by_vpp_handles; + + /* intermediate rx buffer */ + u8 * rx_buf; + + /* URI for slave's connect */ + u8 * connect_uri; + + u32 connected_session_index; + + int i_am_master; + + /* drop all packets */ + int drop_packets; + + /* Our event queue */ + unix_shared_memory_queue_t * our_event_queue; + + /* $$$ single thread only for the moment */ + unix_shared_memory_queue_t * vpp_event_queue; + + pid_t my_pid; + + /* For deadman timers */ + clib_time_t clib_time; + + /* State of the connection, shared between msg RX thread and main thread */ + volatile connection_state_t state; + + /* Signal variables */ + volatile int time_to_stop; + volatile int time_to_print_stats; + + u32 configured_segment_size; + + /* VNET_API_ERROR_FOO -> "Foo" hash table */ + uword * error_string_by_error_number; + + /* convenience */ + svm_fifo_segment_main_t * segment_main; + + u8 *connect_test_data; +} uri_tcp_test_main_t; + +uri_tcp_test_main_t uri_tcp_test_main; + +#if CLIB_DEBUG > 0 +#define NITER 10000 +#else +#define NITER 4000000 +#endif + +int +wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) +{ +#if CLIB_DEBUG > 0 +#define TIMEOUT 600.0 +#else +#define TIMEOUT 600.0 +#endif + + f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT; + + while (clib_time_now (&utm->clib_time) < timeout) + { + if (utm->state == state) + return 0; + if (utm->state == STATE_FAILED) + return -1; + } + clib_warning ("timeout waiting for STATE_READY"); + return -1; +} + +static void +init_error_string_table (uri_tcp_test_main_t * utm) +{ + utm->error_string_by_error_number = hash_create (0, sizeof (uword)); + +#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); + foreach_vnet_api_error; +#undef _ + + hash_set (utm->error_string_by_error_number, 99, "Misc"); +} + +static void +stop_signal (int signum) +{ + uri_tcp_test_main_t *um = &uri_tcp_test_main; + + um->time_to_stop = 1; +} + +static void +stats_signal 
(int signum) +{ + uri_tcp_test_main_t *um = &uri_tcp_test_main; + + um->time_to_print_stats = 1; +} + +static clib_error_t * +setup_signal_handlers (void) +{ + signal (SIGINT, stats_signal); + signal (SIGQUIT, stop_signal); + signal (SIGTERM, stop_signal); + + return 0; +} + +void +vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) +{ + clib_warning ("BUG"); +} + +int +connect_to_vpp (char *name) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + api_main_t *am = &api_main; + + if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) + return -1; + + utm->vl_input_queue = am->shmem_hdr->vl_input_queue; + utm->my_client_index = am->my_client_index; + + return 0; +} + +static void +vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t *mp) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, + mp->segment_size); +} + +static void +vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + session_t * session; + vl_api_disconnect_session_reply_t * rmp; + uword * p; + int rv = 0; + u64 key; + + key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + + p = hash_get (utm->session_index_by_vpp_handles, key); + + if (p) + { + session = pool_elt_at_index (utm->sessions, p[0]); + hash_unset (utm->session_index_by_vpp_handles, key); + pool_put (utm->sessions, session); + } + else + { + clib_warning ("couldn't find session key %llx", key); + rv = -11; + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + + rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); + rmp->retval = rv; + rmp->session_index = mp->session_index; + rmp->session_thread_index = mp->session_thread_index; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); +} + +static void +vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + session_t * session; + vl_api_reset_session_reply_t * rmp; + uword * p; + int rv = 0; + u64 key; + + key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + + p = hash_get(utm->session_index_by_vpp_handles, key); + + if (p) + { + session = pool_elt_at_index(utm->sessions, p[0]); + hash_unset(utm->session_index_by_vpp_handles, key); + pool_put(utm->sessions, session); + } + else + { + clib_warning("couldn't find session key %llx", key); + rv = -11; + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); + rmp->retval = rv; + rmp->session_index = mp->session_index; + rmp->session_thread_index = mp->session_thread_index; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); +} + +void +handle_fifo_event_connect_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e) +{ + svm_fifo_t * rx_fifo; + int n_read, bytes; + + rx_fifo = e->fifo; + + bytes = e->enqueue_length; + do + { + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf), + utm->rx_buf); + if (n_read > 0) + bytes -= n_read; + } + while (n_read < 0 || bytes > 0); + + // bytes_to_read = svm_fifo_max_dequeue (rx_fifo); + // + // bytes_to_read = vec_len(utm->rx_buf) > bytes_to_read ? 
+ // bytes_to_read : vec_len(utm->rx_buf); + // + // buffer_offset = 0; + // while (bytes_to_read > 0) + // { + // rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid, + // bytes_to_read, + // utm->rx_buf + buffer_offset); + // if (rv > 0) + // { + // bytes_to_read -= rv; + // buffer_offset += rv; + // bytes_received += rv; + // } + // } + + + // while (bytes_received < bytes_sent) + // { + // rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid, + // vec_len (utm->rx_buf), + // utm->rx_buf); + // if (rv > 0) + // { + //#if CLIB_DEBUG > 0 + // int j; + // for (j = 0; j < rv; j++) + // { + // if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) + // { + // clib_warning ("error at byte %lld, 0x%x not 0x%x", + // bytes_received + j, + // utm->rx_buf[j], + // ((bytes_received + j )&0xff)); + // } + // } + //#endif + // bytes_received += (u64) rv; + // } + // } +} + +void +handle_connect_event_queue (uri_tcp_test_main_t * utm) +{ + session_fifo_event_t _e, *e = &_e;; + + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, 0 /* nowait */); + switch (e->event_type) + { + case FIFO_EVENT_SERVER_RX: + handle_fifo_event_connect_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return; + + default: + clib_warning("unknown event type %d", e->event_type); + break; + } +} + +void +uri_tcp_connect_send (uri_tcp_test_main_t *utm) +{ + u8 *test_data = utm->connect_test_data; + u64 bytes_sent = 0; + int rv; + int mypid = getpid(); + session_t * session; + svm_fifo_t *tx_fifo; + int buffer_offset, bytes_to_send = 0; + session_fifo_event_t evt; + static int serial_number = 0; + int i; + u32 max_chunk = 64 << 10, write; + + session = pool_elt_at_index (utm->sessions, utm->connected_session_index); + tx_fifo = session->server_tx_fifo; + + vec_validate (utm->rx_buf, vec_len (test_data) - 1); + + for (i = 0; i < 10; i++) + { + bytes_to_send = vec_len (test_data); + buffer_offset = 0; + while (bytes_to_send > 0) + { + write = bytes_to_send > max_chunk ? 
max_chunk : bytes_to_send; + rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write, + test_data + buffer_offset); + + if (rv > 0) + { + bytes_to_send -= rv; + buffer_offset += rv; + bytes_sent += rv; + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = rv; + evt.event_id = serial_number++; + + unix_shared_memory_queue_add (utm->vpp_event_queue, (u8 *) &evt, + 0 /* do wait for mutex */); + } + } + } +} + +static void +uri_tcp_client_test (uri_tcp_test_main_t * utm) +{ + vl_api_connect_uri_t * cmp; + vl_api_disconnect_session_t *dmp; + session_t *connected_session; + int i; + + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl(0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&cmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + return; + } + + /* Init test data */ + vec_validate (utm->connect_test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (utm->connect_test_data); i++) + utm->connect_test_data[i] = i & 0xff; + + /* Start reader thread */ + /* handle_connect_event_queue (utm); */ + + /* Start send */ + uri_tcp_connect_send (utm); + + /* Disconnect */ + connected_session = pool_elt_at_index(utm->sessions, + utm->connected_session_index); + dmp = vl_msg_api_alloc (sizeof (*dmp)); + memset (dmp, 0, sizeof (*dmp)); + dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); + dmp->client_index = utm->my_client_index; + dmp->session_index = connected_session->vpp_session_index; + dmp->session_thread_index = connected_session->vpp_session_thread; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&dmp); +} + +void +handle_fifo_event_server_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e) +{ + svm_fifo_t * rx_fifo, * tx_fifo; + int n_read; + + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + int rv, bytes; + + rx_fifo = e->fifo; + tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; + + bytes = e->enqueue_length; + do + { + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf), + utm->rx_buf); + + /* Reflect if a non-drop session */ + if (!utm->drop_packets && n_read > 0) + { + do + { + rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); + } + while (rv == -2); + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = n_read; + evt.event_id = e->event_id; + q = utm->vpp_event_queue; + unix_shared_memory_queue_add (q, (u8 *) &evt, 0 /* do wait for mutex */); + } + + if (n_read > 0) + bytes -= n_read; + } + while (n_read < 0 || bytes > 0); +} + +void +handle_event_queue (uri_tcp_test_main_t * utm) +{ + session_fifo_event_t _e, *e = &_e;; + + while (1) + { + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *)e, + 0 /* nowait */); + switch (e->event_type) + { + case FIFO_EVENT_SERVER_RX: + handle_fifo_event_server_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return; + + default: + clib_warning ("unknown event type %d", e->event_type); + break; + } + if (PREDICT_FALSE(utm->time_to_stop == 1)) + break; + if (PREDICT_FALSE(utm->time_to_print_stats == 1)) + { + utm->time_to_print_stats = 0; + fformat(stdout, "%d connections\n", pool_elts (utm->sessions)); + } + } +} + +static void 
+vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + if (mp->retval) + { + clib_warning("bind failed: %d", mp->retval); + return; + } + + if (mp->segment_name_length == 0) + { + clib_warning("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT(mp->server_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning("svm_fifo_segment_attach ('%s') failed", mp->segment_name); + return; + } + + utm->our_event_queue = + (unix_shared_memory_queue_t *) mp->server_event_queue_address; + + utm->state = STATE_READY; +} + +static void +vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + session_t *session; + u32 session_index; + svm_fifo_t *rx_fifo, *tx_fifo; + int rv; + + if (mp->retval) + { + clib_warning ("connection failed with code: %d", mp->retval); + utm->state = STATE_FAILED; + return; + } + /* + * Attatch to segment + */ + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + utm->state = STATE_FAILED; + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT(mp->client_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + /* + * Save the queues + */ + + utm->our_event_queue = (unix_shared_memory_queue_t *) + mp->client_event_queue_address; + + utm->vpp_event_queue = (unix_shared_memory_queue_t *) + mp->vpp_event_queue_address; + + /* + * Setup session + */ + + pool_get (utm->sessions, session); + session_index = session - utm->sessions; + + rx_fifo = (svm_fifo_t *)mp->server_rx_fifo; + rx_fifo->client_session_index = session_index; + tx_fifo = (svm_fifo_t *)mp->server_tx_fifo; + tx_fifo->client_session_index = session_index; + + session->server_rx_fifo = rx_fifo; + session->server_tx_fifo = tx_fifo; + session->vpp_session_index = mp->session_index; + session->vpp_session_thread = mp->session_thread_index; + + /* Save handle */ + utm->connected_session_index = session_index; + + utm->state = STATE_READY; +} + +void +uri_tcp_bind (uri_tcp_test_main_t *utm) +{ + vl_api_bind_uri_t * bmp; + u32 fifo_size = 3 << 20; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl(0xfeedface); + bmp->initial_segment_size = 256<<20; /* size of initial segment */ + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128<<20; + memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&bmp); +} + +static void +vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t *mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + + if (mp->retval != 0) + clib_warning ("returned %d", ntohl(mp->retval)); + + utm->state = STATE_START; +} + +void +uri_tcp_unbind (uri_tcp_test_main_t *utm) 
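+/* Queue an unbind request for utm->uri to vpp; the reply is handled by
+   vl_api_unbind_uri_reply_t_handler above, which moves the client back
+   to STATE_START once vpp confirms. */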
+{ + vl_api_unbind_uri_t * ump; + + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&ump); +} + +static void +vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + vl_api_accept_session_reply_t *rmp; + svm_fifo_t * rx_fifo, * tx_fifo; + session_t * session; + static f64 start_time; + u64 key; + u32 session_index; + + if (start_time == 0.0) + start_time = clib_time_now (&utm->clib_time); + + utm->vpp_event_queue = (unix_shared_memory_queue_t *) + mp->vpp_event_queue_address; + + /* Allocate local session and set it up */ + pool_get (utm->sessions, session); + session_index = session - utm->sessions; + + rx_fifo = (svm_fifo_t *)mp->server_rx_fifo; + rx_fifo->client_session_index = session_index; + tx_fifo = (svm_fifo_t *)mp->server_tx_fifo; + tx_fifo->client_session_index = session_index; + + session->server_rx_fifo = rx_fifo; + session->server_tx_fifo = tx_fifo; + + /* Add it to lookup table */ + key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + hash_set (utm->session_index_by_vpp_handles, key, session_index); + + utm->state = STATE_READY; + + /* Stats printing */ + if (pool_elts (utm->sessions) && (pool_elts(utm->sessions) % 20000) == 0) + { + f64 now = clib_time_now (&utm->clib_time); + fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n", + pool_elts(utm->sessions), now - start_time, + (f64)pool_elts(utm->sessions) / (now - start_time)); + } + + /* Send accept reply to vpp */ + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); + rmp->session_type = mp->session_type; + rmp->session_index = mp->session_index; + rmp->session_thread_index = mp->session_thread_index; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); +} + +void +uri_tcp_server_test (uri_tcp_test_main_t * utm) +{ + + /* Bind to uri */ + uri_tcp_bind (utm); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + /* Enter handle event loop */ + handle_event_queue (utm); + + /* Cleanup */ + uri_tcp_unbind (utm); + + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("timeout waiting for STATE_START"); + return; + } + + fformat (stdout, "Test complete...\n"); +} + +#define foreach_uri_msg \ +_(BIND_URI_REPLY, bind_uri_reply) \ +_(UNBIND_URI_REPLY, unbind_uri_reply) \ +_(ACCEPT_SESSION, accept_session) \ +_(CONNECT_URI_REPLY, connect_uri_reply) \ +_(DISCONNECT_SESSION, disconnect_session) \ +_(RESET_SESSION, reset_session) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) + +void +uri_api_hookup (uri_tcp_test_main_t * utm) +{ +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_uri_msg; +#undef _ +} + +int +main (int argc, char **argv) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + unformat_input_t _argv, *a = &_argv; + u8 *chroot_prefix; + u8 *heap; + u8 * bind_name = (u8 *) "tcp://0.0.0.0/1234"; + u32 tmp; + mheap_t *h; + session_t * session; + int i; + int i_am_master = 1, drop_packets = 0; + + clib_mem_init (0, 256 << 20); + + heap = clib_mem_get_per_cpu_heap (); + h = mheap_header 
(heap); + + /* make the main heap thread-safe */ + h->flags |= MHEAP_FLAG_THREAD_SAFE; + + vec_validate (utm->rx_buf, 65536); + + utm->session_index_by_vpp_handles = + hash_create (0, sizeof(uword)); + + utm->my_pid = getpid(); + utm->configured_segment_size = 1<<20; + + clib_time_init (&utm->clib_time); + init_error_string_table (utm); + svm_fifo_segment_init(0x200000000ULL, 20); + unformat_init_command_line (a, argv); + + while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) + { + if (unformat (a, "chroot prefix %s", &chroot_prefix)) + { + vl_set_memory_root_path ((char *) chroot_prefix); + } + else if (unformat (a, "uri %s", &bind_name)) + ; + else if (unformat (a, "segment-size %dM", &tmp)) + utm->configured_segment_size = tmp<<20; + else if (unformat (a, "segment-size %dG", &tmp)) + utm->configured_segment_size = tmp<<30; + else if (unformat (a, "master")) + i_am_master = 1; + else if (unformat (a, "slave")) + i_am_master = 0; + else if (unformat (a, "drop")) + drop_packets = 1; + else + { + fformat (stderr, "%s: usage [master|slave]\n"); + exit (1); + } + } + + utm->uri = format (0, "%s%c", bind_name, 0); + utm->i_am_master = i_am_master; + utm->segment_main = &svm_fifo_segment_main; + utm->drop_packets = drop_packets; + + utm->connect_uri = format (0, "tcp://6.0.1.2/1234%c", 0); + + setup_signal_handlers(); + uri_api_hookup (utm); + + if (connect_to_vpp (i_am_master? "uri_tcp_server":"uri_tcp_client") < 0) + { + svm_region_exit (); + fformat (stderr, "Couldn't connect to vpe, exiting...\n"); + exit (1); + } + + if (i_am_master == 0) + { + uri_tcp_client_test (utm); + exit (0); + } + + /* $$$$ hack preallocation */ + for (i = 0; i < 200000; i++) + { + pool_get (utm->sessions, session); + memset (session, 0, sizeof (*session)); + } + for (i = 0; i < 200000; i++) + pool_put_index (utm->sessions, i); + + uri_tcp_server_test (utm); + + vl_client_disconnect_from_vlib (); + exit (0); +} diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c new file mode 100644 index 00000000..6f5284c9 --- /dev/null +++ b/src/uri/uri_udp_test.c @@ -0,0 +1,553 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +/* declare message handlers for each api */ + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
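+/* vl_print deliberately expands to nothing: the generated
+   vl_api_*_t_print handlers still compile, but they emit no trace
+   output in this standalone test client. */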
+#define vl_printfun +#include +#undef vl_printfun + +/* Satisfy external references when not linking with -lvlib */ +vlib_main_t vlib_global_main; +vlib_main_t **vlib_mains; + +typedef enum +{ + STATE_START, + STATE_READY, + STATE_DISCONNECTING, +} connection_state_t; + +typedef struct +{ + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; +} session_t; + +typedef struct +{ + /* vpe input queue */ + unix_shared_memory_queue_t *vl_input_queue; + + /* API client handle */ + u32 my_client_index; + + /* The URI we're playing with */ + u8 *uri; + + /* Session pool */ + session_t *sessions; + + /* Hash table for disconnect processing */ + uword *session_index_by_vpp_handles; + + /* fifo segment */ + svm_fifo_segment_private_t *seg; + + /* intermediate rx buffer */ + u8 *rx_buf; + + /* Our event queue */ + unix_shared_memory_queue_t *our_event_queue; + + /* $$$ single thread only for the moment */ + unix_shared_memory_queue_t *vpp_event_queue; + + /* For deadman timers */ + clib_time_t clib_time; + + /* State of the connection, shared between msg RX thread and main thread */ + volatile connection_state_t state; + + volatile int time_to_stop; + volatile int time_to_print_stats; + + /* VNET_API_ERROR_FOO -> "Foo" hash table */ + uword *error_string_by_error_number; +} uri_udp_test_main_t; + +#if CLIB_DEBUG > 0 +#define NITER 1000 +#else +#define NITER 1000000 +#endif + +uri_udp_test_main_t uri_udp_test_main; + +static void +stop_signal (int signum) +{ + uri_udp_test_main_t *um = &uri_udp_test_main; + + um->time_to_stop = 1; +} + +static void +stats_signal (int signum) +{ + uri_udp_test_main_t *um = &uri_udp_test_main; + + um->time_to_print_stats = 1; +} + +static clib_error_t * +setup_signal_handlers (void) +{ + signal (SIGINT, stats_signal); + signal (SIGQUIT, stop_signal); + signal (SIGTERM, stop_signal); + + return 0; +} + +u8 * +format_api_error (u8 * s, va_list * args) +{ + uri_udp_test_main_t *utm = va_arg (*args, uri_udp_test_main_t *); + i32 error = va_arg (*args, u32); + uword *p; + + p = hash_get (utm->error_string_by_error_number, -error); + + if (p) + s = format (s, "%s", p[0]); + else + s = format (s, "%d", error); + return s; +} + +int +wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) +{ + f64 timeout = clib_time_now (&utm->clib_time) + 5.0; + + while (clib_time_now (&utm->clib_time) < timeout) + { + if (utm->state == state) + return 0; + } + return -1; +} + +static void +vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); + return; + } + + utm->our_event_queue = (unix_shared_memory_queue_t *) + mp->server_event_queue_address; + + utm->state = STATE_READY; +} + +static void +vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + + if (mp->retval != 0) + clib_warning ("returned %d", ntohl (mp->retval)); + + utm->state = STATE_START; +} + +static void +vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + vl_api_accept_session_reply_t *rmp; + svm_fifo_t *rx_fifo, *tx_fifo; + session_t *session; 
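+  /* start_time is function-static: latched on the first accepted
+     session so the sessions-per-second figure printed below is
+     measured over the whole run. */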
+ static f64 start_time; + u64 key; + + if (start_time == 0.0) + start_time = clib_time_now (&utm->clib_time); + + utm->vpp_event_queue = (unix_shared_memory_queue_t *) + mp->vpp_event_queue_address; + + pool_get (utm->sessions, session); + + rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo->client_session_index = session - utm->sessions; + tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo->client_session_index = session - utm->sessions; + + session->server_rx_fifo = rx_fifo; + session->server_tx_fifo = tx_fifo; + + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; + + hash_set (utm->session_index_by_vpp_handles, key, session - utm->sessions); + + utm->state = STATE_READY; + + if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0) + { + f64 now = clib_time_now (&utm->clib_time); + fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n", + pool_elts (utm->sessions), now - start_time, + (f64) pool_elts (utm->sessions) / (now - start_time)); + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); + rmp->session_type = mp->session_type; + rmp->session_index = mp->session_index; + rmp->session_thread_index = mp->session_thread_index; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); +} + +static void +vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + session_t *session; + vl_api_disconnect_session_reply_t *rmp; + uword *p; + int rv = 0; + u64 key; + + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; + + p = hash_get (utm->session_index_by_vpp_handles, key); + + if (p) + { + session = pool_elt_at_index (utm->sessions, p[0]); + hash_unset (utm->session_index_by_vpp_handles, key); + pool_put (utm->sessions, session); + } + else + { + clib_warning ("couldn't find session key %llx", key); + rv = -11; + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); + rmp->retval = rv; + rmp->session_index = mp->session_index; + rmp->session_thread_index = mp->session_thread_index; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); +} + +#define foreach_uri_msg \ +_(BIND_URI_REPLY, bind_uri_reply) \ +_(UNBIND_URI_REPLY, unbind_uri_reply) \ +_(ACCEPT_SESSION, accept_session) \ +_(DISCONNECT_SESSION, disconnect_session) + +void +uri_api_hookup (uri_udp_test_main_t * utm) +{ +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_uri_msg; +#undef _ + +} + + +int +connect_to_vpp (char *name) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + api_main_t *am = &api_main; + + if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) + return -1; + + utm->vl_input_queue = am->shmem_hdr->vl_input_queue; + utm->my_client_index = am->my_client_index; + + return 0; +} + +void +vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
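+/* Link-time stub: this test client is not linked against vlib, so any
+   call that reaches vlib_cli_output at runtime indicates a bug. */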
+{ + clib_warning ("BUG"); +} + +static void +init_error_string_table (uri_udp_test_main_t * utm) +{ + utm->error_string_by_error_number = hash_create (0, sizeof (uword)); + +#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); + foreach_vnet_api_error; +#undef _ + + hash_set (utm->error_string_by_error_number, 99, "Misc"); +} + +void +handle_fifo_event_server_rx (uri_udp_test_main_t * utm, + session_fifo_event_t * e) +{ + svm_fifo_t *rx_fifo, *tx_fifo; + int nbytes; + + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + int rv; + + rx_fifo = e->fifo; + tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; + + do + { + nbytes = svm_fifo_dequeue_nowait (rx_fifo, 0, + vec_len (utm->rx_buf), utm->rx_buf); + } + while (nbytes <= 0); + do + { + rv = svm_fifo_enqueue_nowait (tx_fifo, 0, nbytes, utm->rx_buf); + } + while (rv == -2); + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = nbytes; + evt.event_id = e->event_id; + q = utm->vpp_event_queue; + unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); +} + +void +handle_event_queue (uri_udp_test_main_t * utm) +{ + session_fifo_event_t _e, *e = &_e;; + + while (1) + { + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); + switch (e->event_type) + { + case FIFO_EVENT_SERVER_RX: + handle_fifo_event_server_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return; + + default: + clib_warning ("unknown event type %d", e->event_type); + break; + } + if (PREDICT_FALSE (utm->time_to_stop == 1)) + break; + if (PREDICT_FALSE (utm->time_to_print_stats == 1)) + { + utm->time_to_print_stats = 0; + fformat (stdout, "%d connections\n", pool_elts (utm->sessions)); + } + } +} + +void +uri_udp_test (uri_udp_test_main_t * utm) +{ + vl_api_bind_uri_t *bmp; + vl_api_unbind_uri_t *ump; + + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->segment_size = 2 << 30; + memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + handle_event_queue (utm); + + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); + + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("timeout waiting for STATE_START"); + return; + } + + fformat (stdout, "Test complete...\n"); +} + +int +main (int argc, char **argv) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + unformat_input_t _argv, *a = &_argv; + u8 *chroot_prefix; + u8 *heap; + u8 *bind_name = (u8 *) "udp4:1234"; + mheap_t *h; + session_t *session; + int i; + + clib_mem_init (0, 256 << 20); + + heap = clib_mem_get_per_cpu_heap (); + h = mheap_header (heap); + + /* make the main heap thread-safe */ + h->flags |= MHEAP_FLAG_THREAD_SAFE; + + vec_validate (utm->rx_buf, 8192); + + utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); + + clib_time_init (&utm->clib_time); + init_error_string_table (utm); + svm_fifo_segment_init (0x200000000ULL, 20); + 
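+  /*
+   * Argument parsing below uses unformat directly on argv.  A hypothetical
+   * invocation it would accept (sketch, not from the original source):
+   *
+   *   uri_udp_test uri udp4:1234 chroot prefix /tmp/vpp-jail
+   *
+   * Anything else falls through to the usage message and exits.
+   */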
unformat_init_command_line (a, argv); + + while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) + { + if (unformat (a, "chroot prefix %s", &chroot_prefix)) + { + vl_set_memory_root_path ((char *) chroot_prefix); + } + else if (unformat (a, "uri %s", &bind_name)) + ; + else + { + fformat (stderr, "%s: usage [master|slave]\n"); + exit (1); + } + } + + utm->uri = format (0, "%s%c", bind_name, 0); + + setup_signal_handlers (); + + uri_api_hookup (utm); + + if (connect_to_vpp ("uri_udp_test") < 0) + { + svm_region_exit (); + fformat (stderr, "Couldn't connect to vpe, exiting...\n"); + exit (1); + } + + /* $$$$ hack preallocation */ + for (i = 0; i < 200000; i++) + { + pool_get (utm->sessions, session); + memset (session, 0, sizeof (*session)); + } + for (i = 0; i < 200000; i++) + pool_put_index (utm->sessions, i); + + uri_udp_test (utm); + + vl_client_disconnect_from_vlib (); + exit (0); +} + +#undef vl_api_version +#define vl_api_version(n,v) static u32 vpe_api_version = v; +#include +#undef vl_api_version + +void +vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) +{ + /* + * Send the main API signature in slot 0. This bit of code must + * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). + */ + mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/uri/uri_udp_test2.c b/src/uri/uri_udp_test2.c new file mode 100644 index 00000000..ddfffaa6 --- /dev/null +++ b/src/uri/uri_udp_test2.c @@ -0,0 +1,954 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../vnet/session/application_interface.h" + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +/* declare message handlers for each api */ + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
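+/*
+ * Editorial sketch: the API definition header is pulled in several times,
+ * each pass with a different vl_* macro defined, so one set of message
+ * definitions expands into message structs, endian converters and print
+ * functions.  The pattern, with a hypothetical header name, looks like:
+ *
+ *   #define vl_typedefs
+ *   #include <some_api_definitions.h>
+ *   #undef vl_typedefs
+ *
+ * The pass below does the same for the (here stubbed-out) print functions.
+ */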
+#define vl_printfun +#include +#undef vl_printfun + +/* Satisfy external references when not linking with -lvlib */ +vlib_main_t vlib_global_main; +vlib_main_t **vlib_mains; + +typedef enum +{ + STATE_START, + STATE_READY, + STATE_DISCONNECTING, +} connection_state_t; + +typedef struct +{ + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; +} session_t; + +typedef struct +{ + /* vpe input queue */ + unix_shared_memory_queue_t *vl_input_queue; + + /* API client handle */ + u32 my_client_index; + + /* The URI we're playing with */ + u8 *uri; + + /* Session pool */ + session_t *sessions; + + /* Hash table for disconnect processing */ + uword *session_index_by_vpp_handles; + + /* fifo segment */ + svm_fifo_segment_private_t *seg; + + /* intermediate rx buffer */ + u8 *rx_buf; + + /* URI for connect */ + u8 *connect_uri; + + int i_am_master; + + /* Our event queue */ + unix_shared_memory_queue_t *our_event_queue; + + /* $$$ single thread only for the moment */ + unix_shared_memory_queue_t *vpp_event_queue; + + /* $$$$ hack: cut-through session index */ + volatile u32 cut_through_session_index; + + /* unique segment name counter */ + u32 unique_segment_index; + + pid_t my_pid; + + /* pthread handle */ + pthread_t cut_through_thread_handle; + + /* For deadman timers */ + clib_time_t clib_time; + + /* State of the connection, shared between msg RX thread and main thread */ + volatile connection_state_t state; + + volatile int time_to_stop; + volatile int time_to_print_stats; + + u32 configured_segment_size; + + /* VNET_API_ERROR_FOO -> "Foo" hash table */ + uword *error_string_by_error_number; + + /* convenience */ + svm_fifo_segment_main_t *segment_main; + +} uri_udp_test_main_t; + +#if CLIB_DEBUG > 0 +#define NITER 10000 +#else +#define NITER 4000000 +#endif + +uri_udp_test_main_t uri_udp_test_main; + +static void +stop_signal (int signum) +{ + uri_udp_test_main_t *um = &uri_udp_test_main; + + um->time_to_stop = 1; +} + +static void +stats_signal (int signum) +{ + uri_udp_test_main_t *um = &uri_udp_test_main; + + um->time_to_print_stats = 1; +} + +static clib_error_t * +setup_signal_handlers (void) +{ + signal (SIGINT, stats_signal); + signal (SIGQUIT, stop_signal); + signal (SIGTERM, stop_signal); + + return 0; +} + +u8 * +format_api_error (u8 * s, va_list * args) +{ + uri_udp_test_main_t *utm = va_arg (*args, uri_udp_test_main_t *); + i32 error = va_arg (*args, u32); + uword *p; + + p = hash_get (utm->error_string_by_error_number, -error); + + if (p) + s = format (s, "%s", p[0]); + else + s = format (s, "%d", error); + return s; +} + +int +wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) +{ +#if CLIB_DEBUG > 0 +#define TIMEOUT 600.0 +#else +#define TIMEOUT 600.0 +#endif + + f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT; + + while (clib_time_now (&utm->clib_time) < timeout) + { + if (utm->state == state) + return 0; + } + return -1; +} + +u64 server_bytes_received, server_bytes_sent; + +static void * +cut_through_thread_fn (void *arg) +{ + session_t *s; + svm_fifo_t *rx_fifo; + svm_fifo_t *tx_fifo; + u8 *my_copy_buffer = 0; + uri_udp_test_main_t *utm = &uri_udp_test_main; + i32 actual_transfer; + int rv; + u32 buffer_offset; + + while (utm->cut_through_session_index == ~0) + ; + + s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); + + rx_fifo = s->server_rx_fifo; + tx_fifo = s->server_tx_fifo; + + vec_validate (my_copy_buffer, 64 * 1024 - 1); + + while (true) + { + /* We read from the tx fifo and write to the rx fifo */ + do + { 
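+	  /*
+	   * Busy-wait: the _nowait dequeue returns a non-positive count
+	   * while the fifo is empty, so spin until the peer writes data.
+	   * The enqueue loop below retries the same way until every byte
+	   * read here has been copied into the rx fifo.
+	   */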
+ actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0, + vec_len (my_copy_buffer), + my_copy_buffer); + } + while (actual_transfer <= 0); + + server_bytes_received += actual_transfer; + + buffer_offset = 0; + while (actual_transfer > 0) + { + rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer, + my_copy_buffer + buffer_offset); + if (rv > 0) + { + actual_transfer -= rv; + buffer_offset += rv; + server_bytes_sent += rv; + } + + } + if (PREDICT_FALSE (utm->time_to_stop)) + break; + } + + pthread_exit (0); +} + +static void +uri_udp_slave_test (uri_udp_test_main_t * utm) +{ + vl_api_connect_uri_t *cmp; + int i; + u8 *test_data = 0; + u64 bytes_received = 0, bytes_sent = 0; + i32 bytes_to_read; + int rv; + int mypid = getpid (); + f64 before, after, delta, bytes_per_second; + session_t *session; + svm_fifo_t *rx_fifo, *tx_fifo; + int buffer_offset, bytes_to_send = 0; + + vec_validate (test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (test_data); i++) + test_data[i] = i & 0xff; + + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); + rx_fifo = session->server_rx_fifo; + tx_fifo = session->server_tx_fifo; + + before = clib_time_now (&utm->clib_time); + + vec_validate (utm->rx_buf, vec_len (test_data) - 1); + + for (i = 0; i < NITER; i++) + { + bytes_to_send = vec_len (test_data); + buffer_offset = 0; + while (bytes_to_send > 0) + { + rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, + bytes_to_send, + test_data + buffer_offset); + + if (rv > 0) + { + bytes_to_send -= rv; + buffer_offset += rv; + bytes_sent += rv; + } + } + + bytes_to_read = svm_fifo_max_dequeue (rx_fifo); + + bytes_to_read = vec_len (utm->rx_buf) > bytes_to_read ? 
+ bytes_to_read : vec_len (utm->rx_buf); + + buffer_offset = 0; + while (bytes_to_read > 0) + { + rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + bytes_to_read, + utm->rx_buf + buffer_offset); + if (rv > 0) + { + bytes_to_read -= rv; + buffer_offset += rv; + bytes_received += rv; + } + } + } + while (bytes_received < bytes_sent) + { + rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + vec_len (utm->rx_buf), utm->rx_buf); + if (rv > 0) + { +#if CLIB_DEBUG > 0 + int j; + for (j = 0; j < rv; j++) + { + if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) + { + clib_warning ("error at byte %lld, 0x%x not 0x%x", + bytes_received + j, + utm->rx_buf[j], + ((bytes_received + j) & 0xff)); + } + } +#endif + bytes_received += (u64) rv; + } + } + + after = clib_time_now (&utm->clib_time); + delta = after - before; + bytes_per_second = 0.0; + + if (delta > 0.0) + bytes_per_second = (f64) bytes_received / delta; + + fformat (stdout, + "Done: %lld recv bytes in %.2f seconds, %.2f bytes/sec...\n\n", + bytes_received, delta, bytes_per_second); + fformat (stdout, + "Done: %lld sent bytes in %.2f seconds, %.2f bytes/sec...\n\n", + bytes_sent, delta, bytes_per_second); + fformat (stdout, + "client -> server -> client round trip: %.2f Gbit/sec \n\n", + (bytes_per_second * 8.0) / 1e9); +} + +static void +vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->server_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + utm->our_event_queue = (unix_shared_memory_queue_t *) + mp->server_event_queue_address; + + utm->state = STATE_READY; +} + +static void +vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, + mp->segment_size); +} + +static void +vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) +{ + u32 segment_index; + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *seg; + unix_shared_memory_queue_t *client_q; + vl_api_connect_uri_reply_t *rmp; + session_t *session; + int rv = 0; + + /* Create the segment */ + a->segment_name = (char *) format (0, "%d:segment%d%c", utm->my_pid, + utm->unique_segment_index++, 0); + a->segment_size = utm->configured_segment_size; + + rv = svm_fifo_segment_create (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%s') failed", a->segment_name); + rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + goto send_reply; + } + + vec_add2 (utm->seg, seg, 1); + + segment_index = vec_len (sm->segments) - 1; + + memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0])); + + pool_get (utm->sessions, session); + + /* + * By construction the master's idea of the rx fifo 
ends up in + * fsh->fifos[0], and the master's idea of the tx fifo ends up in + * fsh->fifos[1]. + */ + session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, + 128 * 1024); + ASSERT (session->server_rx_fifo); + + session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, + 128 * 1024); + ASSERT (session->server_tx_fifo); + + session->server_rx_fifo->server_session_index = session - utm->sessions; + session->server_tx_fifo->server_session_index = session - utm->sessions; + utm->cut_through_session_index = session - utm->sessions; + + rv = pthread_create (&utm->cut_through_thread_handle, + NULL /*attr */ , cut_through_thread_fn, 0); + if (rv) + { + clib_warning ("pthread_create returned %d", rv); + rv = VNET_API_ERROR_SYSCALL_ERROR_1; + } + +send_reply: + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + + rmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI_REPLY); + rmp->context = mp->context; + rmp->retval = ntohl (rv); + rmp->segment_name_length = vec_len (a->segment_name); + memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); + + vec_free (a->segment_name); + + client_q = (unix_shared_memory_queue_t *) mp->client_queue_address; + vl_msg_api_send_shmem (client_q, (u8 *) & rmp); +} + +static void +vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + + if (mp->retval != 0) + clib_warning ("returned %d", ntohl (mp->retval)); + + utm->state = STATE_START; +} + +static void +vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + vl_api_accept_session_reply_t *rmp; + svm_fifo_t *rx_fifo, *tx_fifo; + session_t *session; + static f64 start_time; + u64 key; + + if (start_time == 0.0) + start_time = clib_time_now (&utm->clib_time); + + utm->vpp_event_queue = (unix_shared_memory_queue_t *) + mp->vpp_event_queue_address; + + pool_get (utm->sessions, session); + + rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo->client_session_index = session - utm->sessions; + tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo->client_session_index = session - utm->sessions; + + session->server_rx_fifo = rx_fifo; + session->server_tx_fifo = tx_fifo; + + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; + + hash_set (utm->session_index_by_vpp_handles, key, session - utm->sessions); + + utm->state = STATE_READY; + + if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0) + { + f64 now = clib_time_now (&utm->clib_time); + fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n", + pool_elts (utm->sessions), now - start_time, + (f64) pool_elts (utm->sessions) / (now - start_time)); + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); + rmp->session_type = mp->session_type; + rmp->session_index = mp->session_index; + rmp->session_thread_index = mp->session_thread_index; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); +} + +static void +vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + session_t *session; + vl_api_disconnect_session_reply_t *rmp; + uword *p; + int rv = 0; + u64 key; + + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; + + p = hash_get (utm->session_index_by_vpp_handles, key); + + if (p) + { + session = pool_elt_at_index (utm->sessions, p[0]); + hash_unset 
(utm->session_index_by_vpp_handles, key); + pool_put (utm->sessions, session); + } + else + { + clib_warning ("couldn't find session key %llx", key); + rv = -11; + } + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); + rmp->retval = rv; + rmp->session_index = mp->session_index; + rmp->session_thread_index = mp->session_thread_index; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); +} + +static void +vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) +{ + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *seg; + svm_fifo_segment_header_t *fsh; + session_t *session; + u32 segment_index; + int rv; + + ASSERT (utm->i_am_master == 0); + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + memset (a, 0, sizeof (*a)); + + a->segment_name = (char *) mp->segment_name; + + sleep (1); + + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name); + return; + } + + segment_index = vec_len (sm->segments) - 1; + + vec_add2 (utm->seg, seg, 1); + + memcpy (seg, sm->segments + segment_index, sizeof (*seg)); + sh = seg->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + while (vec_len (fsh->fifos) < 2) + sleep (1); + + pool_get (utm->sessions, session); + utm->cut_through_session_index = session - utm->sessions; + + session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; + ASSERT (session->server_rx_fifo); + session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; + ASSERT (session->server_tx_fifo); + + /* security: could unlink /dev/shm/segment_name> here, maybe */ + + utm->state = STATE_READY; +} + +#define foreach_uri_msg \ +_(BIND_URI_REPLY, bind_uri_reply) \ +_(CONNECT_URI, connect_uri) \ +_(CONNECT_URI_REPLY, connect_uri_reply) \ +_(UNBIND_URI_REPLY, unbind_uri_reply) \ +_(ACCEPT_SESSION, accept_session) \ +_(DISCONNECT_SESSION, disconnect_session) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) + +void +uri_api_hookup (uri_udp_test_main_t * utm) +{ +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_uri_msg; +#undef _ + +} + + +int +connect_to_vpp (char *name) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + api_main_t *am = &api_main; + + if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) + return -1; + + utm->vl_input_queue = am->shmem_hdr->vl_input_queue; + utm->my_client_index = am->my_client_index; + + return 0; +} + +void +vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
+{ + clib_warning ("BUG"); +} + +static void +init_error_string_table (uri_udp_test_main_t * utm) +{ + utm->error_string_by_error_number = hash_create (0, sizeof (uword)); + +#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); + foreach_vnet_api_error; +#undef _ + + hash_set (utm->error_string_by_error_number, 99, "Misc"); +} + +void +handle_fifo_event_server_rx (uri_udp_test_main_t * utm, + session_fifo_event_t * e) +{ + svm_fifo_t *rx_fifo, *tx_fifo; + int nbytes; + + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + int rv; + + rx_fifo = e->fifo; + tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; + + do + { + nbytes = svm_fifo_dequeue_nowait (rx_fifo, 0, + vec_len (utm->rx_buf), utm->rx_buf); + } + while (nbytes <= 0); + do + { + rv = svm_fifo_enqueue_nowait (tx_fifo, 0, nbytes, utm->rx_buf); + } + while (rv == -2); + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = nbytes; + evt.event_id = e->event_id; + q = utm->vpp_event_queue; + unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); +} + +void +handle_event_queue (uri_udp_test_main_t * utm) +{ + session_fifo_event_t _e, *e = &_e;; + + while (1) + { + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); + switch (e->event_type) + { + case FIFO_EVENT_SERVER_RX: + handle_fifo_event_server_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return; + + default: + clib_warning ("unknown event type %d", e->event_type); + break; + } + if (PREDICT_FALSE (utm->time_to_stop == 1)) + break; + if (PREDICT_FALSE (utm->time_to_print_stats == 1)) + { + utm->time_to_print_stats = 0; + fformat (stdout, "%d connections\n", pool_elts (utm->sessions)); + } + } +} + +void +uri_udp_test (uri_udp_test_main_t * utm) +{ + vl_api_bind_uri_t *bmp; + vl_api_unbind_uri_t *ump; + + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->initial_segment_size = 256 << 20; /* size of initial segment */ + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; + memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + handle_event_queue (utm); + + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); + + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("timeout waiting for STATE_START"); + return; + } + + fformat (stdout, "Test complete...\n"); +} + +int +main (int argc, char **argv) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + unformat_input_t _argv, *a = &_argv; + u8 *chroot_prefix; + u8 *heap; + u8 *bind_name = (u8 *) "udp://0.0.0.0/1234"; + u32 tmp; + mheap_t *h; + session_t *session; + int i; + int i_am_master = 1; + + clib_mem_init (0, 256 << 20); + + heap 
= clib_mem_get_per_cpu_heap (); + h = mheap_header (heap); + + /* make the main heap thread-safe */ + h->flags |= MHEAP_FLAG_THREAD_SAFE; + + vec_validate (utm->rx_buf, 8192); + + utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); + + utm->my_pid = getpid (); + utm->configured_segment_size = 1 << 20; + + clib_time_init (&utm->clib_time); + init_error_string_table (utm); + svm_fifo_segment_init (0x200000000ULL, 20); + unformat_init_command_line (a, argv); + + while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) + { + if (unformat (a, "chroot prefix %s", &chroot_prefix)) + { + vl_set_memory_root_path ((char *) chroot_prefix); + } + else if (unformat (a, "uri %s", &bind_name)) + ; + else if (unformat (a, "segment-size %dM", &tmp)) + utm->configured_segment_size = tmp << 20; + else if (unformat (a, "segment-size %dG", &tmp)) + utm->configured_segment_size = tmp << 30; + else if (unformat (a, "master")) + i_am_master = 1; + else if (unformat (a, "slave")) + i_am_master = 0; + else + { + fformat (stderr, "%s: usage [master|slave]\n"); + exit (1); + } + } + + utm->cut_through_session_index = ~0; + utm->uri = format (0, "%s%c", bind_name, 0); + utm->i_am_master = i_am_master; + utm->segment_main = &svm_fifo_segment_main; + + utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0); + + setup_signal_handlers (); + + uri_api_hookup (utm); + + if (connect_to_vpp (i_am_master ? "uri_udp_master" : "uri_udp_slave") < 0) + { + svm_region_exit (); + fformat (stderr, "Couldn't connect to vpe, exiting...\n"); + exit (1); + } + + if (i_am_master == 0) + { + uri_udp_slave_test (utm); + exit (0); + } + + /* $$$$ hack preallocation */ + for (i = 0; i < 200000; i++) + { + pool_get (utm->sessions, session); + memset (session, 0, sizeof (*session)); + } + for (i = 0; i < 200000; i++) + pool_put_index (utm->sessions, i); + + uri_udp_test (utm); + + vl_client_disconnect_from_vlib (); + exit (0); +} + +#undef vl_api_version +#define vl_api_version(n,v) static u32 vpe_api_version = v; +#include +#undef vl_api_version + +void +vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) +{ + /* + * Send the main API signature in slot 0. This bit of code must + * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). + */ + mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); +} + +u32 +vl (void *p) +{ + return vec_len (p); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/uri/uritest.c b/src/uri/uritest.c new file mode 100644 index 00000000..edcdb3ad --- /dev/null +++ b/src/uri/uritest.c @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +/* declare message handlers for each api */ + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) +#define vl_printfun +#include +#undef vl_printfun + +typedef enum +{ + STATE_START, + STATE_READY, + STATE_DISCONNECTING, +} connection_state_t; + +typedef struct +{ + /* vpe input queue */ + unix_shared_memory_queue_t *vl_input_queue; + + /* API client handle */ + u32 my_client_index; + + /* role */ + int i_am_master; + + /* The URI we're playing with */ + u8 *uri; + + /* fifo segment */ + svm_fifo_segment_private_t *seg; + + svm_fifo_t *rx_fifo; + svm_fifo_t *tx_fifo; + + /* For deadman timers */ + clib_time_t clib_time; + + /* State of the connection, shared between msg RX thread and main thread */ + volatile connection_state_t state; + + /* VNET_API_ERROR_FOO -> "Foo" hash table */ + uword *error_string_by_error_number; +} uritest_main_t; + +#if CLIB_DEBUG > 0 +#define NITER 1000 +#else +#define NITER 1000000 +#endif + +uritest_main_t uritest_main; + +u8 * +format_api_error (u8 * s, va_list * args) +{ + uritest_main_t *utm = va_arg (*args, uritest_main_t *); + i32 error = va_arg (*args, u32); + uword *p; + + p = hash_get (utm->error_string_by_error_number, -error); + + if (p) + s = format (s, "%s", p[0]); + else + s = format (s, "%d", error); + return s; +} + +int +wait_for_state_change (uritest_main_t * utm, connection_state_t state) +{ + f64 timeout = clib_time_now (&utm->clib_time) + 1.0; + + while (clib_time_now (&utm->clib_time) < timeout) + { + if (utm->state == state) + return 0; + } + return -1; +} + +static void +vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +{ + uritest_main_t *utm = &uritest_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + ASSERT (utm->i_am_master); + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + /* Create the segment */ + rv = svm_fifo_segment_create (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); + return; + } + + vec_validate (utm->seg, 0); + + memcpy (utm->seg, a->rv, sizeof (*utm->seg)); + + /* + * By construction the master's idea of the rx fifo ends up in + * fsh->fifos[0], and the master's idea of the tx fifo ends up in + * fsh->fifos[1]. 
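+   * The slave mirrors this from the other end: after attaching, it takes
+   * fsh->fifos[1] as its rx fifo and fsh->fifos[0] as its tx fifo (see
+   * vl_api_connect_uri_reply_t_handler below).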
+ */ + utm->rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240); + ASSERT (utm->rx_fifo); + + utm->tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240); + ASSERT (utm->tx_fifo); + + utm->state = STATE_READY; +} + +static void +vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) +{ + uritest_main_t *utm = &uritest_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + int rv; + + ASSERT (utm->i_am_master == 0); + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + memset (a, 0, sizeof (*a)); + + a->segment_name = (char *) mp->segment_name; + + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); + return; + } + + vec_validate (utm->seg, 0); + + memcpy (utm->seg, a->rv, sizeof (*utm->seg)); + sh = utm->seg->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + while (vec_len (fsh->fifos) < 2) + sleep (1); + + utm->rx_fifo = (svm_fifo_t *) fsh->fifos[1]; + ASSERT (utm->rx_fifo); + utm->tx_fifo = (svm_fifo_t *) fsh->fifos[0]; + ASSERT (utm->tx_fifo); + + /* security: could unlink /dev/shm/segment_name> here, maybe */ + + utm->state = STATE_READY; +} + +static void +vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) +{ + uritest_main_t *utm = &uritest_main; + + if (mp->retval != 0) + clib_warning ("returned %d", ntohl (mp->retval)); + + utm->state = STATE_START; +} + +#define foreach_uri_msg \ +_(BIND_URI_REPLY, bind_uri_reply) \ +_(CONNECT_URI_REPLY, connect_uri_reply) \ +_(UNBIND_URI_REPLY, unbind_uri_reply) + +void +uri_api_hookup (uritest_main_t * utm) +{ +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_uri_msg; +#undef _ + +} + + +int +connect_to_vpp (char *name) +{ + uritest_main_t *utm = &uritest_main; + api_main_t *am = &api_main; + + if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) + return -1; + + utm->vl_input_queue = am->shmem_hdr->vl_input_queue; + utm->my_client_index = am->my_client_index; + + return 0; +} + +void +vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
+{ + clib_warning ("BUG"); +} + +static void +init_error_string_table (uritest_main_t * utm) +{ + utm->error_string_by_error_number = hash_create (0, sizeof (uword)); + +#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); + foreach_vnet_api_error; +#undef _ + + hash_set (utm->error_string_by_error_number, 99, "Misc"); +} + +void +uritest_master (uritest_main_t * utm) +{ + vl_api_bind_uri_t *bmp; + vl_api_unbind_uri_t *ump; + int i; + u8 *test_data = 0; + u8 *reply = 0; + u32 reply_len; + int mypid = getpid (); + + for (i = 0; i < 2048; i++) + vec_add1 (test_data, 'a' + (i % 32)); + + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->segment_size = 256 << 10; + memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + for (i = 0; i < NITER; i++) + svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (test_data), test_data); + + vec_validate (reply, 0); + + reply_len = svm_fifo_dequeue (utm->rx_fifo, mypid, vec_len (reply), reply); + + if (reply_len != 1) + clib_warning ("reply length %d", reply_len); + + if (reply[0] == 1) + fformat (stdout, "Test OK..."); + + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); + + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + fformat (stdout, "Master done...\n"); +} + +void +uritest_slave (uritest_main_t * utm) +{ + vl_api_connect_uri_t *cmp; + int i, j; + u8 *test_data = 0; + u8 *reply = 0; + u32 bytes_received = 0; + u32 actual_bytes; + int mypid = getpid (); + u8 ok; + f64 before, after, delta, bytes_per_second; + + vec_validate (test_data, 4095); + + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + ok = 1; + before = clib_time_now (&utm->clib_time); + for (i = 0; i < NITER; i++) + { + actual_bytes = svm_fifo_dequeue (utm->rx_fifo, mypid, + vec_len (test_data), test_data); + j = 0; + while (j < actual_bytes) + { + if (test_data[j] != ('a' + (bytes_received % 32))) + ok = 0; + bytes_received++; + j++; + } + if (bytes_received == NITER * 2048) + break; + } + + vec_add1 (reply, ok); + + svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (reply), reply); + after = clib_time_now (&utm->clib_time); + delta = after - before; + bytes_per_second = 0.0; + + if (delta > 0.0) + bytes_per_second = (f64) bytes_received / delta; + + fformat (stdout, + "Slave done, %d bytes in %.2f seconds, %.2f bytes/sec...\n", + bytes_received, delta, bytes_per_second); +} + +int +main (int argc, char **argv) +{ + uritest_main_t *utm = &uritest_main; + unformat_input_t _argv, *a = &_argv; + u8 *chroot_prefix; + u8 *heap; + char *bind_name = "fifo:uritest"; + mheap_t 
*h; + int i_am_master = 0; + + clib_mem_init (0, 128 << 20); + + heap = clib_mem_get_per_cpu_heap (); + h = mheap_header (heap); + + /* make the main heap thread-safe */ + h->flags |= MHEAP_FLAG_THREAD_SAFE; + + clib_time_init (&utm->clib_time); + init_error_string_table (utm); + svm_fifo_segment_init (0x200000000ULL, 20); + unformat_init_command_line (a, argv); + + utm->uri = format (0, "%s%c", bind_name, 0); + + while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) + { + if (unformat (a, "master")) + i_am_master = 1; + else if (unformat (a, "slave")) + i_am_master = 0; + else if (unformat (a, "chroot prefix %s", &chroot_prefix)) + { + vl_set_memory_root_path ((char *) chroot_prefix); + } + else + { + fformat (stderr, "%s: usage [master|slave]\n"); + exit (1); + } + } + + uri_api_hookup (utm); + + if (connect_to_vpp (i_am_master ? "uritest_master" : "uritest_slave") < 0) + { + svm_region_exit (); + fformat (stderr, "Couldn't connect to vpe, exiting...\n"); + exit (1); + } + + utm->i_am_master = i_am_master; + + if (i_am_master) + uritest_master (utm); + else + uritest_slave (utm); + + vl_client_disconnect_from_vlib (); + exit (0); +} + +#undef vl_api_version +#define vl_api_version(n,v) static u32 vpe_api_version = v; +#include +#undef vl_api_version + +void +vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) +{ + /* + * Send the main API signature in slot 0. This bit of code must + * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). + */ + mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c index 4f5eb09d..9f26bec7 100644 --- a/src/vlib/buffer.c +++ b/src/vlib/buffer.c @@ -360,7 +360,7 @@ vlib_buffer_create_free_list_helper (vlib_main_t * vm, memset (f, 0, sizeof (f[0])); f->index = f - bm->buffer_free_list_pool; f->n_data_bytes = vlib_buffer_round_size (n_data_bytes); - f->min_n_buffers_each_physmem_alloc = 16; + f->min_n_buffers_each_physmem_alloc = VLIB_FRAME_SIZE; f->name = clib_mem_is_heap_object (name) ? name : format (0, "%s", name); /* Setup free buffer template. 
*/ diff --git a/src/vlib/buffer.h b/src/vlib/buffer.h index 1f723f3b..69c8c7cc 100644 --- a/src/vlib/buffer.h +++ b/src/vlib/buffer.h @@ -240,6 +240,74 @@ vlib_get_buffer_opaque2 (vlib_buffer_t * b) return (void *) b->opaque2; } +/** \brief Get pointer to the end of buffer's data + * @param b pointer to the buffer + * @return pointer to tail of packet's data + */ +always_inline u8 * +vlib_buffer_get_tail (vlib_buffer_t * b) +{ + return b->data + b->current_data + b->current_length; +} + +/** \brief Append uninitialized data to buffer + * @param b pointer to the buffer + * @param size number of uninitialized bytes + * @return pointer to beginning of uninitialized data + */ +always_inline void * +vlib_buffer_put_uninit (vlib_buffer_t * b, u8 size) +{ + void *p = vlib_buffer_get_tail (b); + /* XXX make sure there's enough space */ + b->current_length += size; + return p; +} + +/** \brief Prepend uninitialized data to buffer + * @param b pointer to the buffer + * @param size number of uninitialized bytes + * @return pointer to beginning of uninitialized data + */ +always_inline void * +vlib_buffer_push_uninit (vlib_buffer_t * b, u8 size) +{ + ASSERT (b->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= size); + b->current_data -= size; + b->current_length += size; + + return vlib_buffer_get_current (b); +} + +/** \brief Make head room, typically for packet headers + * @param b pointer to the buffer + * @param size number of head room bytes + * @return pointer to start of buffer (current data) + */ +always_inline void * +vlib_buffer_make_headroom (vlib_buffer_t * b, u8 size) +{ + ASSERT (b->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= size); + b->current_data += size; + return vlib_buffer_get_current (b); +} + +/** \brief Retrieve bytes from buffer head + * @param b pointer to the buffer + * @param size number of bytes to pull + * @return pointer to start of buffer (current data) + */ +always_inline void * +vlib_buffer_pull (vlib_buffer_t * b, u8 size) +{ + if (b->current_length + VLIB_BUFFER_PRE_DATA_SIZE < size) + return 0; + + void *data = vlib_buffer_get_current (b); + vlib_buffer_advance (b, size); + return data; +} + /* Forward declaration. */ struct vlib_main_t; diff --git a/src/vlibmemory/unix_shared_memory_queue.c b/src/vlibmemory/unix_shared_memory_queue.c index 25d28910..e86edec3 100644 --- a/src/vlibmemory/unix_shared_memory_queue.c +++ b/src/vlibmemory/unix_shared_memory_queue.c @@ -33,18 +33,13 @@ * nels = number of elements on the queue * elsize = element size, presumably 4 and cacheline-size will * be popular choices. - * coid = consumer coid, from ChannelCreate * pid = consumer pid - * pulse_code = pulse code consumer expects - * pulse_value = pulse value consumer expects - * consumer_prio = consumer's priority, so pulses won't change - * the consumer's priority. * * The idea is to call this function in the queue consumer, * and e-mail the queue pointer to the producer(s). * - * The spp process / main thread allocates one of these - * at startup; its main input queue. The spp main input queue + * The vpp process / main thread allocates one of these + * at startup; its main input queue. The vpp main input queue * has a pointer to it in the shared memory segment header. 
* * You probably want to be on an svm data heap before calling this @@ -70,7 +65,7 @@ unix_shared_memory_queue_init (int nels, q->signal_when_queue_non_empty = signal_when_queue_non_empty; memset (&attr, 0, sizeof (attr)); - memset (&cattr, 0, sizeof (attr)); + memset (&cattr, 0, sizeof (cattr)); if (pthread_mutexattr_init (&attr)) clib_unix_warning ("mutexattr_init"); @@ -277,6 +272,7 @@ unix_shared_memory_queue_sub (unix_shared_memory_queue_t * q, clib_memcpy (elem, headp, q->elsize); q->head++; + /* $$$$ JFC shouldn't this be == 0? */ if (q->cursize == q->maxsize) need_broadcast = 1; diff --git a/src/vlibmemory/unix_shared_memory_queue.h b/src/vlibmemory/unix_shared_memory_queue.h index f758f17c..13800065 100644 --- a/src/vlibmemory/unix_shared_memory_queue.h +++ b/src/vlibmemory/unix_shared_memory_queue.h @@ -29,7 +29,7 @@ typedef struct _unix_shared_memory_queue pthread_cond_t condvar; /* 8 bytes */ int head; int tail; - int cursize; + volatile int cursize; int maxsize; int elsize; int consumer_pid; diff --git a/src/vnet.am b/src/vnet.am index 64484e18..923f61d8 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -324,11 +324,7 @@ libvnet_la_SOURCES += \ vnet/ip/ip_input_acl.c \ vnet/ip/lookup.c \ vnet/ip/ping.c \ - vnet/ip/punt.c \ - vnet/ip/udp_format.c \ - vnet/ip/udp_init.c \ - vnet/ip/udp_local.c \ - vnet/ip/udp_pg.c + vnet/ip/punt.c nobase_include_HEADERS += \ vnet/ip/format.h \ @@ -354,11 +350,7 @@ nobase_include_HEADERS += \ vnet/ip/ports.def \ vnet/ip/protocols.def \ vnet/ip/punt_error.def \ - vnet/ip/punt.h \ - vnet/ip/tcp_packet.h \ - vnet/ip/udp_error.def \ - vnet/ip/udp.h \ - vnet/ip/udp_packet.h + vnet/ip/punt.h API_FILES += vnet/ip/ip.api @@ -473,6 +465,38 @@ test_map_LDADD = libvnet.la libvppinfra.la libvlib.la \ test_map_LDFLAGS = -static endif +######################################## +# Layer 4 protocol: tcp +######################################## +libvnet_la_SOURCES += \ + vnet/tcp/tcp_format.c \ + vnet/tcp/tcp_pg.c \ + vnet/tcp/tcp_syn_filter4.c \ + vnet/tcp/tcp_output.c \ + vnet/tcp/tcp_input.c \ + vnet/tcp/tcp_newreno.c \ + vnet/tcp/tcp.c + +nobase_include_HEADERS += \ + vnet/tcp/tcp_packet.h \ + vnet/tcp/tcp_timer.h \ + vnet/tcp/tcp.h + +######################################## +# Layer 4 protocol: udp +######################################## +libvnet_la_SOURCES += \ + vnet/udp/udp.c \ + vnet/udp/udp_input.c \ + vnet/udp/builtin_server.c \ + vnet/udp/udp_format.c \ + vnet/udp/udp_local.c \ + vnet/udp/udp_pg.c + +nobase_include_HEADERS += \ + vnet/udp/udp_error.def \ + vnet/udp/udp.h \ + vnet/udp/udp_packet.h ######################################## # Tunnel protocol: gre @@ -833,6 +857,28 @@ libvnet_la_SOURCES += \ nobase_include_HEADERS += \ vnet/devices/ssvm/ssvm_eth.h +######################################## +# session managmeent +######################################## + +libvnet_la_SOURCES += \ + vnet/session/session.c \ + vnet/session/node.c \ + vnet/session/transport.c \ + vnet/session/application.c \ + vnet/session/session_cli.c \ + vnet/session/hashes.c \ + vnet/session/application_interface.c \ + vnet/session/session_api.c + +nobase_include_HEADERS += \ + vnet/session/session.h \ + vnet/session/application.h \ + vnet/session/transport.h \ + vnet/session/application_interface.h + +API_FILES += vnet/session/session.api + ######################################## # Linux packet interface ######################################## diff --git a/src/vnet/api_errno.h b/src/vnet/api_errno.h index 8680ef7c..861a5767 100644 --- a/src/vnet/api_errno.h +++ 
b/src/vnet/api_errno.h @@ -91,14 +91,19 @@ _(INVALID_ADDRESS_FAMILY, -97, "Invalid address family") \ _(INVALID_SUB_SW_IF_INDEX, -98, "Invalid sub-interface sw_if_index") \ _(TABLE_TOO_BIG, -99, "Table too big") \ _(CANNOT_ENABLE_DISABLE_FEATURE, -100, "Cannot enable/disable feature") \ -_(BFD_EEXIST, -101, "Duplicate BFD object") \ -_(BFD_ENOENT, -102, "No such BFD object") \ -_(BFD_EINUSE, -103, "BFD object in use") \ -_(BFD_NOTSUPP, -104, "BFD feature not supported") \ -_(LISP_RLOC_LOCAL, -105, "RLOC address is local") \ -_(BFD_EAGAIN, -106, "BFD object cannot be manipulated at this time") \ -_(INVALID_GPE_MODE, -107, "Invalid GPE mode") \ -_(LISP_GPE_ENTRIES_PRESENT, -108, "LISP GPE entries are present") +_(BFD_EEXIST, -101, "Duplicate BFD object") \ +_(BFD_ENOENT, -102, "No such BFD object") \ +_(BFD_EINUSE, -103, "BFD object in use") \ +_(BFD_NOTSUPP, -104, "BFD feature not supported") \ +_(ADDRESS_IN_USE, -105, "Address in use") \ +_(ADDRESS_NOT_IN_USE, -106, "Address not in use") \ +_(QUEUE_FULL, -107, "Queue full") \ +_(UNKNOWN_URI_TYPE, -108, "Unknown URI type") \ +_(URI_FIFO_CREATE_FAILED, -109, "URI FIFO segment create failed") \ +_(LISP_RLOC_LOCAL, -110, "RLOC address is local") \ +_(BFD_EAGAIN, -111, "BFD object cannot be manipulated at this time") \ +_(INVALID_GPE_MODE, -112, "Invalid GPE mode") \ +_(LISP_GPE_ENTRIES_PRESENT, -113, "LISP GPE entries are present") typedef enum { diff --git a/src/vnet/bfd/bfd_udp.c b/src/vnet/bfd/bfd_udp.c index 146faad6..cf05089b 100644 --- a/src/vnet/bfd/bfd_udp.c +++ b/src/vnet/bfd/bfd_udp.c @@ -18,12 +18,12 @@ #include #include #include -#include +#include +#include #include #include #include #include -#include #include #include #include diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h index f1cc6371..3de01f2a 100644 --- a/src/vnet/buffer.h +++ b/src/vnet/buffer.h @@ -277,6 +277,16 @@ typedef struct u16 buffer_advance; } device_input_feat; + /* TCP */ + struct + { + u32 connection_index; + u32 seq_number; + u32 seq_end; + u32 ack_number; + u8 flags; + } tcp; + u32 unused[6]; }; } vnet_buffer_opaque_t; diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c index 6093e2ac..b651a1f1 100644 --- a/src/vnet/classify/vnet_classify.c +++ b/src/vnet/classify/vnet_classify.c @@ -695,8 +695,8 @@ int vnet_classify_add_del_table (vnet_classify_main_t * cm, } #define foreach_tcp_proto_field \ -_(src_port) \ -_(dst_port) +_(src) \ +_(dst) #define foreach_udp_proto_field \ _(src_port) \ diff --git a/src/vnet/dhcp/dhcp_proxy.h b/src/vnet/dhcp/dhcp_proxy.h index c0d79c41..4586d883 100644 --- a/src/vnet/dhcp/dhcp_proxy.h +++ b/src/vnet/dhcp/dhcp_proxy.h @@ -26,7 +26,7 @@ #include #include #include -#include +#include typedef enum { #define dhcp_proxy_error(n,s) DHCP_PROXY_ERROR_##n, diff --git a/src/vnet/flow/flow_report.h b/src/vnet/flow/flow_report.h index 4e764377..e8ed3818 100644 --- a/src/vnet/flow/flow_report.h +++ b/src/vnet/flow/flow_report.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/vnet/ip/ip.h b/src/vnet/ip/ip.h index 02a1a963..70b4ccd8 100644 --- a/src/vnet/ip/ip.h +++ b/src/vnet/ip/ip.h @@ -50,8 +50,8 @@ #include #include -#include -#include +#include +#include #include #include diff --git a/src/vnet/ip/ip4.h b/src/vnet/ip/ip4.h index b184fbae..4e075d0f 100644 --- a/src/vnet/ip/ip4.h +++ b/src/vnet/ip/ip4.h @@ -309,8 +309,8 @@ ip4_compute_flow_hash (const ip4_header_t * ip, b = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? 
t1 : t2; b ^= (flow_hash_config & IP_FLOW_HASH_PROTO) ? ip->protocol : 0; - t1 = is_tcp_udp ? tcp->ports.src : 0; - t2 = is_tcp_udp ? tcp->ports.dst : 0; + t1 = is_tcp_udp ? tcp->src : 0; + t2 = is_tcp_udp ? tcp->dst : 0; t1 = (flow_hash_config & IP_FLOW_HASH_SRC_PORT) ? t1 : 0; t2 = (flow_hash_config & IP_FLOW_HASH_DST_PORT) ? t2 : 0; @@ -334,6 +334,44 @@ u8 *format_ip4_forward_next_trace (u8 * s, va_list * args); u32 ip4_tcp_udp_validate_checksum (vlib_main_t * vm, vlib_buffer_t * p0); +#define IP_DF 0x4000 /* don't fragment */ + +/** + * Push IPv4 header to buffer + * + * This does not support fragmentation. + * + * @param vm - vlib_main + * @param b - buffer to write the header to + * @param src - source IP + * @param dst - destination IP + * @param prot - payload proto + * + * @return - pointer to start of IP header + */ +always_inline void * +vlib_buffer_push_ip4 (vlib_main_t * vm, vlib_buffer_t * b, + ip4_address_t * src, ip4_address_t * dst, int proto) +{ + ip4_header_t *ih; + + /* make some room */ + ih = vlib_buffer_push_uninit (b, sizeof (ip4_header_t)); + + ih->ip_version_and_header_length = 0x45; + ih->tos = 0; + ih->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b)); + + /* No fragments */ + ih->flags_and_fragment_offset = clib_host_to_net_u16 (IP_DF); + ih->ttl = 255; + ih->protocol = proto; + ih->src_address.as_u32 = src->as_u32; + ih->dst_address.as_u32 = dst->as_u32; + + ih->checksum = ip4_header_checksum (ih); + return ih; +} #endif /* included_ip_ip4_h */ /* diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index 8081b34b..66d91ab6 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -1478,8 +1478,18 @@ ip4_tcp_udp_validate_checksum (vlib_main_t * vm, vlib_buffer_t * p0) return p0->flags; } -static uword -ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +/* *INDENT-OFF* */ +VNET_FEATURE_ARC_INIT (ip4_local) = +{ + .arc_name = "ip4-local", + .start_nodes = VNET_FEATURES ("ip4-local"), +}; +/* *INDENT-ON* */ + +static inline uword +ip4_local_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, int head_of_feature_arc) { ip4_main_t *im = &ip4_main; ip_lookup_main_t *lm = &im->lookup_main; @@ -1487,6 +1497,7 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) u32 *from, *to_next, n_left_from, n_left_to_next; vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip4_input_node.index); + u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -1513,7 +1524,7 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) i32 len_diff0, len_diff1; u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0; u8 error1, is_udp1, is_tcp_udp1, good_tcp_udp1, proto1; - u8 enqueue_code; + u32 sw_if_index0, sw_if_index1; pi0 = to_next[0] = from[0]; pi1 = to_next[1] = from[1]; @@ -1522,6 +1533,8 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) to_next += 2; n_left_to_next -= 2; + next0 = next1 = IP_LOCAL_NEXT_DROP; + p0 = vlib_get_buffer (vm, pi0); p1 = vlib_get_buffer (vm, pi1); @@ -1531,14 +1544,18 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vnet_buffer (p0)->ip.start_of_ip_header = p0->current_data; vnet_buffer (p1)->ip.start_of_ip_header = p1->current_data; - fib_index0 = vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer (p0)->sw_if_index[VLIB_RX]); + sw_if_index0 = 
vnet_buffer (p0)->sw_if_index[VLIB_RX]; + sw_if_index1 = vnet_buffer (p1)->sw_if_index[VLIB_RX]; + + fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0); + fib_index1 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index1); + + fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0); fib_index0 = (vnet_buffer (p0)->sw_if_index[VLIB_TX] == (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX]; - fib_index1 = vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer (p1)->sw_if_index[VLIB_RX]); + fib_index1 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index1); fib_index1 = (vnet_buffer (p1)->sw_if_index[VLIB_TX] == (u32) ~ 0) ? fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX]; @@ -1557,6 +1574,13 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) until support of IP frag reassembly is implemented */ proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol; proto1 = ip4_is_fragment (ip1) ? 0xfe : ip1->protocol; + + if (head_of_feature_arc == 0) + { + error0 = error1 = IP4_ERROR_UNKNOWN_PROTOCOL; + goto skip_checks; + } + is_udp0 = proto0 == IP_PROTOCOL_UDP; is_udp1 = proto1 == IP_PROTOCOL_UDP; is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP; @@ -1686,6 +1710,7 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) next0 = lm->local_next_by_ip_protocol[proto0]; next1 = lm->local_next_by_ip_protocol[proto1]; + skip_checks: next0 = error0 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0; next1 = @@ -1694,44 +1719,17 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) p0->error = error0 ? error_node->errors[error0] : 0; p1->error = error1 ? error_node->errors[error1] : 0; - enqueue_code = (next0 != next_index) + 2 * (next1 != next_index); - - if (PREDICT_FALSE (enqueue_code != 0)) + if (head_of_feature_arc) { - switch (enqueue_code) - { - case 1: - /* A B A */ - to_next[-2] = pi1; - to_next -= 1; - n_left_to_next += 1; - vlib_set_next_frame_buffer (vm, node, next0, pi0); - break; - - case 2: - /* A A B */ - to_next -= 1; - n_left_to_next += 1; - vlib_set_next_frame_buffer (vm, node, next1, pi1); - break; - - case 3: - /* A B B or A B C */ - to_next -= 2; - n_left_to_next += 2; - vlib_set_next_frame_buffer (vm, node, next0, pi0); - vlib_set_next_frame_buffer (vm, node, next1, pi1); - if (next0 == next1) - { - vlib_put_next_frame (vm, node, next_index, - n_left_to_next); - next_index = next1; - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - break; - } + if (PREDICT_TRUE (error0 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL)) + vnet_feature_arc_start (arc_index, sw_if_index0, &next0, p0); + if (PREDICT_TRUE (error1 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL)) + vnet_feature_arc_start (arc_index, sw_if_index1, &next1, p1); } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, pi0, pi1, + next0, next1); } while (n_left_from > 0 && n_left_to_next > 0) @@ -1746,6 +1744,7 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0; load_balance_t *lb0; const dpo_id_t *dpo0; + u32 sw_if_index0; pi0 = to_next[0] = from[0]; from += 1; @@ -1753,14 +1752,18 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) to_next += 1; n_left_to_next -= 1; + next0 = IP_LOCAL_NEXT_DROP; + p0 = vlib_get_buffer (vm, pi0); ip0 = vlib_buffer_get_current (p0); vnet_buffer (p0)->ip.start_of_ip_header = p0->current_data; - fib_index0 = vec_elt 
(im->fib_index_by_sw_if_index, - vnet_buffer (p0)->sw_if_index[VLIB_RX]); + sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX]; + + fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0); + fib_index0 = (vnet_buffer (p0)->sw_if_index[VLIB_TX] == (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX]; @@ -1775,6 +1778,13 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) /* Treat IP frag packets as "experimental" protocol for now until support of IP frag reassembly is implemented */ proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol; + + if (head_of_feature_arc == 0) + { + error0 = IP4_ERROR_UNKNOWN_PROTOCOL; + goto skip_check; + } + is_udp0 = proto0 == IP_PROTOCOL_UDP; is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP; @@ -1847,6 +1857,8 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip0->dst_address.as_u32 != 0xFFFFFFFF) ? IP4_ERROR_SRC_LOOKUP_MISS : error0); + skip_check: + next0 = lm->local_next_by_ip_protocol[proto0]; next0 = @@ -1854,18 +1866,15 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) p0->error = error0 ? error_node->errors[error0] : 0; - if (PREDICT_FALSE (next0 != next_index)) + if (head_of_feature_arc) { - n_left_to_next += 1; - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - - next_index = next0; - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - to_next[0] = pi0; - to_next += 1; - n_left_to_next -= 1; + if (PREDICT_TRUE (error0 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL)) + vnet_feature_arc_start (arc_index, sw_if_index0, &next0, p0); } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, pi0, next0); + } vlib_put_next_frame (vm, node, next_index, n_left_to_next); @@ -1874,21 +1883,57 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) return frame->n_vectors; } +static uword +ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + return ip4_local_inline (vm, node, frame, 1 /* head of feature arc */ ); +} + +/* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_local_node) = { - .function = ip4_local,.name = "ip4-local",.vector_size = - sizeof (u32),.format_trace = - format_ip4_forward_next_trace,.n_next_nodes = - IP_LOCAL_N_NEXT,.next_nodes = + .function = ip4_local, + .name = "ip4-local", + .vector_size = sizeof (u32), + .format_trace = format_ip4_forward_next_trace, + .n_next_nodes = IP_LOCAL_N_NEXT, + .next_nodes = { - [IP_LOCAL_NEXT_DROP] = "error-drop", - [IP_LOCAL_NEXT_PUNT] = "error-punt", - [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup", - [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input",} -,}; + [IP_LOCAL_NEXT_DROP] = "error-drop", + [IP_LOCAL_NEXT_PUNT] = "error-punt", + [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup", + [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input",}, +}; +/* *INDENT-ON* */ VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_node, ip4_local); +static uword +ip4_local_end_of_arc (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + return ip4_local_inline (vm, node, frame, 0 /* head of feature arc */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (ip4_local_end_of_arc_node,static) = { + .function = ip4_local_end_of_arc, + .name = "ip4-local-end-of-arc", + .vector_size = sizeof (u32), + + .format_trace = format_ip4_forward_next_trace, + .sibling_of = "ip4-local", +}; + +VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_end_of_arc_node, ip4_local_end_of_arc) + +VNET_FEATURE_INIT (ip4_local_end_of_arc, static) = { + .arc_name = 
"ip4-local", + .node_name = "ip4-local-end-of-arc", + .runs_before = 0, /* not before any other features */ +}; +/* *INDENT-ON* */ + void ip4_register_protocol (u32 protocol, u32 node_index) { diff --git a/src/vnet/ip/ip4_packet.h b/src/vnet/ip/ip4_packet.h index 8da788b4..b2c1fcd4 100644 --- a/src/vnet/ip/ip4_packet.h +++ b/src/vnet/ip/ip4_packet.h @@ -41,7 +41,7 @@ #define included_ip4_packet_h #include /* for ip_csum_t */ -#include /* for tcp_header_t */ +#include /* for tcp_header_t */ #include /* for clib_net_to_host_u16 */ /* IP4 address which can be accessed either as 4 bytes @@ -342,10 +342,10 @@ ip4_tcp_reply_x1 (ip4_header_t * ip0, tcp_header_t * tcp0) ip0->src_address.data_u32 = dst0; ip0->dst_address.data_u32 = src0; - src0 = tcp0->ports.src; - dst0 = tcp0->ports.dst; - tcp0->ports.src = dst0; - tcp0->ports.dst = src0; + src0 = tcp0->src; + dst0 = tcp0->dst; + tcp0->src = dst0; + tcp0->dst = src0; } always_inline void @@ -363,14 +363,14 @@ ip4_tcp_reply_x2 (ip4_header_t * ip0, ip4_header_t * ip1, ip0->dst_address.data_u32 = src0; ip1->dst_address.data_u32 = src1; - src0 = tcp0->ports.src; - src1 = tcp1->ports.src; - dst0 = tcp0->ports.dst; - dst1 = tcp1->ports.dst; - tcp0->ports.src = dst0; - tcp1->ports.src = dst1; - tcp0->ports.dst = src0; - tcp1->ports.dst = src1; + src0 = tcp0->src; + src1 = tcp1->src; + dst0 = tcp0->dst; + dst1 = tcp1->dst; + tcp0->src = dst0; + tcp1->src = dst1; + tcp0->dst = src0; + tcp1->dst = src1; } #endif /* included_ip4_packet_h */ diff --git a/src/vnet/ip/ip6.h b/src/vnet/ip/ip6.h index 5456f0f2..2615fbfa 100644 --- a/src/vnet/ip/ip6.h +++ b/src/vnet/ip/ip6.h @@ -461,8 +461,8 @@ ip6_compute_flow_hash (const ip6_header_t * ip, b = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t1 : t2; b ^= (flow_hash_config & IP_FLOW_HASH_PROTO) ? ip->protocol : 0; - t1 = is_tcp_udp ? tcp->ports.src : 0; - t2 = is_tcp_udp ? tcp->ports.dst : 0; + t1 = is_tcp_udp ? tcp->src : 0; + t2 = is_tcp_udp ? tcp->dst : 0; t1 = (flow_hash_config & IP_FLOW_HASH_SRC_PORT) ? t1 : 0; t2 = (flow_hash_config & IP_FLOW_HASH_DST_PORT) ? 
t2 : 0; @@ -497,6 +497,46 @@ int ip6_hbh_register_option (u8 option, int ip6_hbh_unregister_option (u8 option); void ip6_hbh_set_next_override (uword next); +/** + * Push IPv6 header to buffer + * + * @param vm - vlib_main + * @param b - buffer to write the header to + * @param src - source IP + * @param dst - destination IP + * @param prot - payload proto + * + * @return - pointer to start of IP header + */ +always_inline void * +vlib_buffer_push_ip6 (vlib_main_t * vm, vlib_buffer_t * b, + ip6_address_t * src, ip6_address_t * dst, int proto) +{ + ip6_header_t *ip6h; + u16 payload_length; + + /* make some room */ + ip6h = vlib_buffer_push_uninit (b, sizeof (ip6_header_t)); + + ip6h->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 (0x6 << 28); + + /* calculate ip6 payload length */ + payload_length = vlib_buffer_length_in_chain (vm, b); + payload_length -= sizeof (*ip6h); + + ip6h->payload_length = clib_host_to_net_u16 (payload_length); + + ip6h->hop_limit = 0xff; + ip6h->protocol = proto; + clib_memcpy (ip6h->src_address.as_u8, src->as_u8, + sizeof (ip6h->src_address)); + clib_memcpy (ip6h->dst_address.as_u8, dst->as_u8, + sizeof (ip6h->src_address)); + + return ip6h; +} + #endif /* included_ip_ip6_h */ /* diff --git a/src/vnet/ip/ip6_packet.h b/src/vnet/ip/ip6_packet.h index 1e551c8b..4fd14b96 100644 --- a/src/vnet/ip/ip6_packet.h +++ b/src/vnet/ip/ip6_packet.h @@ -40,7 +40,7 @@ #ifndef included_ip6_packet_h #define included_ip6_packet_h -#include +#include #include typedef union @@ -373,10 +373,10 @@ ip6_tcp_reply_x1 (ip6_header_t * ip0, tcp_header_t * tcp0) { u16 src0, dst0; - src0 = tcp0->ports.src; - dst0 = tcp0->ports.dst; - tcp0->ports.src = dst0; - tcp0->ports.dst = src0; + src0 = tcp0->src; + dst0 = tcp0->dst; + tcp0->src = dst0; + tcp0->dst = src0; } } @@ -400,14 +400,14 @@ ip6_tcp_reply_x2 (ip6_header_t * ip0, ip6_header_t * ip1, { u16 src0, dst0, src1, dst1; - src0 = tcp0->ports.src; - src1 = tcp1->ports.src; - dst0 = tcp0->ports.dst; - dst1 = tcp1->ports.dst; - tcp0->ports.src = dst0; - tcp1->ports.src = dst1; - tcp0->ports.dst = src0; - tcp1->ports.dst = src1; + src0 = tcp0->src; + src1 = tcp1->src; + dst0 = tcp0->dst; + dst1 = tcp1->dst; + tcp0->src = dst0; + tcp1->src = dst1; + tcp0->dst = src0; + tcp1->dst = src1; } } diff --git a/src/vnet/ip/punt.c b/src/vnet/ip/punt.c index 9c735128..48558401 100644 --- a/src/vnet/ip/punt.c +++ b/src/vnet/ip/punt.c @@ -23,7 +23,7 @@ */ #include #include -#include +#include #include #define foreach_punt_next \ diff --git a/src/vnet/ip/tcp_packet.h b/src/vnet/ip/tcp_packet.h deleted file mode 100644 index 93f73e01..00000000 --- a/src/vnet/ip/tcp_packet.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/* - * ip4/tcp_packet.h: TCP packet format (see RFC 793) - * - * Copyright (c) 2008 Eliot Dresselhaus - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef included_tcp_packet_h -#define included_tcp_packet_h - -/* TCP flags bit 0 first. */ -#define foreach_tcp_flag \ - _ (FIN) \ - _ (SYN) \ - _ (RST) \ - _ (PSH) \ - _ (ACK) \ - _ (URG) \ - _ (ECE) \ - _ (CWR) - -enum -{ -#define _(f) TCP_FLAG_BIT_##f, - foreach_tcp_flag -#undef _ - TCP_N_FLAG_BITS, - -#define _(f) TCP_FLAG_##f = 1 << TCP_FLAG_BIT_##f, - foreach_tcp_flag -#undef _ -}; - -typedef struct -{ - /* Source and destination port. */ - union - { - union - { - struct - { - u16 src, dst; - }; - u32 src_and_dst; - } ports; - struct - { - u16 src_port, dst_port; - }; - }; - - /* Sequence and acknowledgment number. */ - u32 seq_number, ack_number; - - /* Size of TCP header in 32-bit units plus 4 reserved bits. */ - u8 tcp_header_u32s_and_reserved; - - /* see foreach_tcp_flag for enumation of tcp flags. */ - u8 flags; - - /* Current window advertised by sender. - This is the number of bytes sender is willing to receive - right now. */ - u16 window; - - /* Checksum of TCP pseudo header and data. */ - u16 checksum; - - u16 urgent_pointer; -} tcp_header_t; - -always_inline int -tcp_header_bytes (tcp_header_t * t) -{ - return (t->tcp_header_u32s_and_reserved >> 4) * sizeof (u32); -} - -/* TCP options. */ -typedef enum tcp_option_type -{ - TCP_OPTION_END = 0, - TCP_OPTION_NOP = 1, - TCP_OPTION_MSS = 2, - TCP_OPTION_WINDOW_SCALE = 3, - TCP_OPTION_SACK_PERMITTED = 4, - TCP_OPTION_SACK_BLOCK = 5, - TCP_OPTION_TIME_STAMP = 8, -} tcp_option_type_t; - -/* All except NOP and END have 1 byte length field. */ -typedef struct -{ - tcp_option_type_t type:8; - - /* Length of this option in bytes. */ - u8 length; -} tcp_option_with_length_t; - -#endif /* included_tcp_packet_h */ - - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet/ip/udp.h b/src/vnet/ip/udp.h deleted file mode 100644 index bad58b5d..00000000 --- a/src/vnet/ip/udp.h +++ /dev/null @@ -1,315 +0,0 @@ -/* - * ip/udp.h: udp protocol - * - * Copyright (c) 2013 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef included_udp_h -#define included_udp_h - -#include -#include -#include -#include -#include -#include -#include - -typedef enum -{ -#define udp_error(n,s) UDP_ERROR_##n, -#include -#undef udp_error - UDP_N_ERROR, -} udp_error_t; - -#define foreach_udp4_dst_port \ -_ (67, dhcp_to_server) \ -_ (68, dhcp_to_client) \ -_ (500, ikev2) \ -_ (3784, bfd4) \ -_ (3785, bfd_echo4) \ -_ (4341, lisp_gpe) \ -_ (4342, lisp_cp) \ -_ (4739, ipfix) \ -_ (4789, vxlan) \ -_ (4789, vxlan6) \ -_ (4790, vxlan_gpe) \ -_ (6633, vpath_3) - - -#define foreach_udp6_dst_port \ -_ (547, dhcpv6_to_server) \ -_ (546, dhcpv6_to_client) \ -_ (3784, bfd6) \ -_ (3785, bfd_echo6) \ -_ (4341, lisp_gpe6) \ -_ (4342, lisp_cp6) \ -_ (4790, vxlan6_gpe) \ -_ (6633, vpath6_3) - -typedef enum -{ -#define _(n,f) UDP_DST_PORT_##f = n, - foreach_udp4_dst_port foreach_udp6_dst_port -#undef _ -} udp_dst_port_t; - -typedef enum -{ -#define _(n,f) UDP6_DST_PORT_##f = n, - foreach_udp6_dst_port -#undef _ -} udp6_dst_port_t; - -typedef struct -{ - /* Name (a c string). */ - char *name; - - /* GRE protocol type in host byte order. */ - udp_dst_port_t dst_port; - - /* Node which handles this type. */ - u32 node_index; - - /* Next index for this type. */ - u32 next_index; -} udp_dst_port_info_t; - -typedef enum -{ - UDP_IP6 = 0, - UDP_IP4, /* the code is full of is_ip4... */ - N_UDP_AF, -} udp_af_t; - -typedef struct -{ - udp_dst_port_info_t *dst_port_infos[N_UDP_AF]; - - /* Hash tables mapping name/protocol to protocol info index. */ - uword *dst_port_info_by_name[N_UDP_AF]; - uword *dst_port_info_by_dst_port[N_UDP_AF]; - - /* convenience */ - vlib_main_t *vlib_main; -} udp_main_t; - -always_inline udp_dst_port_info_t * -udp_get_dst_port_info (udp_main_t * um, udp_dst_port_t dst_port, u8 is_ip4) -{ - uword *p = hash_get (um->dst_port_info_by_dst_port[is_ip4], dst_port); - return p ? 
vec_elt_at_index (um->dst_port_infos[is_ip4], p[0]) : 0; -} - -format_function_t format_udp_header; -format_function_t format_udp_rx_trace; - -unformat_function_t unformat_udp_header; - -void udp_register_dst_port (vlib_main_t * vm, - udp_dst_port_t dst_port, - u32 node_index, u8 is_ip4); - -void udp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add); - -always_inline void -ip_udp_fixup_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 is_ip4) -{ - u16 new_l0; - udp_header_t *udp0; - - if (is_ip4) - { - ip4_header_t *ip0; - ip_csum_t sum0; - u16 old_l0 = 0; - - ip0 = vlib_buffer_get_current (b0); - - /* fix the ing outer-IP checksum */ - sum0 = ip0->checksum; - /* old_l0 always 0, see the rewrite setup */ - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); - - sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, - length /* changed member */ ); - ip0->checksum = ip_csum_fold (sum0); - ip0->length = new_l0; - - /* Fix UDP length */ - udp0 = (udp_header_t *) (ip0 + 1); - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - - sizeof (*ip0)); - udp0->length = new_l0; - } - else - { - ip6_header_t *ip0; - int bogus0; - - ip0 = vlib_buffer_get_current (b0); - - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - - sizeof (*ip0)); - ip0->payload_length = new_l0; - - /* Fix UDP length */ - udp0 = (udp_header_t *) (ip0 + 1); - udp0->length = new_l0; - - udp0->checksum = - ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0, &bogus0); - ASSERT (bogus0 == 0); - - if (udp0->checksum == 0) - udp0->checksum = 0xffff; - } -} - -always_inline void -ip_udp_encap_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 * ec0, word ec_len, - u8 is_ip4) -{ - vlib_buffer_advance (b0, -ec_len); - - if (is_ip4) - { - ip4_header_t *ip0; - - ip0 = vlib_buffer_get_current (b0); - - /* Apply the encap string. */ - clib_memcpy (ip0, ec0, ec_len); - ip_udp_fixup_one (vm, b0, 1); - } - else - { - ip6_header_t *ip0; - - ip0 = vlib_buffer_get_current (b0); - - /* Apply the encap string. 
*/ - clib_memcpy (ip0, ec0, ec_len); - ip_udp_fixup_one (vm, b0, 0); - } -} - -always_inline void -ip_udp_encap_two (vlib_main_t * vm, vlib_buffer_t * b0, vlib_buffer_t * b1, - u8 * ec0, u8 * ec1, word ec_len, u8 is_v4) -{ - u16 new_l0, new_l1; - udp_header_t *udp0, *udp1; - - ASSERT (_vec_len (ec0) == _vec_len (ec1)); - - vlib_buffer_advance (b0, -ec_len); - vlib_buffer_advance (b1, -ec_len); - - if (is_v4) - { - ip4_header_t *ip0, *ip1; - ip_csum_t sum0, sum1; - u16 old_l0 = 0, old_l1 = 0; - - ip0 = vlib_buffer_get_current (b0); - ip1 = vlib_buffer_get_current (b1); - - /* Apply the encap string */ - clib_memcpy (ip0, ec0, ec_len); - clib_memcpy (ip1, ec1, ec_len); - - /* fix the ing outer-IP checksum */ - sum0 = ip0->checksum; - sum1 = ip1->checksum; - - /* old_l0 always 0, see the rewrite setup */ - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); - new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)); - - sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, - length /* changed member */ ); - sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t, - length /* changed member */ ); - - ip0->checksum = ip_csum_fold (sum0); - ip1->checksum = ip_csum_fold (sum1); - - ip0->length = new_l0; - ip1->length = new_l1; - - /* Fix UDP length */ - udp0 = (udp_header_t *) (ip0 + 1); - udp1 = (udp_header_t *) (ip1 + 1); - - new_l0 = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - - sizeof (*ip0)); - new_l1 = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) - - sizeof (*ip1)); - udp0->length = new_l0; - udp1->length = new_l1; - } - else - { - ip6_header_t *ip0, *ip1; - int bogus0, bogus1; - - ip0 = vlib_buffer_get_current (b0); - ip1 = vlib_buffer_get_current (b1); - - /* Apply the encap string. */ - clib_memcpy (ip0, ec0, ec_len); - clib_memcpy (ip1, ec1, ec_len); - - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - - sizeof (*ip0)); - new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) - - sizeof (*ip1)); - ip0->payload_length = new_l0; - ip1->payload_length = new_l1; - - /* Fix UDP length */ - udp0 = (udp_header_t *) (ip0 + 1); - udp1 = (udp_header_t *) (ip1 + 1); - - udp0->length = new_l0; - udp1->length = new_l1; - - udp0->checksum = - ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0, &bogus0); - udp1->checksum = - ip6_tcp_udp_icmp_compute_checksum (vm, b1, ip1, &bogus1); - ASSERT (bogus0 == 0); - ASSERT (bogus1 == 0); - - if (udp0->checksum == 0) - udp0->checksum = 0xffff; - if (udp1->checksum == 0) - udp1->checksum = 0xffff; - } -} - -#endif /* included_udp_h */ - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet/ip/udp_error.def b/src/vnet/ip/udp_error.def deleted file mode 100644 index bfdae0ac..00000000 --- a/src/vnet/ip/udp_error.def +++ /dev/null @@ -1,21 +0,0 @@ -/* - * udp_error.def: udp errors - * - * Copyright (c) 2013-2016 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -udp_error (NONE, "no error") -udp_error (NO_LISTENER, "no listener for dst port") -udp_error (LENGTH_ERROR, "UDP packets with length errors") -udp_error (PUNT, "no listener punt") diff --git a/src/vnet/ip/udp_format.c b/src/vnet/ip/udp_format.c deleted file mode 100644 index abdf561e..00000000 --- a/src/vnet/ip/udp_format.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* - * ip/udp_format.c: udp formatting - * - * Copyright (c) 2008 Eliot Dresselhaus - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - -/* Format UDP header. */ -u8 * -format_udp_header (u8 * s, va_list * args) -{ - udp_header_t *udp = va_arg (*args, udp_header_t *); - u32 max_header_bytes = va_arg (*args, u32); - uword indent; - u32 header_bytes = sizeof (udp[0]); - - /* Nothing to do. */ - if (max_header_bytes < sizeof (udp[0])) - return format (s, "UDP header truncated"); - - indent = format_get_indent (s); - indent += 2; - - s = format (s, "UDP: %d -> %d", - clib_net_to_host_u16 (udp->src_port), - clib_net_to_host_u16 (udp->dst_port)); - - s = format (s, "\n%Ulength %d, checksum 0x%04x", - format_white_space, indent, - clib_net_to_host_u16 (udp->length), - clib_net_to_host_u16 (udp->checksum)); - - /* Recurse into next protocol layer. 
*/ - if (max_header_bytes != 0 && header_bytes < max_header_bytes) - { - ip_main_t *im = &ip_main; - tcp_udp_port_info_t *pi; - - pi = ip_get_tcp_udp_port_info (im, udp->dst_port); - - if (pi && pi->format_header) - s = format (s, "\n%U%U", - format_white_space, indent - 2, pi->format_header, - /* next protocol header */ (udp + 1), - max_header_bytes - sizeof (udp[0])); - } - - return s; -} - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet/ip/udp_init.c b/src/vnet/ip/udp_init.c deleted file mode 100644 index 1241ca4a..00000000 --- a/src/vnet/ip/udp_init.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* - * ip/udp_init.c: udp initialization - * - * Copyright (c) 2008 Eliot Dresselhaus - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - -clib_error_t * -udp_init (vlib_main_t * vm) -{ - ip_main_t *im = &ip_main; - ip_protocol_info_t *pi; - clib_error_t *error; - - error = vlib_call_init_function (vm, ip_main_init); - - if (!error) - { - pi = ip_get_protocol_info (im, IP_PROTOCOL_UDP); - if (pi == 0) - return clib_error_return (0, "UDP protocol info AWOL"); - pi->format_header = format_udp_header; - pi->unformat_pg_edit = unformat_pg_udp_header; - } - - return 0; -} - -VLIB_INIT_FUNCTION (udp_init); - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet/ip/udp_local.c b/src/vnet/ip/udp_local.c deleted file mode 100644 index 13ab6e4f..00000000 --- a/src/vnet/ip/udp_local.c +++ /dev/null @@ -1,645 +0,0 @@ -/* - * node.c: udp packet processing - * - * Copyright (c) 2013 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include -#include - -udp_main_t udp_main; - -#define foreach_udp_input_next \ - _ (PUNT, "error-punt") \ - _ (DROP, "error-drop") \ - _ (ICMP4_ERROR, "ip4-icmp-error") \ - _ (ICMP6_ERROR, "ip6-icmp-error") - -typedef enum -{ -#define _(s,n) UDP_INPUT_NEXT_##s, - foreach_udp_input_next -#undef _ - UDP_INPUT_N_NEXT, -} udp_input_next_t; - -typedef struct -{ - u16 src_port; - u16 dst_port; - u8 bound; -} udp_rx_trace_t; - -u8 * -format_udp_rx_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - udp_rx_trace_t *t = va_arg (*args, udp_rx_trace_t *); - - s = format (s, "UDP: src-port %d dst-port %d%s", - clib_net_to_host_u16 (t->src_port), - clib_net_to_host_u16 (t->dst_port), - t->bound ? "" : " (no listener)"); - return s; -} - -typedef struct -{ - /* Sparse vector mapping udp dst_port in network byte order - to next index. */ - u16 *next_by_dst_port; - u8 punt_unknown; -} udp_input_runtime_t; - -vlib_node_registration_t udp4_input_node; -vlib_node_registration_t udp6_input_node; - -always_inline uword -udp46_input_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame, int is_ip4) -{ - udp_input_runtime_t *rt = is_ip4 ? - (void *) vlib_node_get_runtime_data (vm, udp4_input_node.index) - : (void *) vlib_node_get_runtime_data (vm, udp6_input_node.index); - __attribute__ ((unused)) u32 n_left_from, next_index, *from, *to_next; - word n_no_listener = 0; - u8 punt_unknown = rt->punt_unknown; - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - - next_index = node->cached_next_index; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 bi0, bi1; - vlib_buffer_t *b0, *b1; - udp_header_t *h0 = 0, *h1 = 0; - u32 i0, i1, dst_port0, dst_port1; - u32 advance0, advance1; - u32 error0, next0, error1, next1; - - /* Prefetch next iteration. 
*/ - { - vlib_buffer_t *p2, *p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); - - CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD); - CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD); - } - - bi0 = from[0]; - bi1 = from[1]; - to_next[0] = bi0; - to_next[1] = bi1; - from += 2; - to_next += 2; - n_left_to_next -= 2; - n_left_from -= 2; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - - /* ip4/6_local hands us the ip header, not the udp header */ - if (is_ip4) - { - advance0 = sizeof (ip4_header_t); - advance1 = sizeof (ip4_header_t); - } - else - { - advance0 = sizeof (ip6_header_t); - advance1 = sizeof (ip6_header_t); - } - - if (PREDICT_FALSE (b0->current_length < advance0 + sizeof (*h0))) - { - error0 = UDP_ERROR_LENGTH_ERROR; - next0 = UDP_INPUT_NEXT_DROP; - } - else - { - vlib_buffer_advance (b0, advance0); - h0 = vlib_buffer_get_current (b0); - error0 = next0 = 0; - if (PREDICT_FALSE (clib_net_to_host_u16 (h0->length) > - vlib_buffer_length_in_chain (vm, b0))) - { - error0 = UDP_ERROR_LENGTH_ERROR; - next0 = UDP_INPUT_NEXT_DROP; - } - } - - if (PREDICT_FALSE (b1->current_length < advance1 + sizeof (*h1))) - { - error1 = UDP_ERROR_LENGTH_ERROR; - next1 = UDP_INPUT_NEXT_DROP; - } - else - { - vlib_buffer_advance (b1, advance1); - h1 = vlib_buffer_get_current (b1); - error1 = next1 = 0; - if (PREDICT_FALSE (clib_net_to_host_u16 (h1->length) > - vlib_buffer_length_in_chain (vm, b1))) - { - error1 = UDP_ERROR_LENGTH_ERROR; - next1 = UDP_INPUT_NEXT_DROP; - } - } - - /* Index sparse array with network byte order. */ - dst_port0 = (error0 == 0) ? h0->dst_port : 0; - dst_port1 = (error1 == 0) ? h1->dst_port : 0; - sparse_vec_index2 (rt->next_by_dst_port, dst_port0, dst_port1, - &i0, &i1); - next0 = (error0 == 0) ? vec_elt (rt->next_by_dst_port, i0) : next0; - next1 = (error1 == 0) ? 
vec_elt (rt->next_by_dst_port, i1) : next1; - - if (PREDICT_FALSE (i0 == SPARSE_VEC_INVALID_INDEX)) - { - // move the pointer back so icmp-error can find the - // ip packet header - vlib_buffer_advance (b0, -(word) advance0); - - if (PREDICT_FALSE (punt_unknown)) - { - b0->error = node->errors[UDP_ERROR_PUNT]; - next0 = UDP_INPUT_NEXT_PUNT; - } - else if (is_ip4) - { - icmp4_error_set_vnet_buffer (b0, - ICMP4_destination_unreachable, - ICMP4_destination_unreachable_port_unreachable, - 0); - next0 = UDP_INPUT_NEXT_ICMP4_ERROR; - n_no_listener++; - } - else - { - icmp6_error_set_vnet_buffer (b0, - ICMP6_destination_unreachable, - ICMP6_destination_unreachable_port_unreachable, - 0); - next0 = UDP_INPUT_NEXT_ICMP6_ERROR; - n_no_listener++; - } - } - else - { - b0->error = node->errors[UDP_ERROR_NONE]; - // advance to the payload - vlib_buffer_advance (b0, sizeof (*h0)); - } - - if (PREDICT_FALSE (i1 == SPARSE_VEC_INVALID_INDEX)) - { - // move the pointer back so icmp-error can find the - // ip packet header - vlib_buffer_advance (b1, -(word) advance1); - - if (PREDICT_FALSE (punt_unknown)) - { - b1->error = node->errors[UDP_ERROR_PUNT]; - next1 = UDP_INPUT_NEXT_PUNT; - } - else if (is_ip4) - { - icmp4_error_set_vnet_buffer (b1, - ICMP4_destination_unreachable, - ICMP4_destination_unreachable_port_unreachable, - 0); - next1 = UDP_INPUT_NEXT_ICMP4_ERROR; - n_no_listener++; - } - else - { - icmp6_error_set_vnet_buffer (b1, - ICMP6_destination_unreachable, - ICMP6_destination_unreachable_port_unreachable, - 0); - next1 = UDP_INPUT_NEXT_ICMP6_ERROR; - n_no_listener++; - } - } - else - { - b1->error = node->errors[UDP_ERROR_NONE]; - // advance to the payload - vlib_buffer_advance (b1, sizeof (*h1)); - } - - if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) - { - udp_rx_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - if (b0->error != node->errors[UDP_ERROR_LENGTH_ERROR]) - { - tr->src_port = h0 ? h0->src_port : 0; - tr->dst_port = h0 ? h0->dst_port : 0; - tr->bound = (next0 != UDP_INPUT_NEXT_ICMP4_ERROR && - next0 != UDP_INPUT_NEXT_ICMP6_ERROR); - } - } - if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) - { - udp_rx_trace_t *tr = vlib_add_trace (vm, node, - b1, sizeof (*tr)); - if (b1->error != node->errors[UDP_ERROR_LENGTH_ERROR]) - { - tr->src_port = h1 ? h1->src_port : 0; - tr->dst_port = h1 ? 
h1->dst_port : 0; - tr->bound = (next1 != UDP_INPUT_NEXT_ICMP4_ERROR && - next1 != UDP_INPUT_NEXT_ICMP6_ERROR); - } - } - - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - bi0, bi1, next0, next1); - } - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0; - vlib_buffer_t *b0; - udp_header_t *h0 = 0; - u32 i0, next0; - u32 advance0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - - /* ip4/6_local hands us the ip header, not the udp header */ - if (is_ip4) - advance0 = sizeof (ip4_header_t); - else - advance0 = sizeof (ip6_header_t); - - if (PREDICT_FALSE (b0->current_length < advance0 + sizeof (*h0))) - { - b0->error = node->errors[UDP_ERROR_LENGTH_ERROR]; - next0 = UDP_INPUT_NEXT_DROP; - goto trace_x1; - } - - vlib_buffer_advance (b0, advance0); - - h0 = vlib_buffer_get_current (b0); - - if (PREDICT_TRUE (clib_net_to_host_u16 (h0->length) <= - vlib_buffer_length_in_chain (vm, b0))) - { - i0 = sparse_vec_index (rt->next_by_dst_port, h0->dst_port); - next0 = vec_elt (rt->next_by_dst_port, i0); - - if (PREDICT_FALSE (i0 == SPARSE_VEC_INVALID_INDEX)) - { - // move the pointer back so icmp-error can find the - // ip packet header - vlib_buffer_advance (b0, -(word) advance0); - - if (PREDICT_FALSE (punt_unknown)) - { - b0->error = node->errors[UDP_ERROR_PUNT]; - next0 = UDP_INPUT_NEXT_PUNT; - } - else if (is_ip4) - { - icmp4_error_set_vnet_buffer (b0, - ICMP4_destination_unreachable, - ICMP4_destination_unreachable_port_unreachable, - 0); - next0 = UDP_INPUT_NEXT_ICMP4_ERROR; - n_no_listener++; - } - else - { - icmp6_error_set_vnet_buffer (b0, - ICMP6_destination_unreachable, - ICMP6_destination_unreachable_port_unreachable, - 0); - next0 = UDP_INPUT_NEXT_ICMP6_ERROR; - n_no_listener++; - } - } - else - { - b0->error = node->errors[UDP_ERROR_NONE]; - // advance to the payload - vlib_buffer_advance (b0, sizeof (*h0)); - } - } - else - { - b0->error = node->errors[UDP_ERROR_LENGTH_ERROR]; - next0 = UDP_INPUT_NEXT_DROP; - } - - trace_x1: - if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) - { - udp_rx_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - if (b0->error != node->errors[UDP_ERROR_LENGTH_ERROR]) - { - tr->src_port = h0->src_port; - tr->dst_port = h0->dst_port; - tr->bound = (next0 != UDP_INPUT_NEXT_ICMP4_ERROR && - next0 != UDP_INPUT_NEXT_ICMP6_ERROR); - } - } - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - vlib_error_count (vm, node->node_index, UDP_ERROR_NO_LISTENER, - n_no_listener); - return from_frame->n_vectors; -} - -static char *udp_error_strings[] = { -#define udp_error(n,s) s, -#include "udp_error.def" -#undef udp_error -}; - -static uword -udp4_input (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) -{ - return udp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ); -} - -static uword -udp6_input (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) -{ - return udp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ); -} - - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (udp4_input_node) = { - .function = udp4_input, - .name = "ip4-udp-lookup", - /* Takes a vector of packets. 
*/ - .vector_size = sizeof (u32), - - .runtime_data_bytes = sizeof (udp_input_runtime_t), - - .n_errors = UDP_N_ERROR, - .error_strings = udp_error_strings, - - .n_next_nodes = UDP_INPUT_N_NEXT, - .next_nodes = { -#define _(s,n) [UDP_INPUT_NEXT_##s] = n, - foreach_udp_input_next -#undef _ - }, - - .format_buffer = format_udp_header, - .format_trace = format_udp_rx_trace, - .unformat_buffer = unformat_udp_header, -}; -/* *INDENT-ON* */ - -VLIB_NODE_FUNCTION_MULTIARCH (udp4_input_node, udp4_input); - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (udp6_input_node) = { - .function = udp6_input, - .name = "ip6-udp-lookup", - /* Takes a vector of packets. */ - .vector_size = sizeof (u32), - - .runtime_data_bytes = sizeof (udp_input_runtime_t), - - .n_errors = UDP_N_ERROR, - .error_strings = udp_error_strings, - - .n_next_nodes = UDP_INPUT_N_NEXT, - .next_nodes = { -#define _(s,n) [UDP_INPUT_NEXT_##s] = n, - foreach_udp_input_next -#undef _ - }, - - .format_buffer = format_udp_header, - .format_trace = format_udp_rx_trace, - .unformat_buffer = unformat_udp_header, -}; -/* *INDENT-ON* */ - -VLIB_NODE_FUNCTION_MULTIARCH (udp6_input_node, udp6_input); - -static void -add_dst_port (udp_main_t * um, - udp_dst_port_t dst_port, char *dst_port_name, u8 is_ip4) -{ - udp_dst_port_info_t *pi; - u32 i; - - vec_add2 (um->dst_port_infos[is_ip4], pi, 1); - i = pi - um->dst_port_infos[is_ip4]; - - pi->name = dst_port_name; - pi->dst_port = dst_port; - pi->next_index = pi->node_index = ~0; - - hash_set (um->dst_port_info_by_dst_port[is_ip4], dst_port, i); - - if (pi->name) - hash_set_mem (um->dst_port_info_by_name[is_ip4], pi->name, i); -} - -void -udp_register_dst_port (vlib_main_t * vm, - udp_dst_port_t dst_port, u32 node_index, u8 is_ip4) -{ - udp_main_t *um = &udp_main; - udp_dst_port_info_t *pi; - udp_input_runtime_t *rt; - u16 *n; - - { - clib_error_t *error = vlib_call_init_function (vm, udp_local_init); - if (error) - clib_error_report (error); - } - - pi = udp_get_dst_port_info (um, dst_port, is_ip4); - if (!pi) - { - add_dst_port (um, dst_port, 0, is_ip4); - pi = udp_get_dst_port_info (um, dst_port, is_ip4); - ASSERT (pi); - } - - pi->node_index = node_index; - pi->next_index = vlib_node_add_next (vm, - is_ip4 ? udp4_input_node.index - : udp6_input_node.index, node_index); - - /* Setup udp protocol -> next index sparse vector mapping. */ - rt = vlib_node_get_runtime_data - (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index); - n = sparse_vec_validate (rt->next_by_dst_port, - clib_host_to_net_u16 (dst_port)); - n[0] = pi->next_index; -} - -void -udp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add) -{ - udp_input_runtime_t *rt; - - { - clib_error_t *error = vlib_call_init_function (vm, udp_local_init); - if (error) - clib_error_report (error); - } - - rt = vlib_node_get_runtime_data - (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index); - - rt->punt_unknown = is_add; -} - -/* Parse a UDP header. */ -uword -unformat_udp_header (unformat_input_t * input, va_list * args) -{ - u8 **result = va_arg (*args, u8 **); - udp_header_t *udp; - __attribute__ ((unused)) int old_length; - u16 src_port, dst_port; - - /* Allocate space for IP header. 
*/ - { - void *p; - - old_length = vec_len (*result); - vec_add2 (*result, p, sizeof (ip4_header_t)); - udp = p; - } - - memset (udp, 0, sizeof (udp[0])); - if (unformat (input, "src-port %d dst-port %d", &src_port, &dst_port)) - { - udp->src_port = clib_host_to_net_u16 (src_port); - udp->dst_port = clib_host_to_net_u16 (dst_port); - return 1; - } - return 0; -} - -static void -udp_setup_node (vlib_main_t * vm, u32 node_index) -{ - vlib_node_t *n = vlib_get_node (vm, node_index); - pg_node_t *pn = pg_get_node (node_index); - - n->format_buffer = format_udp_header; - n->unformat_buffer = unformat_udp_header; - pn->unformat_edit = unformat_pg_udp_header; -} - -clib_error_t * -udp_local_init (vlib_main_t * vm) -{ - udp_input_runtime_t *rt; - udp_main_t *um = &udp_main; - int i; - - { - clib_error_t *error; - error = vlib_call_init_function (vm, udp_init); - if (error) - clib_error_report (error); - } - - - for (i = 0; i < 2; i++) - { - um->dst_port_info_by_name[i] = hash_create_string (0, sizeof (uword)); - um->dst_port_info_by_dst_port[i] = hash_create (0, sizeof (uword)); - } - - udp_setup_node (vm, udp4_input_node.index); - udp_setup_node (vm, udp6_input_node.index); - - rt = vlib_node_get_runtime_data (vm, udp4_input_node.index); - - rt->next_by_dst_port = sparse_vec_new - ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]), - /* bits in index */ BITS (((udp_header_t *) 0)->dst_port)); - - rt->punt_unknown = 0; - -#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 1 /* is_ip4 */); - foreach_udp4_dst_port -#undef _ - rt = vlib_node_get_runtime_data (vm, udp6_input_node.index); - - rt->next_by_dst_port = sparse_vec_new - ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]), - /* bits in index */ BITS (((udp_header_t *) 0)->dst_port)); - - rt->punt_unknown = 0; - -#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 0 /* is_ip4 */); - foreach_udp6_dst_port -#undef _ - ip4_register_protocol (IP_PROTOCOL_UDP, udp4_input_node.index); - /* Note: ip6 differs from ip4, UDP is hotwired to ip6-udp-lookup */ - return 0; -} - -VLIB_INIT_FUNCTION (udp_local_init); - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet/ip/udp_packet.h b/src/vnet/ip/udp_packet.h deleted file mode 100644 index beea3059..00000000 --- a/src/vnet/ip/udp_packet.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/* - * ip4/udp_packet.h: UDP packet format - * - * Copyright (c) 2008 Eliot Dresselhaus - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef included_udp_packet_h -#define included_udp_packet_h - -typedef struct -{ - /* Source and destination port. */ - u16 src_port, dst_port; - - /* Length of UDP header plus payload. */ - u16 length; - - /* Checksum of UDP pseudo-header and data or - zero if checksum is disabled. */ - u16 checksum; -} udp_header_t; - -#endif /* included_udp_packet_h */ - - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet/ip/udp_pg.c b/src/vnet/ip/udp_pg.c deleted file mode 100644 index c9d8d38c..00000000 --- a/src/vnet/ip/udp_pg.c +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (c) 2015 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* - * ip/udp_pg: UDP packet-generator interface - * - * Copyright (c) 2008 Eliot Dresselhaus - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include /* for unformat_udp_udp_port */ - -#define UDP_PG_EDIT_LENGTH (1 << 0) -#define UDP_PG_EDIT_CHECKSUM (1 << 1) - -always_inline void -udp_pg_edit_function_inline (pg_main_t * pg, - pg_stream_t * s, - pg_edit_group_t * g, - u32 * packets, u32 n_packets, u32 flags) -{ - vlib_main_t *vm = vlib_get_main (); - u32 ip_offset, udp_offset; - - udp_offset = g->start_byte_offset; - ip_offset = (g - 1)->start_byte_offset; - - while (n_packets >= 1) - { - vlib_buffer_t *p0; - ip4_header_t *ip0; - udp_header_t *udp0; - u32 udp_len0; - - p0 = vlib_get_buffer (vm, packets[0]); - n_packets -= 1; - packets += 1; - - ip0 = (void *) (p0->data + ip_offset); - udp0 = (void *) (p0->data + udp_offset); - udp_len0 = clib_net_to_host_u16 (ip0->length) - sizeof (ip0[0]); - - if (flags & UDP_PG_EDIT_LENGTH) - udp0->length = - clib_net_to_host_u16 (vlib_buffer_length_in_chain (vm, p0) - - ip_offset); - - /* Initialize checksum with header. */ - if (flags & UDP_PG_EDIT_CHECKSUM) - { - ip_csum_t sum0; - - sum0 = clib_mem_unaligned (&ip0->src_address, u64); - - sum0 = ip_csum_with_carry - (sum0, clib_host_to_net_u32 (udp_len0 + (ip0->protocol << 16))); - - /* Invalidate possibly old checksum. */ - udp0->checksum = 0; - - sum0 = - ip_incremental_checksum_buffer (vm, p0, udp_offset, udp_len0, - sum0); - - sum0 = ~ip_csum_fold (sum0); - - /* Zero checksum means checksumming disabled. */ - sum0 = sum0 != 0 ? sum0 : 0xffff; - - udp0->checksum = sum0; - } - } -} - -static void -udp_pg_edit_function (pg_main_t * pg, - pg_stream_t * s, - pg_edit_group_t * g, u32 * packets, u32 n_packets) -{ - switch (g->edit_function_opaque) - { - case UDP_PG_EDIT_LENGTH: - udp_pg_edit_function_inline (pg, s, g, packets, n_packets, - UDP_PG_EDIT_LENGTH); - break; - - case UDP_PG_EDIT_CHECKSUM: - udp_pg_edit_function_inline (pg, s, g, packets, n_packets, - UDP_PG_EDIT_CHECKSUM); - break; - - case UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH: - udp_pg_edit_function_inline (pg, s, g, packets, n_packets, - UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH); - break; - - default: - ASSERT (0); - break; - } -} - -typedef struct -{ - pg_edit_t src_port, dst_port; - pg_edit_t length; - pg_edit_t checksum; -} pg_udp_header_t; - -static inline void -pg_udp_header_init (pg_udp_header_t * p) -{ - /* Initialize fields that are not bit fields in the IP header. */ -#define _(f) pg_edit_init (&p->f, udp_header_t, f); - _(src_port); - _(dst_port); - _(length); - _(checksum); -#undef _ -} - -uword -unformat_pg_udp_header (unformat_input_t * input, va_list * args) -{ - pg_stream_t *s = va_arg (*args, pg_stream_t *); - pg_udp_header_t *p; - u32 group_index; - - p = pg_create_edit_group (s, sizeof (p[0]), sizeof (udp_header_t), - &group_index); - pg_udp_header_init (p); - - /* Defaults. */ - p->checksum.type = PG_EDIT_UNSPECIFIED; - p->length.type = PG_EDIT_UNSPECIFIED; - - if (!unformat (input, "UDP: %U -> %U", - unformat_pg_edit, - unformat_tcp_udp_port, &p->src_port, - unformat_pg_edit, unformat_tcp_udp_port, &p->dst_port)) - goto error; - - /* Parse options. 
*/ - while (1) - { - if (unformat (input, "length %U", - unformat_pg_edit, unformat_pg_number, &p->length)) - ; - - else if (unformat (input, "checksum %U", - unformat_pg_edit, unformat_pg_number, &p->checksum)) - ; - - /* Can't parse input: try next protocol level. */ - else - break; - } - - { - ip_main_t *im = &ip_main; - u16 dst_port; - tcp_udp_port_info_t *pi; - - pi = 0; - if (p->dst_port.type == PG_EDIT_FIXED) - { - dst_port = pg_edit_get_value (&p->dst_port, PG_EDIT_LO); - pi = ip_get_tcp_udp_port_info (im, dst_port); - } - - if (pi && pi->unformat_pg_edit - && unformat_user (input, pi->unformat_pg_edit, s)) - ; - - else if (!unformat_user (input, unformat_pg_payload, s)) - goto error; - - p = pg_get_edit_group (s, group_index); - if (p->checksum.type == PG_EDIT_UNSPECIFIED - || p->length.type == PG_EDIT_UNSPECIFIED) - { - pg_edit_group_t *g = pg_stream_get_group (s, group_index); - g->edit_function = udp_pg_edit_function; - g->edit_function_opaque = 0; - if (p->checksum.type == PG_EDIT_UNSPECIFIED) - g->edit_function_opaque |= UDP_PG_EDIT_CHECKSUM; - if (p->length.type == PG_EDIT_UNSPECIFIED) - g->edit_function_opaque |= UDP_PG_EDIT_LENGTH; - } - - return 1; - } - -error: - /* Free up any edits we may have added. */ - pg_free_edit_group (s); - return 0; -} - - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet/ipsec/ikev2.c b/src/vnet/ipsec/ikev2.c index 09209334..2c1074d8 100644 --- a/src/vnet/ipsec/ikev2.c +++ b/src/vnet/ipsec/ikev2.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/vnet/ipsec/ikev2_cli.c b/src/vnet/ipsec/ikev2_cli.c index 5c88d8d4..05ed4e60 100644 --- a/src/vnet/ipsec/ikev2_cli.c +++ b/src/vnet/ipsec/ikev2_cli.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/vnet/ipsec/ikev2_crypto.c b/src/vnet/ipsec/ikev2_crypto.c index c201d3eb..ca56158f 100644 --- a/src/vnet/ipsec/ikev2_crypto.c +++ b/src/vnet/ipsec/ikev2_crypto.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/vnet/lisp-cp/packets.c b/src/vnet/lisp-cp/packets.c index 3a4f421b..f24024f1 100644 --- a/src/vnet/lisp-cp/packets.c +++ b/src/vnet/lisp-cp/packets.c @@ -15,7 +15,7 @@ #include #include -#include +#include /* Returns IP ID for the packet */ /* static u16 ip_id = 0; @@ -141,61 +141,6 @@ pkt_push_udp (vlib_main_t * vm, vlib_buffer_t * b, u16 sp, u16 dp) return uh; } -void * -pkt_push_ipv4 (vlib_main_t * vm, vlib_buffer_t * b, ip4_address_t * src, - ip4_address_t * dst, int proto) -{ - ip4_header_t *ih; - - /* make some room */ - ih = vlib_buffer_push_uninit (b, sizeof (ip4_header_t)); - - ih->ip_version_and_header_length = 0x45; - ih->tos = 0; - ih->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b)); - - /* iph->fragment_id = clib_host_to_net_u16(get_IP_ID ()); */ - - /* TODO: decide if we allow fragments in case of control */ - ih->flags_and_fragment_offset = clib_host_to_net_u16 (IP_DF); - ih->ttl = 255; - ih->protocol = proto; - ih->src_address.as_u32 = src->as_u32; - ih->dst_address.as_u32 = dst->as_u32; - - ih->checksum = ip4_header_checksum (ih); - return ih; -} - -void * -pkt_push_ipv6 (vlib_main_t * vm, vlib_buffer_t * b, ip6_address_t * src, - ip6_address_t * dst, int proto) -{ - ip6_header_t *ip6h; - u16 payload_length; - - /* make some room */ - ip6h = vlib_buffer_push_uninit (b, sizeof (ip6_header_t)); - - 
ip6h->ip_version_traffic_class_and_flow_label = - clib_host_to_net_u32 (0x6 << 28); - - /* calculate ip6 payload length */ - payload_length = vlib_buffer_length_in_chain (vm, b); - payload_length -= sizeof (*ip6h); - - ip6h->payload_length = clib_host_to_net_u16 (payload_length); - - ip6h->hop_limit = 0xff; - ip6h->protocol = proto; - clib_memcpy (ip6h->src_address.as_u8, src->as_u8, - sizeof (ip6h->src_address)); - clib_memcpy (ip6h->dst_address.as_u8, dst->as_u8, - sizeof (ip6h->src_address)); - - return ip6h; -} - void * pkt_push_ip (vlib_main_t * vm, vlib_buffer_t * b, ip_address_t * src, ip_address_t * dst, u32 proto) @@ -210,12 +155,12 @@ pkt_push_ip (vlib_main_t * vm, vlib_buffer_t * b, ip_address_t * src, switch (ip_addr_version (src)) { case IP4: - return pkt_push_ipv4 (vm, b, &ip_addr_v4 (src), &ip_addr_v4 (dst), - proto); + return vlib_buffer_push_ip4 (vm, b, &ip_addr_v4 (src), + &ip_addr_v4 (dst), proto); break; case IP6: - return pkt_push_ipv6 (vm, b, &ip_addr_v6 (src), &ip_addr_v6 (dst), - proto); + return vlib_buffer_push_ip6 (vm, b, &ip_addr_v6 (src), + &ip_addr_v6 (dst), proto); break; } diff --git a/src/vnet/lisp-cp/packets.h b/src/vnet/lisp-cp/packets.h index 212a1d78..f6da3bf4 100644 --- a/src/vnet/lisp-cp/packets.h +++ b/src/vnet/lisp-cp/packets.h @@ -26,51 +26,6 @@ void *pkt_push_udp_and_ip (vlib_main_t * vm, vlib_buffer_t * b, u16 sp, void *pkt_push_ecm_hdr (vlib_buffer_t * b); -always_inline u8 * -vlib_buffer_get_tail (vlib_buffer_t * b) -{ - return b->data + b->current_data + b->current_length; -} - -always_inline void * -vlib_buffer_put_uninit (vlib_buffer_t * b, u8 size) -{ - /* XXX should make sure there's enough space! */ - void *p = vlib_buffer_get_tail (b); - b->current_length += size; - return p; -} - -always_inline void * -vlib_buffer_push_uninit (vlib_buffer_t * b, u8 size) -{ - /* XXX should make sure there's enough space! */ - ASSERT (b->current_data >= size); - b->current_data -= size; - b->current_length += size; - - return vlib_buffer_get_current (b); -} - -always_inline void * -vlib_buffer_make_headroom (vlib_buffer_t * b, u8 size) -{ - /* XXX should make sure there's enough space! 
*/ - b->current_data += size; - return vlib_buffer_get_current (b); -} - -always_inline void * -vlib_buffer_pull (vlib_buffer_t * b, u8 size) -{ - if (b->current_length < size) - return 0; - - void *data = vlib_buffer_get_current (b); - vlib_buffer_advance (b, size); - return data; -} - /* *INDENT-ON* */ /* diff --git a/src/vnet/lisp-gpe/interface.c b/src/vnet/lisp-gpe/interface.c index 13359277..292c7e6a 100644 --- a/src/vnet/lisp-gpe/interface.c +++ b/src/vnet/lisp-gpe/interface.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/vnet/lisp-gpe/lisp_gpe.h b/src/vnet/lisp-gpe/lisp_gpe.h index c898a7da..b5a50ec6 100644 --- a/src/vnet/lisp-gpe/lisp_gpe.h +++ b/src/vnet/lisp-gpe/lisp_gpe.h @@ -27,10 +27,12 @@ #include #include #include -#include +#include #include #include #include +#include +#include /** IP4-UDP-LISP encap header */ /* *INDENT-OFF* */ diff --git a/src/vnet/lisp-gpe/lisp_gpe_adjacency.c b/src/vnet/lisp-gpe/lisp_gpe_adjacency.c index 65006b81..dbcf7134 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_adjacency.c +++ b/src/vnet/lisp-gpe/lisp_gpe_adjacency.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include /** * Memory pool of all adjacencies diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c new file mode 100644 index 00000000..a561e7d1 --- /dev/null +++ b/src/vnet/session/application.c @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +/* + * Pool from which we allocate all applications + */ +static application_t *app_pool; + +/* + * Hash table of apps by api client index + */ +static uword *app_by_api_client_index; + +int +application_api_queue_is_full (application_t * app) +{ + unix_shared_memory_queue_t *q; + + /* builtin servers are always OK */ + if (app->api_client_index == ~0) + return 0; + + q = vl_api_client_index_to_input_queue (app->api_client_index); + if (!q) + return 1; + + if (q->cursize == q->maxsize) + return 1; + return 0; +} + +static void +application_table_add (application_t * app) +{ + hash_set (app_by_api_client_index, app->api_client_index, app->index); +} + +static void +application_table_del (application_t * app) +{ + hash_unset (app_by_api_client_index, app->api_client_index); +} + +application_t * +application_lookup (u32 api_client_index) +{ + uword *p; + p = hash_get (app_by_api_client_index, api_client_index); + if (p) + return application_get (p[0]); + + return 0; +} + +void +application_del (application_t * app) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + api_main_t *am = &api_main; + void *oldheap; + session_manager_t *sm; + + if (app->mode == APP_SERVER) + { + sm = session_manager_get (app->session_manager_index); + session_manager_del (smm, sm); + } + + /* Free the event fifo in the /vpe-api shared-memory segment */ + oldheap = svm_push_data_heap (am->vlib_rp); + if (app->event_queue) + unix_shared_memory_queue_free (app->event_queue); + svm_pop_heap (oldheap); + + application_table_del (app); + + pool_put (app_pool, app); +} + +application_t * +application_new (application_type_t type, session_type_t sst, + u32 api_client_index, u32 flags, session_cb_vft_t * cb_fns) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + api_main_t *am = &api_main; + application_t *app; + void *oldheap; + session_manager_t *sm; + + pool_get (app_pool, app); + memset (app, 0, sizeof (*app)); + + /* Allocate event fifo in the /vpe-api shared-memory segment */ + oldheap = svm_push_data_heap (am->vlib_rp); + + /* Allocate server event queue */ + app->event_queue = + unix_shared_memory_queue_init (128 /* nels $$$$ config */ , + sizeof (session_fifo_event_t), + 0 /* consumer pid */ , + 0 + /* (do not) signal when queue non-empty */ + ); + + svm_pop_heap (oldheap); + + /* If a server, allocate session manager */ + if (type == APP_SERVER) + { + pool_get (smm->session_managers, sm); + memset (sm, 0, sizeof (*sm)); + + app->session_manager_index = sm - smm->session_managers; + } + else if (type == APP_CLIENT) + { + /* Allocate connect session manager if needed */ + if (smm->connect_manager_index[sst] == INVALID_INDEX) + connects_session_manager_init (smm, sst); + app->session_manager_index = smm->connect_manager_index[sst]; + } + + app->mode = type; + app->index = application_get_index (app); + app->session_type = sst; + app->api_client_index = api_client_index; + app->flags = flags; + app->cb_fns = *cb_fns; + + /* Add app to lookup by api_client_index table */ + application_table_add (app); + + return app; +} + +application_t * +application_get (u32 index) +{ + return pool_elt_at_index (app_pool, index); +} + +u32 +application_get_index (application_t * app) +{ + return app - app_pool; +} + +int +application_server_init (application_t * server, u32 segment_size, + u32 add_segment_size, u32 rx_fifo_size, + u32 tx_fifo_size, u8 ** segment_name) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + session_manager_t *sm; + 
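  /* server->session_manager_index was set in application_new () when the
   * APP_SERVER application was created; here we only look that session
   * manager up and size its first segment and fifo parameters. */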
int rv; + + sm = session_manager_get (server->session_manager_index); + + /* Add first segment */ + if ((rv = session_manager_add_first_segment (smm, sm, segment_size, + segment_name))) + { + return rv; + } + + /* Setup session manager */ + sm->add_segment_size = add_segment_size; + sm->rx_fifo_size = rx_fifo_size; + sm->tx_fifo_size = tx_fifo_size; + sm->add_segment = sm->add_segment_size != 0; + return 0; +} + +u8 * +format_application_server (u8 * s, va_list * args) +{ + application_t *srv = va_arg (*args, application_t *); + int verbose = va_arg (*args, int); + vl_api_registration_t *regp; + stream_session_t *listener; + u8 *server_name, *str, *seg_name; + u32 segment_size; + + if (srv == 0) + { + if (verbose) + s = format (s, "%-40s%-20s%-15s%-15s%-10s", "Connection", "Server", + "Segment", "API Client", "Cookie"); + else + s = format (s, "%-40s%-20s", "Connection", "Server"); + + return s; + } + + regp = vl_api_client_index_to_registration (srv->api_client_index); + if (!regp) + server_name = format (0, "%s%c", regp->name, 0); + else + server_name = regp->name; + + listener = stream_session_listener_get (srv->session_type, + srv->session_index); + str = format (0, "%U", format_stream_session, listener, verbose); + + session_manager_get_segment_info (listener->server_segment_index, &seg_name, + &segment_size); + if (verbose) + { + s = format (s, "%-40s%-20s%-20s%-10d%-10d", str, server_name, + seg_name, srv->api_client_index, srv->accept_cookie); + } + else + s = format (s, "%-40s%-20s", str, server_name); + return s; +} + +u8 * +format_application_client (u8 * s, va_list * args) +{ + application_t *client = va_arg (*args, application_t *); + int verbose = va_arg (*args, int); + stream_session_t *session; + u8 *str, *seg_name; + u32 segment_size; + + if (client == 0) + { + if (verbose) + s = + format (s, "%-40s%-20s%-10s", "Connection", "Segment", + "API Client"); + else + s = format (s, "%-40s", "Connection"); + + return s; + } + + session = stream_session_get (client->session_index, client->thread_index); + str = format (0, "%U", format_stream_session, session, verbose); + + session_manager_get_segment_info (session->server_segment_index, &seg_name, + &segment_size); + if (verbose) + { + s = format (s, "%-40s%-20s%-10d%", str, seg_name, + client->api_client_index); + } + else + s = format (s, "%-40s", str); + return s; +} + +static clib_error_t * +show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + application_t *app; + int do_server = 0; + int do_client = 0; + int verbose = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "server")) + do_server = 1; + else if (unformat (input, "client")) + do_client = 1; + else if (unformat (input, "verbose")) + verbose = 1; + else + break; + } + + if (do_server) + { + if (pool_elts (app_pool)) + { + vlib_cli_output (vm, "%U", format_application_server, + 0 /* header */ , + verbose); + /* *INDENT-OFF* */ + pool_foreach (app, app_pool, + ({ + if (app->mode == APP_SERVER) + vlib_cli_output (vm, "%U", format_application_server, app, + verbose); + })); + /* *INDENT-ON* */ + } + else + vlib_cli_output (vm, "No active server bindings"); + } + + if (do_client) + { + if (pool_elts (app_pool)) + { + vlib_cli_output (vm, "%U", format_application_client, + 0 /* header */ , + verbose); + /* *INDENT-OFF* */ + pool_foreach (app, app_pool, + ({ + if (app->mode == APP_CLIENT) + vlib_cli_output (vm, "%U", format_application_client, app, + verbose); + })); + /* *INDENT-ON* 
*/ + } + else + vlib_cli_output (vm, "No active server bindings"); + } + + return 0; +} + +VLIB_CLI_COMMAND (show_app_command, static) = +{ +.path = "show app",.short_help = + "show app [server|client] [verbose]",.function = show_app_command_fn,}; + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h new file mode 100644 index 00000000..027d6967 --- /dev/null +++ b/src/vnet/session/application.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SRC_VNET_SESSION_APPLICATION_H_ +#define SRC_VNET_SESSION_APPLICATION_H_ + +#include +#include + +typedef enum +{ + APP_SERVER, + APP_CLIENT +} application_type_t; + +typedef struct _stream_session_cb_vft +{ + /** Notify server of new segment */ + int (*add_segment_callback) (u32 api_client_index, const u8 * seg_name, + u32 seg_size); + + /** Notify server of newly accepted session */ + int (*session_accept_callback) (stream_session_t * new_session); + + /* Connection request callback */ + int (*session_connected_callback) (u32 api_client_index, + stream_session_t * s, u8 code); + + /** Notify app that session is closing */ + void (*session_disconnect_callback) (stream_session_t * s); + + /** Notify app that session was reset */ + void (*session_reset_callback) (stream_session_t * s); + + /* Direct RX callback, for built-in servers */ + int (*builtin_server_rx_callback) (stream_session_t * session); + + /* Redirect connection to local server */ + int (*redirect_connect_callback) (u32 api_client_index, void *mp); +} session_cb_vft_t; + +typedef struct _application +{ + /** Index in server pool */ + u32 index; + + /** Flags */ + u32 flags; + + /** Binary API connection index, ~0 if internal */ + u32 api_client_index; + + /* */ + u32 api_context; + + /** Application listens for events on this svm queue */ + unix_shared_memory_queue_t *event_queue; + + /** Stream session type */ + u8 session_type; + + /* Stream server mode: accept or connect */ + u8 mode; + + u32 session_manager_index; + + /* + * Bind/Listen specific + */ + + /** Accept cookie, for multiple session flavors ($$$ maybe) */ + u32 accept_cookie; + + /** Index of the listen session or connect session */ + u32 session_index; + + /** Session thread index for client connect sessions */ + u32 thread_index; + + /* + * Callbacks: shoulder-taps for the server/client + */ + session_cb_vft_t cb_fns; +} application_t; + +application_t *application_new (application_type_t type, session_type_t sst, + u32 api_client_index, u32 flags, + session_cb_vft_t * cb_fns); +void application_del (application_t * app); +application_t *application_get (u32 index); +application_t *application_lookup (u32 api_client_index); +u32 application_get_index (application_t * app); + +int +application_server_init (application_t * server, u32 segment_size, + u32 add_segment_size, u32 
rx_fifo_size, + u32 tx_fifo_size, u8 ** segment_name); +int application_api_queue_is_full (application_t * app); + +#endif /* SRC_VNET_SESSION_APPLICATION_H_ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c new file mode 100644 index 00000000..0ea77fd8 --- /dev/null +++ b/src/vnet/session/application_interface.c @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +#include +#include +#include +#include + +/** @file + VPP's application/session API bind/unbind/connect/disconnect calls +*/ + +static u8 +ip_is_zero (ip46_address_t * ip46_address, u8 is_ip4) +{ + if (is_ip4) + return (ip46_address->ip4.as_u32 == 0); + else + return (ip46_address->as_u64[0] == 0 && ip46_address->as_u64[1] == 0); +} + +static u8 +ip_is_local (ip46_address_t * ip46_address, u8 is_ip4) +{ + fib_node_index_t fei; + fib_entry_flag_t flags; + fib_prefix_t prefix; + + /* Check if requester is local */ + if (is_ip4) + { + prefix.fp_len = 32; + prefix.fp_proto = FIB_PROTOCOL_IP4; + } + else + { + prefix.fp_len = 128; + prefix.fp_proto = FIB_PROTOCOL_IP6; + } + + clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address)); + fei = fib_table_lookup (0, &prefix); + flags = fib_entry_get_flags (fei); + + return (flags & FIB_ENTRY_FLAG_LOCAL); +} + +int +api_parse_session_handle (u64 handle, u32 * session_index, u32 * thread_index) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + stream_session_t *pool; + + *thread_index = handle & 0xFFFFFFFF; + *session_index = handle >> 32; + + if (*thread_index >= vec_len (smm->sessions)) + return VNET_API_ERROR_INVALID_VALUE; + + pool = smm->sessions[*thread_index]; + + if (pool_is_free_index (pool, *session_index)) + return VNET_API_ERROR_INVALID_VALUE_2; + + return 0; +} + +int +vnet_bind_i (u32 api_client_index, ip46_address_t * ip46, u16 port_host_order, + session_type_t sst, u64 * options, session_cb_vft_t * cb_fns, + application_t ** app, u32 * len_seg_name, char *seg_name) +{ + u8 *segment_name = 0; + application_t *server = 0; + stream_session_t *listener; + u8 is_ip4; + + listener = + stream_session_lookup_listener (ip46, + clib_host_to_net_u16 (port_host_order), + sst); + + if (listener) + return VNET_API_ERROR_ADDRESS_IN_USE; + + if (application_lookup (api_client_index)) + { + clib_warning ("Only one bind supported for now"); + return VNET_API_ERROR_ADDRESS_IN_USE; + } + + is_ip4 = SESSION_TYPE_IP4_UDP == sst || SESSION_TYPE_IP4_TCP == sst; + if (!ip_is_zero (ip46, is_ip4) && !ip_is_local (ip46, is_ip4)) + return VNET_API_ERROR_INVALID_VALUE; + + /* Allocate and initialize stream server */ + server = application_new (APP_SERVER, sst, api_client_index, + options[SESSION_OPTIONS_FLAGS], cb_fns); + + application_server_init (server, options[SESSION_OPTIONS_SEGMENT_SIZE], + 
options[SESSION_OPTIONS_ADD_SEGMENT_SIZE], + options[SESSION_OPTIONS_RX_FIFO_SIZE], + options[SESSION_OPTIONS_TX_FIFO_SIZE], + &segment_name); + + /* Setup listen path down to transport */ + stream_session_start_listen (server->index, ip46, port_host_order); + + /* + * Return values + */ + + ASSERT (vec_len (segment_name) <= 128); + *len_seg_name = vec_len (segment_name); + memcpy (seg_name, segment_name, *len_seg_name); + *app = server; + + return 0; +} + +int +vnet_unbind_i (u32 api_client_index) +{ + application_t *server; + + /* + * Find the stream_server_t corresponding to the api client + */ + server = application_lookup (api_client_index); + if (!server) + return VNET_API_ERROR_INVALID_VALUE_2; + + /* Clear the listener */ + stream_session_stop_listen (server->index); + application_del (server); + + return 0; +} + +int +vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, + ip46_address_t * ip46, u16 port, u64 * options, void *mp, + session_cb_vft_t * cb_fns) +{ + stream_session_t *listener; + application_t *server, *app; + + /* + * Figure out if connecting to a local server + */ + listener = stream_session_lookup_listener (ip46, + clib_host_to_net_u16 (port), + sst); + if (listener) + { + server = application_get (listener->app_index); + + /* + * Server is willing to have a direct fifo connection created + * instead of going through the state machine, etc. + */ + if (server->flags & SESSION_OPTIONS_FLAGS_USE_FIFO) + return server->cb_fns. + redirect_connect_callback (server->api_client_index, mp); + } + + /* Create client app */ + app = application_new (APP_CLIENT, sst, api_client_index, + options[SESSION_OPTIONS_FLAGS], cb_fns); + + app->api_context = api_context; + + /* + * Not connecting to a local server. Create regular session + */ + stream_session_open (sst, ip46, port, app->index); + + return 0; +} + +/** + * unformat a vnet URI + * + * fifo://name + * tcp://ip46-addr:port + * udp://ip46-addr:port + * + * u8 ip46_address[16]; + * u16 port_in_host_byte_order; + * stream_session_type_t sst; + * u8 *fifo_name; + * + * if (unformat (input, "%U", unformat_vnet_uri, &ip46_address, + * &sst, &port, &fifo_name)) + * etc... 
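 *
 * A more concrete sketch, reusing the string helpers from parse_uri ()
 * below; the address and port literals are illustrative only:
 *
 *   unformat_init_string (input, "tcp://6.0.1.1/1234",
 *                         strlen ("tcp://6.0.1.1/1234"));
 *   if (unformat (input, "%U", unformat_vnet_uri, &ip46_address,
 *                 &sst, &port))
 *     => sst == SESSION_TYPE_IP4_TCP, port == 1234 (host byte order)
 *   unformat_free (input);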
+ * + */ +uword +unformat_vnet_uri (unformat_input_t * input, va_list * args) +{ + ip46_address_t *address = va_arg (*args, ip46_address_t *); + session_type_t *sst = va_arg (*args, session_type_t *); + u16 *port = va_arg (*args, u16 *); + + if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &address->ip4, + port)) + { + *sst = SESSION_TYPE_IP4_TCP; + return 1; + } + if (unformat (input, "udp://%U/%d", unformat_ip4_address, &address->ip4, + port)) + { + *sst = SESSION_TYPE_IP4_UDP; + return 1; + } + if (unformat (input, "udp://%U/%d", unformat_ip6_address, &address->ip6, + port)) + { + *sst = SESSION_TYPE_IP6_UDP; + return 1; + } + if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &address->ip6, + port)) + { + *sst = SESSION_TYPE_IP6_TCP; + return 1; + } + + return 0; +} + +int +parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, + u16 * port_number_host_byte_order) +{ + unformat_input_t _input, *input = &_input; + + /* Make sure */ + uri = (char *) format (0, "%s%c", uri, 0); + + /* Parse uri */ + unformat_init_string (input, uri, strlen (uri)); + if (!unformat (input, "%U", unformat_vnet_uri, addr, sst, + port_number_host_byte_order)) + { + unformat_free (input); + return VNET_API_ERROR_INVALID_VALUE; + } + unformat_free (input); + + return 0; +} + +int +vnet_bind_uri (vnet_bind_args_t * a) +{ + application_t *server = 0; + u16 port_host_order; + session_type_t sst = SESSION_N_TYPES; + ip46_address_t ip46; + int rv; + + memset (&ip46, 0, sizeof (ip46)); + rv = parse_uri (a->uri, &sst, &ip46, &port_host_order); + if (rv) + return rv; + + if ((rv = vnet_bind_i (a->api_client_index, &ip46, port_host_order, sst, + a->options, a->session_cb_vft, &server, + &a->segment_name_length, a->segment_name))) + return rv; + + a->server_event_queue_address = (u64) server->event_queue; + return 0; +} + +session_type_t +session_type_from_proto_and_ip (session_api_proto_t proto, u8 is_ip4) +{ + if (proto == SESSION_PROTO_TCP) + { + if (is_ip4) + return SESSION_TYPE_IP4_TCP; + else + return SESSION_TYPE_IP6_TCP; + } + else + { + if (is_ip4) + return SESSION_TYPE_IP4_UDP; + else + return SESSION_TYPE_IP6_UDP; + } + + return SESSION_N_TYPES; +} + +int +vnet_unbind_uri (char *uri, u32 api_client_index) +{ + u16 port_number_host_byte_order; + session_type_t sst = SESSION_N_TYPES; + ip46_address_t ip46_address; + stream_session_t *listener; + int rv; + + rv = parse_uri (uri, &sst, &ip46_address, &port_number_host_byte_order); + if (rv) + return rv; + + listener = + stream_session_lookup_listener (&ip46_address, + clib_host_to_net_u16 + (port_number_host_byte_order), sst); + + if (!listener) + return VNET_API_ERROR_ADDRESS_NOT_IN_USE; + + /* External client? 
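Built-in applications register with api_client_index == ~0 (see
    application.h), so the registration check below only applies to
    external binary-API clients.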
*/ + if (api_client_index != ~0) + { + ASSERT (vl_api_client_index_to_registration (api_client_index)); + } + + return vnet_unbind_i (api_client_index); +} + +int +vnet_connect_uri (vnet_connect_args_t * a) +{ + ip46_address_t ip46_address; + u16 port; + session_type_t sst; + application_t *app; + int rv; + + app = application_lookup (a->api_client_index); + if (app) + { + clib_warning ("Already have a connect from this app"); + return VNET_API_ERROR_INVALID_VALUE_2; + } + + /* Parse uri */ + rv = parse_uri (a->uri, &sst, &ip46_address, &port); + if (rv) + return rv; + + return vnet_connect_i (a->api_client_index, a->api_context, sst, + &ip46_address, port, a->options, a->mp, + a->session_cb_vft); +} + +int +vnet_disconnect_session (u32 client_index, u32 session_index, + u32 thread_index) +{ + stream_session_t *session; + + session = stream_session_get (session_index, thread_index); + stream_session_disconnect (session); + + return 0; +} + + +int +vnet_bind (vnet_bind_args_t * a) +{ + application_t *server = 0; + session_type_t sst = SESSION_N_TYPES; + int rv; + + sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); + if ((rv = vnet_bind_i (a->api_client_index, &a->tep.ip, a->tep.port, sst, + a->options, a->session_cb_vft, &server, + &a->segment_name_length, a->segment_name))) + return rv; + + a->server_event_queue_address = (u64) server->event_queue; + a->handle = (u64) a->tep.vrf << 32 | (u64) server->session_index; + return 0; +} + +int +vnet_unbind (vnet_unbind_args_t * a) +{ + application_t *server; + + if (a->api_client_index != ~0) + { + ASSERT (vl_api_client_index_to_registration (a->api_client_index)); + } + + /* Make sure this is the right one */ + server = application_lookup (a->api_client_index); + ASSERT (server->session_index == (0xFFFFFFFF & a->handle)); + + /* TODO use handle to disambiguate namespaces/vrfs */ + return vnet_unbind_i (a->api_client_index); +} + +int +vnet_connect (vnet_connect_args_t * a) +{ + session_type_t sst; + application_t *app; + + app = application_lookup (a->api_client_index); + if (app) + { + clib_warning ("Already have a connect from this app"); + return VNET_API_ERROR_INVALID_VALUE_2; + } + + sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); + return vnet_connect_i (a->api_client_index, a->api_context, sst, &a->tep.ip, + a->tep.port, a->options, a->mp, a->session_cb_vft); +} + +int +vnet_disconnect (vnet_disconnect_args_t * a) +{ + stream_session_t *session; + u32 session_index, thread_index; + + if (api_parse_session_handle (a->handle, &session_index, &thread_index)) + { + clib_warning ("Invalid handle"); + return -1; + } + + session = stream_session_get (session_index, thread_index); + stream_session_disconnect (session); + + return 0; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h new file mode 100644 index 00000000..8d87c067 --- /dev/null +++ b/src/vnet/session/application_interface.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __included_uri_h__ +#define __included_uri_h__ + +#include +#include +#include +#include +#include +#include + +typedef enum _session_api_proto +{ + SESSION_PROTO_TCP, + SESSION_PROTO_UDP +} session_api_proto_t; + +typedef struct _vnet_bind_args_t +{ + union + { + char *uri; + struct + { + transport_endpoint_t tep; + session_api_proto_t proto; + }; + }; + + u32 api_client_index; + u64 *options; + session_cb_vft_t *session_cb_vft; + + /* + * Results + */ + char *segment_name; + u32 segment_name_length; + u64 server_event_queue_address; + u64 handle; +} vnet_bind_args_t; + +typedef struct _vnet_unbind_args_t +{ + union + { + char *uri; + u64 handle; + }; + u32 api_client_index; +} vnet_unbind_args_t; + +typedef struct _vnet_connect_args +{ + union + { + char *uri; + struct + { + transport_endpoint_t tep; + session_api_proto_t proto; + }; + }; + u32 api_client_index; + u32 api_context; + u64 *options; + session_cb_vft_t *session_cb_vft; + + /* Used for redirects */ + void *mp; +} vnet_connect_args_t; + +typedef struct _vnet_disconnect_args_t +{ + u64 handle; + u32 api_client_index; +} vnet_disconnect_args_t; + +/* Bind / connect options */ +typedef enum +{ + SESSION_OPTIONS_FLAGS, + SESSION_OPTIONS_SEGMENT_SIZE, + SESSION_OPTIONS_ADD_SEGMENT_SIZE, + SESSION_OPTIONS_RX_FIFO_SIZE, + SESSION_OPTIONS_TX_FIFO_SIZE, + SESSION_OPTIONS_ACCEPT_COOKIE, + SESSION_OPTIONS_N_OPTIONS +} session_options_index_t; + +/** Server can handle delegated connect requests from local clients */ +#define SESSION_OPTIONS_FLAGS_USE_FIFO (1<<0) + +/** Server wants vpp to add segments when out of memory for fifos */ +#define SESSION_OPTIONS_FLAGS_ADD_SEGMENT (1<<1) + +#define VNET_CONNECT_REDIRECTED 123 + +int vnet_bind_uri (vnet_bind_args_t *); +int vnet_unbind_uri (char *uri, u32 api_client_index); +int vnet_connect_uri (vnet_connect_args_t * a); +int +vnet_disconnect_session (u32 client_index, u32 session_index, + u32 thread_index); + +int vnet_bind (vnet_bind_args_t * a); +int vnet_connect (vnet_connect_args_t * a); +int vnet_unbind (vnet_unbind_args_t * a); +int vnet_disconnect (vnet_disconnect_args_t * a); + +int +api_parse_session_handle (u64 handle, u32 * session_index, + u32 * thread_index); + +#endif /* __included_uri_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/hashes.c b/src/vnet/session/hashes.c new file mode 100644 index 00000000..1808dd73 --- /dev/null +++ b/src/vnet/session/hashes.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Generate typed init functions for multiple hash table styles... */ + +#include +#include + +#include + +#undef __included_bihash_template_h__ + +#include +#include + +#include diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c new file mode 100644 index 00000000..e467f4e9 --- /dev/null +++ b/src/vnet/session/node.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include + +vlib_node_registration_t session_queue_node; + +typedef struct +{ + u32 session_index; + u32 server_thread_index; +} session_queue_trace_t; + +/* packet trace format function */ +static u8 * +format_session_queue_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *); + + s = format (s, "SESSION_QUEUE: session index %d, server thread index %d", + t->session_index, t->server_thread_index); + return s; +} + +vlib_node_registration_t session_queue_node; + +#define foreach_session_queue_error \ +_(TX, "Packets transmitted") \ +_(TIMER, "Timer events") + +typedef enum +{ +#define _(sym,str) SESSION_QUEUE_ERROR_##sym, + foreach_session_queue_error +#undef _ + SESSION_QUEUE_N_ERROR, +} session_queue_error_t; + +static char *session_queue_error_strings[] = { +#define _(sym,string) string, + foreach_session_queue_error +#undef _ +}; + +static u32 session_type_to_next[] = { + SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT, + SESSION_QUEUE_NEXT_IP4_LOOKUP, + SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT, + SESSION_QUEUE_NEXT_IP6_LOOKUP, +}; + +always_inline int +session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, + session_manager_main_t * smm, session_fifo_event_t * e0, + stream_session_t * s0, u32 thread_index, int *n_tx_packets, + u8 peek_data) +{ + u32 n_trace = vlib_get_trace_count (vm, node); + u32 left_to_snd0, max_len_to_snd0, len_to_deq0, n_bufs, snd_space0; + u32 n_frame_bytes, n_frames_per_evt; + transport_connection_t *tc0; + transport_proto_vft_t *transport_vft; + u32 next_index, next0, *to_next, n_left_to_next, bi0; + vlib_buffer_t *b0; + u32 rx_offset; + u16 snd_mss0; + u8 *data0; + int i; + + next_index = next0 = session_type_to_next[s0->session_type]; + + transport_vft = session_get_transport_vft (s0->session_type); + tc0 = transport_vft->get_connection (s0->connection_index, thread_index); + + /* Make sure we have space to send and there's something to dequeue */ + snd_space0 = transport_vft->send_space (tc0); + snd_mss0 = transport_vft->send_mss (tc0); + + if (snd_space0 == 0 || svm_fifo_max_dequeue (s0->server_tx_fifo) == 0 + || snd_mss0 == 0) + return 0; + + ASSERT (e0->enqueue_length > 0); + + /* Ensure we're not 
writing more than transport window allows */ + max_len_to_snd0 = clib_min (e0->enqueue_length, snd_space0); + + if (peek_data) + { + /* Offset in rx fifo from where to peek data */ + rx_offset = transport_vft->rx_fifo_offset (tc0); + } + + /* TODO check if transport is willing to send len_to_snd0 + * bytes (Nagle) */ + + n_frame_bytes = snd_mss0 * VLIB_FRAME_SIZE; + n_frames_per_evt = ceil ((double) max_len_to_snd0 / n_frame_bytes); + + n_bufs = vec_len (smm->tx_buffers[thread_index]); + left_to_snd0 = max_len_to_snd0; + for (i = 0; i < n_frames_per_evt; i++) + { + /* Make sure we have at least one full frame of buffers ready */ + if (PREDICT_FALSE (n_bufs < VLIB_FRAME_SIZE)) + { + vec_validate (smm->tx_buffers[thread_index], + n_bufs + VLIB_FRAME_SIZE - 1); + n_bufs += + vlib_buffer_alloc (vm, &smm->tx_buffers[thread_index][n_bufs], + VLIB_FRAME_SIZE); + + /* buffer shortage + * XXX 0.9 because when debugging we might not get a full frame */ + if (PREDICT_FALSE (n_bufs < 0.9 * VLIB_FRAME_SIZE)) + { + /* Keep track of how much we've dequeued and exit */ + e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; + return -1; + } + + _vec_len (smm->tx_buffers[thread_index]) = n_bufs; + } + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + while (left_to_snd0 && n_left_to_next) + { + /* Get free buffer */ + n_bufs--; + bi0 = smm->tx_buffers[thread_index][n_bufs]; + _vec_len (smm->tx_buffers[thread_index]) = n_bufs; + + b0 = vlib_get_buffer (vm, bi0); + b0->error = 0; + b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID + | VNET_BUFFER_LOCALLY_ORIGINATED; + b0->current_data = 0; + + /* RX on the local interface. tx in default fib */ + vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + + /* usual speculation, or the enqueue_x1 macro will barf */ + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); + if (PREDICT_FALSE (n_trace > 0)) + { + session_queue_trace_t *t0; + vlib_trace_buffer (vm, node, next_index, b0, + 1 /* follow_chain */ ); + vlib_set_trace_count (vm, node, --n_trace); + t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); + t0->session_index = s0->session_index; + t0->server_thread_index = s0->thread_index; + } + + if (1) + { + ELOG_TYPE_DECLARE (e) = + { + .format = "evt-dequeue: id %d length %d",.format_args = + "i4i4",}; + struct + { + u32 data[2]; + } *ed; + ed = ELOG_DATA (&vm->elog_main, e); + ed->data[0] = e0->event_id; + ed->data[1] = e0->enqueue_length; + } + + len_to_deq0 = (left_to_snd0 < snd_mss0) ? 
left_to_snd0 : snd_mss0; + + /* Make room for headers */ + data0 = vlib_buffer_make_headroom (b0, MAX_HDRS_LEN); + + /* Dequeue the data + * TODO 1) peek instead of dequeue + * 2) buffer chains */ + if (peek_data) + { + int n_bytes_read; + n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, s0->pid, + rx_offset, len_to_deq0, data0); + if (n_bytes_read < 0) + goto dequeue_fail; + + /* Keep track of progress locally, transport is also supposed to + * increment it independently when pushing header */ + rx_offset += n_bytes_read; + } + else + { + if (svm_fifo_dequeue_nowait (s0->server_tx_fifo, s0->pid, + len_to_deq0, data0) < 0) + goto dequeue_fail; + } + + b0->current_length = len_to_deq0; + + /* Ask transport to push header */ + transport_vft->push_header (tc0, b0); + + left_to_snd0 -= len_to_deq0; + *n_tx_packets = *n_tx_packets + 1; + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* If we couldn't dequeue all bytes store progress */ + if (max_len_to_snd0 < e0->enqueue_length) + { + e0->enqueue_length -= max_len_to_snd0; + vec_add1 (smm->evts_partially_read[thread_index], *e0); + } + return 0; + +dequeue_fail: + /* Can't read from fifo. Store event rx progress, save as partially read, + * return buff to free list and return */ + e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; + vec_add1 (smm->evts_partially_read[thread_index], *e0); + + to_next -= 1; + n_left_to_next += 1; + _vec_len (smm->tx_buffers[thread_index]) += 1; + + clib_warning ("dequeue fail"); + return 0; +} + +int +session_fifo_rx_peek (vlib_main_t * vm, vlib_node_runtime_t * node, + session_manager_main_t * smm, session_fifo_event_t * e0, + stream_session_t * s0, u32 thread_index, int *n_tx_pkts) +{ + return session_fifo_rx_i (vm, node, smm, e0, s0, thread_index, n_tx_pkts, + 1); +} + +int +session_fifo_rx_dequeue (vlib_main_t * vm, vlib_node_runtime_t * node, + session_manager_main_t * smm, + session_fifo_event_t * e0, stream_session_t * s0, + u32 thread_index, int *n_tx_pkts) +{ + return session_fifo_rx_i (vm, node, smm, e0, s0, thread_index, n_tx_pkts, + 0); +} + +static uword +session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + session_fifo_event_t *my_fifo_events, *e; + u32 n_to_dequeue; + unix_shared_memory_queue_t *q; + int n_tx_packets = 0; + u32 my_thread_index = vm->cpu_index; + int i, rv; + + /* + * Update TCP time + */ + tcp_update_time (vlib_time_now (vm), my_thread_index); + + /* + * Get vpp queue events + */ + q = smm->vpp_event_queues[my_thread_index]; + if (PREDICT_FALSE (q == 0)) + return 0; + + /* min number of events we can dequeue without blocking */ + n_to_dequeue = q->cursize; + if (n_to_dequeue == 0) + return 0; + + my_fifo_events = smm->fifo_events[my_thread_index]; + + /* If we didn't manage to process previous events try going + * over them again without dequeuing new ones. 
+ * XXX: Block senders to sessions that can't keep up */ + if (vec_len (my_fifo_events) >= 100) + goto skip_dequeue; + + /* See you in the next life, don't be late */ + if (pthread_mutex_trylock (&q->mutex)) + return 0; + + for (i = 0; i < n_to_dequeue; i++) + { + vec_add2 (my_fifo_events, e, 1); + unix_shared_memory_queue_sub_raw (q, (u8 *) e); + } + + /* The other side of the connection is not polling */ + if (q->cursize < (q->maxsize / 8)) + (void) pthread_cond_broadcast (&q->condvar); + pthread_mutex_unlock (&q->mutex); + + smm->fifo_events[my_thread_index] = my_fifo_events; + +skip_dequeue: + + for (i = 0; i < n_to_dequeue; i++) + { + svm_fifo_t *f0; /* $$$ prefetch 1 ahead maybe */ + stream_session_t *s0; + u32 server_session_index0, server_thread_index0; + session_fifo_event_t *e0; + + e0 = &my_fifo_events[i]; + f0 = e0->fifo; + server_session_index0 = f0->server_session_index; + server_thread_index0 = f0->server_thread_index; + + /* $$$ add multiple event queues, per vpp worker thread */ + ASSERT (server_thread_index0 == my_thread_index); + + s0 = pool_elt_at_index (smm->sessions[my_thread_index], + server_session_index0); + + ASSERT (s0->thread_index == my_thread_index); + + switch (e0->event_type) + { + case FIFO_EVENT_SERVER_TX: + /* Spray packets in per session type frames, since they go to + * different nodes */ + rv = (smm->session_rx_fns[s0->session_type]) (vm, node, smm, e0, s0, + my_thread_index, + &n_tx_packets); + if (rv < 0) + goto done; + + break; + + default: + clib_warning ("unhandled event type %d", e0->event_type); + } + } + +done: + + /* Couldn't process all events. Probably out of buffers */ + if (PREDICT_FALSE (i < n_to_dequeue)) + { + session_fifo_event_t *partially_read = + smm->evts_partially_read[my_thread_index]; + vec_add (partially_read, &my_fifo_events[i], n_to_dequeue - i); + vec_free (my_fifo_events); + smm->fifo_events[my_thread_index] = partially_read; + smm->evts_partially_read[my_thread_index] = 0; + } + else + { + vec_free (smm->fifo_events[my_thread_index]); + smm->fifo_events[my_thread_index] = + smm->evts_partially_read[my_thread_index]; + smm->evts_partially_read[my_thread_index] = 0; + } + + vlib_node_increment_counter (vm, session_queue_node.index, + SESSION_QUEUE_ERROR_TX, n_tx_packets); + + return n_tx_packets; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (session_queue_node) = +{ + .function = session_queue_node_fn, + .name = "session-queue", + .format_trace = format_session_queue_trace, + .type = VLIB_NODE_TYPE_INPUT, + .n_errors = ARRAY_LEN (session_queue_error_strings), + .error_strings = session_queue_error_strings, + .n_next_nodes = SESSION_QUEUE_N_NEXT, + /* .state = VLIB_NODE_STATE_DISABLED, enable on-demand? */ + /* edit / add dispositions here */ + .next_nodes = + { + [SESSION_QUEUE_NEXT_DROP] = "error-drop", + [SESSION_QUEUE_NEXT_IP4_LOOKUP] = "ip4-lookup", + [SESSION_QUEUE_NEXT_IP6_LOOKUP] = "ip6-lookup", + [SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT] = "tcp4-output", + [SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT] = "tcp6-output", + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api new file mode 100644 index 00000000..a7b28c1d --- /dev/null +++ b/src/vnet/session/session.api @@ -0,0 +1,429 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + /** \brief Bind to a given URI + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param accept_cookie - sender accept cookie, to identify this bind flavor + @param uri - a URI, e.g. "tcp://0.0.0.0/0/80" [ipv4] + "tcp://::/0/80" [ipv6] etc. + @param options - socket options, fifo sizes, etc. +*/ +define bind_uri { + u32 client_index; + u32 context; + u32 accept_cookie; + u32 initial_segment_size; + u8 uri[128]; + u64 options[16]; +}; + +/** \brief Unbind a given URI + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param uri - a URI, e.g. "tcp://0.0.0.0/0/80" [ipv4] + "tcp://::/0/80" [ipv6], etc. + @param options - socket options, fifo sizes, etc. +*/ +define unbind_uri { + u32 client_index; + u32 context; + u8 uri[128]; +}; + +/** \brief Connect to a given URI + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param accept_cookie - sender accept cookie, to identify this bind flavor + @param uri - a URI, e.g. "tcp4://0.0.0.0/0/80" + "tcp6://::/0/80" [ipv6], etc. + @param options - socket options, fifo sizes, etc. 
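                   The options array is read on the vpp side via the
                   session_options_index_t enum declared in
                   application_interface.h, e.g. options[SESSION_OPTIONS_FLAGS]
                   and options[SESSION_OPTIONS_SEGMENT_SIZE].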
+*/ +define connect_uri { + u32 client_index; + u32 context; + u8 uri[128]; + u64 client_queue_address; + u64 options[16]; +}; + +/** \brief Bind reply + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param event_queue_address - vpp event queue address or 0 if this + connection shouldn't send events + @param segment_name_length - length of segment name + @param segment_name - name of segment client needs to attach to +*/ +define bind_uri_reply { + u32 context; + i32 retval; + u64 server_event_queue_address; + u8 segment_name_length; + u32 segment_size; + u8 segment_name[128]; +}; + +/** \brief unbind reply + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define unbind_uri_reply { + u32 context; + i32 retval; +}; + +/** \brief vpp->client, connect reply + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param server_rx_fifo - rx (vpp -> vpp-client) fifo address + @param server_tx_fifo - tx (vpp-client -> vpp) fifo address + @param session_index - session index; + @param session_thread_index - session thread index + @param session_type - session thread type + @param vpp_event_queue_address - vpp's event queue address + @param client_event_queue_address - client's event queue address + @param segment_name_length - non-zero if the client needs to attach to + the fifo segment + @param segment_name - set if the client needs to attach to the segment +*/ +define connect_uri_reply { + u32 context; + i32 retval; + u64 server_rx_fifo; + u64 server_tx_fifo; + u32 session_index; + u32 session_thread_index; + u8 session_type; + u64 client_event_queue_address; + u64 vpp_event_queue_address; + u32 segment_size; + u8 segment_name_length; + u8 segment_name[128]; +}; + +/** \brief vpp->client, please map an additional shared memory segment + @param context - sender context, to match reply w/ request + @param segment_name - +*/ +define map_another_segment { + u32 client_index; + u32 context; + u32 segment_size; + u8 segment_name[128]; +}; + +/** \brief client->vpp + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define map_another_segment_reply { + u32 context; + i32 retval; +}; + +/** \brief vpp->client, accept this session + @param context - sender context, to match reply w/ request + @param accept_cookie - tells client which bind flavor just occurred + @param rx_fifo_address - rx (vpp -> vpp-client) fifo address + @param tx_fifo_address - tx (vpp-client -> vpp) fifo address + @param session_index - index of new session + @param session_thread_index - thread index of new session + @param vpp_event_queue_address - vpp's event queue address + @param session_type - type of session + +*/ +define accept_session { + u32 client_index; + u32 context; + u32 accept_cookie; + u64 server_rx_fifo; + u64 server_tx_fifo; + u32 session_index; + u32 session_thread_index; + u64 vpp_event_queue_address; + u8 session_type; +}; + +/** \brief client->vpp, reply to an accept message + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param session_index - session index from accept_session / connect_reply + @param session_thread_index - thread index from accept_session / + connect_reply +*/ +define accept_session_reply { + u32 context; + i32 retval; + u8 session_type; + u8 session_thread_index; + u32 session_index; +}; + +/** \brief 
bidirectional disconnect API + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param session_index - cookie #1 from accept_session / connect_reply + @param session_thread_index - cookie #2 +*/ +define disconnect_session { + u32 client_index; + u32 context; + u32 session_index; + u32 session_thread_index; +}; + +/** \brief bidirectional disconnect reply API + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param session_index - session index from accept_session / connect_reply + @param session_thread_index - thread index from accept_session / + connect_reply +*/ +define disconnect_session_reply { + u32 client_index; + u32 context; + i32 retval; + u32 session_index; + u32 session_thread_index; +}; + +/** \brief vpp->client reset session API + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param session_index - session index from accept_session / connect_reply + @param session_thread_index - thread index from accept_session / + connect_reply +*/ +define reset_session { + u32 client_index; + u32 context; + u32 session_index; + u32 session_thread_index; +}; + +/** \brief client->vpp reset session reply + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param session_index - session index from accept_session / connect_reply + @param session_thread_index - thread index from accept_session / + connect_reply +*/ +define reset_session_reply { + u32 client_index; + u32 context; + i32 retval; + u32 session_index; + u32 session_thread_index; +}; + +/** \brief Bind to an ip:port pair for a given transport protocol + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vrf - bind namespace + @param is_ip4 - flag that is 1 if ip address family is IPv4 + @param ip - ip address + @param port - port + @param proto - protocol 0 - TCP 1 - UDP + @param options - socket options, fifo sizes, etc. +*/ +define bind_sock { + u32 client_index; + u32 context; + u32 vrf; + u8 is_ip4; + u8 ip[16]; + u16 port; + u8 proto; + u64 options[16]; +}; + +/** \brief Unbind + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param handle - bind handle obtained from bind reply +*/ +define unbind_sock { + u32 client_index; + u32 context; + u64 handle; +}; + +/** \brief Connect to a remote peer + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param vrf - connection namespace + @param is_ip4 - flag that is 1 if ip address family is IPv4 + @param ip - ip address + @param port - port + @param proto - protocol 0 - TCP 1 - UDP + @param client_queue_address - client's API queue address. Non-zero when + used to perform redirects + @param options - socket options, fifo sizes, etc. 
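    If the listener was bound with the SESSION_OPTIONS_FLAGS_USE_FIFO flag,
    vpp hands the request to the server application via its
    redirect_connect_callback instead of opening a new transport session
    (see vnet_connect_i in application_interface.c).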
+*/ +define connect_sock { + u32 client_index; + u32 context; + u32 vrf; + u8 is_ip4; + u8 ip[16]; + u16 port; + u8 proto; + u64 client_queue_address; + u64 options[16]; +}; + +/** \brief Bind reply + @param context - sender context, to match reply w/ request + @param handle - bind handle + @param retval - return code for the request + @param event_queue_address - vpp event queue address or 0 if this + connection shouldn't send events + @param segment_name_length - length of segment name + @param segment_name - name of segment client needs to attach to +*/ +define bind_sock_reply { + u32 context; + u64 handle; + i32 retval; + u64 server_event_queue_address; + u32 segment_size; + u8 segment_name_length; + u8 segment_name[128]; +}; + +/** \brief unbind reply + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define unbind_sock_reply { + u32 context; + i32 retval; +}; + +/** \brief vpp/server->client, connect reply + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param handle - connection handle + @param server_rx_fifo - rx (vpp -> vpp-client) fifo address + @param server_tx_fifo - tx (vpp-client -> vpp) fifo address + @param vpp_event_queue_address - vpp's event queue address + @param client_event_queue_address - client's event queue address + @param segment_name_length - non-zero if the client needs to attach to + the fifo segment + @param segment_name - set if the client needs to attach to the segment +*/ +define connect_sock_reply { + u32 context; + i32 retval; + u64 handle; + u64 server_rx_fifo; + u64 server_tx_fifo; + u64 client_event_queue_address; + u64 vpp_event_queue_address; + u32 segment_size; + u8 segment_name_length; + u8 segment_name[128]; +}; + +/** \brief bidirectional disconnect API + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param handle - session handle obtained through accept/connect +*/ +define disconnect_sock { + u32 client_index; + u32 context; + u64 handle; +}; + +/** \brief bidirectional disconnect reply API + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param client_context - sender context, to match reply w/ request + @param handle - session handle obtained through accept/connect +*/ +define disconnect_sock_reply { + u32 client_index; + u32 context; + i32 retval; + u64 handle; +}; + +/** \brief vpp->client, accept this session + @param context - sender context, to match reply w/ request + @param accept_cookie - tells client which bind flavor just occurred + @param handle - session handle obtained through accept/connect + @param rx_fifo_address - rx (vpp -> vpp-client) fifo address + @param tx_fifo_address - tx (vpp-client -> vpp) fifo address + @param vpp_event_queue_address - vpp's event queue address +*/ +define accept_sock { + u32 client_index; + u32 context; + u32 accept_cookie; + u64 handle; + u64 server_rx_fifo; + u64 server_tx_fifo; + u64 vpp_event_queue_address; +}; + +/** \brief client->vpp, reply to an accept message + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param handle - session handle obtained through accept/connect +*/ +define accept_sock_reply { + u32 context; + i32 retval; + u64 handle; +}; + +/** \brief vpp->client reset session API + @param client_index - opaque cookie to identify the sender + client to 
vpp direction only + @param context - sender context, to match reply w/ request + @param handle - session handle obtained through accept/connect +*/ +define reset_sock { + u32 client_index; + u32 context; + u64 handle; +}; + +/** \brief client->vpp reset session reply + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param handle - session handle obtained through accept/connect +*/ +define reset_sock_reply { + u32 client_index; + u32 context; + i32 retval; + u64 handle; +}; +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ \ No newline at end of file diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c new file mode 100644 index 00000000..539da613 --- /dev/null +++ b/src/vnet/session/session.c @@ -0,0 +1,1286 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file + * @brief Session and session manager + */ + +#include +#include +#include +#include +#include + +/** + * Per-type vector of transport protocol virtual function tables + */ +static transport_proto_vft_t *tp_vfts; + +session_manager_main_t session_manager_main; + +/* + * Session lookup key; (src-ip, dst-ip, src-port, dst-port, session-type) + * Value: (owner thread index << 32 | session_index); + */ +static void +stream_session_table_add_for_tc (u8 sst, transport_connection_t * tc, + u64 value) +{ + session_manager_main_t *smm = &session_manager_main; + session_kv4_t kv4; + session_kv6_t kv6; + + switch (sst) + { + case SESSION_TYPE_IP4_UDP: + case SESSION_TYPE_IP4_TCP: + make_v4_ss_kv_from_tc (&kv4, tc); + kv4.value = value; + clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4, 1 /* is_add */ ); + break; + case SESSION_TYPE_IP6_UDP: + case SESSION_TYPE_IP6_TCP: + make_v6_ss_kv_from_tc (&kv6, tc); + kv6.value = value; + clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6, 1 /* is_add */ ); + break; + default: + clib_warning ("Session type not supported"); + ASSERT (0); + } +} + +void +stream_session_table_add (session_manager_main_t * smm, stream_session_t * s, + u64 value) +{ + transport_connection_t *tc; + + tc = tp_vfts[s->session_type].get_connection (s->connection_index, + s->thread_index); + stream_session_table_add_for_tc (s->session_type, tc, value); +} + +static void +stream_session_half_open_table_add (u8 sst, transport_connection_t * tc, + u64 value) +{ + session_manager_main_t *smm = &session_manager_main; + session_kv4_t kv4; + session_kv6_t kv6; + + switch (sst) + { + case SESSION_TYPE_IP4_UDP: + case SESSION_TYPE_IP4_TCP: + make_v4_ss_kv_from_tc (&kv4, tc); + kv4.value = value; + clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4, + 1 /* is_add */ ); + break; + case SESSION_TYPE_IP6_UDP: + case SESSION_TYPE_IP6_TCP: + make_v6_ss_kv_from_tc (&kv6, tc); + kv6.value = value; + clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6, + 1 /* is_add */ ); + break; + default: + clib_warning 
("Session type not supported"); + ASSERT (0); + } +} + +static int +stream_session_table_del_for_tc (session_manager_main_t * smm, u8 sst, + transport_connection_t * tc) +{ + session_kv4_t kv4; + session_kv6_t kv6; + + switch (sst) + { + case SESSION_TYPE_IP4_UDP: + case SESSION_TYPE_IP4_TCP: + make_v4_ss_kv_from_tc (&kv4, tc); + return clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4, + 0 /* is_add */ ); + break; + case SESSION_TYPE_IP6_UDP: + case SESSION_TYPE_IP6_TCP: + make_v6_ss_kv_from_tc (&kv6, tc); + return clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6, + 0 /* is_add */ ); + break; + default: + clib_warning ("Session type not supported"); + ASSERT (0); + } + + return 0; +} + +static int +stream_session_table_del (session_manager_main_t * smm, stream_session_t * s) +{ + transport_connection_t *ts; + + ts = tp_vfts[s->session_type].get_connection (s->connection_index, + s->thread_index); + return stream_session_table_del_for_tc (smm, s->session_type, ts); +} + +static void +stream_session_half_open_table_del (session_manager_main_t * smm, u8 sst, + transport_connection_t * tc) +{ + session_kv4_t kv4; + session_kv6_t kv6; + + switch (sst) + { + case SESSION_TYPE_IP4_UDP: + case SESSION_TYPE_IP4_TCP: + make_v4_ss_kv_from_tc (&kv4, tc); + clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4, + 0 /* is_add */ ); + break; + case SESSION_TYPE_IP6_UDP: + case SESSION_TYPE_IP6_TCP: + make_v6_ss_kv_from_tc (&kv6, tc); + clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6, + 0 /* is_add */ ); + break; + default: + clib_warning ("Session type not supported"); + ASSERT (0); + } +} + +stream_session_t * +stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto) +{ + session_manager_main_t *smm = &session_manager_main; + session_kv4_t kv4; + int rv; + + make_v4_listener_kv (&kv4, lcl, lcl_port, proto); + rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); + if (rv == 0) + return pool_elt_at_index (smm->listen_sessions[proto], (u32) kv4.value); + + /* Zero out the lcl ip */ + kv4.key[0] = 0; + rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); + if (rv == 0) + return pool_elt_at_index (smm->listen_sessions[proto], kv4.value); + + return 0; +} + +/** Looks up a session based on the 5-tuple passed as argument. 
+ * + * First it tries to find an established session, if this fails, it tries + * finding a listener session if this fails, it tries a lookup with a + * wildcarded local source (listener bound to all interfaces) + */ +stream_session_t * +stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto, + u32 my_thread_index) +{ + session_manager_main_t *smm = &session_manager_main; + session_kv4_t kv4; + int rv; + + /* Lookup session amongst established ones */ + make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); + if (rv == 0) + return stream_session_get_tsi (kv4.value, my_thread_index); + + /* If nothing is found, check if any listener is available */ + return stream_session_lookup_listener4 (lcl, lcl_port, proto); +} + +stream_session_t * +stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto) +{ + session_manager_main_t *smm = &session_manager_main; + session_kv6_t kv6; + int rv; + + make_v6_listener_kv (&kv6, lcl, lcl_port, proto); + rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); + if (rv == 0) + return pool_elt_at_index (smm->listen_sessions[proto], kv6.value); + + /* Zero out the lcl ip */ + kv6.key[0] = kv6.key[1] = 0; + rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); + if (rv == 0) + return pool_elt_at_index (smm->listen_sessions[proto], kv6.value); + + return 0; +} + +/* Looks up a session based on the 5-tuple passed as argument. + * First it tries to find an established session, if this fails, it tries + * finding a listener session if this fails, it tries a lookup with a + * wildcarded local source (listener bound to all interfaces) */ +stream_session_t * +stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto, + u32 my_thread_index) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + session_kv6_t kv6; + int rv; + + make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); + if (rv == 0) + return stream_session_get_tsi (kv6.value, my_thread_index); + + /* If nothing is found, check if any listener is available */ + return stream_session_lookup_listener6 (lcl, lcl_port, proto); +} + +stream_session_t * +stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto) +{ + switch (proto) + { + case SESSION_TYPE_IP4_UDP: + case SESSION_TYPE_IP4_TCP: + return stream_session_lookup_listener4 (&lcl->ip4, lcl_port, proto); + break; + case SESSION_TYPE_IP6_UDP: + case SESSION_TYPE_IP6_TCP: + return stream_session_lookup_listener6 (&lcl->ip6, lcl_port, proto); + break; + } + return 0; +} + +static u64 +stream_session_half_open_lookup (session_manager_main_t * smm, + ip46_address_t * lcl, ip46_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto) +{ + session_kv4_t kv4; + session_kv6_t kv6; + int rv; + + switch (proto) + { + case SESSION_TYPE_IP4_UDP: + case SESSION_TYPE_IP4_TCP: + make_v4_ss_kv (&kv4, &lcl->ip4, &rmt->ip4, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); + + if (rv == 0) + return kv4.value; + + return (u64) ~ 0; + break; + case SESSION_TYPE_IP6_UDP: + case SESSION_TYPE_IP6_TCP: + make_v6_ss_kv (&kv6, &lcl->ip6, &rmt->ip6, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); + + if (rv == 0) + return kv6.value; + + return (u64) ~ 0; + break; + } + 
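  /* Fall-through for unsupported session types; the four known types all
   * return from the switch above. */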
return 0; +} + +transport_connection_t * +stream_session_lookup_transport4 (session_manager_main_t * smm, + ip4_address_t * lcl, ip4_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto, + u32 my_thread_index) +{ + session_kv4_t kv4; + stream_session_t *s; + int rv; + + /* Lookup session amongst established ones */ + make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); + if (rv == 0) + { + s = stream_session_get_tsi (kv4.value, my_thread_index); + + return tp_vfts[s->session_type].get_connection (s->connection_index, + my_thread_index); + } + + /* If nothing is found, check if any listener is available */ + s = stream_session_lookup_listener4 (lcl, lcl_port, proto); + if (s) + return tp_vfts[s->session_type].get_listener (s->connection_index); + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); + if (rv == 0) + return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF); + + return 0; +} + +transport_connection_t * +stream_session_lookup_transport6 (session_manager_main_t * smm, + ip6_address_t * lcl, ip6_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto, + u32 my_thread_index) +{ + stream_session_t *s; + session_kv6_t kv6; + int rv; + + make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); + if (rv == 0) + { + s = stream_session_get_tsi (kv6.value, my_thread_index); + + return tp_vfts[s->session_type].get_connection (s->connection_index, + my_thread_index); + } + + /* If nothing is found, check if any listener is available */ + s = stream_session_lookup_listener6 (lcl, lcl_port, proto); + if (s) + return tp_vfts[s->session_type].get_listener (s->connection_index); + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); + if (rv == 0) + return tp_vfts[s->session_type].get_half_open (kv6.value & 0xFFFFFFFF); + + return 0; +} + +/** + * Allocate vpp event queue (once) per worker thread + */ +void +vpp_session_event_queue_allocate (session_manager_main_t * smm, + u32 thread_index) +{ + api_main_t *am = &api_main; + void *oldheap; + + if (smm->vpp_event_queues[thread_index] == 0) + { + /* Allocate event fifo in the /vpe-api shared-memory segment */ + oldheap = svm_push_data_heap (am->vlib_rp); + + smm->vpp_event_queues[thread_index] = + unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , + sizeof (session_fifo_event_t), + 0 /* consumer pid */ , + 0 + /* (do not) send signal when queue non-empty */ + ); + + svm_pop_heap (oldheap); + } +} + +void +session_manager_get_segment_info (u32 index, u8 ** name, u32 * size) +{ + svm_fifo_segment_private_t *s; + s = svm_fifo_get_segment (index); + *name = s->h->segment_name; + *size = s->ssvm.ssvm_size; +} + +always_inline int +session_manager_add_segment_i (session_manager_main_t * smm, + session_manager_t * sm, + u32 segment_size, u8 * segment_name) +{ + svm_fifo_segment_create_args_t _ca, *ca = &_ca; + int rv; + + memset (ca, 0, sizeof (*ca)); + + ca->segment_name = (char *) segment_name; + ca->segment_size = segment_size; + + rv = svm_fifo_segment_create (ca); + if (rv) + { + clib_warning ("svm_fifo_segment_create ('%s', %d) failed", + ca->segment_name, ca->segment_size); + vec_free (segment_name); + return -1; + } + + vec_add1 (sm->segment_indices, ca->new_segment_index); + + return 0; +} + +static int +session_manager_add_segment (session_manager_main_t * smm, + 
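session_manager_add_segment, whose definition continues just below, names every additional fifo segment "<pid>-<counter>" so that segments from different processes and successive allocations never collide. A small standalone sketch of the same naming scheme, using snprintf in place of vppinfra's format():

#include <stdio.h>
#include <unistd.h>

/* Per-process counter, mirroring smm->unique_segment_name_counter. */
static unsigned segment_name_counter;

static void
make_segment_name (char *buf, size_t len)
{
  snprintf (buf, len, "%d-%u", (int) getpid (), segment_name_counter++);
}

int
main (void)
{
  char name[64];
  make_segment_name (name, sizeof (name));
  puts (name);          /* e.g. "12345-0" */
  return 0;
}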
session_manager_t * sm) +{ + u8 *segment_name; + svm_fifo_segment_create_args_t _ca, *ca = &_ca; + u32 add_segment_size; + u32 default_segment_size = 128 << 10; + + memset (ca, 0, sizeof (*ca)); + segment_name = format (0, "%d-%d%c", getpid (), + smm->unique_segment_name_counter++, 0); + add_segment_size = + sm->add_segment_size ? sm->add_segment_size : default_segment_size; + + return session_manager_add_segment_i (smm, sm, add_segment_size, + segment_name); +} + +int +session_manager_add_first_segment (session_manager_main_t * smm, + session_manager_t * sm, u32 segment_size, + u8 ** segment_name) +{ + svm_fifo_segment_create_args_t _ca, *ca = &_ca; + memset (ca, 0, sizeof (*ca)); + *segment_name = format (0, "%d-%d%c", getpid (), + smm->unique_segment_name_counter++, 0); + return session_manager_add_segment_i (smm, sm, segment_size, *segment_name); +} + +void +session_manager_del (session_manager_main_t * smm, session_manager_t * sm) +{ + u32 *deleted_sessions = 0; + u32 *deleted_thread_indices = 0; + int i, j; + + /* Across all fifo segments used by the server */ + for (j = 0; j < vec_len (sm->segment_indices); j++) + { + svm_fifo_segment_private_t *fifo_segment; + svm_fifo_t **fifos; + /* Vector of fifos allocated in the segment */ + fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); + fifos = (svm_fifo_t **) fifo_segment->h->fifos; + + /* + * Remove any residual sessions from the session lookup table + * Don't bother deleting the individual fifos, we're going to + * throw away the fifo segment in a minute. + */ + for (i = 0; i < vec_len (fifos); i++) + { + svm_fifo_t *fifo; + u32 session_index, thread_index; + stream_session_t *session; + + fifo = fifos[i]; + session_index = fifo->server_session_index; + thread_index = fifo->server_thread_index; + + session = pool_elt_at_index (smm->sessions[thread_index], + session_index); + + /* Add to the deleted_sessions vector (once!) */ + if (!session->is_deleted) + { + session->is_deleted = 1; + vec_add1 (deleted_sessions, + session - smm->sessions[thread_index]); + vec_add1 (deleted_thread_indices, thread_index); + } + } + + for (i = 0; i < vec_len (deleted_sessions); i++) + { + stream_session_t *session; + + session = + pool_elt_at_index (smm->sessions[deleted_thread_indices[i]], + deleted_sessions[i]); + + /* Instead of directly removing the session call disconnect */ + stream_session_disconnect (session); + + /* + stream_session_table_del (smm, session); + pool_put(smm->sessions[deleted_thread_indices[i]], session); + */ + } + + vec_reset_length (deleted_sessions); + vec_reset_length (deleted_thread_indices); + + /* Instead of removing the segment, test when removing the session if + * the segment can be removed + */ + /* svm_fifo_segment_delete (fifo_segment); */ + } + + vec_free (deleted_sessions); + vec_free (deleted_thread_indices); +} + +int +session_manager_allocate_session_fifos (session_manager_main_t * smm, + session_manager_t * sm, + svm_fifo_t ** server_rx_fifo, + svm_fifo_t ** server_tx_fifo, + u32 * fifo_segment_index, + u8 * added_a_segment) +{ + svm_fifo_segment_private_t *fifo_segment; + u32 fifo_size, default_fifo_size = 8192 /* TODO config */ ; + int i; + + *added_a_segment = 0; + + /* Allocate svm fifos */ + ASSERT (vec_len (sm->segment_indices)); + +again: + for (i = 0; i < vec_len (sm->segment_indices); i++) + { + *fifo_segment_index = sm->segment_indices[i]; + fifo_segment = svm_fifo_get_segment (*fifo_segment_index); + + fifo_size = sm->rx_fifo_size; + fifo_size = (fifo_size == 0) ? 
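session_manager_allocate_session_fifos, split across this point, applies two simple policies: each fifo uses the manager's configured size when it is non-zero and a default otherwise, and when no existing segment can hold an rx/tx pair the manager grows by exactly one extra segment and retries once before giving up. A hedged, standalone sketch of that control flow, with the shared-memory fifo allocator reduced to malloc:

#include <stdlib.h>

#define DEFAULT_FIFO_SIZE 8192  /* mirrors the in-tree default */

/* Toy stand-ins for svm_fifo_segment_alloc_fifo() and
 * session_manager_add_segment(). */
static void *
try_alloc_fifo (size_t size)
{
  return malloc (size);
}

static int
add_segment (void)
{
  return 0;                     /* pretend the new segment was created */
}

static int
alloc_fifo_pair (size_t cfg_rx, size_t cfg_tx, void **rx, void **tx)
{
  size_t rx_size = cfg_rx ? cfg_rx : DEFAULT_FIFO_SIZE;
  size_t tx_size = cfg_tx ? cfg_tx : DEFAULT_FIFO_SIZE;
  int added_a_segment = 0;

again:
  *rx = try_alloc_fifo (rx_size);
  *tx = try_alloc_fifo (tx_size);
  if (*rx && *tx)
    return 0;                   /* pair allocated */

  free (*rx);                   /* never keep half a pair */
  free (*tx);
  *rx = *tx = 0;

  if (!added_a_segment && add_segment () == 0)
    {
      added_a_segment = 1;      /* grow once, then retry */
      goto again;
    }
  return -1;                    /* no space even after growing */
}

int
main (void)
{
  void *rx, *tx;
  return alloc_fifo_pair (0, 0, &rx, &tx);
}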
default_fifo_size : fifo_size; + *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + + fifo_size = sm->tx_fifo_size; + fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; + *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + + if (*server_rx_fifo == 0) + { + /* This would be very odd, but handle it... */ + if (*server_tx_fifo != 0) + { + svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo); + *server_tx_fifo = 0; + } + continue; + } + if (*server_tx_fifo == 0) + { + if (*server_rx_fifo != 0) + { + svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo); + *server_rx_fifo = 0; + } + continue; + } + break; + } + + /* See if we're supposed to create another segment */ + if (*server_rx_fifo == 0) + { + if (sm->add_segment) + { + if (*added_a_segment) + { + clib_warning ("added a segment, still cant allocate a fifo"); + return SESSION_ERROR_NEW_SEG_NO_SPACE; + } + + if (session_manager_add_segment (smm, sm)) + return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + + *added_a_segment = 1; + goto again; + } + else + return SESSION_ERROR_NO_SPACE; + } + return 0; +} + +int +stream_session_create_i (session_manager_main_t * smm, application_t * app, + transport_connection_t * tc, + stream_session_t ** ret_s) +{ + int rv; + svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0; + u32 fifo_segment_index; + u32 pool_index, seg_size; + stream_session_t *s; + u64 value; + u32 thread_index = tc->thread_index; + session_manager_t *sm; + u8 segment_added; + u8 *seg_name; + + sm = session_manager_get (app->session_manager_index); + + /* Check the API queue */ + if (app->mode == APP_SERVER && application_api_queue_is_full (app)) + return SESSION_ERROR_API_QUEUE_FULL; + + if ((rv = session_manager_allocate_session_fifos (smm, sm, &server_rx_fifo, + &server_tx_fifo, + &fifo_segment_index, + &segment_added))) + return rv; + + if (segment_added && app->mode == APP_SERVER) + { + /* Send an API message to the external server, to map new segment */ + ASSERT (app->cb_fns.add_segment_callback); + + session_manager_get_segment_info (fifo_segment_index, &seg_name, + &seg_size); + if (app->cb_fns.add_segment_callback (app->api_client_index, seg_name, + seg_size)) + return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + } + + /* Create the session */ + pool_get (smm->sessions[thread_index], s); + memset (s, 0, sizeof (*s)); + + /* Initialize backpointers */ + pool_index = s - smm->sessions[thread_index]; + server_rx_fifo->server_session_index = pool_index; + server_rx_fifo->server_thread_index = thread_index; + + server_tx_fifo->server_session_index = pool_index; + server_tx_fifo->server_thread_index = thread_index; + + s->server_rx_fifo = server_rx_fifo; + s->server_tx_fifo = server_tx_fifo; + + /* Initialize state machine, such as it is... */ + s->session_type = app->session_type; + s->session_state = SESSION_STATE_CONNECTING; + s->app_index = application_get_index (app); + s->server_segment_index = fifo_segment_index; + s->thread_index = thread_index; + s->session_index = pool_index; + + /* Attach transport to session */ + s->connection_index = tc->c_index; + + /* Attach session to transport */ + tc->s_index = s->session_index; + + /* Add to the main lookup table */ + value = (((u64) thread_index) << 32) | (u64) s->session_index; + stream_session_table_add_for_tc (app->session_type, tc, value); + + *ret_s = s; + + return 0; +} + +/* + * Enqueue data for delivery to session peer. 
Does not notify peer of enqueue + * event but on request can queue notification events for later delivery by + * calling stream_server_flush_enqueue_events(). + * + * @param tc Transport connection which is to be enqueued data + * @param data Data to be enqueued + * @param len Length of data to be enqueued + * @param queue_event Flag to indicate if peer is to be notified or if event + * is to be queued. The former is useful when more data is + * enqueued and only one event is to be generated. + * @return Number of bytes enqueued or a negative value if enqueueing failed. + */ +int +stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, + u8 queue_event) +{ + stream_session_t *s; + int enqueued; + + s = stream_session_get (tc->s_index, tc->thread_index); + + /* Make sure there's enough space left. We might've filled the pipes */ + if (PREDICT_FALSE (len > svm_fifo_max_enqueue (s->server_rx_fifo))) + return -1; + + enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, s->pid, len, data); + + if (queue_event) + { + /* Queue RX event on this fifo. Eventually these will need to be flushed + * by calling stream_server_flush_enqueue_events () */ + session_manager_main_t *smm = vnet_get_session_manager_main (); + u32 thread_index = s->thread_index; + u32 my_enqueue_epoch = smm->current_enqueue_epoch[thread_index]; + + if (s->enqueue_epoch != my_enqueue_epoch) + { + s->enqueue_epoch = my_enqueue_epoch; + vec_add1 (smm->session_indices_to_enqueue_by_thread[thread_index], + s - smm->sessions[thread_index]); + } + } + + return enqueued; +} + +/** Check if we have space in rx fifo to push more bytes */ +u8 +stream_session_no_space (transport_connection_t * tc, u32 thread_index, + u16 data_len) +{ + stream_session_t *s = stream_session_get (tc->c_index, thread_index); + + if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY)) + return 1; + + if (data_len > svm_fifo_max_enqueue (s->server_rx_fifo)) + return 1; + + return 0; +} + +u32 +stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, + u32 offset, u32 max_bytes) +{ + stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); + return svm_fifo_peek (s->server_tx_fifo, s->pid, offset, max_bytes, buffer); +} + +u32 +stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes) +{ + stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); + return svm_fifo_dequeue_drop (s->server_tx_fifo, s->pid, max_bytes); +} + +/** + * Notify session peer that new data has been enqueued. + * + * @param s Stream session for which the event is to be generated. + * @param block Flag to indicate if call should block if event queue is full. + * + * @return 0 on succes or negative number if failed to send notification. 
+ */ +static int +stream_session_enqueue_notify (stream_session_t * s, u8 block) +{ + application_t *app; + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + static u32 serial_number; + + if (PREDICT_FALSE (s->session_state == SESSION_STATE_CLOSED)) + return 0; + + /* Get session's server */ + app = application_get (s->app_index); + + /* Fabricate event */ + evt.fifo = s->server_rx_fifo; + evt.event_type = FIFO_EVENT_SERVER_RX; + evt.event_id = serial_number++; + evt.enqueue_length = svm_fifo_max_dequeue (s->server_rx_fifo); + + /* Add event to server's event queue */ + q = app->event_queue; + + /* Based on request block (or not) for lack of space */ + if (block || PREDICT_TRUE (q->cursize < q->maxsize)) + unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt, + 0 /* do wait for mutex */ ); + else + return -1; + + if (1) + { + ELOG_TYPE_DECLARE (e) = + { + .format = "evt-enqueue: id %d length %d",.format_args = "i4i4",}; + struct + { + u32 data[2]; + } *ed; + ed = ELOG_DATA (&vlib_global_main.elog_main, e); + ed->data[0] = evt.event_id; + ed->data[1] = evt.enqueue_length; + } + + return 0; +} + +/** + * Flushes queue of sessions that are to be notified of new data + * enqueued events. + * + * @param thread_index Thread index for which the flush is to be performed. + * @return 0 on success or a positive number indicating the number of + * failures due to API queue being full. + */ +int +session_manager_flush_enqueue_events (u32 thread_index) +{ + session_manager_main_t *smm = &session_manager_main; + u32 *session_indices_to_enqueue; + int i, errors = 0; + + session_indices_to_enqueue = + smm->session_indices_to_enqueue_by_thread[thread_index]; + + for (i = 0; i < vec_len (session_indices_to_enqueue); i++) + { + stream_session_t *s0; + + /* Get session */ + s0 = stream_session_get (session_indices_to_enqueue[i], thread_index); + if (stream_session_enqueue_notify (s0, 0 /* don't block */ )) + { + errors++; + } + } + + vec_reset_length (session_indices_to_enqueue); + + smm->session_indices_to_enqueue_by_thread[thread_index] = + session_indices_to_enqueue; + + /* Increment enqueue epoch for next round */ + smm->current_enqueue_epoch[thread_index]++; + + return errors; +} + +/* + * Start listening on server's ip/port pair for requested transport. + * + * Creates a 'dummy' stream session with state LISTENING to be used in session + * lookups, prior to establishing connection. Requests transport to build + * it's own specific listening connection. 
+ */ +int +stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port) +{ + session_manager_main_t *smm = &session_manager_main; + stream_session_t *s; + transport_connection_t *tc; + application_t *srv; + u32 tci; + + srv = application_get (server_index); + + pool_get (smm->listen_sessions[srv->session_type], s); + memset (s, 0, sizeof (*s)); + + s->session_type = srv->session_type; + s->session_state = SESSION_STATE_LISTENING; + s->session_index = s - smm->listen_sessions[srv->session_type]; + s->app_index = srv->index; + + /* Transport bind/listen */ + tci = tp_vfts[srv->session_type].bind (smm->vlib_main, s->session_index, ip, + port); + + /* Attach transport to session */ + s->connection_index = tci; + tc = tp_vfts[srv->session_type].get_listener (tci); + + srv->session_index = s->session_index; + + /* Add to the main lookup table */ + stream_session_table_add_for_tc (s->session_type, tc, s->session_index); + + return 0; +} + +void +stream_session_stop_listen (u32 server_index) +{ + session_manager_main_t *smm = &session_manager_main; + stream_session_t *listener; + transport_connection_t *tc; + application_t *srv; + + srv = application_get (server_index); + listener = pool_elt_at_index (smm->listen_sessions[srv->session_type], + srv->session_index); + + tc = tp_vfts[srv->session_type].get_listener (listener->connection_index); + stream_session_table_del_for_tc (smm, listener->session_type, tc); + + tp_vfts[srv->session_type].unbind (smm->vlib_main, + listener->connection_index); + pool_put (smm->listen_sessions[srv->session_type], listener); +} + +int +connect_server_add_segment_cb (application_t * ss, char *segment_name, + u32 segment_size) +{ + /* Does exactly nothing, but die */ + ASSERT (0); + return 0; +} + +void +connects_session_manager_init (session_manager_main_t * smm, u8 session_type) +{ + session_manager_t *sm; + u32 connect_fifo_size = 8 << 10; /* Config? 
*/ + u32 default_segment_size = 1 << 20; + + pool_get (smm->session_managers, sm); + memset (sm, 0, sizeof (*sm)); + + sm->add_segment_size = default_segment_size; + sm->rx_fifo_size = connect_fifo_size; + sm->tx_fifo_size = connect_fifo_size; + sm->add_segment = 1; + + session_manager_add_segment (smm, sm); + smm->connect_manager_index[session_type] = sm - smm->session_managers; +} + +void +stream_session_connect_notify (transport_connection_t * tc, u8 sst, + u8 is_fail) +{ + session_manager_main_t *smm = &session_manager_main; + application_t *app; + stream_session_t *new_s = 0; + u64 value; + + value = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip, + tc->lcl_port, tc->rmt_port, + tc->proto); + if (value == HALF_OPEN_LOOKUP_INVALID_VALUE) + { + clib_warning ("This can't be good!"); + return; + } + + app = application_get (value >> 32); + + if (!is_fail) + { + /* Create new session (server segments are allocated if needed) */ + if (stream_session_create_i (smm, app, tc, &new_s)) + return; + + app->session_index = stream_session_get_index (new_s); + app->thread_index = new_s->thread_index; + + /* Allocate vpp event queue for this thread if needed */ + vpp_session_event_queue_allocate (smm, tc->thread_index); + } + + /* Notify client */ + app->cb_fns.session_connected_callback (app->api_client_index, new_s, + is_fail); + + /* Cleanup session lookup */ + stream_session_half_open_table_del (smm, sst, tc); +} + +void +stream_session_accept_notify (transport_connection_t * tc) +{ + application_t *server; + stream_session_t *s; + + s = stream_session_get (tc->s_index, tc->thread_index); + server = application_get (s->app_index); + server->cb_fns.session_accept_callback (s); +} + +/** + * Notification from transport that connection is being closed. + * + * A disconnect is sent to application but state is not removed. Once + * disconnect is acknowledged by application, session disconnect is called. + * Ultimately this leads to close being called on transport (passive close). + */ +void +stream_session_disconnect_notify (transport_connection_t * tc) +{ + application_t *server; + stream_session_t *s; + + s = stream_session_get (tc->s_index, tc->thread_index); + server = application_get (s->app_index); + server->cb_fns.session_disconnect_callback (s); +} + +/** + * Cleans up session and associated app if needed. 
+ */ +void +stream_session_delete (stream_session_t * s) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + svm_fifo_segment_private_t *fifo_segment; + application_t *app; + int rv; + + /* delete from the main lookup table */ + rv = stream_session_table_del (smm, s); + + if (rv) + clib_warning ("hash delete error, rv %d", rv); + + /* Cleanup fifo segments */ + fifo_segment = svm_fifo_get_segment (s->server_segment_index); + svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo); + + /* Cleanup app if client */ + app = application_get (s->app_index); + if (app->mode == APP_CLIENT) + { + application_del (app); + } + else if (app->mode == APP_SERVER) + { + session_manager_t *sm; + svm_fifo_segment_private_t *fifo_segment; + svm_fifo_t **fifos; + u32 fifo_index; + + sm = session_manager_get (app->session_manager_index); + + /* Delete fifo */ + fifo_segment = svm_fifo_get_segment (s->server_segment_index); + fifos = (svm_fifo_t **) fifo_segment->h->fifos; + + fifo_index = svm_fifo_segment_index (fifo_segment); + + /* Remove segment only if it holds no fifos and not the first */ + if (sm->segment_indices[0] != fifo_index && vec_len (fifos) == 0) + svm_fifo_segment_delete (fifo_segment); + } + + pool_put (smm->sessions[s->thread_index], s); +} + +/** + * Notification from transport that connection is being deleted + * + * This should be called only on previously fully established sessions. For + * instance failed connects should call stream_session_connect_notify and + * indicate that the connect has failed. + */ +void +stream_session_delete_notify (transport_connection_t * tc) +{ + stream_session_t *s; + + s = stream_session_get_if_valid (tc->s_index, tc->thread_index); + if (!s) + { + clib_warning ("Surprised!"); + return; + } + stream_session_delete (s); +} + +/** + * Notify application that connection has been reset. + */ +void +stream_session_reset_notify (transport_connection_t * tc) +{ + stream_session_t *s; + application_t *app; + s = stream_session_get (tc->s_index, tc->thread_index); + + app = application_get (s->app_index); + app->cb_fns.session_reset_callback (s); +} + +/** + * Accept a stream session. Optionally ping the server by callback. 
+ */ +int +stream_session_accept (transport_connection_t * tc, u32 listener_index, + u8 sst, u8 notify) +{ + session_manager_main_t *smm = &session_manager_main; + application_t *server; + stream_session_t *s, *listener; + + int rv; + + /* Find the server */ + listener = pool_elt_at_index (smm->listen_sessions[sst], listener_index); + server = application_get (listener->app_index); + + if ((rv = stream_session_create_i (smm, server, tc, &s))) + return rv; + + /* Allocate vpp event queue for this thread if needed */ + vpp_session_event_queue_allocate (smm, tc->thread_index); + + /* Shoulder-tap the server */ + if (notify) + { + server->cb_fns.session_accept_callback (s); + } + + return 0; +} + +void +stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, + u32 app_index) +{ + transport_connection_t *tc; + u32 tci; + u64 value; + + /* Ask transport to open connection */ + tci = tp_vfts[sst].open (addr, port_host_byte_order); + + /* Get transport connection */ + tc = tp_vfts[sst].get_half_open (tci); + + /* Store api_client_index and transport connection index */ + value = (((u64) app_index) << 32) | (u64) tc->c_index; + + /* Add to the half-open lookup table */ + stream_session_half_open_table_add (sst, tc, value); +} + +/** + * Disconnect session and propagate to transport. This should eventually + * result in a delete notification that allows us to cleanup session state. + * Called for both active/passive disconnects. + */ +void +stream_session_disconnect (stream_session_t * s) +{ + tp_vfts[s->session_type].close (s->connection_index, s->thread_index); + s->session_state = SESSION_STATE_CLOSED; +} + +/** + * Cleanup transport and session state. + */ +void +stream_session_cleanup (stream_session_t * s) +{ + tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index); + stream_session_delete (s); +} + +void +session_register_transport (u8 type, const transport_proto_vft_t * vft) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + + vec_validate (tp_vfts, type); + tp_vfts[type] = *vft; + + /* If an offset function is provided, then peek instead of dequeue */ + smm->session_rx_fns[type] = + (vft->rx_fifo_offset) ? 
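session_register_transport, split across this point, copies the transport's virtual function table into the per-type vector and, as the tail of the assignment shows, picks the rx path: transports that expose rx_fifo_offset get the peek-based function, the rest the dequeue-based one. A minimal standalone sketch of the same table-driven dispatch, using hypothetical reduced types:

#include <stdio.h>

/* Hypothetical, much-reduced transport vft: only the field that
 * drives rx-function selection is kept. */
typedef struct
{
  int has_rx_fifo_offset;       /* stands in for vft->rx_fifo_offset */
} toy_transport_vft_t;

typedef void (toy_rx_fn) (void);
static void rx_peek (void)    { puts ("peek");    }
static void rx_dequeue (void) { puts ("dequeue"); }

#define N_TYPES 4
static toy_transport_vft_t vfts[N_TYPES];
static toy_rx_fn *rx_fns[N_TYPES];

static void
register_transport (int type, const toy_transport_vft_t * vft)
{
  vfts[type] = *vft;
  rx_fns[type] = vft->has_rx_fifo_offset ? rx_peek : rx_dequeue;
}

int
main (void)
{
  toy_transport_vft_t tcp_like = { .has_rx_fifo_offset = 1 };
  register_transport (0, &tcp_like);
  rx_fns[0] ();                 /* prints "peek" */
  return 0;
}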
session_fifo_rx_peek : session_fifo_rx_dequeue; +} + +transport_proto_vft_t * +session_get_transport_vft (u8 type) +{ + if (type >= vec_len (tp_vfts)) + return 0; + return &tp_vfts[type]; +} + +static clib_error_t * +session_manager_main_init (vlib_main_t * vm) +{ + u32 num_threads; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + session_manager_main_t *smm = &session_manager_main; + int i; + + smm->vlib_main = vm; + smm->vnet_main = vnet_get_main (); + + num_threads = 1 /* main thread */ + vtm->n_threads; + + if (num_threads < 1) + return clib_error_return (0, "n_thread_stacks not set"); + + /* $$$ config parameters */ + svm_fifo_segment_init (0x200000000ULL /* first segment base VA */ , + 20 /* timeout in seconds */ ); + + /* configure per-thread ** vectors */ + vec_validate (smm->sessions, num_threads - 1); + vec_validate (smm->session_indices_to_enqueue_by_thread, num_threads - 1); + vec_validate (smm->tx_buffers, num_threads - 1); + vec_validate (smm->fifo_events, num_threads - 1); + vec_validate (smm->evts_partially_read, num_threads - 1); + vec_validate (smm->current_enqueue_epoch, num_threads - 1); + vec_validate (smm->vpp_event_queues, num_threads - 1); + + /* $$$$ preallocate hack config parameter */ + for (i = 0; i < 200000; i++) + { + stream_session_t *ss; + pool_get (smm->sessions[0], ss); + memset (ss, 0, sizeof (*ss)); + } + + for (i = 0; i < 200000; i++) + pool_put_index (smm->sessions[0], i); + + clib_bihash_init_16_8 (&smm->v4_session_hash, "v4 session table", + 200000 /* $$$$ config parameter nbuckets */ , + (64 << 20) /*$$$ config parameter table size */ ); + clib_bihash_init_48_8 (&smm->v6_session_hash, "v6 session table", + 200000 /* $$$$ config parameter nbuckets */ , + (64 << 20) /*$$$ config parameter table size */ ); + + clib_bihash_init_16_8 (&smm->v4_half_open_hash, "v4 half-open table", + 200000 /* $$$$ config parameter nbuckets */ , + (64 << 20) /*$$$ config parameter table size */ ); + clib_bihash_init_48_8 (&smm->v6_half_open_hash, "v6 half-open table", + 200000 /* $$$$ config parameter nbuckets */ , + (64 << 20) /*$$$ config parameter table size */ ); + + for (i = 0; i < SESSION_N_TYPES; i++) + smm->connect_manager_index[i] = INVALID_INDEX; + + return 0; +} + +VLIB_INIT_FUNCTION (session_manager_main_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h new file mode 100644 index 00000000..cf14cca9 --- /dev/null +++ b/src/vnet/session/session.h @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef __included_session_h__ +#define __included_session_h__ + +#include +#include +#include +#include +#include + +#define HALF_OPEN_LOOKUP_INVALID_VALUE ((u64)~0) +#define INVALID_INDEX ((u32)~0) + +/* TODO decide how much since we have pre-data as well */ +#define MAX_HDRS_LEN 100 /* Max number of bytes for headers */ + +typedef enum +{ + FIFO_EVENT_SERVER_RX, + FIFO_EVENT_SERVER_TX, + FIFO_EVENT_TIMEOUT, + FIFO_EVENT_SERVER_EXIT, +} fifo_event_type_t; + +#define foreach_session_input_error \ +_(NO_SESSION, "No session drops") \ +_(NO_LISTENER, "No listener for dst port drops") \ +_(ENQUEUED, "Packets pushed into rx fifo") \ +_(NOT_READY, "Session not ready packets") \ +_(FIFO_FULL, "Packets dropped for lack of rx fifo space") \ +_(EVENT_FIFO_FULL, "Events not sent for lack of event fifo space") \ +_(API_QUEUE_FULL, "Sessions not created for lack of API queue space") \ +_(NEW_SEG_NO_SPACE, "Created segment, couldn't allocate a fifo pair") \ +_(NO_SPACE, "Couldn't allocate a fifo pair") + +typedef enum +{ +#define _(sym,str) SESSION_ERROR_##sym, + foreach_session_input_error +#undef _ + SESSION_N_ERROR, +} session_error_t; + +/* Event queue input node static next indices */ +typedef enum +{ + SESSION_QUEUE_NEXT_DROP, + SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT, + SESSION_QUEUE_NEXT_IP4_LOOKUP, + SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT, + SESSION_QUEUE_NEXT_IP6_LOOKUP, + SESSION_QUEUE_N_NEXT, +} session_queue_next_t; + +#define foreach_session_type \ + _(IP4_TCP, ip4_tcp) \ + _(IP4_UDP, ip4_udp) \ + _(IP6_TCP, ip6_tcp) \ + _(IP6_UDP, ip6_udp) + +typedef enum +{ +#define _(A, a) SESSION_TYPE_##A, + foreach_session_type +#undef _ + SESSION_N_TYPES, +} session_type_t; + +/* + * Application session state + */ +typedef enum +{ + SESSION_STATE_LISTENING, + SESSION_STATE_CONNECTING, + SESSION_STATE_READY, + SESSION_STATE_CLOSED, + SESSION_STATE_N_STATES, +} stream_session_state_t; + +typedef CLIB_PACKED (struct + { + svm_fifo_t * fifo; + u8 event_type; + /* $$$$ for event logging */ + u16 event_id; + u32 enqueue_length; + }) session_fifo_event_t; + +typedef struct _stream_session_t +{ + /** Type */ + u8 session_type; + + /** State */ + u8 session_state; + + /** Session index in per_thread pool */ + u32 session_index; + + /** Transport specific */ + u32 connection_index; + + u8 thread_index; + + /** Application specific */ + u32 pid; + + /** fifo pointers. Once allocated, these do not move */ + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; + + /** To avoid n**2 "one event per frame" check */ + u8 enqueue_epoch; + + /** used during unbind processing */ + u8 is_deleted; + + /** stream server pool index */ + u32 app_index; + + /** svm segment index */ + u32 server_segment_index; +} stream_session_t; + +typedef struct _session_manager +{ + /** segments mapped by this server */ + u32 *segment_indices; + + /** Session fifo sizes. 
They are provided for binds and take default + * values for connects */ + u32 rx_fifo_size; + u32 tx_fifo_size; + + /** Configured additional segment size */ + u32 add_segment_size; + + /** Flag that indicates if additional segments should be created */ + u8 add_segment; +} session_manager_t; + +/* Forward definition */ +typedef struct _session_manager_main session_manager_main_t; + +typedef int + (session_fifo_rx_fn) (vlib_main_t * vm, vlib_node_runtime_t * node, + session_manager_main_t * smm, + session_fifo_event_t * e0, stream_session_t * s0, + u32 thread_index, int *n_tx_pkts); + +extern session_fifo_rx_fn session_fifo_rx_peek; +extern session_fifo_rx_fn session_fifo_rx_dequeue; + +struct _session_manager_main +{ + /** Lookup tables for established sessions and listeners */ + clib_bihash_16_8_t v4_session_hash; + clib_bihash_48_8_t v6_session_hash; + + /** Lookup tables for half-open sessions */ + clib_bihash_16_8_t v4_half_open_hash; + clib_bihash_48_8_t v6_half_open_hash; + + /** Per worker thread session pools */ + stream_session_t **sessions; + + /** Pool of listen sessions. Same type as stream sessions to ease lookups */ + stream_session_t *listen_sessions[SESSION_N_TYPES]; + + /** Sparse vector to map dst port to stream server */ + u16 *stream_server_by_dst_port[SESSION_N_TYPES]; + + /** per-worker enqueue epoch counters */ + u8 *current_enqueue_epoch; + + /** Per-worker thread vector of sessions to enqueue */ + u32 **session_indices_to_enqueue_by_thread; + + /** per-worker tx buffer free lists */ + u32 **tx_buffers; + + /** Per worker-thread vector of partially read events */ + session_fifo_event_t **evts_partially_read; + + /** per-worker active event vectors */ + session_fifo_event_t **fifo_events; + + /** vpp fifo event queue */ + unix_shared_memory_queue_t **vpp_event_queues; + + /** Unique segment name counter */ + u32 unique_segment_name_counter; + + /* Connection manager used by incoming connects */ + u32 connect_manager_index[SESSION_N_TYPES]; + + session_manager_t *session_managers; + + /** Per transport rx function that can either dequeue or peek */ + session_fifo_rx_fn *session_rx_fns[SESSION_N_TYPES]; + + /* Convenience */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; +}; + +extern session_manager_main_t session_manager_main; + +/* + * Session manager function + */ +always_inline session_manager_main_t * +vnet_get_session_manager_main () +{ + return &session_manager_main; +} + +always_inline session_manager_t * +session_manager_get (u32 index) +{ + return pool_elt_at_index (session_manager_main.session_managers, index); +} + +always_inline unix_shared_memory_queue_t * +session_manager_get_vpp_event_queue (u32 thread_index) +{ + return session_manager_main.vpp_event_queues[thread_index]; +} + +always_inline session_manager_t * +connects_session_manager_get (session_manager_main_t * smm, + session_type_t session_type) +{ + return pool_elt_at_index (smm->session_managers, + smm->connect_manager_index[session_type]); +} + +void session_manager_get_segment_info (u32 index, u8 ** name, u32 * size); +int session_manager_flush_enqueue_events (u32 thread_index); +int +session_manager_add_first_segment (session_manager_main_t * smm, + session_manager_t * sm, u32 segment_size, + u8 ** segment_name); +void +session_manager_del (session_manager_main_t * smm, session_manager_t * sm); +void +connects_session_manager_init (session_manager_main_t * smm, u8 session_type); + +/* + * Stream session functions + */ + +stream_session_t *stream_session_lookup_listener4 (ip4_address_t 
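The per-worker members declared above, current_enqueue_epoch and session_indices_to_enqueue_by_thread, back the "one rx event per session per frame" rule used by stream_session_enqueue_data() and session_manager_flush_enqueue_events(): a session is appended to the notify list only when its stored epoch differs from the worker's, and each flush bumps the worker epoch. A standalone sketch of the idea, with fixed arrays standing in for vectors and pools:

#include <assert.h>

#define MAX_SESSIONS 8

static unsigned char session_epoch[MAX_SESSIONS];
static unsigned current_epoch = 1;
static int to_notify[MAX_SESSIONS];
static int n_to_notify;

/* Called once per enqueued chunk of data; only the first call per
 * session per epoch queues a notification. */
static void
note_rx_event (int session_index)
{
  if (session_epoch[session_index] != (unsigned char) current_epoch)
    {
      session_epoch[session_index] = (unsigned char) current_epoch;
      to_notify[n_to_notify++] = session_index;
    }
}

/* Called at the end of a frame: deliver events, then open a new epoch. */
static int
flush_rx_events (void)
{
  int delivered = n_to_notify;
  n_to_notify = 0;
  current_epoch++;
  return delivered;
}

int
main (void)
{
  note_rx_event (2);
  note_rx_event (2);            /* deduplicated within the epoch */
  assert (flush_rx_events () == 1);
  return 0;
}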
* lcl, + u16 lcl_port, u8 proto); +stream_session_t *stream_session_lookup4 (ip4_address_t * lcl, + ip4_address_t * rmt, u16 lcl_port, + u16 rmt_port, u8 proto, + u32 thread_index); +stream_session_t *stream_session_lookup_listener6 (ip6_address_t * lcl, + u16 lcl_port, u8 proto); +stream_session_t *stream_session_lookup6 (ip6_address_t * lcl, + ip6_address_t * rmt, u16 lcl_port, + u16 rmt_port, u8, u32 thread_index); +transport_connection_t + * stream_session_lookup_transport4 (session_manager_main_t * smm, + ip4_address_t * lcl, + ip4_address_t * rmt, u16 lcl_port, + u16 rmt_port, u8 proto, + u32 thread_index); +transport_connection_t + * stream_session_lookup_transport6 (session_manager_main_t * smm, + ip6_address_t * lcl, + ip6_address_t * rmt, u16 lcl_port, + u16 rmt_port, u8 proto, + u32 thread_index); +stream_session_t *stream_session_lookup_listener (ip46_address_t * lcl, + u16 lcl_port, u8 proto); + +always_inline stream_session_t * +stream_session_get_tsi (u64 ti_and_si, u32 thread_index) +{ + ASSERT ((u32) (ti_and_si >> 32) == thread_index); + return pool_elt_at_index (session_manager_main.sessions[thread_index], + ti_and_si & 0xFFFFFFFFULL); +} + +always_inline stream_session_t * +stream_session_get (u64 si, u32 thread_index) +{ + return pool_elt_at_index (session_manager_main.sessions[thread_index], si); +} + +always_inline stream_session_t * +stream_session_get_if_valid (u64 si, u32 thread_index) +{ + if (thread_index >= vec_len (session_manager_main.sessions)) + return 0; + + if (pool_is_free_index (session_manager_main.sessions[thread_index], si)) + return 0; + + return pool_elt_at_index (session_manager_main.sessions[thread_index], si); +} + +always_inline stream_session_t * +stream_session_listener_get (u8 sst, u64 si) +{ + return pool_elt_at_index (session_manager_main.listen_sessions[sst], si); +} + +always_inline u32 +stream_session_get_index (stream_session_t * s) +{ + if (s->session_state == SESSION_STATE_LISTENING) + return s - session_manager_main.listen_sessions[s->session_type]; + + return s - session_manager_main.sessions[s->thread_index]; +} + +always_inline u32 +stream_session_max_enqueue (transport_connection_t * tc) +{ + stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); + return svm_fifo_max_enqueue (s->server_rx_fifo); +} + +int +stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, + u8 queue_event); +u32 +stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, + u32 offset, u32 max_bytes); +u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes); + +void +stream_session_connect_notify (transport_connection_t * tc, u8 sst, + u8 is_fail); +void stream_session_accept_notify (transport_connection_t * tc); +void stream_session_disconnect_notify (transport_connection_t * tc); +void stream_session_delete_notify (transport_connection_t * tc); +void stream_session_reset_notify (transport_connection_t * tc); +int +stream_session_accept (transport_connection_t * tc, u32 listener_index, + u8 sst, u8 notify); +void stream_session_open (u8 sst, ip46_address_t * addr, + u16 port_host_byte_order, u32 api_client_index); +void stream_session_disconnect (stream_session_t * s); +void stream_session_cleanup (stream_session_t * s); +int +stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port); +void stream_session_stop_listen (u32 server_index); + +u8 *format_stream_session (u8 * s, va_list * args); + +void session_register_transport (u8 type, const transport_proto_vft_t * 
vft); +transport_proto_vft_t *session_get_transport_vft (u8 type); + +#endif /* __included_session_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c new file mode 100644 index 00000000..9d068684 --- /dev/null +++ b/src/vnet/session/session_api.c @@ -0,0 +1,821 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include "application_interface.h" + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_session_api_msg \ +_(MAP_ANOTHER_SEGMENT_REPLY, map_another_segment_reply) \ +_(BIND_URI, bind_uri) \ +_(UNBIND_URI, unbind_uri) \ +_(CONNECT_URI, connect_uri) \ +_(DISCONNECT_SESSION, disconnect_session) \ +_(DISCONNECT_SESSION_REPLY, disconnect_session_reply) \ +_(ACCEPT_SESSION_REPLY, accept_session_reply) \ +_(RESET_SESSION_REPLY, reset_session_reply) \ +_(BIND_SOCK, bind_sock) \ +_(UNBIND_SOCK, unbind_sock) \ +_(CONNECT_SOCK, connect_sock) \ +_(DISCONNECT_SOCK, disconnect_sock) \ +_(DISCONNECT_SOCK_REPLY, disconnect_sock_reply) \ +_(ACCEPT_SOCK_REPLY, accept_sock_reply) \ +_(RESET_SOCK_REPLY, reset_sock_reply) \ + +static int +send_add_segment_callback (u32 api_client_index, const u8 * segment_name, + u32 segment_size) +{ + vl_api_map_another_segment_t *mp; + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (api_client_index); + + if (!q) + return -1; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_MAP_ANOTHER_SEGMENT); + mp->segment_size = segment_size; + strncpy ((char *) mp->segment_name, (char *) segment_name, + sizeof (mp->segment_name) - 1); + + vl_msg_api_send_shmem (q, (u8 *) & mp); + + return 0; +} + +static int +send_session_accept_uri_callback (stream_session_t * s) +{ + vl_api_accept_session_t *mp; + unix_shared_memory_queue_t *q, *vpp_queue; + application_t *server = application_get (s->app_index); + + q = vl_api_client_index_to_input_queue (server->api_client_index); + vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); + + if (!q) + return -1; + + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SESSION); + + /* Note: session_type is the first octet in all types of sessions */ + + mp->accept_cookie = server->accept_cookie; + mp->server_rx_fifo = (u64) s->server_rx_fifo; + mp->server_tx_fifo = (u64) s->server_tx_fifo; + mp->session_thread_index = s->thread_index; + mp->session_index = s->session_index; + mp->session_type = 
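The redirect_connect_*_callback handlers further below bounce a client's connect message onto the target server's API queue without blocking the data plane: they retry a non-blocking enqueue until a half-second deadline, treating -1 as "lock busy, spin again" and -2 as "queue full, give up". A standalone sketch of that loop, with a stubbed clock and queue in place of vlib_time_now() and unix_shared_memory_queue_add():

#include <time.h>

/* Stubs: a coarse clock and a nowait enqueue that reports
 * 0 = added, -1 = lock busy, -2 = queue full. */
static double
now (void)
{
  return (double) clock () / CLOCKS_PER_SEC;
}

static int
try_enqueue (void *queue, void *msg)
{
  (void) queue; (void) msg;
  return 0;
}

enum { REDIRECTED = 0, ERR_QUEUE_FULL = -1, ERR_TIMEOUT = -2 };

static int
bounce_to_server (void *server_q, void *msg)
{
  double deadline = now () + 0.5;       /* same 0.5 s budget as the handlers */

  while (now () < deadline)
    {
      switch (try_enqueue (server_q, msg))
        {
        case 0:
          return REDIRECTED;    /* server replies to the client itself */
        case -1:
          continue;             /* mutex busy: keep spinning */
        case -2:
          return ERR_QUEUE_FULL;        /* queue stuffed: drop the message */
        }
    }
  return ERR_TIMEOUT;
}

int
main (void)
{
  return bounce_to_server (0, 0);
}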
s->session_type; + mp->vpp_event_queue_address = (u64) vpp_queue; + vl_msg_api_send_shmem (q, (u8 *) & mp); + + return 0; +} + +static void +send_session_disconnect_uri_callback (stream_session_t * s) +{ + vl_api_disconnect_session_t *mp; + unix_shared_memory_queue_t *q; + application_t *app = application_get (s->app_index); + + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_DISCONNECT_SESSION); + + mp->session_thread_index = s->thread_index; + mp->session_index = s->session_index; + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +static int +send_session_connected_uri_callback (u32 api_client_index, + stream_session_t * s, u8 is_fail) +{ + vl_api_connect_uri_reply_t *mp; + unix_shared_memory_queue_t *q; + application_t *app = application_lookup (api_client_index); + u8 *seg_name; + unix_shared_memory_queue_t *vpp_queue; + + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return -1; + + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); + mp->context = app->api_context; + mp->retval = is_fail; + if (!is_fail) + { + vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); + mp->server_rx_fifo = (u64) s->server_rx_fifo; + mp->server_tx_fifo = (u64) s->server_tx_fifo; + mp->session_thread_index = s->thread_index; + mp->session_index = s->session_index; + mp->session_type = s->session_type; + mp->vpp_event_queue_address = (u64) vpp_queue; + mp->client_event_queue_address = (u64) app->event_queue; + + session_manager_get_segment_info (s->server_segment_index, &seg_name, + &mp->segment_size); + mp->segment_name_length = vec_len (seg_name); + if (mp->segment_name_length) + clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); + } + + vl_msg_api_send_shmem (q, (u8 *) & mp); + + /* Remove client if connect failed */ + if (is_fail) + application_del (app); + + return 0; +} + +/** + * Redirect a connect_uri message to the indicated server. + * Only sent if the server has bound the related port with + * URI_OPTIONS_FLAGS_USE_FIFO + */ +static int +redirect_connect_uri_callback (u32 server_api_client_index, void *mp_arg) +{ + vl_api_connect_uri_t *mp = mp_arg; + unix_shared_memory_queue_t *server_q, *client_q; + vlib_main_t *vm = vlib_get_main (); + f64 timeout = vlib_time_now (vm) + 0.5; + int rv = 0; + + server_q = vl_api_client_index_to_input_queue (server_api_client_index); + + if (!server_q) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + client_q = vl_api_client_index_to_input_queue (mp->client_index); + if (!client_q) + { + rv = VNET_API_ERROR_INVALID_VALUE_2; + goto out; + } + + /* Tell the server the client's API queue address, so it can reply */ + mp->client_queue_address = (u64) client_q; + + /* + * Bounce message handlers MUST NOT block the data-plane. 
+ * Spin waiting for the queue lock, but + */ + + while (vlib_time_now (vm) < timeout) + { + rv = + unix_shared_memory_queue_add (server_q, (u8 *) & mp, 1 /*nowait */ ); + switch (rv) + { + /* correctly enqueued */ + case 0: + return VNET_CONNECT_REDIRECTED; + + /* continue spinning, wait for pthread_mutex_trylock to work */ + case -1: + continue; + + /* queue stuffed, drop the msg */ + case -2: + rv = VNET_API_ERROR_QUEUE_FULL; + goto out; + } + } +out: + /* Dispose of the message */ + vl_msg_api_free (mp); + return rv; +} + +static u64 +make_session_handle (stream_session_t * s) +{ + return (u64) s->session_index << 32 | (u64) s->thread_index; +} + +static int +send_session_accept_callback (stream_session_t * s) +{ + vl_api_accept_sock_t *mp; + unix_shared_memory_queue_t *q, *vpp_queue; + application_t *server = application_get (s->app_index); + + q = vl_api_client_index_to_input_queue (server->api_client_index); + vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); + + if (!q) + return -1; + + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SOCK); + + /* Note: session_type is the first octet in all types of sessions */ + + mp->accept_cookie = server->accept_cookie; + mp->server_rx_fifo = (u64) s->server_rx_fifo; + mp->server_tx_fifo = (u64) s->server_tx_fifo; + mp->handle = make_session_handle (s); + mp->vpp_event_queue_address = (u64) vpp_queue; + vl_msg_api_send_shmem (q, (u8 *) & mp); + + return 0; +} + +static int +send_session_connected_callback (u32 api_client_index, stream_session_t * s, + u8 is_fail) +{ + vl_api_connect_sock_reply_t *mp; + unix_shared_memory_queue_t *q; + application_t *app = application_lookup (api_client_index); + u8 *seg_name; + unix_shared_memory_queue_t *vpp_queue; + + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return -1; + + mp = vl_msg_api_alloc (sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_SOCK_REPLY); + mp->context = app->api_context; + mp->retval = is_fail; + if (!is_fail) + { + vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); + mp->server_rx_fifo = (u64) s->server_rx_fifo; + mp->server_tx_fifo = (u64) s->server_tx_fifo; + mp->handle = make_session_handle (s); + mp->vpp_event_queue_address = (u64) vpp_queue; + mp->client_event_queue_address = (u64) app->event_queue; + + session_manager_get_segment_info (s->server_segment_index, &seg_name, + &mp->segment_size); + mp->segment_name_length = vec_len (seg_name); + if (mp->segment_name_length) + clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); + } + + vl_msg_api_send_shmem (q, (u8 *) & mp); + + /* Remove client if connect failed */ + if (is_fail) + application_del (app); + + return 0; +} + +static void +send_session_disconnect_callback (stream_session_t * s) +{ + vl_api_disconnect_sock_t *mp; + unix_shared_memory_queue_t *q; + application_t *app = application_get (s->app_index); + + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_DISCONNECT_SOCK); + + mp->handle = make_session_handle (s); + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +/** + * Redirect a connect_uri message to the indicated server. 
+ * Only sent if the server has bound the related port with + * URI_OPTIONS_FLAGS_USE_FIFO + */ +static int +redirect_connect_callback (u32 server_api_client_index, void *mp_arg) +{ + vl_api_connect_sock_t *mp = mp_arg; + unix_shared_memory_queue_t *server_q, *client_q; + vlib_main_t *vm = vlib_get_main (); + f64 timeout = vlib_time_now (vm) + 0.5; + int rv = 0; + + server_q = vl_api_client_index_to_input_queue (server_api_client_index); + + if (!server_q) + { + rv = VNET_API_ERROR_INVALID_VALUE; + goto out; + } + + client_q = vl_api_client_index_to_input_queue (mp->client_index); + if (!client_q) + { + rv = VNET_API_ERROR_INVALID_VALUE_2; + goto out; + } + + /* Tell the server the client's API queue address, so it can reply */ + mp->client_queue_address = (u64) client_q; + + /* + * Bounce message handlers MUST NOT block the data-plane. + * Spin waiting for the queue lock, but + */ + + while (vlib_time_now (vm) < timeout) + { + rv = + unix_shared_memory_queue_add (server_q, (u8 *) & mp, 1 /*nowait */ ); + switch (rv) + { + /* correctly enqueued */ + case 0: + return VNET_CONNECT_REDIRECTED; + + /* continue spinning, wait for pthread_mutex_trylock to work */ + case -1: + continue; + + /* queue stuffed, drop the msg */ + case -2: + rv = VNET_API_ERROR_QUEUE_FULL; + goto out; + } + } +out: + /* Dispose of the message */ + vl_msg_api_free (mp); + return rv; +} + +static session_cb_vft_t uri_session_cb_vft = { + .session_accept_callback = send_session_accept_uri_callback, + .session_disconnect_callback = send_session_disconnect_uri_callback, + .session_connected_callback = send_session_connected_uri_callback, + .add_segment_callback = send_add_segment_callback, + .redirect_connect_callback = redirect_connect_uri_callback +}; + +static session_cb_vft_t session_cb_vft = { + .session_accept_callback = send_session_accept_callback, + .session_disconnect_callback = send_session_disconnect_callback, + .session_connected_callback = send_session_connected_callback, + .add_segment_callback = send_add_segment_callback, + .redirect_connect_callback = redirect_connect_callback +}; + +static int +api_session_not_valid (u32 session_index, u32 thread_index) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + stream_session_t *pool; + + if (thread_index >= vec_len (smm->sessions)) + return VNET_API_ERROR_INVALID_VALUE; + + pool = smm->sessions[thread_index]; + + if (pool_is_free_index (pool, session_index)) + return VNET_API_ERROR_INVALID_VALUE_2; + + return 0; +} + +static void +vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) +{ + vl_api_bind_uri_reply_t *rmp; + vnet_bind_args_t _a, *a = &_a; + char segment_name[128]; + u32 segment_name_length; + int rv; + + _Static_assert (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= + sizeof (mp->options), + "Out of options, fix api message definition"); + + segment_name_length = ARRAY_LEN (segment_name); + + memset (a, 0, sizeof (*a)); + + a->uri = (char *) mp->uri; + a->api_client_index = mp->client_index; + a->options = mp->options; + a->segment_name = segment_name; + a->segment_name_length = segment_name_length; + a->session_cb_vft = &uri_session_cb_vft; + + a->options[SESSION_OPTIONS_SEGMENT_SIZE] = mp->initial_segment_size; + a->options[SESSION_OPTIONS_ACCEPT_COOKIE] = mp->accept_cookie; + rv = vnet_bind_uri (a); + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_BIND_URI_REPLY, ({ + rmp->retval = rv; + if (!rv) + { + rmp->segment_name_length = 0; + /* $$$$ policy? 
*/ + rmp->segment_size = mp->initial_segment_size; + if (segment_name_length) + { + memcpy (rmp->segment_name, segment_name, segment_name_length); + rmp->segment_name_length = segment_name_length; + } + rmp->server_event_queue_address = a->server_event_queue_address; + } + })); + /* *INDENT-ON* */ + +} + +static void +vl_api_unbind_uri_t_handler (vl_api_unbind_uri_t * mp) +{ + vl_api_unbind_uri_reply_t *rmp; + int rv; + + rv = vnet_unbind_uri ((char *) mp->uri, mp->client_index); + + REPLY_MACRO (VL_API_UNBIND_URI_REPLY); +} + +static void +vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) +{ + vnet_connect_args_t _a, *a = &_a; + + a->uri = (char *) mp->uri; + a->api_client_index = mp->client_index; + a->api_context = mp->context; + a->options = mp->options; + a->session_cb_vft = &uri_session_cb_vft; + a->mp = mp; + vnet_connect_uri (a); +} + +static void +vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) +{ + vl_api_disconnect_session_reply_t *rmp; + int rv; + + rv = api_session_not_valid (mp->session_index, mp->session_thread_index); + if (!rv) + rv = vnet_disconnect_session (mp->client_index, mp->session_index, + mp->session_thread_index); + + REPLY_MACRO (VL_API_DISCONNECT_SESSION_REPLY); +} + +static void +vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * + mp) +{ + if (api_session_not_valid (mp->session_index, mp->session_thread_index)) + { + clib_warning ("Invalid session!"); + return; + } + + /* Client objected to disconnecting the session, log and continue */ + if (mp->retval) + { + clib_warning ("client retval %d", mp->retval); + return; + } + + /* Disconnect has been confirmed. Confirm close to transport */ + vnet_disconnect_session (mp->client_index, mp->session_index, + mp->session_thread_index); +} + +static void +vl_api_reset_session_reply_t_handler (vl_api_reset_session_reply_t * mp) +{ + stream_session_t *s; + + if (api_session_not_valid (mp->session_index, mp->session_thread_index)) + { + clib_warning ("Invalid session!"); + return; + } + + /* Client objected to resetting the session, log and continue */ + if (mp->retval) + { + clib_warning ("client retval %d", mp->retval); + return; + } + + s = stream_session_get (mp->session_index, mp->session_thread_index); + + /* This comes as a response to a reset, transport only waiting for + * confirmation to remove connection state, no need to disconnect */ + stream_session_cleanup (s); +} + +static void +vl_api_accept_session_reply_t_handler (vl_api_accept_session_reply_t * mp) +{ + stream_session_t *s; + int rv; + + if (api_session_not_valid (mp->session_index, mp->session_thread_index)) + return; + + s = stream_session_get (mp->session_index, mp->session_thread_index); + rv = mp->retval; + + if (rv) + { + /* Server isn't interested, kill the session */ + stream_session_disconnect (s); + return; + } + + s->session_state = SESSION_STATE_READY; +} + +static void +vl_api_map_another_segment_reply_t_handler (vl_api_map_another_segment_reply_t + * mp) +{ + clib_warning ("not implemented"); +} + +static void +vl_api_bind_sock_t_handler (vl_api_bind_sock_t * mp) +{ + vl_api_bind_sock_reply_t *rmp; + vnet_bind_args_t _a, *a = &_a; + char segment_name[128]; + u32 segment_name_length; + int rv; + + STATIC_ASSERT (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= + sizeof (mp->options), + "Out of options, fix api message definition"); + + segment_name_length = ARRAY_LEN (segment_name); + + memset (a, 0, sizeof (*a)); + + clib_memcpy (&a->tep.ip, mp->ip, + (mp->is_ip4 ? 
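The sock-flavoured messages in this hunk carry a single 64-bit session handle built by make_session_handle(): session index in the upper 32 bits, thread index in the lower 32. The reply handlers recover both through api_parse_session_handle(), which is not part of this hunk but presumably just reverses the shift. A standalone sketch of the pair, with the parse side explicitly an assumption:

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Mirrors make_session_handle(): session index high, thread index low.
 * Note this is the opposite ordering from the lookup-table value used
 * in session.c (thread index high). */
static u64
make_handle (u32 session_index, u32 thread_index)
{
  return ((u64) session_index << 32) | (u64) thread_index;
}

/* Assumed inverse of api_parse_session_handle(); the real helper may
 * also validate the indices. Returns 0 on success. */
static int
parse_handle (u64 handle, u32 * session_index, u32 * thread_index)
{
  *session_index = (u32) (handle >> 32);
  *thread_index = (u32) (handle & 0xFFFFFFFFULL);
  return 0;
}

int
main (void)
{
  u32 si, ti;
  parse_handle (make_handle (7, 1), &si, &ti);
  assert (si == 7 && ti == 1);
  return 0;
}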
sizeof (ip4_address_t) : + sizeof (ip6_address_t))); + a->tep.is_ip4 = mp->is_ip4; + a->tep.port = mp->port; + a->tep.vrf = mp->vrf; + + a->api_client_index = mp->client_index; + a->options = mp->options; + a->segment_name = segment_name; + a->segment_name_length = segment_name_length; + a->session_cb_vft = &session_cb_vft; + + rv = vnet_bind_uri (a); + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_BIND_SOCK_REPLY, ({ + rmp->retval = rv; + if (!rv) + { + rmp->segment_name_length = 0; + rmp->segment_size = mp->options[SESSION_OPTIONS_SEGMENT_SIZE]; + if (segment_name_length) + { + memcpy(rmp->segment_name, segment_name, segment_name_length); + rmp->segment_name_length = segment_name_length; + } + rmp->server_event_queue_address = a->server_event_queue_address; + } + })); + /* *INDENT-ON* */ +} + +static void +vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp) +{ + vl_api_unbind_sock_reply_t *rmp; + vnet_unbind_args_t _a, *a = &_a; + int rv; + + a->api_client_index = mp->client_index; + a->handle = mp->handle; + + rv = vnet_unbind (a); + + REPLY_MACRO (VL_API_UNBIND_SOCK_REPLY); +} + +static void +vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) +{ + vnet_connect_args_t _a, *a = &_a; + + clib_memcpy (&a->tep.ip, mp->ip, + (mp->is_ip4 ? sizeof (ip4_address_t) : + sizeof (ip6_address_t))); + a->tep.is_ip4 = mp->is_ip4; + a->tep.port = mp->port; + a->tep.vrf = mp->vrf; + a->options = mp->options; + a->session_cb_vft = &session_cb_vft; + a->api_context = mp->context; + a->mp = mp; + + vnet_connect (a); +} + +static void +vl_api_disconnect_sock_t_handler (vl_api_disconnect_sock_t * mp) +{ + vnet_disconnect_args_t _a, *a = &_a; + vl_api_disconnect_sock_reply_t *rmp; + int rv; + + a->api_client_index = mp->client_index; + a->handle = mp->handle; + rv = vnet_disconnect (a); + + REPLY_MACRO (VL_API_DISCONNECT_SOCK_REPLY); +} + +static void +vl_api_disconnect_sock_reply_t_handler (vl_api_disconnect_sock_reply_t * mp) +{ + vnet_disconnect_args_t _a, *a = &_a; + + /* Client objected to disconnecting the session, log and continue */ + if (mp->retval) + { + clib_warning ("client retval %d", mp->retval); + return; + } + + a->api_client_index = mp->client_index; + a->handle = mp->handle; + + vnet_disconnect (a); +} + +static void +vl_api_reset_sock_reply_t_handler (vl_api_reset_sock_reply_t * mp) +{ + stream_session_t *s; + u32 session_index, thread_index; + + /* Client objected to resetting the session, log and continue */ + if (mp->retval) + { + clib_warning ("client retval %d", mp->retval); + return; + } + + if (api_parse_session_handle (mp->handle, &session_index, &thread_index)) + { + clib_warning ("Invalid handle"); + return; + } + + s = stream_session_get (session_index, thread_index); + + /* This comes as a response to a reset, transport only waiting for + * confirmation to remove connection state, no need to disconnect */ + stream_session_cleanup (s); +} + +static void +vl_api_accept_sock_reply_t_handler (vl_api_accept_sock_reply_t * mp) +{ + stream_session_t *s; + u32 session_index, thread_index; + + if (api_parse_session_handle (mp->handle, &session_index, &thread_index)) + { + clib_warning ("Invalid handle"); + return; + } + s = stream_session_get (session_index, thread_index); + + if (mp->retval) + { + /* Server isn't interested, kill the session */ + stream_session_disconnect (s); + return; + } + + s->session_state = SESSION_STATE_READY; +} + +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define 
_(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_session; +#undef _ +} + +/* + * session_api_hookup + * Add uri's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../open-repo/vlib/memclnt_vlib.c:memclnt_process() + */ +static clib_error_t * +session_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_session_api_msg; +#undef _ + + /* + * Messages which bounce off the data-plane to + * an API client. Simply tells the message handling infra not + * to free the message. + * + * Bounced message handlers MUST NOT block the data plane + */ + am->message_bounce[VL_API_CONNECT_URI] = 1; + am->message_bounce[VL_API_CONNECT_SOCK] = 1; + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (session_api_hookup); +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c new file mode 100644 index 00000000..b2943a1c --- /dev/null +++ b/src/vnet/session/session_cli.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include + +/** + * Format stream session as per the following format + * + * verbose: + * "Connection", "Rx fifo", "Tx fifo", "Session Index" + * non-verbose: + * "Connection" + */ +u8 * +format_stream_session (u8 * s, va_list * args) +{ + stream_session_t *ss = va_arg (*args, stream_session_t *); + int verbose = va_arg (*args, int); + transport_proto_vft_t *tp_vft; + u8 *str = 0; + + tp_vft = session_get_transport_vft (ss->session_type); + + if (verbose) + str = format (0, "%-20llp%-20llp%-15lld", ss->server_rx_fifo, + ss->server_tx_fifo, stream_session_get_index (ss)); + + if (ss->session_state == SESSION_STATE_READY) + { + s = format (s, "%-40U%v", tp_vft->format_connection, + ss->connection_index, ss->thread_index, str); + } + else if (ss->session_state == SESSION_STATE_LISTENING) + { + s = format (s, "%-40U%v", tp_vft->format_listener, ss->connection_index, + str); + } + else if (ss->session_state == SESSION_STATE_READY) + { + s = + format (s, "%-40U%v", tp_vft->format_half_open, ss->connection_index, + str); + } + else if (ss->session_state == SESSION_STATE_CLOSED) + { + s = format (s, "[CL] %-40U%v", tp_vft->format_connection, + ss->connection_index, ss->thread_index, str); + } + else + { + clib_warning ("Session in unknown state!"); + } + + vec_free (str); + + return s; +} + +static clib_error_t * +show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + session_manager_main_t *smm = &session_manager_main; + int verbose = 0, i; + stream_session_t *pool; + stream_session_t *s; + u8 *str = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "verbose")) + verbose = 1; + else + break; + } + + for (i = 0; i < vec_len (smm->sessions); i++) + { + u32 once_per_pool; + pool = smm->sessions[i]; + + once_per_pool = 1; + + if (pool_elts (pool)) + { + + vlib_cli_output (vm, "Thread %d: %d active sessions", + i, pool_elts (pool)); + if (verbose) + { + if (once_per_pool) + { + str = format (str, "%-40s%-20s%-20s%-15s", + "Connection", "Rx fifo", "Tx fifo", + "Session Index"); + vlib_cli_output (vm, "%v", str); + vec_reset_length (str); + once_per_pool = 0; + } + + /* *INDENT-OFF* */ + pool_foreach (s, pool, + ({ + vlib_cli_output (vm, "%U", format_stream_session, s, verbose); + })); + /* *INDENT-ON* */ + } + } + else + vlib_cli_output (vm, "Thread %d: no active sessions", i); + } + vec_free (str); + + return 0; +} + +VLIB_CLI_COMMAND (show_uri_command, static) = +{ +.path = "show session",.short_help = "show session [verbose]",.function = + show_session_command_fn,}; + + +static clib_error_t * +clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + session_manager_main_t *smm = &session_manager_main; + u32 thread_index = 0; + u32 session_index = ~0; + stream_session_t *pool, *session; + application_t *server; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "thread %d", &thread_index)) + ; + else if (unformat (input, "session %d", &session_index)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + if (session_index == ~0) + return clib_error_return (0, "session required, but not set."); + + if (thread_index > vec_len (smm->sessions)) + return clib_error_return (0, "thread %d out of range [0-%d]", + thread_index, vec_len (smm->sessions)); + + pool = smm->sessions[thread_index]; + + if (pool_is_free_index (pool, session_index)) + return 
clib_error_return (0, "session %d not active", session_index); + + session = pool_elt_at_index (pool, session_index); + server = application_get (session->app_index); + + /* Disconnect both app and transport */ + server->cb_fns.session_disconnect_callback (session); + + return 0; +} + +VLIB_CLI_COMMAND (clear_uri_session_command, static) = +{ +.path = "clear session",.short_help = + "clear session thread session ",.function = + clear_session_command_fn,}; + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/transport.c b/src/vnet/session/transport.c new file mode 100644 index 00000000..abd94ba4 --- /dev/null +++ b/src/vnet/session/transport.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +u32 +transport_endpoint_lookup (transport_endpoint_table_t *ht, ip46_address_t *ip, + u16 port) +{ + clib_bihash_kv_24_8_t kv; + int rv; + + kv.key[0] = ip->as_u64[0]; + kv.key[1] = ip->as_u64[1]; + kv.key[2] = port; + + rv = clib_bihash_search_inline_24_8 (ht, &kv); + if (rv == 0) + return kv.value; + + return TRANSPORT_ENDPOINT_INVALID_INDEX; +} + +void +transport_endpoint_table_add (transport_endpoint_table_t *ht, + transport_endpoint_t *te, u32 value) +{ + clib_bihash_kv_24_8_t kv; + + kv.key[0] = te->ip.as_u64[0]; + kv.key[1] = te->ip.as_u64[1]; + kv.key[2] = te->port; + kv.value = value; + + clib_bihash_add_del_24_8 (ht, &kv, 1); +} + +void +transport_endpoint_table_del (transport_endpoint_table_t *ht, + transport_endpoint_t *te) +{ + clib_bihash_kv_24_8_t kv; + + kv.key[0] = te->ip.as_u64[0]; + kv.key[1] = te->ip.as_u64[1]; + kv.key[2] = te->port; + + clib_bihash_add_del_24_8 (ht, &kv, 0); +} + + + diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h new file mode 100644 index 00000000..2d4415ba --- /dev/null +++ b/src/vnet/session/transport.h @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef VNET_VNET_URI_TRANSPORT_H_ +#define VNET_VNET_URI_TRANSPORT_H_ + +#include +#include +#include +#include + +/* + * Protocol independent transport properties associated to a session + */ +typedef struct _transport_connection +{ + ip46_address_t rmt_ip; /**< Remote IP */ + ip46_address_t lcl_ip; /**< Local IP */ + u16 lcl_port; /**< Local port */ + u16 rmt_port; /**< Remote port */ + u8 proto; /**< Transport protocol id */ + + u32 s_index; /**< Parent session index */ + u32 c_index; /**< Connection index in transport pool */ + u8 is_ip4; /**< Flag if IP4 connection */ + u32 thread_index; /**< Worker-thread index */ + + /** Macros for 'derived classes' where base is named "connection" */ +#define c_lcl_ip connection.lcl_ip +#define c_rmt_ip connection.rmt_ip +#define c_lcl_ip4 connection.lcl_ip.ip4 +#define c_rmt_ip4 connection.rmt_ip.ip4 +#define c_lcl_ip6 connection.lcl_ip.ip6 +#define c_rmt_ip6 connection.rmt_ip.ip6 +#define c_lcl_port connection.lcl_port +#define c_rmt_port connection.rmt_port +#define c_proto connection.proto +#define c_state connection.state +#define c_s_index connection.s_index +#define c_c_index connection.c_index +#define c_is_ip4 connection.is_ip4 +#define c_thread_index connection.thread_index +} transport_connection_t; + +/* + * Transport protocol virtual function table + */ +typedef struct _transport_proto_vft +{ + /* + * Setup + */ + u32 (*bind) (vlib_main_t *, u32, ip46_address_t *, u16); + u32 (*unbind) (vlib_main_t *, u32); + int (*open) (ip46_address_t * addr, u16 port_host_byte_order); + void (*close) (u32 conn_index, u32 thread_index); + void (*cleanup) (u32 conn_index, u32 thread_index); + + /* + * Transmission + */ + u32 (*push_header) (transport_connection_t * tconn, vlib_buffer_t * b); + u16 (*send_mss) (transport_connection_t * tc); + u32 (*send_space) (transport_connection_t * tc); + u32 (*rx_fifo_offset) (transport_connection_t * tc); + + /* + * Connection retrieval + */ + transport_connection_t *(*get_connection) (u32 conn_idx, u32 thread_idx); + transport_connection_t *(*get_listener) (u32 conn_index); + transport_connection_t *(*get_half_open) (u32 conn_index); + + /* + * Format + */ + u8 *(*format_connection) (u8 * s, va_list * args); + u8 *(*format_listener) (u8 * s, va_list * args); + u8 *(*format_half_open) (u8 * s, va_list * args); + +} transport_proto_vft_t; + +/* 16 octets */ +typedef CLIB_PACKED (struct + { + union + { + struct + { + ip4_address_t src; ip4_address_t dst; + u16 src_port; + u16 dst_port; + /* align by making this 4 octets even though its a 1-bit field + * NOTE: avoid key overlap with other transports that use 5 tuples for + * session identification. 
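+ *
+ * Illustrative layout note (derived from the fields below, not part of the
+ * original comment): the resulting key is 16 octets, i.e. two u64 words:
+ *   src (4) | dst (4) | src_port (2) | dst_port (2) | proto (4)
+ * which matches the clib_bihash_kv_16_8_t key used by the session lookup
+ * helpers further down.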
+ */ + u32 proto; + }; + u64 as_u64[2]; + }; + }) v4_connection_key_t; + +typedef CLIB_PACKED (struct + { + union + { + struct + { + /* 48 octets */ + ip6_address_t src; ip6_address_t dst; + u16 src_port; + u16 dst_port; u32 proto; u8 unused_for_now[8]; + }; u64 as_u64[6]; + }; + }) v6_connection_key_t; + +typedef clib_bihash_kv_16_8_t session_kv4_t; +typedef clib_bihash_kv_48_8_t session_kv6_t; + +always_inline void +make_v4_ss_kv (session_kv4_t * kv, ip4_address_t * lcl, ip4_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto) +{ + v4_connection_key_t key; + memset (&key, 0, sizeof (v4_connection_key_t)); + + key.src.as_u32 = lcl->as_u32; + key.dst.as_u32 = rmt->as_u32; + key.src_port = lcl_port; + key.dst_port = rmt_port; + key.proto = proto; + + kv->key[0] = key.as_u64[0]; + kv->key[1] = key.as_u64[1]; + kv->value = ~0ULL; +} + +always_inline void +make_v4_listener_kv (session_kv4_t * kv, ip4_address_t * lcl, u16 lcl_port, + u8 proto) +{ + v4_connection_key_t key; + memset (&key, 0, sizeof (v4_connection_key_t)); + + key.src.as_u32 = lcl->as_u32; + key.dst.as_u32 = 0; + key.src_port = lcl_port; + key.dst_port = 0; + key.proto = proto; + + kv->key[0] = key.as_u64[0]; + kv->key[1] = key.as_u64[1]; + kv->value = ~0ULL; +} + +always_inline void +make_v4_ss_kv_from_tc (session_kv4_t * kv, transport_connection_t * t) +{ + return make_v4_ss_kv (kv, &t->lcl_ip.ip4, &t->rmt_ip.ip4, t->lcl_port, + t->rmt_port, t->proto); +} + +always_inline void +make_v6_ss_kv (session_kv6_t * kv, ip6_address_t * lcl, ip6_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto) +{ + v6_connection_key_t key; + memset (&key, 0, sizeof (v6_connection_key_t)); + + key.src.as_u64[0] = lcl->as_u64[0]; + key.src.as_u64[1] = lcl->as_u64[1]; + key.dst.as_u64[0] = rmt->as_u64[0]; + key.dst.as_u64[1] = rmt->as_u64[1]; + key.src_port = lcl_port; + key.dst_port = rmt_port; + key.proto = proto; + + kv->key[0] = key.as_u64[0]; + kv->key[1] = key.as_u64[1]; + kv->value = ~0ULL; +} + +always_inline void +make_v6_listener_kv (session_kv6_t * kv, ip6_address_t * lcl, u16 lcl_port, + u8 proto) +{ + v6_connection_key_t key; + memset (&key, 0, sizeof (v6_connection_key_t)); + + key.src.as_u64[0] = lcl->as_u64[0]; + key.src.as_u64[1] = lcl->as_u64[1]; + key.dst.as_u64[0] = 0; + key.dst.as_u64[1] = 0; + key.src_port = lcl_port; + key.dst_port = 0; + key.proto = proto; + + kv->key[0] = key.as_u64[0]; + kv->key[1] = key.as_u64[1]; + kv->value = ~0ULL; +} + +always_inline void +make_v6_ss_kv_from_tc (session_kv6_t * kv, transport_connection_t * t) +{ + make_v6_ss_kv (kv, &t->lcl_ip.ip6, &t->rmt_ip.ip6, t->lcl_port, + t->rmt_port, t->proto); +} + +typedef struct _transport_endpoint +{ + ip46_address_t ip; + u16 port; + u8 is_ip4; + u32 vrf; +} transport_endpoint_t; + +typedef clib_bihash_24_8_t transport_endpoint_table_t; + +#define TRANSPORT_ENDPOINT_INVALID_INDEX ((u32)~0) + +u32 +transport_endpoint_lookup (transport_endpoint_table_t * ht, + ip46_address_t * ip, u16 port); +void transport_endpoint_table_add (transport_endpoint_table_t * ht, + transport_endpoint_t * te, u32 value); +void transport_endpoint_table_del (transport_endpoint_table_t * ht, + transport_endpoint_t * te); + +#endif /* VNET_VNET_URI_TRANSPORT_H_ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c new file mode 100644 index 00000000..0f9b7097 --- /dev/null +++ b/src/vnet/tcp/tcp.c @@ -0,0 +1,708 @@ +/* + * Copyright (c) 2016 Cisco and/or 
its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +tcp_main_t tcp_main; + +static u32 +tcp_connection_bind (vlib_main_t * vm, u32 session_index, ip46_address_t * ip, + u16 port_host_byte_order, u8 is_ip4) +{ + tcp_main_t *tm = &tcp_main; + tcp_connection_t *listener; + + pool_get (tm->listener_pool, listener); + memset (listener, 0, sizeof (*listener)); + + listener->c_c_index = listener - tm->listener_pool; + listener->c_lcl_port = clib_host_to_net_u16 (port_host_byte_order); + + if (is_ip4) + listener->c_lcl_ip4.as_u32 = ip->ip4.as_u32; + else + clib_memcpy (&listener->c_lcl_ip6, &ip->ip6, sizeof (ip6_address_t)); + + listener->c_s_index = session_index; + listener->c_proto = SESSION_TYPE_IP4_TCP; + listener->state = TCP_STATE_LISTEN; + listener->c_is_ip4 = 1; + + return listener->c_c_index; +} + +u32 +tcp_session_bind_ip4 (vlib_main_t * vm, u32 session_index, + ip46_address_t * ip, u16 port_host_byte_order) +{ + return tcp_connection_bind (vm, session_index, ip, port_host_byte_order, 1); +} + +u32 +tcp_session_bind_ip6 (vlib_main_t * vm, u32 session_index, + ip46_address_t * ip, u16 port_host_byte_order) +{ + return tcp_connection_bind (vm, session_index, ip, port_host_byte_order, 0); + +} + +static void +tcp_session_unbind (u32 listener_index) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + pool_put_index (tm->listener_pool, listener_index); +} + +u32 +tcp_session_unbind_ip4 (vlib_main_t * vm, u32 listener_index) +{ + tcp_session_unbind (listener_index); + return 0; +} + +u32 +tcp_session_unbind_ip6 (vlib_main_t * vm, u32 listener_index) +{ + tcp_session_unbind (listener_index); + return 0; +} + +transport_connection_t * +tcp_session_get_listener (u32 listener_index) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + tcp_connection_t *tc; + tc = pool_elt_at_index (tm->listener_pool, listener_index); + return &tc->connection; +} + +/** + * Cleans up connection state. + * + * No notifications. + */ +void +tcp_connection_cleanup (tcp_connection_t * tc) +{ + tcp_main_t *tm = &tcp_main; + u32 tepi; + transport_endpoint_t *tep; + + /* Cleanup local endpoint if this was an active connect */ + tepi = transport_endpoint_lookup (&tm->local_endpoints_table, &tc->c_lcl_ip, + tc->c_lcl_port); + + /*XXX lock */ + if (tepi != TRANSPORT_ENDPOINT_INVALID_INDEX) + { + tep = pool_elt_at_index (tm->local_endpoints, tepi); + transport_endpoint_table_del (&tm->local_endpoints_table, tep); + pool_put (tm->local_endpoints, tep); + } + + /* Make sure all timers are cleared */ + tcp_connection_timers_reset (tc); + + /* Check if half-open */ + if (tc->state == TCP_STATE_SYN_SENT) + pool_put (tm->half_open_connections, tc); + else + pool_put (tm->connections[tc->c_thread_index], tc); +} + +/** + * Connection removal. + * + * This should be called only once connection enters CLOSED state. Note + * that it notifies the session of the removal event, so if the goal is to + * just remove the connection, call tcp_connection_cleanup instead. 
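+ *
+ * For example, in this patch the 2MSL timer handler (tcp_timer_2msl_handler)
+ * calls tcp_connection_del () so the session layer is notified of the
+ * removal, while tcp_timer_establish_handler () issues its own failure
+ * notification and then calls tcp_connection_cleanup () directly.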
+ */ +void +tcp_connection_del (tcp_connection_t * tc) +{ + stream_session_delete_notify (&tc->connection); + tcp_connection_cleanup (tc); +} + +/** + * Begin connection closing procedure. + * + * If at the end the connection is not in CLOSED state, it is not removed. + * Instead, we rely on on TCP to advance through state machine to either + * 1) LAST_ACK (passive close) whereby when the last ACK is received + * tcp_connection_del is called. This notifies session of the delete and + * calls cleanup. + * 2) TIME_WAIT (active close) whereby after 2MSL the 2MSL timer triggers + * and cleanup is called. + */ +void +tcp_connection_close (tcp_connection_t * tc) +{ + /* Send FIN if needed */ + if (tc->state == TCP_STATE_ESTABLISHED || tc->state == TCP_STATE_SYN_RCVD + || tc->state == TCP_STATE_CLOSE_WAIT) + tcp_send_fin (tc); + + /* Switch state */ + if (tc->state == TCP_STATE_ESTABLISHED || tc->state == TCP_STATE_SYN_RCVD) + tc->state = TCP_STATE_FIN_WAIT_1; + else if (tc->state == TCP_STATE_SYN_SENT) + tc->state = TCP_STATE_CLOSED; + else if (tc->state == TCP_STATE_CLOSE_WAIT) + tc->state = TCP_STATE_LAST_ACK; + + /* Half-close connections are not supported XXX */ + + if (tc->state == TCP_STATE_CLOSED) + tcp_connection_del (tc); +} + +void +tcp_session_close (u32 conn_index, u32 thread_index) +{ + tcp_connection_t *tc; + tc = tcp_connection_get (conn_index, thread_index); + tcp_connection_close (tc); +} + +void +tcp_session_cleanup (u32 conn_index, u32 thread_index) +{ + tcp_connection_t *tc; + tc = tcp_connection_get (conn_index, thread_index); + tcp_connection_cleanup (tc); +} + +void * +ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4) +{ + ip_lookup_main_t *lm4 = &ip4_main.lookup_main; + ip_lookup_main_t *lm6 = &ip6_main.lookup_main; + ip_interface_address_t *ia = 0; + + if (is_ip4) + { + /* *INDENT-OFF* */ + foreach_ip_interface_address (lm4, ia, sw_if_index, 1 /* unnumbered */ , + ({ + return ip_interface_address_get_address (lm4, ia); + })); + /* *INDENT-ON* */ + } + else + { + /* *INDENT-OFF* */ + foreach_ip_interface_address (lm6, ia, sw_if_index, 1 /* unnumbered */ , + ({ + return ip_interface_address_get_address (lm6, ia); + })); + /* *INDENT-ON* */ + } + + return 0; +} + +/** + * Allocate local port and add if successful add entry to local endpoint + * table to mark the pair as used. + */ +u16 +tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) +{ + u8 unique = 0; + transport_endpoint_t *tep; + u32 time_now, tei; + u16 min = 1024, max = 65535, tries; /* XXX configurable ? */ + + tries = max - min; + time_now = tcp_time_now (); + + /* Start at random point or max */ + pool_get (tm->local_endpoints, tep); + clib_memcpy (&tep->ip, ip, sizeof (*ip)); + tep->port = random_u32 (&time_now) << 16; + tep->port = tep->port < min ? 
max : tep->port; + + /* Search for first free slot */ + while (tries) + { + tei = transport_endpoint_lookup (&tm->local_endpoints_table, &tep->ip, + tep->port); + if (tei == TRANSPORT_ENDPOINT_INVALID_INDEX) + { + unique = 1; + break; + } + + tep->port--; + + if (tep->port < min) + tep->port = max; + + tries--; + } + + if (unique) + { + transport_endpoint_table_add (&tm->local_endpoints_table, tep, + tep - tm->local_endpoints); + + return tep->port; + } + + /* Failed */ + pool_put (tm->local_endpoints, tep); + return -1; +} + +/** + * Initialize all connection timers as invalid + */ +void +tcp_connection_timers_init (tcp_connection_t * tc) +{ + int i; + + /* Set all to invalid */ + for (i = 0; i < TCP_N_TIMERS; i++) + { + tc->timers[i] = TCP_TIMER_HANDLE_INVALID; + } + + tc->rto = TCP_RTO_INIT; +} + +/** + * Stop all connection timers + */ +void +tcp_connection_timers_reset (tcp_connection_t * tc) +{ + int i; + for (i = 0; i < TCP_N_TIMERS; i++) + { + tcp_timer_reset (tc, i); + } +} + +/** Initialize tcp connection variables + * + * Should be called after having received a msg from the peer, i.e., a SYN or + * a SYNACK, such that connection options have already been exchanged. */ +void +tcp_connection_init_vars (tcp_connection_t * tc) +{ + tcp_connection_timers_init (tc); + tcp_set_snd_mss (tc); + tc->sack_sb.head = TCP_INVALID_SACK_HOLE_INDEX; + tcp_cc_init (tc); +} + +int +tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + tcp_connection_t *tc; + fib_prefix_t prefix; + u32 fei, sw_if_index; + ip46_address_t lcl_addr; + u16 lcl_port; + + /* + * Find the local address and allocate port + */ + memset (&lcl_addr, 0, sizeof (lcl_addr)); + + /* Find a FIB path to the destination */ + clib_memcpy (&prefix.fp_addr, rmt_addr, sizeof (*rmt_addr)); + prefix.fp_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6; + prefix.fp_len = is_ip4 ? 32 : 128; + + fei = fib_table_lookup (0, &prefix); + + /* Couldn't find route to destination. Bail out. 
*/ + if (fei == FIB_NODE_INDEX_INVALID) + return -1; + + sw_if_index = fib_entry_get_resolving_interface (fei); + + if (sw_if_index == (u32) ~ 0) + return -1; + + if (is_ip4) + { + ip4_address_t *ip4; + ip4 = ip_interface_get_first_ip (sw_if_index, 1); + lcl_addr.ip4.as_u32 = ip4->as_u32; + } + else + { + ip6_address_t *ip6; + ip6 = ip_interface_get_first_ip (sw_if_index, 0); + clib_memcpy (&lcl_addr.ip6, ip6, sizeof (*ip6)); + } + + /* Allocate source port */ + lcl_port = tcp_allocate_local_port (tm, &lcl_addr); + if (lcl_port < 1) + return -1; + + /* + * Create connection and send SYN + */ + + pool_get (tm->half_open_connections, tc); + memset (tc, 0, sizeof (*tc)); + + clib_memcpy (&tc->c_rmt_ip, rmt_addr, sizeof (ip46_address_t)); + clib_memcpy (&tc->c_lcl_ip, &lcl_addr, sizeof (ip46_address_t)); + tc->c_rmt_port = clib_host_to_net_u16 (rmt_port); + tc->c_lcl_port = clib_host_to_net_u16 (lcl_port); + tc->c_c_index = tc - tm->half_open_connections; + tc->c_is_ip4 = is_ip4; + + /* The other connection vars will be initialized after SYN ACK */ + tcp_connection_timers_init (tc); + + tcp_send_syn (tc); + + tc->state = TCP_STATE_SYN_SENT; + + return tc->c_c_index; +} + +int +tcp_session_open_ip4 (ip46_address_t * addr, u16 port) +{ + return tcp_connection_open (addr, port, 1); +} + +int +tcp_session_open_ip6 (ip46_address_t * addr, u16 port) +{ + return tcp_connection_open (addr, port, 0); +} + +u8 * +format_tcp_session_ip4 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + u32 thread_index = va_arg (*args, u32); + tcp_connection_t *tc; + + tc = tcp_connection_get (tci, thread_index); + + s = format (s, "[%s] %U:%d->%U:%d", "tcp", format_ip4_address, + &tc->c_lcl_ip4, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip4_address, &tc->c_rmt_ip4, + clib_net_to_host_u16 (tc->c_rmt_port)); + + return s; +} + +u8 * +format_tcp_session_ip6 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + u32 thread_index = va_arg (*args, u32); + tcp_connection_t *tc = tcp_connection_get (tci, thread_index); + s = format (s, "[%s] %U:%d->%U:%d", "tcp", format_ip6_address, + &tc->c_lcl_ip6, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip6_address, &tc->c_rmt_ip6, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +u8 * +format_tcp_listener_session_ip4 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + tcp_connection_t *tc = tcp_listener_get (tci); + s = format (s, "[%s] %U:%d->%U:%d", "tcp", format_ip4_address, + &tc->c_lcl_ip4, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip4_address, &tc->c_rmt_ip4, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +u8 * +format_tcp_listener_session_ip6 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + tcp_connection_t *tc = tcp_listener_get (tci); + s = format (s, "[%s] %U:%d->%U:%d", "tcp", format_ip6_address, + &tc->c_lcl_ip6, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip6_address, &tc->c_rmt_ip6, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +u8 * +format_tcp_half_open_session_ip4 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + tcp_connection_t *tc = tcp_half_open_connection_get (tci); + s = format (s, "[%s] %U:%d->%U:%d", "tcp", format_ip4_address, + &tc->c_lcl_ip4, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip4_address, &tc->c_rmt_ip4, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +u8 * +format_tcp_half_open_session_ip6 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + tcp_connection_t *tc = tcp_half_open_connection_get (tci); + s = format 
(s, "[%s] %U:%d->%U:%d", "tcp", format_ip6_address, + &tc->c_lcl_ip6, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip6_address, &tc->c_rmt_ip6, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +transport_connection_t * +tcp_session_get_transport (u32 conn_index, u32 thread_index) +{ + tcp_connection_t *tc = tcp_connection_get (conn_index, thread_index); + return &tc->connection; +} + +transport_connection_t * +tcp_half_open_session_get_transport (u32 conn_index) +{ + tcp_connection_t *tc = tcp_half_open_connection_get (conn_index); + return &tc->connection; +} + +u16 +tcp_session_send_mss (transport_connection_t * trans_conn) +{ + tcp_connection_t *tc = (tcp_connection_t *) trans_conn; + return tc->snd_mss; +} + +u32 +tcp_session_send_space (transport_connection_t * trans_conn) +{ + tcp_connection_t *tc = (tcp_connection_t *) trans_conn; + return tcp_available_snd_space (tc); +} + +u32 +tcp_session_rx_fifo_offset (transport_connection_t * trans_conn) +{ + tcp_connection_t *tc = (tcp_connection_t *) trans_conn; + return (tc->snd_una_max - tc->snd_una); +} + +/* *INDENT-OFF* */ +const static transport_proto_vft_t tcp4_proto = { + .bind = tcp_session_bind_ip4, + .unbind = tcp_session_unbind_ip4, + .push_header = tcp_push_header, + .get_connection = tcp_session_get_transport, + .get_listener = tcp_session_get_listener, + .get_half_open = tcp_half_open_session_get_transport, + .open = tcp_session_open_ip4, + .close = tcp_session_close, + .cleanup = tcp_session_cleanup, + .send_mss = tcp_session_send_mss, + .send_space = tcp_session_send_space, + .rx_fifo_offset = tcp_session_rx_fifo_offset, + .format_connection = format_tcp_session_ip4, + .format_listener = format_tcp_listener_session_ip4, + .format_half_open = format_tcp_half_open_session_ip4 +}; + +const static transport_proto_vft_t tcp6_proto = { + .bind = tcp_session_bind_ip6, + .unbind = tcp_session_unbind_ip6, + .push_header = tcp_push_header, + .get_connection = tcp_session_get_transport, + .get_listener = tcp_session_get_listener, + .get_half_open = tcp_half_open_session_get_transport, + .open = tcp_session_open_ip6, + .close = tcp_session_close, + .cleanup = tcp_session_cleanup, + .send_mss = tcp_session_send_mss, + .send_space = tcp_session_send_space, + .rx_fifo_offset = tcp_session_rx_fifo_offset, + .format_connection = format_tcp_session_ip6, + .format_listener = format_tcp_listener_session_ip6, + .format_half_open = format_tcp_half_open_session_ip6 +}; +/* *INDENT-ON* */ + +void +tcp_timer_keep_handler (u32 conn_index) +{ + u32 cpu_index = os_get_cpu_number (); + tcp_connection_t *tc; + + tc = tcp_connection_get (conn_index, cpu_index); + tc->timers[TCP_TIMER_KEEP] = TCP_TIMER_HANDLE_INVALID; + + tcp_connection_close (tc); +} + +void +tcp_timer_establish_handler (u32 conn_index) +{ + tcp_connection_t *tc; + u8 sst; + + tc = tcp_half_open_connection_get (conn_index); + tc->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID; + + ASSERT (tc->state == TCP_STATE_SYN_SENT); + + sst = tc->c_is_ip4 ? 
SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; + stream_session_connect_notify (&tc->connection, sst, 1 /* fail */ ); + + tcp_connection_cleanup (tc); +} + +void +tcp_timer_2msl_handler (u32 conn_index) +{ + u32 cpu_index = os_get_cpu_number (); + tcp_connection_t *tc; + + tc = tcp_connection_get (conn_index, cpu_index); + tc->timers[TCP_TIMER_2MSL] = TCP_TIMER_HANDLE_INVALID; + + tcp_connection_del (tc); +} + +/* *INDENT-OFF* */ +static timer_expiration_handler *timer_expiration_handlers[TCP_N_TIMERS] = +{ + tcp_timer_retransmit_handler, + tcp_timer_delack_handler, + 0, + tcp_timer_keep_handler, + tcp_timer_2msl_handler, + tcp_timer_retransmit_syn_handler, + tcp_timer_establish_handler +}; +/* *INDENT-ON* */ + +static void +tcp_expired_timers_dispatch (u32 * expired_timers) +{ + int i; + u32 connection_index, timer_id; + + for (i = 0; i < vec_len (expired_timers); i++) + { + /* Get session index and timer id */ + connection_index = expired_timers[i] & 0x0FFFFFFF; + timer_id = expired_timers[i] >> 28; + + /* Handle expiration */ + (*timer_expiration_handlers[timer_id]) (connection_index); + } +} + +void +tcp_initialize_timer_wheels (tcp_main_t * tm) +{ + tw_timer_wheel_16t_2w_512sl_t *tw; + vec_foreach (tw, tm->timer_wheels) + { + tw_timer_wheel_init_16t_2w_512sl (tw, tcp_expired_timers_dispatch, + 100e-3 /* timer period 100ms */ , ~0); + tw->last_run_time = vlib_time_now (tm->vlib_main); + } +} + +clib_error_t * +tcp_init (vlib_main_t * vm) +{ + ip_main_t *im = &ip_main; + ip_protocol_info_t *pi; + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_thread_main_t *vtm = vlib_get_thread_main (); + clib_error_t *error = 0; + u32 num_threads; + + tm->vlib_main = vm; + tm->vnet_main = vnet_get_main (); + + if ((error = vlib_call_init_function (vm, ip_main_init))) + return error; + if ((error = vlib_call_init_function (vm, ip4_lookup_init))) + return error; + if ((error = vlib_call_init_function (vm, ip6_lookup_init))) + return error; + + /* + * Registrations + */ + + /* Register with IP */ + pi = ip_get_protocol_info (im, IP_PROTOCOL_TCP); + if (pi == 0) + return clib_error_return (0, "TCP protocol info AWOL"); + pi->format_header = format_tcp_header; + pi->unformat_pg_edit = unformat_pg_tcp_header; + + ip4_register_protocol (IP_PROTOCOL_TCP, tcp4_input_node.index); + + /* Register as transport with URI */ + session_register_transport (SESSION_TYPE_IP4_TCP, &tcp4_proto); + session_register_transport (SESSION_TYPE_IP6_TCP, &tcp6_proto); + + /* + * Initialize data structures + */ + + num_threads = 1 /* main thread */ + vtm->n_threads; + vec_validate (tm->connections, num_threads - 1); + + /* Initialize per worker thread tx buffers (used for control messages) */ + vec_validate (tm->tx_buffers, num_threads - 1); + + /* Initialize timer wheels */ + vec_validate (tm->timer_wheels, num_threads - 1); + tcp_initialize_timer_wheels (tm); + + vec_validate (tm->delack_connections, num_threads - 1); + + /* Initialize clocks per tick for TCP timestamp. Used to compute + * monotonically increasing timestamps. 
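+ * For example (illustrative figures, not taken from the code): on a 2.5 GHz
+ * clock seconds_per_clock is 4e-10, so with TCP_TSTAMP_RESOLUTION of 10 ms
+ * tstamp_ticks_per_clock becomes 4e-8 and tcp_time_now () advances by one
+ * tick every 25 million CPU clocks, i.e. every 10 ms.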
*/ + tm->tstamp_ticks_per_clock = vm->clib_time.seconds_per_clock + / TCP_TSTAMP_RESOLUTION; + + clib_bihash_init_24_8 (&tm->local_endpoints_table, "local endpoint table", + 200000 /* $$$$ config parameter nbuckets */ , + (64 << 20) /*$$$ config parameter table size */ ); + + return error; +} + +VLIB_INIT_FUNCTION (tcp_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h new file mode 100644 index 00000000..22f00a63 --- /dev/null +++ b/src/vnet/tcp/tcp.h @@ -0,0 +1,624 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _vnet_tcp_h_ +#define _vnet_tcp_h_ + +#include +#include +#include +#include +#include +#include + +#define TCP_TICK 10e-3 /**< TCP tick period (s) */ +#define THZ 1/TCP_TICK /**< TCP tick frequency */ +#define TCP_TSTAMP_RESOLUTION TCP_TICK /**< Time stamp resolution */ +#define TCP_PAWS_IDLE 24 * 24 * 60 * 60 * THZ /**< 24 days */ +#define TCP_MAX_OPTION_SPACE 40 + +#define TCP_DUPACK_THRESHOLD 3 +#define TCP_DEFAULT_RX_FIFO_SIZE 64 << 10 + +/** TCP FSM state definitions as per RFC793. */ +#define foreach_tcp_fsm_state \ + _(CLOSED, "CLOSED") \ + _(LISTEN, "LISTEN") \ + _(SYN_SENT, "SYN_SENT") \ + _(SYN_RCVD, "SYN_RCVD") \ + _(ESTABLISHED, "ESTABLISHED") \ + _(CLOSE_WAIT, "CLOSE_WAIT") \ + _(FIN_WAIT_1, "FIN_WAIT_1") \ + _(LAST_ACK, "LAST_ACK") \ + _(CLOSING, "CLOSING") \ + _(FIN_WAIT_2, "FIN_WAIT_2") \ + _(TIME_WAIT, "TIME_WAIT") + +typedef enum _tcp_state +{ +#define _(sym, str) TCP_STATE_##sym, + foreach_tcp_fsm_state +#undef _ + TCP_N_STATES +} tcp_state_t; + +format_function_t format_tcp_state; + +/** TCP timers */ +#define foreach_tcp_timer \ + _(RETRANSMIT, "RETRANSMIT") \ + _(DELACK, "DELAYED ACK") \ + _(PERSIST, "PERSIST") \ + _(KEEP, "KEEP") \ + _(2MSL, "2MSL") \ + _(RETRANSMIT_SYN, "RETRANSMIT_SYN") \ + _(ESTABLISH, "ESTABLISH") + +typedef enum _tcp_timers +{ +#define _(sym, str) TCP_TIMER_##sym, + foreach_tcp_timer +#undef _ + TCP_N_TIMERS +} tcp_timers_e; + +typedef void (timer_expiration_handler) (u32 index); + +extern timer_expiration_handler tcp_timer_delack_handler; +extern timer_expiration_handler tcp_timer_retransmit_handler; +extern timer_expiration_handler tcp_timer_retransmit_syn_handler; + +#define TCP_TIMER_HANDLE_INVALID ((u32) ~0) + +/* Timer delays as multiples of 100ms */ +#define TCP_TO_TIMER_TICK TCP_TICK*10 /* Period for converting from TCP + * ticks to timer units */ +#define TCP_DELACK_TIME 1 /* 0.1s */ +#define TCP_ESTABLISH_TIME 750 /* 75s */ +#define TCP_2MSL_TIME 300 /* 30s */ + +#define TCP_RTO_MAX 60 * THZ /* Min max RTO (60s) as per RFC6298 */ +#define TCP_RTT_MAX 30 * THZ /* 30s (probably too much) */ +#define TCP_RTO_SYN_RETRIES 3 /* SYN retries without doubling RTO */ +#define TCP_RTO_INIT 1 * THZ /* Initial retransmit timer */ + +void tcp_update_time (f64 now, u32 thread_index); + +/** TCP connection flags */ +#define 
foreach_tcp_connection_flag \ + _(DELACK, "Delay ACK") \ + _(SNDACK, "Send ACK") \ + _(BURSTACK, "Burst ACK set") \ + _(SENT_RCV_WND0, "Sent 0 receive window") \ + _(RECOVERY, "Recovery on") \ + _(FAST_RECOVERY, "Fast Recovery on") + +typedef enum _tcp_connection_flag_bits +{ +#define _(sym, str) TCP_CONN_##sym##_BIT, + foreach_tcp_connection_flag +#undef _ + TCP_CONN_N_FLAG_BITS +} tcp_connection_flag_bits_e; + +typedef enum _tcp_connection_flag +{ +#define _(sym, str) TCP_CONN_##sym = 1 << TCP_CONN_##sym##_BIT, + foreach_tcp_connection_flag +#undef _ + TCP_CONN_N_FLAGS +} tcp_connection_flags_e; + +/** TCP buffer flags */ +#define foreach_tcp_buf_flag \ + _ (ACK) /**< Sending ACK. */ \ + _ (DUPACK) /**< Sending DUPACK. */ \ + +enum +{ +#define _(f) TCP_BUF_BIT_##f, + foreach_tcp_buf_flag +#undef _ + TCP_N_BUF_BITS, +}; + +enum +{ +#define _(f) TCP_BUF_FLAG_##f = 1 << TCP_BUF_BIT_##f, + foreach_tcp_buf_flag +#undef _ +}; + +#define TCP_MAX_SACK_BLOCKS 5 /**< Max number of SACK blocks stored */ +#define TCP_INVALID_SACK_HOLE_INDEX ((u32)~0) + +typedef struct _sack_scoreboard_hole +{ + u32 next; /**< Index for next entry in linked list */ + u32 prev; /**< Index for previous entry in linked list */ + u32 start; /**< Start sequence number */ + u32 end; /**< End sequence number */ +} sack_scoreboard_hole_t; + +typedef struct _sack_scoreboard +{ + sack_scoreboard_hole_t *holes; /**< Pool of holes */ + u32 head; /**< Index to first entry */ + u32 sacked_bytes; /**< Number of bytes sacked in sb */ +} sack_scoreboard_t; + +typedef enum _tcp_cc_algorithm_type +{ + TCP_CC_NEWRENO, +} tcp_cc_algorithm_type_e; + +typedef struct _tcp_cc_algorithm tcp_cc_algorithm_t; + +typedef enum _tcp_cc_ack_t +{ + TCP_CC_ACK, + TCP_CC_DUPACK, + TCP_CC_PARTIALACK +} tcp_cc_ack_t; + +typedef struct _tcp_connection +{ + transport_connection_t connection; /**< Common transport data. First! */ + + u8 state; /**< TCP state as per tcp_state_t */ + u16 flags; /**< Connection flags (see tcp_conn_flags_e) */ + u32 timers[TCP_N_TIMERS]; /**< Timer handles into timer wheel */ + + /* TODO RFC4898 */ + + /** Send sequence variables RFC793 */ + u32 snd_una; /**< oldest unacknowledged sequence number */ + u32 snd_una_max; /**< newest unacknowledged sequence number + 1*/ + u32 snd_wnd; /**< send window */ + u32 snd_wl1; /**< seq number used for last snd.wnd update */ + u32 snd_wl2; /**< ack number used for last snd.wnd update */ + u32 snd_nxt; /**< next seq number to be sent */ + + /** Receive sequence variables RFC793 */ + u32 rcv_nxt; /**< next sequence number expected */ + u32 rcv_wnd; /**< receive window we expect */ + + u32 rcv_las; /**< rcv_nxt at last ack sent/rcv_wnd update */ + u32 iss; /**< initial sent sequence */ + u32 irs; /**< initial remote sequence */ + + /* Options */ + tcp_options_t opt; /**< TCP connection options parsed */ + u8 rcv_wscale; /**< Window scale to advertise to peer */ + u8 snd_wscale; /**< Window scale to use when sending */ + u32 tsval_recent; /**< Last timestamp received */ + u32 tsval_recent_age; /**< When last updated tstamp_recent*/ + + sack_block_t *snd_sacks; /**< Vector of SACKs to send. XXX Fixed size? 
*/ + sack_scoreboard_t sack_sb; /**< SACK "scoreboard" that tracks holes */ + + u8 rcv_dupacks; /**< Number of DUPACKs received */ + u8 snt_dupacks; /**< Number of DUPACKs sent in a burst */ + + /* Congestion control */ + u32 cwnd; /**< Congestion window */ + u32 ssthresh; /**< Slow-start threshold */ + u32 prev_ssthresh; /**< ssthresh before congestion */ + u32 bytes_acked; /**< Bytes acknowledged by current segment */ + u32 rtx_bytes; /**< Retransmitted bytes */ + u32 tsecr_last_ack; /**< Timestamp echoed to us in last health ACK */ + tcp_cc_algorithm_t *cc_algo; /**< Congestion control algorithm */ + + /* RTT and RTO */ + u32 rto; /**< Retransmission timeout */ + u32 rto_boff; /**< Index for RTO backoff */ + u32 srtt; /**< Smoothed RTT */ + u32 rttvar; /**< Smoothed mean RTT difference. Approximates variance */ + u32 rtt_ts; /**< Timestamp for tracked ACK */ + u32 rtt_seq; /**< Sequence number for tracked ACK */ + + u16 snd_mss; /**< Send MSS */ +} tcp_connection_t; + +struct _tcp_cc_algorithm +{ + void (*rcv_ack) (tcp_connection_t * tc); + void (*rcv_cong_ack) (tcp_connection_t * tc, tcp_cc_ack_t ack); + void (*congestion) (tcp_connection_t * tc); + void (*recovered) (tcp_connection_t * tc); + void (*init) (tcp_connection_t * tc); +}; + +#define tcp_fastrecovery_on(tc) (tc)->flags |= TCP_CONN_FAST_RECOVERY +#define tcp_fastrecovery_off(tc) (tc)->flags &= ~TCP_CONN_FAST_RECOVERY +#define tcp_in_fastrecovery(tc) ((tc)->flags & TCP_CONN_FAST_RECOVERY) +#define tcp_in_recovery(tc) ((tc)->flags & (TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY)) +#define tcp_recovery_off(tc) ((tc)->flags &= ~(TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY)) +#define tcp_in_slowstart(tc) (tc->cwnd < tc->ssthresh) + +typedef enum +{ + TCP_IP4, + TCP_IP6, + TCP_N_AF, +} tcp_af_t; + +typedef enum _tcp_error +{ +#define tcp_error(n,s) TCP_ERROR_##n, +#include +#undef tcp_error + TCP_N_ERROR, +} tcp_error_t; + +typedef struct _tcp_lookup_dispatch +{ + u8 next, error; +} tcp_lookup_dispatch_t; + +typedef struct _tcp_main +{ + /* Per-worker thread tcp connection pools */ + tcp_connection_t **connections; + + /* Pool of listeners. 
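+ * (a single pool, unlike the per-worker connection pools above; the index
+ * returned by tcp_connection_bind () is the one later passed to
+ * tcp_listener_get ())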
*/ + tcp_connection_t *listener_pool; + + /** Dispatch table by state and flags */ + tcp_lookup_dispatch_t dispatch_table[TCP_N_STATES][64]; + + u8 log2_tstamp_clocks_per_tick; + f64 tstamp_ticks_per_clock; + + /** per-worker tx buffer free lists */ + u32 **tx_buffers; + + /* Per worker-thread timer wheel for connections timers */ + tw_timer_wheel_16t_2w_512sl_t *timer_wheels; + + /* Convenience per worker-thread vector of connections to DELACK */ + u32 **delack_connections; + + /* Pool of half-open connections on which we've sent a SYN */ + tcp_connection_t *half_open_connections; + + /* Pool of local TCP endpoints */ + transport_endpoint_t *local_endpoints; + + /* Local endpoints lookup table */ + transport_endpoint_table_t local_endpoints_table; + + /* Congestion control algorithms registered */ + tcp_cc_algorithm_t *cc_algos; + + /* convenience */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + ip4_main_t *ip4_main; + ip6_main_t *ip6_main; +} tcp_main_t; + +extern tcp_main_t tcp_main; +extern vlib_node_registration_t tcp4_input_node; +extern vlib_node_registration_t tcp6_input_node; +extern vlib_node_registration_t tcp4_output_node; +extern vlib_node_registration_t tcp6_output_node; + +always_inline tcp_main_t * +vnet_get_tcp_main () +{ + return &tcp_main; +} + +always_inline tcp_connection_t * +tcp_connection_get (u32 conn_index, u32 thread_index) +{ + return pool_elt_at_index (tcp_main.connections[thread_index], conn_index); +} + +always_inline tcp_connection_t * +tcp_connection_get_if_valid (u32 conn_index, u32 thread_index) +{ + if (tcp_main.connections[thread_index] == 0) + return 0; + if (pool_is_free_index (tcp_main.connections[thread_index], conn_index)) + return 0; + return pool_elt_at_index (tcp_main.connections[thread_index], conn_index); +} + +void tcp_connection_close (tcp_connection_t * tc); +void tcp_connection_cleanup (tcp_connection_t * tc); +void tcp_connection_del (tcp_connection_t * tc); + +always_inline tcp_connection_t * +tcp_listener_get (u32 tli) +{ + return pool_elt_at_index (tcp_main.listener_pool, tli); +} + +always_inline tcp_connection_t * +tcp_half_open_connection_get (u32 conn_index) +{ + return pool_elt_at_index (tcp_main.half_open_connections, conn_index); +} + +void tcp_make_ack (tcp_connection_t * ts, vlib_buffer_t * b); +void tcp_make_finack (tcp_connection_t * tc, vlib_buffer_t * b); +void tcp_make_synack (tcp_connection_t * ts, vlib_buffer_t * b); +void tcp_send_reset (vlib_buffer_t * pkt, u8 is_ip4); +void tcp_send_syn (tcp_connection_t * tc); +void tcp_send_fin (tcp_connection_t * tc); +void tcp_set_snd_mss (tcp_connection_t * tc); + +always_inline u32 +tcp_end_seq (tcp_header_t * th, u32 len) +{ + return th->seq_number + tcp_is_syn (th) + tcp_is_fin (th) + len; +} + +/* Modulo arithmetic for TCP sequence numbers */ +#define seq_lt(_s1, _s2) ((i32)((_s1)-(_s2)) < 0) +#define seq_leq(_s1, _s2) ((i32)((_s1)-(_s2)) <= 0) +#define seq_gt(_s1, _s2) ((i32)((_s1)-(_s2)) > 0) +#define seq_geq(_s1, _s2) ((i32)((_s1)-(_s2)) >= 0) + +/* Modulo arithmetic for timestamps */ +#define timestamp_lt(_t1, _t2) ((i32)((_t1)-(_t2)) < 0) +#define timestamp_leq(_t1, _t2) ((i32)((_t1)-(_t2)) <= 0) + +always_inline u32 +tcp_flight_size (const tcp_connection_t * tc) +{ + return tc->snd_una_max - tc->snd_una - tc->sack_sb.sacked_bytes + + tc->rtx_bytes; +} + +/** + * Initial cwnd as per RFC5681 + */ +always_inline u32 +tcp_initial_cwnd (const tcp_connection_t * tc) +{ + if (tc->snd_mss > 2190) + return 2 * tc->snd_mss; + else if (tc->snd_mss > 1095) + return 3 * 
tc->snd_mss; + else + return 4 * tc->snd_mss; +} + +always_inline u32 +tcp_loss_wnd (const tcp_connection_t * tc) +{ + return tc->snd_mss; +} + +always_inline u32 +tcp_available_wnd (const tcp_connection_t * tc) +{ + return clib_min (tc->cwnd, tc->snd_wnd); +} + +always_inline u32 +tcp_available_snd_space (const tcp_connection_t * tc) +{ + u32 available_wnd = tcp_available_wnd (tc); + u32 flight_size = tcp_flight_size (tc); + + if (available_wnd <= flight_size) + return 0; + + return available_wnd - flight_size; +} + +void tcp_retransmit_first_unacked (tcp_connection_t * tc); + +void tcp_fast_retransmit (tcp_connection_t * tc); + +always_inline u32 +tcp_time_now (void) +{ + return clib_cpu_time_now () * tcp_main.tstamp_ticks_per_clock; +} + +u32 tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b); + +u32 +tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, + u32 max_bytes); + +void tcp_connection_timers_init (tcp_connection_t * tc); +void tcp_connection_timers_reset (tcp_connection_t * tc); + +void tcp_connection_init_vars (tcp_connection_t * tc); + +always_inline void +tcp_connection_force_ack (tcp_connection_t * tc, vlib_buffer_t * b) +{ + /* Reset flags, make sure ack is sent */ + tc->flags = TCP_CONN_SNDACK; + vnet_buffer (b)->tcp.flags &= ~TCP_BUF_FLAG_DUPACK; +} + +always_inline void +tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval) +{ + tc->timers[timer_id] + = tw_timer_start_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], + tc->c_c_index, timer_id, interval); +} + +always_inline void +tcp_retransmit_timer_set (tcp_main_t * tm, tcp_connection_t * tc) +{ + /* XXX Switch to faster TW */ + tcp_timer_set (tc, TCP_TIMER_RETRANSMIT, + clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); +} + +always_inline void +tcp_timer_reset (tcp_connection_t * tc, u8 timer_id) +{ + if (tc->timers[timer_id] == TCP_TIMER_HANDLE_INVALID) + return; + + tw_timer_stop_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], + tc->timers[timer_id]); + tc->timers[timer_id] = TCP_TIMER_HANDLE_INVALID; +} + +always_inline void +tcp_timer_update (tcp_connection_t * tc, u8 timer_id, u32 interval) +{ + if (tc->timers[timer_id] != TCP_TIMER_HANDLE_INVALID) + tw_timer_stop_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], + tc->timers[timer_id]); + tc->timers[timer_id] = + tw_timer_start_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], + tc->c_c_index, timer_id, interval); +} + +always_inline u8 +tcp_timer_is_active (tcp_connection_t * tc, tcp_timers_e timer) +{ + return tc->timers[timer] != TCP_TIMER_HANDLE_INVALID; +} + +void +scoreboard_remove_hole (sack_scoreboard_t * sb, + sack_scoreboard_hole_t * hole); + +always_inline sack_scoreboard_hole_t * +scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) +{ + if (hole->next != TCP_INVALID_SACK_HOLE_INDEX) + return pool_elt_at_index (sb->holes, hole->next); + return 0; +} + +always_inline sack_scoreboard_hole_t * +scoreboard_first_hole (sack_scoreboard_t * sb) +{ + if (sb->head != TCP_INVALID_SACK_HOLE_INDEX) + return pool_elt_at_index (sb->holes, sb->head); + return 0; +} + +always_inline void +scoreboard_clear (sack_scoreboard_t * sb) +{ + sack_scoreboard_hole_t *hole = scoreboard_first_hole (sb); + while ((hole = scoreboard_first_hole (sb))) + { + scoreboard_remove_hole (sb, hole); + } +} + +always_inline u32 +scoreboard_hole_bytes (sack_scoreboard_hole_t * hole) +{ + return hole->end - hole->start; +} + +always_inline void +tcp_cc_algo_register (tcp_cc_algorithm_type_e 
type, + const tcp_cc_algorithm_t * vft) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vec_validate (tm->cc_algos, type); + + tm->cc_algos[type] = *vft; +} + +always_inline tcp_cc_algorithm_t * +tcp_cc_algo_get (tcp_cc_algorithm_type_e type) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + return &tm->cc_algos[type]; +} + +void tcp_cc_init (tcp_connection_t * tc); + +/** + * Push TCP header to buffer + * + * @param vm - vlib_main + * @param b - buffer to write the header to + * @param sp_net - source port net order + * @param dp_net - destination port net order + * @param seq - sequence number net order + * @param ack - ack number net order + * @param tcp_hdr_opts_len - header and options length in bytes + * @param flags - header flags + * @param wnd - window size + * + * @return - pointer to start of TCP header + */ +always_inline void * +vlib_buffer_push_tcp_net_order (vlib_buffer_t * b, u16 sp, u16 dp, u32 seq, + u32 ack, u8 tcp_hdr_opts_len, u8 flags, + u16 wnd) +{ + tcp_header_t *th; + + th = vlib_buffer_push_uninit (b, tcp_hdr_opts_len); + + th->src_port = sp; + th->dst_port = dp; + th->seq_number = seq; + th->ack_number = ack; + th->data_offset_and_reserved = (tcp_hdr_opts_len >> 2) << 4; + th->flags = flags; + th->window = wnd; + th->checksum = 0; + th->urgent_pointer = 0; + return th; +} + +/** + * Push TCP header to buffer + * + * @param vm - vlib_main + * @param b - buffer to write the header to + * @param sp_net - source port net order + * @param dp_net - destination port net order + * @param seq - sequence number host order + * @param ack - ack number host order + * @param tcp_hdr_opts_len - header and options length in bytes + * @param flags - header flags + * @param wnd - window size + * + * @return - pointer to start of TCP header + */ +always_inline void * +vlib_buffer_push_tcp (vlib_buffer_t * b, u16 sp_net, u16 dp_net, u32 seq, + u32 ack, u8 tcp_hdr_opts_len, u8 flags, u16 wnd) +{ + return vlib_buffer_push_tcp_net_order (b, sp_net, dp_net, + clib_host_to_net_u32 (seq), + clib_host_to_net_u32 (ack), + tcp_hdr_opts_len, flags, + clib_host_to_net_u16 (wnd)); +} + +#endif /* _vnet_tcp_h_ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp_error.def b/src/vnet/tcp/tcp_error.def new file mode 100644 index 00000000..cff5ec13 --- /dev/null +++ b/src/vnet/tcp/tcp_error.def @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +tcp_error (NONE, "no error") +tcp_error (NO_LISTENER, "no listener for dst port") +tcp_error (LOOKUP_DROPS, "lookup drops") +tcp_error (DISPATCH, "Dispatch error") +tcp_error (ENQUEUED, "Packets pushed into rx fifo") +tcp_error (PURE_ACK, "Pure acks") +tcp_error (SYNS_RCVD, "SYNs received") +tcp_error (SYN_ACKS_RCVD, "SYN-ACKs received") +tcp_error (NOT_READY, "Session not ready for packets") +tcp_error (FIFO_FULL, "Packets dropped for lack of rx fifo space") +tcp_error (EVENT_FIFO_FULL, "Events not sent for lack of event fifo space") +tcp_error (API_QUEUE_FULL, "Sessions not created for lack of API queue space") +tcp_error (CREATE_SESSION_FAIL, "Sessions couldn't be allocated") +tcp_error (SEGMENT_INVALID, "Invalid segment") +tcp_error (ACK_INVALID, "Invalid ACK") +tcp_error (ACK_DUP, "Duplicate ACK") +tcp_error (ACK_OLD, "Old ACK") +tcp_error (PKTS_SENT, "Packets sent") +tcp_error (FILTERED_DUPACKS, "Filtered duplicate ACKs") +tcp_error (RST_SENT, "Resets sent") \ No newline at end of file diff --git a/src/vnet/tcp/tcp_format.c b/src/vnet/tcp/tcp_format.c new file mode 100644 index 00000000..7136741d --- /dev/null +++ b/src/vnet/tcp/tcp_format.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * tcp/tcp_format.c: tcp formatting + * + * Copyright (c) 2008 Eliot Dresselhaus + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +static u8 * +format_tcp_flags (u8 * s, va_list * args) +{ + int flags = va_arg (*args, int); + +#define _(f) if (flags & TCP_FLAG_##f) s = format (s, "%s, ", #f); + foreach_tcp_flag +#undef _ + return s; +} + +/* Format TCP header. */ +u8 * +format_tcp_header (u8 * s, va_list * args) +{ + tcp_header_t *tcp = va_arg (*args, tcp_header_t *); + u32 max_header_bytes = va_arg (*args, u32); + u32 header_bytes; + uword indent; + + /* Nothing to do. 
*/ + if (max_header_bytes < sizeof (tcp[0])) + return format (s, "TCP header truncated"); + + indent = format_get_indent (s); + indent += 2; + header_bytes = tcp_header_bytes (tcp); + + s = format (s, "TCP: %d -> %d", clib_net_to_host_u16 (tcp->src), + clib_net_to_host_u16 (tcp->dst)); + + s = format (s, "\n%Useq. 0x%08x ack 0x%08x", format_white_space, indent, + clib_net_to_host_u32 (tcp->seq_number), + clib_net_to_host_u32 (tcp->ack_number)); + + s = format (s, "\n%Uflags %U, tcp header: %d bytes", format_white_space, + indent, format_tcp_flags, tcp->flags, header_bytes); + + s = format (s, "\n%Uwindow %d, checksum 0x%04x", format_white_space, indent, + clib_net_to_host_u16 (tcp->window), + clib_net_to_host_u16 (tcp->checksum)); + + +#if 0 + /* Format TCP options. */ + { + u8 *o; + u8 *option_start = (void *) (tcp + 1); + u8 *option_end = (void *) tcp + header_bytes; + + for (o = option_start; o < option_end;) + { + u32 length = o[1]; + switch (o[0]) + { + case TCP_OPTION_END: + length = 1; + o = option_end; + break; + + case TCP_OPTION_NOOP: + length = 1; + break; + + } + } + } +#endif + + /* Recurse into next protocol layer. */ + if (max_header_bytes != 0 && header_bytes < max_header_bytes) + { + ip_main_t *im = &ip_main; + tcp_udp_port_info_t *pi; + + pi = ip_get_tcp_udp_port_info (im, tcp->dst); + + if (pi && pi->format_header) + s = format (s, "\n%U%U", format_white_space, indent - 2, + pi->format_header, + /* next protocol header */ (void *) tcp + header_bytes, + max_header_bytes - header_bytes); + } + + return s; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c new file mode 100644 index 00000000..daa0683b --- /dev/null +++ b/src/vnet/tcp/tcp_input.c @@ -0,0 +1,2316 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include + +static char *tcp_error_strings[] = { +#define tcp_error(n,s) s, +#include +#undef tcp_error +}; + +/* All TCP nodes have the same outgoing arcs */ +#define foreach_tcp_state_next \ + _ (DROP, "error-drop") \ + _ (TCP4_OUTPUT, "tcp4-output") \ + _ (TCP6_OUTPUT, "tcp6-output") + +typedef enum _tcp_established_next +{ +#define _(s,n) TCP_ESTABLISHED_NEXT_##s, + foreach_tcp_state_next +#undef _ + TCP_ESTABLISHED_N_NEXT, +} tcp_established_next_t; + +typedef enum _tcp_rcv_process_next +{ +#define _(s,n) TCP_RCV_PROCESS_NEXT_##s, + foreach_tcp_state_next +#undef _ + TCP_RCV_PROCESS_N_NEXT, +} tcp_rcv_process_next_t; + +typedef enum _tcp_syn_sent_next +{ +#define _(s,n) TCP_SYN_SENT_NEXT_##s, + foreach_tcp_state_next +#undef _ + TCP_SYN_SENT_N_NEXT, +} tcp_syn_sent_next_t; + +typedef enum _tcp_listen_next +{ +#define _(s,n) TCP_LISTEN_NEXT_##s, + foreach_tcp_state_next +#undef _ + TCP_LISTEN_N_NEXT, +} tcp_listen_next_t; + +/* Generic, state independent indices */ +typedef enum _tcp_state_next +{ +#define _(s,n) TCP_NEXT_##s, + foreach_tcp_state_next +#undef _ + TCP_STATE_N_NEXT, +} tcp_state_next_t; + +#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \ + : TCP_NEXT_TCP6_OUTPUT) + +vlib_node_registration_t tcp4_established_node; +vlib_node_registration_t tcp6_established_node; + +/** + * Validate segment sequence number. As per RFC793: + * + * Segment Receive Test + * Length Window + * ------- ------- ------------------------------------------- + * 0 0 SEG.SEQ = RCV.NXT + * 0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND + * >0 0 not acceptable + * >0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND + * or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND + * + * This ultimately consists in checking if segment falls within the window. + * The one important difference compared to RFC793 is that we use rcv_las, + * or the rcv_nxt at last ack sent instead of rcv_nxt since that's the + * peer's reference when computing our receive window. + * + * This accepts only segments within the window. 
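+ *
+ * Worked example (hypothetical numbers): with rcv_nxt = rcv_las = 1000
+ * and rcv_wnd = 500 the acceptable range is [1000, 1500]; a segment with
+ * seq = 1100 and end_seq = 1400 passes both checks, whereas one with
+ * end_seq = 1600 is rejected.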
+ */ +always_inline u8 +tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq) +{ + return seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) + && seq_geq (seq, tc->rcv_nxt); +} + +void +tcp_options_parse (tcp_header_t * th, tcp_options_t * to) +{ + const u8 *data; + u8 opt_len, opts_len, kind; + int j; + sack_block_t b; + + opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t); + data = (const u8 *) (th + 1); + + /* Zero out all flags but those set in SYN */ + to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE); + + for (; opts_len > 0; opts_len -= opt_len, data += opt_len) + { + kind = data[0]; + + /* Get options length */ + if (kind == TCP_OPTION_EOL) + break; + else if (kind == TCP_OPTION_NOOP) + opt_len = 1; + else + { + /* broken options */ + if (opts_len < 2) + break; + opt_len = data[1]; + + /* weird option length */ + if (opt_len < 2 || opt_len > opts_len) + break; + } + + /* Parse options */ + switch (kind) + { + case TCP_OPTION_MSS: + if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th)) + { + to->flags |= TCP_OPTS_FLAG_MSS; + to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2)); + } + break; + case TCP_OPTION_WINDOW_SCALE: + if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th)) + { + to->flags |= TCP_OPTS_FLAG_WSCALE; + to->wscale = data[2]; + if (to->wscale > TCP_MAX_WND_SCALE) + { + clib_warning ("Illegal window scaling value: %d", + to->wscale); + to->wscale = TCP_MAX_WND_SCALE; + } + } + break; + case TCP_OPTION_TIMESTAMP: + if (opt_len == TCP_OPTION_LEN_TIMESTAMP) + { + to->flags |= TCP_OPTS_FLAG_TSTAMP; + to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2)); + to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6)); + } + break; + case TCP_OPTION_SACK_PERMITTED: + if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th)) + to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED; + break; + case TCP_OPTION_SACK_BLOCK: + /* If SACK permitted was not advertised or a SYN, break */ + if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th)) + break; + + /* If too short or not correctly formatted, break */ + if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK)) + break; + + to->flags |= TCP_OPTS_FLAG_SACK; + to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK; + vec_reset_length (to->sacks); + for (j = 0; j < to->n_sack_blocks; j++) + { + b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 4 * j)); + b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 4 * j)); + vec_add1 (to->sacks, b); + } + break; + default: + /* Nothing to see here */ + continue; + } + } +} + +always_inline int +tcp_segment_check_paws (tcp_connection_t * tc) +{ + /* XXX normally test for timestamp should be lt instead of leq, but for + * local testing this is not enough */ + return tcp_opts_tstamp (&tc->opt) && tc->tsval_recent + && timestamp_lt (tc->opt.tsval, tc->tsval_recent); +} + +/** + * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19 + * + * It first verifies if segment has a wrapped sequence number (PAWS) and then + * does the processing associated to the first four steps (ignoring security + * and precedence): sequence number, rst bit and syn bit checks. + * + * @return 0 if segments passes validation. 
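+ *         A negative value means the segment must be dropped; in the
+ *         PAWS-failure and out-of-window cases *next0 may also be updated
+ *         so that an ACK goes back out on the same buffer.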
+ */ +static int +tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, + vlib_buffer_t * b0, tcp_header_t * th0, u32 * next0) +{ + u8 paws_failed; + + if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0))) + return -1; + + tcp_options_parse (th0, &tc0->opt); + + /* RFC1323: Check against wrapped sequence numbers (PAWS). If we have + * timestamp to echo and it's less than tsval_recent, drop segment + * but still send an ACK in order to retain TCP's mechanism for detecting + * and recovering from half-open connections */ + paws_failed = tcp_segment_check_paws (tc0); + if (paws_failed) + { + clib_warning ("paws failed"); + + /* If it just so happens that a segment updates tsval_recent for a + * segment over 24 days old, invalidate tsval_recent. */ + if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE, + tcp_time_now ())) + { + /* Age isn't reset until we get a valid tsval (bsd inspired) */ + tc0->tsval_recent = 0; + } + else + { + /* Drop after ack if not rst */ + if (!tcp_rst (th0)) + { + tcp_make_ack (tc0, b0); + *next0 = tcp_next_output (tc0->c_is_ip4); + return -1; + } + } + } + + /* 1st: check sequence number */ + if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number, + vnet_buffer (b0)->tcp.seq_end)) + { + if (!tcp_rst (th0)) + { + /* Send dup ack */ + tcp_make_ack (tc0, b0); + *next0 = tcp_next_output (tc0->c_is_ip4); + } + return -1; + } + + /* 2nd: check the RST bit */ + if (tcp_rst (th0)) + { + /* Notify session that connection has been reset. Switch + * state to closed and await for session to do the cleanup. */ + stream_session_reset_notify (&tc0->connection); + tc0->state = TCP_STATE_CLOSED; + return -1; + } + + /* 3rd: check security and precedence (skip) */ + + /* 4th: check the SYN bit */ + if (tcp_syn (th0)) + { + tcp_send_reset (b0, tc0->c_is_ip4); + return -1; + } + + /* If PAWS passed and segment in window, save timestamp */ + if (!paws_failed) + { + tc0->tsval_recent = tc0->opt.tsval; + tc0->tsval_recent_age = tcp_time_now (); + } + + return 0; +} + +always_inline int +tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0) +{ + /* SND.UNA =< SEG.ACK =< SND.NXT */ + return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number) + && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt)); +} + +/** + * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298 + * + * Note that although the original article, srtt and rttvar are scaled + * to minimize round-off errors, here we don't. Instead, we rely on + * better precision time measurements. + * + * TODO support us rtt resolution + */ +static void +tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt) +{ + int err; + + if (tc->srtt != 0) + { + err = mrtt - tc->srtt; + tc->srtt += err >> 3; + + /* XXX Drop in RTT results in RTTVAR increase and bigger RTO. + * The increase should be bound */ + tc->rttvar += (clib_abs (err) - tc->rttvar) >> 2; + } + else + { + /* First measurement. */ + tc->srtt = mrtt; + tc->rttvar = mrtt << 1; + } +} + +/** Update RTT estimate and RTO timer + * + * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK + * timing. Middle boxes are known to fiddle with TCP options so we + * should give higher priority to ACK timing. + * + * return 1 if valid rtt 0 otherwise + */ +static int +tcp_update_rtt (tcp_connection_t * tc, u32 ack) +{ + u32 mrtt = 0; + + /* Karn's rule, part 1. Don't use retransmitted segments to estimate + * RTT because they're ambiguous. 
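+   * An ACK that arrives after a retransmission could acknowledge either
+   * the original transmission or the retransmission, so no sample is
+   * taken while a retransmit backoff is in effect (rto_boff != 0).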
*/ + if (tc->rtt_seq && seq_gt (ack, tc->rtt_seq) && !tc->rto_boff) + { + mrtt = tcp_time_now () - tc->rtt_ts; + tc->rtt_seq = 0; + } + + /* As per RFC7323 TSecr can be used for RTTM only if the segment advances + * snd_una, i.e., the left side of the send window: + * seq_lt (tc->snd_una, ack). Note: last condition could be dropped, we don't + * try to update rtt for dupacks */ + else if (tcp_opts_tstamp (&tc->opt) && tc->opt.tsecr && tc->bytes_acked) + { + mrtt = tcp_time_now () - tc->opt.tsecr; + } + + /* Ignore dubious measurements */ + if (mrtt == 0 || mrtt > TCP_RTT_MAX) + return 0; + + tcp_estimate_rtt (tc, mrtt); + + tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX); + + return 1; +} + +/** + * Dequeue bytes that have been acked and while at it update RTT estimates. + */ +static void +tcp_dequeue_acked (tcp_connection_t * tc, u32 ack) +{ + /* Dequeue the newly ACKed bytes */ + stream_session_dequeue_drop (&tc->connection, tc->bytes_acked); + + /* Update rtt and rto */ + if (tcp_update_rtt (tc, ack)) + { + /* Good ACK received and valid RTT, make sure retransmit backoff is 0 */ + tc->rto_boff = 0; + } +} + +/** Check if dupack as per RFC5681 Sec. 2 */ +always_inline u8 +tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 new_snd_wnd) +{ + return ((vnet_buffer (b)->tcp.ack_number == tc->snd_una) + && seq_gt (tc->snd_una_max, tc->snd_una) + && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number) + && (new_snd_wnd == tc->snd_wnd)); +} + +void +scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) +{ + sack_scoreboard_hole_t *next, *prev; + + if (hole->next != TCP_INVALID_SACK_HOLE_INDEX) + { + next = pool_elt_at_index (sb->holes, hole->next); + next->prev = hole->prev; + } + + if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX) + { + prev = pool_elt_at_index (sb->holes, hole->prev); + prev->next = hole->next; + } + else + { + sb->head = hole->next; + } + + pool_put (sb->holes, hole); +} + +sack_scoreboard_hole_t * +scoreboard_insert_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * prev, + u32 start, u32 end) +{ + sack_scoreboard_hole_t *hole, *next; + u32 hole_index; + + pool_get (sb->holes, hole); + memset (hole, 0, sizeof (*hole)); + + hole->start = start; + hole->end = end; + hole_index = hole - sb->holes; + + if (prev) + { + hole->prev = prev - sb->holes; + hole->next = prev->next; + + if ((next = scoreboard_next_hole (sb, hole))) + next->prev = hole_index; + + prev->next = hole_index; + } + else + { + sb->head = hole_index; + hole->prev = TCP_INVALID_SACK_HOLE_INDEX; + hole->next = TCP_INVALID_SACK_HOLE_INDEX; + } + + return hole; +} + +static void +tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) +{ + sack_scoreboard_t *sb = &tc->sack_sb; + sack_block_t *blk, tmp; + sack_scoreboard_hole_t *hole, *next_hole; + u32 blk_index = 0; + int i, j; + + if (!tcp_opts_sack (tc) && sb->head == TCP_INVALID_SACK_HOLE_INDEX) + return; + + /* Remove invalid blocks */ + vec_foreach (blk, tc->opt.sacks) + { + if (seq_lt (blk->start, blk->end) + && seq_gt (blk->start, tc->snd_una) + && seq_gt (blk->start, ack) && seq_lt (blk->end, tc->snd_nxt)) + continue; + + vec_del1 (tc->opt.sacks, blk - tc->opt.sacks); + } + + /* Add block for cumulative ack */ + if (seq_gt (ack, tc->snd_una)) + { + tmp.start = tc->snd_una; + tmp.end = ack; + vec_add1 (tc->opt.sacks, tmp); + } + + if (vec_len (tc->opt.sacks) == 0) + return; + + /* Make sure blocks are ordered */ + for (i = 0; i < vec_len (tc->opt.sacks); i++) + for (j = i; j < vec_len 
(tc->opt.sacks); j++) + if (seq_lt (tc->opt.sacks[j].start, tc->opt.sacks[i].start)) + { + tmp = tc->opt.sacks[i]; + tc->opt.sacks[i] = tc->opt.sacks[j]; + tc->opt.sacks[j] = tmp; + } + + /* If no holes, insert the first that covers all outstanding bytes */ + if (sb->head == TCP_INVALID_SACK_HOLE_INDEX) + { + scoreboard_insert_hole (sb, 0, tc->snd_una, tc->snd_una_max); + } + + /* Walk the holes with the SACK blocks */ + hole = pool_elt_at_index (sb->holes, sb->head); + while (hole && blk_index < vec_len (tc->opt.sacks)) + { + blk = &tc->opt.sacks[blk_index]; + + if (seq_leq (blk->start, hole->start)) + { + /* Block covers hole. Remove hole */ + if (seq_geq (blk->end, hole->end)) + { + next_hole = scoreboard_next_hole (sb, hole); + + /* Byte accounting */ + if (seq_lt (hole->end, ack)) + { + /* Bytes lost because snd wnd left edge advances */ + if (seq_lt (next_hole->start, ack)) + sb->sacked_bytes -= next_hole->start - hole->end; + else + sb->sacked_bytes -= ack - hole->end; + } + else + { + sb->sacked_bytes += scoreboard_hole_bytes (hole); + } + + scoreboard_remove_hole (sb, hole); + hole = next_hole; + } + /* Partial overlap */ + else + { + sb->sacked_bytes += blk->end - hole->start; + hole->start = blk->end; + blk_index++; + } + } + else + { + /* Hole must be split */ + if (seq_leq (blk->end, hole->end)) + { + sb->sacked_bytes += blk->end - blk->start; + scoreboard_insert_hole (sb, hole, blk->end, hole->end); + hole->end = blk->start - 1; + blk_index++; + } + else + { + sb->sacked_bytes += hole->end - blk->start + 1; + hole->end = blk->start - 1; + hole = scoreboard_next_hole (sb, hole); + } + } + } +} + +/** Update snd_wnd + * + * If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set + * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */ +static void +tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd) +{ + if (tc->snd_wl1 < seq || (tc->snd_wl1 == seq && tc->snd_wl2 <= ack)) + { + tc->snd_wnd = snd_wnd; + tc->snd_wl1 = seq; + tc->snd_wl2 = ack; + } +} + +static void +tcp_cc_congestion (tcp_connection_t * tc) +{ + tc->cc_algo->congestion (tc); +} + +static void +tcp_cc_recover (tcp_connection_t * tc) +{ + if (tcp_in_fastrecovery (tc)) + { + tc->cc_algo->recovered (tc); + tcp_recovery_off (tc); + } + else if (tcp_in_recovery (tc)) + { + tcp_recovery_off (tc); + tc->cwnd = tcp_loss_wnd (tc); + } +} + +static void +tcp_cc_rcv_ack (tcp_connection_t * tc) +{ + u8 partial_ack; + + if (tcp_in_recovery (tc)) + { + partial_ack = seq_lt (tc->snd_una, tc->snd_una_max); + if (!partial_ack) + { + /* Clear retransmitted bytes. */ + tc->rtx_bytes = 0; + tcp_cc_recover (tc); + } + else + { + /* Clear retransmitted bytes. XXX should we clear all? 
*/ + tc->rtx_bytes = 0; + tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK); + + /* Retransmit first unacked segment */ + tcp_retransmit_first_unacked (tc); + } + } + else + { + tc->cc_algo->rcv_ack (tc); + } + + tc->rcv_dupacks = 0; + tc->tsecr_last_ack = tc->opt.tsecr; +} + +static void +tcp_cc_rcv_dupack (tcp_connection_t * tc, u32 ack) +{ + ASSERT (tc->snd_una == ack); + + tc->rcv_dupacks++; + if (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD) + { + /* RFC6582 NewReno heuristic to avoid multiple fast retransmits */ + if (tc->opt.tsecr != tc->tsecr_last_ack) + { + tc->rcv_dupacks = 0; + return; + } + + tcp_fastrecovery_on (tc); + + /* Handle congestion and dupack */ + tcp_cc_congestion (tc); + tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK); + + tcp_fast_retransmit (tc); + + /* Post retransmit update cwnd to ssthresh and account for the + * three segments that have left the network and should've been + * buffered at the receiver */ + tc->cwnd = tc->ssthresh + TCP_DUPACK_THRESHOLD * tc->snd_mss; + } + else if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD) + { + ASSERT (tcp_in_fastrecovery (tc)); + + tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK); + } +} + +void +tcp_cc_init (tcp_connection_t * tc) +{ + tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO); + tc->cc_algo->init (tc); +} + +static int +tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, + tcp_header_t * th, u32 * next, u32 * error) +{ + u32 new_snd_wnd; + + /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) then send an + * ACK, drop the segment, and return */ + if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)) + { + tcp_make_ack (tc, b); + *next = tcp_next_output (tc->c_is_ip4); + *error = TCP_ERROR_ACK_INVALID; + return -1; + } + + /* If old ACK, discard */ + if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)) + { + *error = TCP_ERROR_ACK_OLD; + return -1; + } + + if (tcp_opts_sack_permitted (&tc->opt)) + tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number); + + new_snd_wnd = clib_net_to_host_u32 (th->window) << tc->snd_wscale; + + if (tcp_ack_is_dupack (tc, b, new_snd_wnd)) + { + tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number); + *error = TCP_ERROR_ACK_DUP; + return -1; + } + + /* Valid ACK */ + tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una; + tc->snd_una = vnet_buffer (b)->tcp.ack_number; + + /* Dequeue ACKed packet and update RTT */ + tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number); + + tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number, + vnet_buffer (b)->tcp.ack_number, new_snd_wnd); + + /* Updates congestion control (slow start/congestion avoidance) */ + tcp_cc_rcv_ack (tc); + + /* If everything has been acked, stop retransmit timer + * otherwise update */ + if (tc->snd_una == tc->snd_una_max) + tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT); + else + tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, tc->rto); + + return 0; +} + +/** + * Build SACK list as per RFC2018. + * + * Makes sure the first block contains the segment that generated the current + * ACK and the following ones are the ones most recently reported in SACK + * blocks. + * + * @param tc TCP connection for which the SACK list is updated + * @param start Start sequence number of the newest SACK block + * @param end End sequence of the newest SACK block + */ +static void +tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end) +{ + sack_block_t *new_list = 0, block; + u32 n_elts; + int i; + u8 new_head = 0; + + /* If the first segment is ooo add it to the list. 
Last write might've moved + * rcv_nxt over the first segment. */ + if (seq_lt (tc->rcv_nxt, start)) + { + block.start = start; + block.end = end; + vec_add1 (new_list, block); + new_head = 1; + } + + /* Find the blocks still worth keeping. */ + for (i = 0; i < vec_len (tc->snd_sacks); i++) + { + /* Discard if: + * 1) rcv_nxt advanced beyond current block OR + * 2) Segment overlapped by the first segment, i.e., it has been merged + * into it.*/ + if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt) + || seq_leq (tc->snd_sacks[i].start, end)) + continue; + + /* Save subsequent segments to new SACK list. */ + n_elts = clib_min (vec_len (tc->snd_sacks) - i, + TCP_MAX_SACK_BLOCKS - new_head); + vec_insert_elts (new_list, &tc->snd_sacks[i], n_elts, new_head); + break; + } + + /* Replace old vector with new one */ + vec_free (tc->snd_sacks); + tc->snd_sacks = new_list; +} + +/** Enqueue data for delivery to application */ +always_inline u32 +tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, + u16 data_len) +{ + int written; + + /* Pure ACK. Update rcv_nxt and be done. */ + if (PREDICT_FALSE (data_len == 0)) + { + tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end; + return TCP_ERROR_PURE_ACK; + } + + written = stream_session_enqueue_data (&tc->connection, + vlib_buffer_get_current (b), + data_len, 1 /* queue event */ ); + + /* Update rcv_nxt */ + if (PREDICT_TRUE (written == data_len)) + { + tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end; + } + /* If more data written than expected, account for out-of-order bytes. */ + else if (written > data_len) + { + tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end + written - data_len; + + /* Send ACK confirming the update */ + tc->flags |= TCP_CONN_SNDACK; + + /* Update SACK list if need be */ + if (tcp_opts_sack_permitted (&tc->opt)) + { + /* Remove SACK blocks that have been delivered */ + tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt); + } + } + else + { + ASSERT (0); + return TCP_ERROR_FIFO_FULL; + } + + return TCP_ERROR_ENQUEUED; +} + +/** Enqueue out-of-order data */ +always_inline u32 +tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, + u16 data_len) +{ + stream_session_t *s0; + u32 offset, seq; + + s0 = stream_session_get (tc->c_s_index, tc->c_thread_index); + seq = vnet_buffer (b)->tcp.seq_number; + offset = seq - tc->rcv_nxt; + + if (svm_fifo_enqueue_with_offset (s0->server_rx_fifo, s0->pid, offset, + data_len, vlib_buffer_get_current (b))) + return TCP_ERROR_FIFO_FULL; + + /* Update SACK list if in use */ + if (tcp_opts_sack_permitted (&tc->opt)) + { + ooo_segment_t *newest; + u32 start, end; + + /* Get the newest segment from the fifo */ + newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo); + start = tc->rcv_nxt + ooo_segment_offset (s0->server_rx_fifo, newest); + end = tc->rcv_nxt + ooo_segment_end_offset (s0->server_rx_fifo, newest); + + tcp_update_sack_list (tc, start, end); + } + + return TCP_ERROR_ENQUEUED; +} + +/** + * Check if ACK could be delayed. DELACK timer is set only after frame is + * processed so this can return true for a full bursts of packets. + */ +always_inline int +tcp_can_delack (tcp_connection_t * tc) +{ + /* If there's no DELACK timer set and the last window sent wasn't 0 we + * can safely delay. 
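+   * We also refrain from delaying when an ACK has already been requested
+   * for this connection (TCP_CONN_SNDACK), e.g. to confirm an
+   * out-of-order enqueue.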
*/ + if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK) + && (tc->flags & TCP_CONN_SENT_RCV_WND0) == 0 + && (tc->flags & TCP_CONN_SNDACK) == 0) + return 1; + + return 0; +} + +static int +tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, + u16 n_data_bytes, u32 * next0) +{ + u32 error = 0; + + /* Handle out-of-order data */ + if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt)) + { + error = tcp_session_enqueue_ooo (tc, b, n_data_bytes); + + /* Don't send more than 3 dupacks per burst + * XXX decide if this is good */ + if (tc->snt_dupacks < 3) + { + /* RFC2581: Send DUPACK for fast retransmit */ + tcp_make_ack (tc, b); + *next0 = tcp_next_output (tc->c_is_ip4); + + /* Mark as DUPACK. We may filter these in output if + * the burst fills the holes. */ + vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK; + + tc->snt_dupacks++; + } + + goto done; + } + + /* In order data, enqueue. Fifo figures out by itself if any out-of-order + * segments can be enqueued after fifo tail offset changes. */ + error = tcp_session_enqueue_data (tc, b, n_data_bytes); + + /* Check if ACK can be delayed */ + if (tcp_can_delack (tc)) + { + /* Nothing to do for pure ACKs */ + if (n_data_bytes == 0) + goto done; + + /* If connection has not been previously marked for delay ack + * add it to the list and flag it */ + if (!tc->flags & TCP_CONN_DELACK) + { + vec_add1 (tm->delack_connections[tc->c_thread_index], + tc->c_c_index); + tc->flags |= TCP_CONN_DELACK; + } + } + else + { + /* Check if a packet has already been enqueued to output for burst. + * If yes, then drop this one, otherwise, let it pass through to + * output */ + if ((tc->flags & TCP_CONN_BURSTACK) == 0) + { + *next0 = tcp_next_output (tc->c_is_ip4); + tcp_make_ack (tc, b); + error = TCP_ERROR_ENQUEUED; + + /* TODO: maybe add counter to ensure N acks will be sent/burst */ + tc->flags |= TCP_CONN_BURSTACK; + } + } + +done: + return error; +} + +void +delack_timers_init (tcp_main_t * tm, u32 thread_index) +{ + tcp_connection_t *tc; + u32 i, *conns; + tw_timer_wheel_16t_2w_512sl_t *tw; + + tw = &tm->timer_wheels[thread_index]; + conns = tm->delack_connections[thread_index]; + for (i = 0; i < vec_len (conns); i++) + { + tc = pool_elt_at_index (tm->connections[thread_index], conns[i]); + ASSERT (0 != tc); + + tc->timers[TCP_TIMER_DELACK] + = tw_timer_start_16t_2w_512sl (tw, conns[i], + TCP_TIMER_DELACK, TCP_DELACK_TIME); + } + vec_reset_length (tm->delack_connections[thread_index]); +} + +always_inline uword +tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip4) +{ + u32 n_left_from, next_index, *from, *to_next; + u32 my_thread_index = vm->cpu_index, errors = 0; + tcp_main_t *tm = vnet_get_tcp_main (); + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + tcp_header_t *th0 = 0; + tcp_connection_t *tc0; + ip4_header_t *ip40; + ip6_header_t *ip60; + u32 n_advance_bytes0, n_data_bytes0; + u32 next0 = TCP_ESTABLISHED_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, + my_thread_index); 
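+
+         /* Established-state processing below follows RFC793 p. 69:
+          * compute the segment length, validate SEQ/RST/SYN (steps 1-4),
+          * process the ACK (step 5), enqueue any segment text (step 7)
+          * and finally handle a FIN (step 8). */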
+ + /* Checksum computed by ipx_local no need to compute again */ + + if (is_ip4) + { + ip40 = vlib_buffer_get_current (b0); + th0 = ip4_next_header (ip40); + n_advance_bytes0 = (ip4_header_bytes (ip40) + + tcp_header_bytes (th0)); + n_data_bytes0 = clib_net_to_host_u16 (ip40->length) + - n_advance_bytes0; + } + else + { + ip60 = vlib_buffer_get_current (b0); + th0 = ip6_next_header (ip60); + n_advance_bytes0 = tcp_header_bytes (th0); + n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length) + - n_advance_bytes0; + n_advance_bytes0 += sizeof (ip60[0]); + } + + /* SYNs, FINs and data consume sequence numbers */ + vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number + + tcp_is_syn (th0) + tcp_is_fin (th0) + n_data_bytes0; + + /* TODO header prediction fast path */ + + /* 1-4: check SEQ, RST, SYN */ + if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, th0, &next0))) + { + error0 = TCP_ERROR_SEGMENT_INVALID; + goto drop; + } + + /* 5: check the ACK field */ + if (tcp_rcv_ack (tc0, b0, th0, &next0, &error0)) + { + goto drop; + } + + /* 6: check the URG bit TODO */ + + /* 7: process the segment text */ + vlib_buffer_advance (b0, n_advance_bytes0); + error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0); + + /* 8: check the FIN bit */ + if (tcp_fin (th0)) + { + /* Send ACK and enter CLOSE-WAIT */ + tcp_make_ack (tc0, b0); + tcp_connection_force_ack (tc0, b0); + next0 = tcp_next_output (tc0->c_is_ip4); + tc0->state = TCP_STATE_CLOSE_WAIT; + stream_session_disconnect_notify (&tc0->connection); + } + + drop: + b0->error = node->errors[error0]; + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + errors = session_manager_flush_enqueue_events (my_thread_index); + if (errors) + { + if (is_ip4) + vlib_node_increment_counter (vm, tcp4_established_node.index, + TCP_ERROR_EVENT_FIFO_FULL, errors); + else + vlib_node_increment_counter (vm, tcp6_established_node.index, + TCP_ERROR_EVENT_FIFO_FULL, errors); + } + + delack_timers_init (tm, my_thread_index); + + return from_frame->n_vectors; +} + +static uword +tcp4_established (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ ); +} + +static uword +tcp6_established (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp4_established_node) = +{ + .function = tcp4_established, + .name = "tcp4-established", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR,.error_strings = tcp_error_strings, + .n_next_nodes = TCP_ESTABLISHED_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp4_established_node, tcp4_established); + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp6_established_node) = +{ + .function = tcp6_established, + .name = "tcp6-established", + /* Takes a vector of packets. 
*/ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_ESTABLISHED_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + }, +}; +/* *INDENT-ON* */ + + +VLIB_NODE_FUNCTION_MULTIARCH (tcp6_established_node, tcp6_established); + +vlib_node_registration_t tcp4_syn_sent_node; +vlib_node_registration_t tcp6_syn_sent_node; + +always_inline uword +tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip4) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + u32 n_left_from, next_index, *from, *to_next; + u32 my_thread_index = vm->cpu_index, errors = 0; + u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, ack0, seq0; + vlib_buffer_t *b0; + tcp_header_t *tcp0 = 0; + tcp_connection_t *tc0; + ip4_header_t *ip40; + ip6_header_t *ip60; + u32 n_advance_bytes0, n_data_bytes0; + tcp_connection_t *new_tc0; + u32 next0 = TCP_SYN_SENT_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + tc0 = + tcp_half_open_connection_get (vnet_buffer (b0)-> + tcp.connection_index); + + ack0 = vnet_buffer (b0)->tcp.ack_number; + seq0 = vnet_buffer (b0)->tcp.seq_number; + + /* Checksum computed by ipx_local no need to compute again */ + + if (is_ip4) + { + ip40 = vlib_buffer_get_current (b0); + tcp0 = ip4_next_header (ip40); + n_advance_bytes0 = (ip4_header_bytes (ip40) + + tcp_header_bytes (tcp0)); + n_data_bytes0 = clib_net_to_host_u16 (ip40->length) + - n_advance_bytes0; + } + else + { + ip60 = vlib_buffer_get_current (b0); + tcp0 = ip6_next_header (ip60); + n_advance_bytes0 = tcp_header_bytes (tcp0); + n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length) + - n_advance_bytes0; + n_advance_bytes0 += sizeof (ip60[0]); + } + + if (PREDICT_FALSE + (!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0))) + goto drop; + + /* SYNs, FINs and data consume sequence numbers */ + vnet_buffer (b0)->tcp.seq_end = seq0 + tcp_is_syn (tcp0) + + tcp_is_fin (tcp0) + n_data_bytes0; + + /* + * 1. check the ACK bit + */ + + /* + * If the ACK bit is set + * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless + * the RST bit is set, if so drop the segment and return) + * + * and discard the segment. Return. + * If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable. + */ + if (tcp_ack (tcp0)) + { + if (ack0 <= tc0->iss || ack0 > tc0->snd_nxt) + { + if (!tcp_rst (tcp0)) + tcp_send_reset (b0, is_ip4); + + goto drop; + } + + /* Make sure ACK is valid */ + if (tc0->snd_una > ack0) + goto drop; + } + + /* + * 2. check the RST bit + */ + + if (tcp_rst (tcp0)) + { + /* If ACK is acceptable, signal client that peer is not + * willing to accept connection and drop connection*/ + if (tcp_ack (tcp0)) + { + stream_session_connect_notify (&tc0->connection, sst, + 1 /* fail */ ); + tcp_connection_cleanup (tc0); + } + goto drop; + } + + /* + * 3. check the security and precedence (skipped) + */ + + /* + * 4. check the SYN bit + */ + + /* No SYN flag. Drop. 
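+             In SYN-SENT only a SYN or SYN-ACK can advance the connection,
+             so anything else is silently discarded.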
*/ + if (!tcp_syn (tcp0)) + goto drop; + + /* Stop connection establishment and retransmit timers */ + tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH); + tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT_SYN); + + /* Valid SYN or SYN-ACK. Move connection from half-open pool to + * current thread pool. */ + pool_get (tm->connections[my_thread_index], new_tc0); + clib_memcpy (new_tc0, tc0, sizeof (*new_tc0)); + + new_tc0->c_thread_index = my_thread_index; + + /* Cleanup half-open connection XXX lock */ + pool_put (tm->half_open_connections, tc0); + + new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end; + new_tc0->irs = seq0; + + /* Parse options */ + tcp_options_parse (tcp0, &new_tc0->opt); + tcp_connection_init_vars (new_tc0); + + if (tcp_opts_tstamp (&new_tc0->opt)) + { + new_tc0->tsval_recent = new_tc0->opt.tsval; + new_tc0->tsval_recent_age = tcp_time_now (); + } + + if (tcp_opts_wscale (&new_tc0->opt)) + new_tc0->snd_wscale = new_tc0->opt.wscale; + + new_tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window) + << new_tc0->snd_wscale; + new_tc0->snd_wl1 = seq0; + new_tc0->snd_wl2 = ack0; + + /* SYN-ACK: See if we can switch to ESTABLISHED state */ + if (tcp_ack (tcp0)) + { + /* Our SYN is ACKed: we have iss < ack = snd_una */ + + /* TODO Dequeue acknowledged segments if we support Fast Open */ + new_tc0->snd_una = ack0; + new_tc0->state = TCP_STATE_ESTABLISHED; + + /* Notify app that we have connection */ + stream_session_connect_notify (&new_tc0->connection, sst, 0); + + /* Make sure after data segment processing ACK is sent */ + new_tc0->flags |= TCP_CONN_SNDACK; + } + /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */ + else + { + new_tc0->state = TCP_STATE_SYN_RCVD; + + /* Notify app that we have connection XXX */ + stream_session_connect_notify (&new_tc0->connection, sst, 0); + + tcp_make_synack (new_tc0, b0); + next0 = tcp_next_output (is_ip4); + + goto drop; + } + + /* Read data, if any */ + if (n_data_bytes0) + { + error0 = + tcp_segment_rcv (tm, new_tc0, b0, n_data_bytes0, &next0); + if (error0 == TCP_ERROR_PURE_ACK) + error0 = TCP_ERROR_SYN_ACKS_RCVD; + } + else + { + tcp_make_ack (new_tc0, b0); + next0 = tcp_next_output (new_tc0->c_is_ip4); + } + + drop: + + b0->error = error0 ? node->errors[error0] : 0; + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + errors = session_manager_flush_enqueue_events (my_thread_index); + if (errors) + { + if (is_ip4) + vlib_node_increment_counter (vm, tcp4_established_node.index, + TCP_ERROR_EVENT_FIFO_FULL, errors); + else + vlib_node_increment_counter (vm, tcp6_established_node.index, + TCP_ERROR_EVENT_FIFO_FULL, errors); + } + + return from_frame->n_vectors; +} + +static uword +tcp4_syn_sent (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ ); +} + +static uword +tcp6_syn_sent_rcv (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp4_syn_sent_node) = +{ + .function = tcp4_syn_sent, + .name = "tcp4-syn-sent", + /* Takes a vector of packets. 
*/ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_SYN_SENT_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp4_syn_sent_node, tcp4_syn_sent); + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp6_syn_sent_node) = +{ + .function = tcp6_syn_sent_rcv, + .name = "tcp6-syn-sent", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_SYN_SENT_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + } +,}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv); +/** + * Handles reception for all states except LISTEN, SYN-SEND and ESTABLISHED + * as per RFC793 p. 64 + */ +always_inline uword +tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip4) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + u32 n_left_from, next_index, *from, *to_next; + u32 my_thread_index = vm->cpu_index, errors = 0; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + tcp_header_t *tcp0 = 0; + tcp_connection_t *tc0; + ip4_header_t *ip40; + ip6_header_t *ip60; + u32 n_advance_bytes0, n_data_bytes0; + u32 next0 = TCP_RCV_PROCESS_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, + my_thread_index); + + /* Checksum computed by ipx_local no need to compute again */ + + if (is_ip4) + { + ip40 = vlib_buffer_get_current (b0); + tcp0 = ip4_next_header (ip40); + n_advance_bytes0 = (ip4_header_bytes (ip40) + + tcp_header_bytes (tcp0)); + n_data_bytes0 = clib_net_to_host_u16 (ip40->length) + - n_advance_bytes0; + } + else + { + ip60 = vlib_buffer_get_current (b0); + tcp0 = ip6_next_header (ip60); + n_advance_bytes0 = tcp_header_bytes (tcp0); + n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length) + - n_advance_bytes0; + n_advance_bytes0 += sizeof (ip60[0]); + } + + /* SYNs, FINs and data consume sequence numbers */ + vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number + + tcp_is_syn (tcp0) + tcp_is_fin (tcp0) + n_data_bytes0; + + /* + * Special treatment for CLOSED + */ + switch (tc0->state) + { + case TCP_STATE_CLOSED: + goto drop; + break; + } + + /* + * For all other states (except LISTEN) + */ + + /* 1-4: check SEQ, RST, SYN */ + if (PREDICT_FALSE + (tcp_segment_validate (vm, tc0, b0, tcp0, &next0))) + { + error0 = TCP_ERROR_SEGMENT_INVALID; + goto drop; + } + + /* 5: check the ACK field */ + switch (tc0->state) + { + case TCP_STATE_SYN_RCVD: + /* + * If the segment acknowledgment is not acceptable, form a + * reset segment, + * + * and send it. 
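+ *
+ * "Acceptable" here means SND.UNA =< SEG.ACK =< SND.NXT, as checked by
+ * tcp_rcv_ack_is_acceptable() above.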
+ */ + if (!tcp_rcv_ack_is_acceptable (tc0, b0)) + { + tcp_send_reset (b0, is_ip4); + goto drop; + } + /* Switch state to ESTABLISHED */ + tc0->state = TCP_STATE_ESTABLISHED; + + /* Initialize session variables */ + tc0->snd_una = vnet_buffer (b0)->tcp.ack_number; + tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window) + << tc0->opt.wscale; + tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; + tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; + + /* Shoulder tap the server */ + stream_session_accept_notify (&tc0->connection); + + tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT_SYN); + break; + case TCP_STATE_ESTABLISHED: + /* We can get packets in established state here because they + * were enqueued before state change */ + if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) + goto drop; + + break; + case TCP_STATE_FIN_WAIT_1: + /* In addition to the processing for the ESTABLISHED state, if + * our FIN is now acknowledged then enter FIN-WAIT-2 and + * continue processing in that state. */ + if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) + goto drop; + tc0->state = TCP_STATE_FIN_WAIT_2; + /* Stop all timers, 2MSL will be set lower */ + tcp_connection_timers_reset (tc0); + break; + case TCP_STATE_FIN_WAIT_2: + /* In addition to the processing for the ESTABLISHED state, if + * the retransmission queue is empty, the user's CLOSE can be + * acknowledged ("ok") but do not delete the TCB. */ + if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) + goto drop; + /* check if rtx queue is empty and ack CLOSE TODO */ + break; + case TCP_STATE_CLOSE_WAIT: + /* Do the same processing as for the ESTABLISHED state. */ + if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) + goto drop; + break; + case TCP_STATE_CLOSING: + /* In addition to the processing for the ESTABLISHED state, if + * the ACK acknowledges our FIN then enter the TIME-WAIT state, + * otherwise ignore the segment. */ + if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) + goto drop; + + /* XXX test that send queue empty */ + tc0->state = TCP_STATE_TIME_WAIT; + goto drop; + + break; + case TCP_STATE_LAST_ACK: + /* The only thing that can arrive in this state is an + * acknowledgment of our FIN. If our FIN is now acknowledged, + * delete the TCB, enter the CLOSED state, and return. */ + + if (!tcp_rcv_ack_is_acceptable (tc0, b0)) + goto drop; + + tcp_connection_del (tc0); + goto drop; + + break; + case TCP_STATE_TIME_WAIT: + /* The only thing that can arrive in this state is a + * retransmission of the remote FIN. Acknowledge it, and restart + * the 2 MSL timeout. */ + + /* TODO */ + goto drop; + break; + default: + ASSERT (0); + } + + /* 6: check the URG bit TODO */ + + /* 7: process the segment text */ + switch (tc0->state) + { + case TCP_STATE_ESTABLISHED: + case TCP_STATE_FIN_WAIT_1: + case TCP_STATE_FIN_WAIT_2: + error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0); + break; + case TCP_STATE_CLOSE_WAIT: + case TCP_STATE_CLOSING: + case TCP_STATE_LAST_ACK: + case TCP_STATE_TIME_WAIT: + /* This should not occur, since a FIN has been received from the + * remote side. Ignore the segment text. 
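+ * The peer has indicated it will send no more data, so any segment
+ * text arriving here is either a duplicate or spurious.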
*/ + break; + } + + /* 8: check the FIN bit */ + if (!tcp_fin (tcp0)) + goto drop; + + switch (tc0->state) + { + case TCP_STATE_ESTABLISHED: + case TCP_STATE_SYN_RCVD: + /* Send FIN-ACK notify app and enter CLOSE-WAIT */ + tcp_connection_timers_reset (tc0); + tcp_make_finack (tc0, b0); + next0 = tcp_next_output (tc0->c_is_ip4); + stream_session_disconnect_notify (&tc0->connection); + tc0->state = TCP_STATE_CLOSE_WAIT; + break; + case TCP_STATE_CLOSE_WAIT: + case TCP_STATE_CLOSING: + case TCP_STATE_LAST_ACK: + /* move along .. */ + break; + case TCP_STATE_FIN_WAIT_1: + tc0->state = TCP_STATE_TIME_WAIT; + tcp_connection_timers_reset (tc0); + tcp_timer_set (tc0, TCP_TIMER_2MSL, TCP_2MSL_TIME); + break; + case TCP_STATE_FIN_WAIT_2: + /* Got FIN, send ACK! */ + tc0->state = TCP_STATE_TIME_WAIT; + tcp_timer_set (tc0, TCP_TIMER_2MSL, TCP_2MSL_TIME); + tcp_make_ack (tc0, b0); + next0 = tcp_next_output (is_ip4); + break; + case TCP_STATE_TIME_WAIT: + /* Remain in the TIME-WAIT state. Restart the 2 MSL time-wait + * timeout. + */ + tcp_timer_update (tc0, TCP_TIMER_2MSL, TCP_2MSL_TIME); + break; + } + + b0->error = error0 ? node->errors[error0] : 0; + + drop: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + errors = session_manager_flush_enqueue_events (my_thread_index); + if (errors) + { + if (is_ip4) + vlib_node_increment_counter (vm, tcp4_established_node.index, + TCP_ERROR_EVENT_FIFO_FULL, errors); + else + vlib_node_increment_counter (vm, tcp6_established_node.index, + TCP_ERROR_EVENT_FIFO_FULL, errors); + } + + return from_frame->n_vectors; +} + +static uword +tcp4_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ ); +} + +static uword +tcp6_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp4_rcv_process_node) = +{ + .function = tcp4_rcv_process, + .name = "tcp4-rcv-process", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_RCV_PROCESS_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp4_rcv_process_node, tcp4_rcv_process); + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp6_rcv_process_node) = +{ + .function = tcp6_rcv_process, + .name = "tcp6-rcv-process", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_RCV_PROCESS_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp6_rcv_process_node, tcp6_rcv_process); + +vlib_node_registration_t tcp4_listen_node; +vlib_node_registration_t tcp6_listen_node; + +/** + * LISTEN state processing as per RFC 793 p. 
65 + */ +always_inline uword +tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip4) +{ + u32 n_left_from, next_index, *from, *to_next; + u32 my_thread_index = vm->cpu_index; + tcp_main_t *tm = vnet_get_tcp_main (); + u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + tcp_header_t *th0 = 0; + tcp_connection_t *lc0; + ip4_header_t *ip40; + ip6_header_t *ip60; + tcp_connection_t *child0; + u32 error0 = TCP_ERROR_SYNS_RCVD, next0 = TCP_LISTEN_NEXT_DROP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index); + + if (is_ip4) + { + ip40 = vlib_buffer_get_current (b0); + th0 = ip4_next_header (ip40); + } + else + { + ip60 = vlib_buffer_get_current (b0); + th0 = ip6_next_header (ip60); + } + + /* Create child session. For syn-flood protection use filter */ + + /* 1. first check for an RST */ + if (tcp_rst (th0)) + goto drop; + + /* 2. second check for an ACK */ + if (tcp_ack (th0)) + { + tcp_send_reset (b0, is_ip4); + goto drop; + } + + /* 3. check for a SYN (did that already) */ + + /* Create child session and send SYN-ACK */ + pool_get (tm->connections[my_thread_index], child0); + memset (child0, 0, sizeof (*child0)); + + child0->c_c_index = child0 - tm->connections[my_thread_index]; + child0->c_lcl_port = lc0->c_lcl_port; + child0->c_rmt_port = th0->src_port; + child0->c_is_ip4 = is_ip4; + child0->c_thread_index = my_thread_index; + + if (is_ip4) + { + child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32; + child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32; + } + else + { + clib_memcpy (&child0->c_lcl_ip6, &ip60->dst_address, + sizeof (ip6_address_t)); + clib_memcpy (&child0->c_rmt_ip6, &ip60->src_address, + sizeof (ip6_address_t)); + } + + if (stream_session_accept (&child0->connection, lc0->c_s_index, sst, + 0 /* notify */ )) + { + error0 = TCP_ERROR_CREATE_SESSION_FAIL; + goto drop; + } + + tcp_options_parse (th0, &child0->opt); + tcp_connection_init_vars (child0); + + child0->irs = vnet_buffer (b0)->tcp.seq_number; + child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1; + child0->state = TCP_STATE_SYN_RCVD; + + /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK} + * segments are used to initialize PAWS. */ + if (tcp_opts_tstamp (&child0->opt)) + { + child0->tsval_recent = child0->opt.tsval; + child0->tsval_recent_age = tcp_time_now (); + } + + /* Reuse buffer to make syn-ack and send */ + tcp_make_synack (child0, b0); + next0 = tcp_next_output (is_ip4); + + drop: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + + } + + b0->error = error0 ? 
node->errors[error0] : 0; + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return from_frame->n_vectors; +} + +static uword +tcp4_listen (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ ); +} + +static uword +tcp6_listen (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp4_listen_node) = +{ + .function = tcp4_listen, + .name = "tcp4-listen", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_LISTEN_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_LISTEN_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp4_listen_node, tcp4_listen); + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp6_listen_node) = +{ + .function = tcp6_listen, + .name = "tcp6-listen", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_LISTEN_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_LISTEN_NEXT_##s] = n, + foreach_tcp_state_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp6_listen_node, tcp6_listen); + +vlib_node_registration_t tcp4_input_node; +vlib_node_registration_t tcp6_input_node; + +typedef enum _tcp_input_next +{ + TCP_INPUT_NEXT_DROP, + TCP_INPUT_NEXT_LISTEN, + TCP_INPUT_NEXT_RCV_PROCESS, + TCP_INPUT_NEXT_SYN_SENT, + TCP_INPUT_NEXT_ESTABLISHED, + TCP_INPUT_NEXT_RESET, + TCP_INPUT_N_NEXT +} tcp_input_next_t; + +#define foreach_tcp4_input_next \ + _ (DROP, "error-drop") \ + _ (LISTEN, "tcp4-listen") \ + _ (RCV_PROCESS, "tcp4-rcv-process") \ + _ (SYN_SENT, "tcp4-syn-sent") \ + _ (ESTABLISHED, "tcp4-established") \ + _ (RESET, "tcp4-reset") + +#define foreach_tcp6_input_next \ + _ (DROP, "error-drop") \ + _ (LISTEN, "tcp6-listen") \ + _ (RCV_PROCESS, "tcp6-rcv-process") \ + _ (SYN_SENT, "tcp6-syn-sent") \ + _ (ESTABLISHED, "tcp6-established") \ + _ (RESET, "tcp6-reset") + +typedef struct +{ + u16 src_port; + u16 dst_port; + u8 state; +} tcp_rx_trace_t; + +const char *tcp_fsm_states[] = { +#define _(sym, str) str, + foreach_tcp_fsm_state +#undef _ +}; + +u8 * +format_tcp_state (u8 * s, va_list * args) +{ + tcp_state_t *state = va_arg (*args, tcp_state_t *); + + if (state[0] < TCP_N_STATES) + s = format (s, "%s", tcp_fsm_states[state[0]]); + else + s = format (s, "UNKNOWN"); + + return s; +} + +u8 * +format_tcp_rx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *); + + s = format (s, "TCP: src-port %d dst-port %U%s\n", + clib_net_to_host_u16 (t->src_port), + clib_net_to_host_u16 (t->dst_port), format_tcp_state, t->state); + + return s; +} + +#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN) + +always_inline uword +tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip4) +{ + u32 n_left_from, next_index, *from, *to_next; + u32 my_thread_index = vm->cpu_index; + tcp_main_t *tm = vnet_get_tcp_main (); + session_manager_main_t 
*ssm = vnet_get_session_manager_main (); + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + tcp_header_t *tcp0 = 0; + tcp_connection_t *tc0; + ip4_header_t *ip40; + ip6_header_t *ip60; + u32 error0 = TCP_ERROR_NO_LISTENER, next0 = TCP_INPUT_NEXT_DROP; + u8 flags0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + if (is_ip4) + { + ip40 = vlib_buffer_get_current (b0); + tcp0 = ip4_next_header (ip40); + + /* lookup session */ + tc0 = + (tcp_connection_t *) stream_session_lookup_transport4 (ssm, + &ip40->dst_address, + &ip40->src_address, + tcp0->dst_port, + tcp0->src_port, + SESSION_TYPE_IP4_TCP, + my_thread_index); + } + else + { + ip60 = vlib_buffer_get_current (b0); + tcp0 = ip6_next_header (ip60); + tc0 = + (tcp_connection_t *) stream_session_lookup_transport6 (ssm, + &ip60->src_address, + &ip60->dst_address, + tcp0->src_port, + tcp0->dst_port, + SESSION_TYPE_IP6_TCP, + my_thread_index); + } + + /* Session exists */ + if (PREDICT_TRUE (0 != tc0)) + { + /* Save connection index */ + vnet_buffer (b0)->tcp.connection_index = tc0->c_c_index; + vnet_buffer (b0)->tcp.seq_number = + clib_net_to_host_u32 (tcp0->seq_number); + vnet_buffer (b0)->tcp.ack_number = + clib_net_to_host_u32 (tcp0->ack_number); + + flags0 = tcp0->flags & filter_flags; + next0 = tm->dispatch_table[tc0->state][flags0].next; + error0 = tm->dispatch_table[tc0->state][flags0].error; + + if (PREDICT_FALSE (error0 == TCP_ERROR_DISPATCH)) + { + /* Overload tcp flags to store state */ + vnet_buffer (b0)->tcp.flags = tc0->state; + } + } + else + { + /* Send reset */ + next0 = TCP_INPUT_NEXT_RESET; + error0 = TCP_ERROR_NO_LISTENER; + vnet_buffer (b0)->tcp.flags = 0; + } + + b0->error = error0 ? node->errors[error0] : 0; + + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return from_frame->n_vectors; +} + +static uword +tcp4_input (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ); +} + +static uword +tcp6_input (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp4_input_node) = +{ + .function = tcp4_input, + .name = "tcp4-input", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_INPUT_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_INPUT_NEXT_##s] = n, + foreach_tcp4_input_next +#undef _ + }, + .format_buffer = format_tcp_header, + .format_trace = format_tcp_rx_trace, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp4_input_node, tcp4_input); + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp6_input_node) = +{ + .function = tcp6_input, + .name = "tcp6-input", + /* Takes a vector of packets. 
*/ + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_INPUT_N_NEXT, + .next_nodes = + { +#define _(s,n) [TCP_INPUT_NEXT_##s] = n, + foreach_tcp6_input_next +#undef _ + }, + .format_buffer = format_tcp_header, + .format_trace = format_tcp_rx_trace, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input); +void +tcp_update_time (f64 now, u32 thread_index) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + tw_timer_expire_timers_16t_2w_512sl (&tm->timer_wheels[thread_index], now); +} + +static void +tcp_dispatch_table_init (tcp_main_t * tm) +{ + int i, j; + for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++) + for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++) + { + tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP; + tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH; + } + +#define _(t,f,n,e) \ +do { \ + tm->dispatch_table[TCP_STATE_##t][f].next = (n); \ + tm->dispatch_table[TCP_STATE_##t][f].error = (e); \ +} while (0) + + /* SYNs for new connections -> tcp-listen. */ + _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE); + /* ACK for for a SYN-ACK -> tcp-rcv-process. */ + _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + /* SYN-ACK for a SYN */ + _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, + TCP_ERROR_NONE); + _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE); + _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE); + _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, + TCP_ERROR_NONE); + /* ACK for for established connection -> tcp-established. */ + _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); + /* FIN for for established connection -> tcp-established. */ + _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); + _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, + TCP_ERROR_NONE); + /* ACK or FIN-ACK to our FIN */ + _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, + TCP_ERROR_NONE); + /* FIN in reply to our FIN from the other side */ + _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + /* FIN confirming that the peer (app) has closed */ + _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, + TCP_ERROR_NONE); + _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); +#undef _ +} + +clib_error_t * +tcp_input_init (vlib_main_t * vm) +{ + clib_error_t *error = 0; + tcp_main_t *tm = vnet_get_tcp_main (); + + if ((error = vlib_call_init_function (vm, tcp_init))) + return error; + + /* Initialize dispatch table. */ + tcp_dispatch_table_init (tm); + + return error; +} + +VLIB_INIT_FUNCTION (tcp_input_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c new file mode 100644 index 00000000..856dffe4 --- /dev/null +++ b/src/vnet/tcp/tcp_newreno.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +void +newreno_congestion (tcp_connection_t * tc) +{ + tc->prev_ssthresh = tc->ssthresh; + tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss); +} + +void +newreno_recovered (tcp_connection_t * tc) +{ + tc->cwnd = tc->ssthresh; +} + +void +newreno_rcv_ack (tcp_connection_t * tc) +{ + if (tcp_in_slowstart (tc)) + { + tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked); + } + else + { + /* Round up to 1 if needed */ + tc->cwnd += clib_max (tc->snd_mss * tc->snd_mss / tc->cwnd, 1); + } +} + +void +newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type) +{ + if (ack_type == TCP_CC_DUPACK) + { + tc->cwnd += tc->snd_mss; + } + else if (ack_type == TCP_CC_PARTIALACK) + { + tc->cwnd -= tc->bytes_acked; + if (tc->bytes_acked > tc->snd_mss) + tc->bytes_acked += tc->snd_mss; + } +} + +void +newreno_conn_init (tcp_connection_t * tc) +{ + tc->ssthresh = tc->snd_wnd; + tc->cwnd = tcp_initial_cwnd (tc); +} + +const static tcp_cc_algorithm_t tcp_newreno = { + .congestion = newreno_congestion, + .recovered = newreno_recovered, + .rcv_ack = newreno_rcv_ack, + .rcv_cong_ack = newreno_rcv_cong_ack, + .init = newreno_conn_init +}; + +clib_error_t * +newreno_init (vlib_main_t * vm) +{ + clib_error_t *error = 0; + + tcp_cc_algo_register (TCP_CC_NEWRENO, &tcp_newreno); + + return error; +} + +VLIB_INIT_FUNCTION (newreno_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c new file mode 100644 index 00000000..dbcf1f74 --- /dev/null +++ b/src/vnet/tcp/tcp_output.c @@ -0,0 +1,1412 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +vlib_node_registration_t tcp4_output_node; +vlib_node_registration_t tcp6_output_node; + +typedef enum _tcp_output_nect +{ + TCP_OUTPUT_NEXT_DROP, + TCP_OUTPUT_NEXT_IP_LOOKUP, + TCP_OUTPUT_N_NEXT +} tcp_output_next_t; + +#define foreach_tcp4_output_next \ + _ (DROP, "error-drop") \ + _ (IP_LOOKUP, "ip4-lookup") + +#define foreach_tcp6_output_next \ + _ (DROP, "error-drop") \ + _ (IP_LOOKUP, "ip6-lookup") + +static char *tcp_error_strings[] = { +#define tcp_error(n,s) s, +#include +#undef tcp_error +}; + +typedef struct +{ + u16 src_port; + u16 dst_port; + u8 state; +} tcp_tx_trace_t; + +u16 dummy_mtu = 400; + +u8 * +format_tcp_tx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + + s = format (s, "TBD\n"); + + return s; +} + +void +tcp_set_snd_mss (tcp_connection_t * tc) +{ + u16 snd_mss; + + /* TODO find our iface MTU */ + snd_mss = dummy_mtu; + + /* TODO cache mss and consider PMTU discovery */ + snd_mss = tc->opt.mss < snd_mss ? tc->opt.mss : snd_mss; + + tc->snd_mss = snd_mss; + + if (tc->snd_mss == 0) + { + clib_warning ("snd mss is 0"); + tc->snd_mss = dummy_mtu; + } +} + +static u8 +tcp_window_compute_scale (u32 available_space) +{ + u8 wnd_scale = 0; + while (wnd_scale < TCP_MAX_WND_SCALE + && (available_space >> wnd_scale) > TCP_WND_MAX) + wnd_scale++; + return wnd_scale; +} + +/** + * Compute initial window and scale factor. As per RFC1323, window field in + * SYN and SYN-ACK segments is never scaled. + */ +u32 +tcp_initial_window_to_advertise (tcp_connection_t * tc) +{ + u32 available_space; + + /* Initial wnd for SYN. Fifos are not allocated yet. + * Use some predefined value */ + if (tc->state != TCP_STATE_SYN_RCVD) + { + return TCP_DEFAULT_RX_FIFO_SIZE; + } + + available_space = stream_session_max_enqueue (&tc->connection); + tc->rcv_wscale = tcp_window_compute_scale (available_space); + tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); + + return clib_min (tc->rcv_wnd, TCP_WND_MAX); +} + +/** + * Compute and return window to advertise, scaled as per RFC1323 + */ +u32 +tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state) +{ + u32 available_space, wnd, scaled_space; + + if (state != TCP_STATE_ESTABLISHED) + return tcp_initial_window_to_advertise (tc); + + available_space = stream_session_max_enqueue (&tc->connection); + scaled_space = available_space >> tc->rcv_wscale; + + /* Need to update scale */ + if (PREDICT_FALSE ((scaled_space == 0 && available_space != 0)) + || (scaled_space >= TCP_WND_MAX)) + tc->rcv_wscale = tcp_window_compute_scale (available_space); + + wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); + tc->rcv_wnd = wnd; + + return wnd >> tc->rcv_wscale; +} + +/** + * Write TCP options to segment. 
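+ */
+
+/* Worked example, illustrative and not part of the original patch: assume a
+ * hypothetical 1 MB (1048576 byte) rx fifo in the established state, with
+ * TCP_WND_MAX = 65535 and TCP_MAX_WND_SCALE = 14 from tcp_packet.h.
+ *
+ *   tcp_window_compute_scale (1048576) = 5
+ *       (1048576 >> 4 = 65536 > 65535, 1048576 >> 5 = 32768 <= 65535)
+ *   rcv_wnd = clib_min (1048576, 65535 << 5) = 1048576
+ *   advertised 16-bit window field = rcv_wnd >> 5 = 32768
+ *
+ * The peer shifts the advertised field left by the negotiated scale of 5 to
+ * recover the real window, as per RFC1323. */
+
+/**
+ * Write TCP options to segment.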
+ */ +u32 +tcp_options_write (u8 * data, tcp_options_t * opts) +{ + u32 opts_len = 0; + u32 buf, seq_len = 4; + + if (tcp_opts_mss (opts)) + { + *data++ = TCP_OPTION_MSS; + *data++ = TCP_OPTION_LEN_MSS; + buf = clib_host_to_net_u16 (opts->mss); + clib_memcpy (data, &buf, sizeof (opts->mss)); + data += sizeof (opts->mss); + opts_len += TCP_OPTION_LEN_MSS; + } + + if (tcp_opts_wscale (opts)) + { + *data++ = TCP_OPTION_WINDOW_SCALE; + *data++ = TCP_OPTION_LEN_WINDOW_SCALE; + *data++ = opts->wscale; + opts_len += TCP_OPTION_LEN_WINDOW_SCALE; + } + + if (tcp_opts_sack_permitted (opts)) + { + *data++ = TCP_OPTION_SACK_PERMITTED; + *data++ = TCP_OPTION_LEN_SACK_PERMITTED; + opts_len += TCP_OPTION_LEN_SACK_PERMITTED; + } + + if (tcp_opts_tstamp (opts)) + { + *data++ = TCP_OPTION_TIMESTAMP; + *data++ = TCP_OPTION_LEN_TIMESTAMP; + buf = clib_host_to_net_u32 (opts->tsval); + clib_memcpy (data, &buf, sizeof (opts->tsval)); + data += sizeof (opts->tsval); + buf = clib_host_to_net_u32 (opts->tsecr); + clib_memcpy (data, &buf, sizeof (opts->tsecr)); + data += sizeof (opts->tsecr); + opts_len += TCP_OPTION_LEN_TIMESTAMP; + } + + if (tcp_opts_sack (opts)) + { + int i; + u32 n_sack_blocks = clib_min (vec_len (opts->sacks), + TCP_OPTS_MAX_SACK_BLOCKS); + + if (n_sack_blocks != 0) + { + *data++ = TCP_OPTION_SACK_BLOCK; + *data++ = 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK; + for (i = 0; i < n_sack_blocks; i++) + { + buf = clib_host_to_net_u32 (opts->sacks[i].start); + clib_memcpy (data, &buf, seq_len); + data += seq_len; + buf = clib_host_to_net_u32 (opts->sacks[i].end); + clib_memcpy (data, &buf, seq_len); + data += seq_len; + } + opts_len += 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK; + } + } + + /* Terminate TCP options */ + if (opts_len % 4) + { + *data++ = TCP_OPTION_EOL; + opts_len += TCP_OPTION_LEN_EOL; + } + + /* Pad with zeroes to a u32 boundary */ + while (opts_len % 4) + { + *data++ = TCP_OPTION_NOOP; + opts_len += TCP_OPTION_LEN_NOOP; + } + return opts_len; +} + +always_inline int +tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd) +{ + u8 len = 0; + + opts->flags |= TCP_OPTS_FLAG_MSS; + opts->mss = dummy_mtu; /*XXX discover that */ + len += TCP_OPTION_LEN_MSS; + + opts->flags |= TCP_OPTS_FLAG_WSCALE; + opts->wscale = tcp_window_compute_scale (initial_wnd); + len += TCP_OPTION_LEN_WINDOW_SCALE; + + opts->flags |= TCP_OPTS_FLAG_TSTAMP; + opts->tsval = tcp_time_now (); + opts->tsecr = 0; + len += TCP_OPTION_LEN_TIMESTAMP; + + opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED; + len += TCP_OPTION_LEN_SACK_PERMITTED; + + /* Align to needed boundary */ + len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN; + return len; +} + +always_inline int +tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts) +{ + u8 len = 0; + + opts->flags |= TCP_OPTS_FLAG_MSS; + opts->mss = dummy_mtu; /*XXX discover that */ + len += TCP_OPTION_LEN_MSS; + + if (tcp_opts_wscale (&tc->opt)) + { + opts->flags |= TCP_OPTS_FLAG_WSCALE; + opts->wscale = tc->rcv_wscale; + len += TCP_OPTION_LEN_WINDOW_SCALE; + } + + if (tcp_opts_tstamp (&tc->opt)) + { + opts->flags |= TCP_OPTS_FLAG_TSTAMP; + opts->tsval = tcp_time_now (); + opts->tsecr = tc->tsval_recent; + len += TCP_OPTION_LEN_TIMESTAMP; + } + + if (tcp_opts_sack_permitted (&tc->opt)) + { + opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED; + len += TCP_OPTION_LEN_SACK_PERMITTED; + } + + /* Align to needed boundary */ + len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN; + return len; +} + +always_inline int +tcp_make_established_options 
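+/* Sizing note, illustrative and not part of the original patch: when all
+ * four capabilities are requested, tcp_make_syn_options() above produces
+ * TCP_OPTION_LEN_MSS (4) + TCP_OPTION_LEN_WINDOW_SCALE (3) +
+ * TCP_OPTION_LEN_TIMESTAMP (10) + TCP_OPTION_LEN_SACK_PERMITTED (2) = 19
+ * bytes, padded by the alignment step to 20, i.e. a 20 + 20 = 40 byte
+ * TCP header on the SYN. */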
(tcp_connection_t * tc, tcp_options_t * opts) +{ + u8 len = 0; + + opts->flags = 0; + + if (tcp_opts_tstamp (&tc->opt)) + { + opts->flags |= TCP_OPTS_FLAG_TSTAMP; + opts->tsval = tcp_time_now (); + opts->tsecr = tc->tsval_recent; + len += TCP_OPTION_LEN_TIMESTAMP; + } + if (tcp_opts_sack_permitted (&tc->opt)) + { + if (vec_len (tc->snd_sacks)) + { + opts->flags |= TCP_OPTS_FLAG_SACK; + opts->sacks = tc->snd_sacks; + opts->n_sack_blocks = vec_len (tc->snd_sacks); + len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks; + } + } + + /* Align to needed boundary */ + len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN; + return len; +} + +always_inline int +tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts, + tcp_state_t state) +{ + switch (state) + { + case TCP_STATE_ESTABLISHED: + case TCP_STATE_FIN_WAIT_1: + return tcp_make_established_options (tc, opts); + case TCP_STATE_SYN_RCVD: + return tcp_make_synack_options (tc, opts); + case TCP_STATE_SYN_SENT: + return tcp_make_syn_options (opts, + tcp_initial_window_to_advertise (tc)); + default: + clib_warning ("Not handled!"); + return 0; + } +} + +#define tcp_get_free_buffer_index(tm, bidx) \ +do { \ + u32 *my_tx_buffers, n_free_buffers; \ + u32 cpu_index = tm->vlib_main->cpu_index; \ + my_tx_buffers = tm->tx_buffers[cpu_index]; \ + if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0)) \ + { \ + n_free_buffers = 32; /* TODO config or macro */ \ + vec_validate (my_tx_buffers, n_free_buffers - 1); \ + _vec_len(my_tx_buffers) = vlib_buffer_alloc_from_free_list ( \ + tm->vlib_main, my_tx_buffers, n_free_buffers, \ + VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); \ + tm->tx_buffers[cpu_index] = my_tx_buffers; \ + } \ + /* buffer shortage */ \ + if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0)) \ + return; \ + *bidx = my_tx_buffers[_vec_len (my_tx_buffers)-1]; \ + _vec_len (my_tx_buffers) -= 1; \ +} while (0) + +always_inline void +tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b) +{ + vlib_buffer_t *it = b; + do + { + it->current_data = 0; + it->current_length = 0; + it->total_length_not_including_first_buffer = 0; + } + while ((it->flags & VLIB_BUFFER_NEXT_PRESENT) + && (it = vlib_get_buffer (vm, it->next_buffer))); + + /* Leave enough space for headers */ + vlib_buffer_make_headroom (b, MAX_HDRS_LEN); +} + +/** + * Prepare ACK + */ +void +tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state, + u8 flags) +{ + tcp_options_t _snd_opts, *snd_opts = &_snd_opts; + u8 tcp_opts_len, tcp_hdr_opts_len; + tcp_header_t *th; + u16 wnd; + + wnd = tcp_window_to_advertise (tc, state); + + /* Make and write options */ + tcp_opts_len = tcp_make_established_options (tc, snd_opts); + tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t); + + th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt, + tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd); + + tcp_options_write ((u8 *) (th + 1), snd_opts); + + /* Mark as ACK */ + vnet_buffer (b)->tcp.connection_index = tc->c_c_index; +} + +/** + * Convert buffer to ACK + */ +void +tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + + tcp_reuse_buffer (vm, b); + tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK); + vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK; +} + +/** + * Convert buffer to FIN-ACK + */ +void +tcp_make_finack (tcp_connection_t * tc, vlib_buffer_t * b) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + + tcp_reuse_buffer (vm, b); + 
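+
+  /* Note, not in the original patch: apart from the FIN bit this builds the
+   * same header as a plain ACK; the FIN consumes one sequence number, which
+   * is why snd_nxt is advanced by 1 below. */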
tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK | TCP_FLAG_FIN); + + /* Reset flags, make sure ack is sent */ + tc->flags = TCP_CONN_SNDACK; + vnet_buffer (b)->tcp.flags &= ~TCP_BUF_FLAG_DUPACK; + + tc->snd_nxt += 1; +} + +/** + * Convert buffer to SYN-ACK + */ +void +tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + tcp_options_t _snd_opts, *snd_opts = &_snd_opts; + u8 tcp_opts_len, tcp_hdr_opts_len; + tcp_header_t *th; + u16 initial_wnd; + u32 time_now; + + memset (snd_opts, 0, sizeof (*snd_opts)); + + tcp_reuse_buffer (vm, b); + + /* Set random initial sequence */ + time_now = tcp_time_now (); + + tc->iss = random_u32 (&time_now); + tc->snd_una = tc->iss; + tc->snd_nxt = tc->iss + 1; + tc->snd_una_max = tc->snd_nxt; + + initial_wnd = tcp_initial_window_to_advertise (tc); + + /* Make and write options */ + tcp_opts_len = tcp_make_synack_options (tc, snd_opts); + tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t); + + th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss, + tc->rcv_nxt, tcp_hdr_opts_len, + TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd); + + tcp_options_write ((u8 *) (th + 1), snd_opts); + + vnet_buffer (b)->tcp.connection_index = tc->c_c_index; + vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK; + + /* Init retransmit timer */ + tcp_retransmit_timer_set (tm, tc); +} + +always_inline void +tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, + u8 is_ip4) +{ + u32 *to_next, next_index; + vlib_frame_t *f; + + b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; + b->error = 0; + + /* Default FIB for now */ + vnet_buffer (b)->sw_if_index[VLIB_TX] = 0; + + /* Send to IP lookup */ + next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index; + f = vlib_get_frame_to_node (vm, next_index); + + /* Enqueue the packet */ + to_next = vlib_frame_vector_args (f); + to_next[0] = bi; + f->n_vectors = 1; + vlib_put_frame_to_node (vm, next_index, f); +} + +int +tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b0, + tcp_state_t state, u32 my_thread_index, u8 is_ip4) +{ + u8 tcp_hdr_len = sizeof (tcp_header_t); + ip4_header_t *ih4; + ip6_header_t *ih6; + tcp_header_t *th0; + ip4_address_t src_ip40; + ip6_address_t src_ip60; + u16 src_port0; + u32 tmp; + + /* Find IP and TCP headers */ + if (is_ip4) + { + ih4 = vlib_buffer_get_current (b0); + th0 = ip4_next_header (ih4); + } + else + { + ih6 = vlib_buffer_get_current (b0); + th0 = ip6_next_header (ih6); + } + + /* Swap src and dst ip */ + if (is_ip4) + { + ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40); + src_ip40.as_u32 = ih4->src_address.as_u32; + ih4->src_address.as_u32 = ih4->dst_address.as_u32; + ih4->dst_address.as_u32 = src_ip40.as_u32; + + /* Chop the end of the pkt */ + b0->current_length += ip4_header_bytes (ih4) + tcp_hdr_len; + } + else + { + ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60); + clib_memcpy (&src_ip60, &ih6->src_address, sizeof (ip6_address_t)); + clib_memcpy (&ih6->src_address, &ih6->dst_address, + sizeof (ip6_address_t)); + clib_memcpy (&ih6->dst_address, &src_ip60, sizeof (ip6_address_t)); + + /* Chop the end of the pkt */ + b0->current_length += sizeof (ip6_header_t) + tcp_hdr_len; + } + + /* Try to determine what/why we're actually resetting and swap + * src and dst ports */ + if (state == TCP_STATE_CLOSED) + { + if (!tcp_syn (th0)) + return -1; + + tmp = clib_net_to_host_u32 (th0->seq_number); + + /* Got a SYN for no listener. 
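+       * Per RFC793 the reset must acknowledge the SYN: ack_number is set to
+       * the incoming seq + 1 (the SYN occupies one sequence number) and our
+       * own seq_number is 0, with RST and ACK set.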
*/ + th0->flags = TCP_FLAG_RST | TCP_FLAG_ACK; + th0->ack_number = clib_host_to_net_u32 (tmp + 1); + th0->seq_number = 0; + + } + else if (state >= TCP_STATE_SYN_SENT) + { + th0->flags = TCP_FLAG_RST | TCP_FLAG_ACK; + th0->seq_number = th0->ack_number; + th0->ack_number = 0; + } + + src_port0 = th0->src_port; + th0->src_port = th0->dst_port; + th0->dst_port = src_port0; + th0->window = 0; + th0->data_offset_and_reserved = (tcp_hdr_len >> 2) << 4; + th0->urgent_pointer = 0; + + /* Compute checksum */ + if (is_ip4) + { + th0->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ih4); + } + else + { + int bogus = ~0; + th0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ih6, &bogus); + ASSERT (!bogus); + } + + return 0; +} + +/** + * Send reset without reusing existing buffer + */ +void +tcp_send_reset (vlib_buffer_t * pkt, u8 is_ip4) +{ + vlib_buffer_t *b; + u32 bi; + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + u8 tcp_hdr_len, flags = 0; + tcp_header_t *th, *pkt_th; + u32 seq, ack; + ip4_header_t *ih4, *pkt_ih4; + ip6_header_t *ih6, *pkt_ih6; + + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (vm, bi); + + /* Leave enough space for headers */ + vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + + /* Make and write options */ + tcp_hdr_len = sizeof (tcp_header_t); + + if (is_ip4) + { + pkt_ih4 = vlib_buffer_get_current (pkt); + pkt_th = ip4_next_header (pkt_ih4); + } + else + { + pkt_ih6 = vlib_buffer_get_current (pkt); + pkt_th = ip6_next_header (pkt_ih6); + } + + if (tcp_ack (pkt_th)) + { + flags = TCP_FLAG_RST; + seq = pkt_th->ack_number; + ack = 0; + } + else + { + flags = TCP_FLAG_RST | TCP_FLAG_ACK; + seq = 0; + ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end); + } + + th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port, + seq, ack, tcp_hdr_len, flags, 0); + + /* Swap src and dst ip */ + if (is_ip4) + { + ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40); + ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address, + &pkt_ih4->src_address, IP_PROTOCOL_TCP); + th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4); + } + else + { + int bogus = ~0; + pkt_ih6 = (ip6_header_t *) (pkt_th - 1); + ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) == + 0x60); + ih6 = + vlib_buffer_push_ip6 (vm, b, &pkt_ih6->dst_address, + &pkt_ih6->src_address, IP_PROTOCOL_TCP); + th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus); + ASSERT (!bogus); + } + + tcp_enqueue_to_ip_lookup (vm, b, bi, is_ip4); +} + +void +tcp_push_ip_hdr (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b) +{ + tcp_header_t *th = vlib_buffer_get_current (b); + + if (tc->c_is_ip4) + { + ip4_header_t *ih; + ih = vlib_buffer_push_ip4 (tm->vlib_main, b, &tc->c_lcl_ip4, + &tc->c_rmt_ip4, IP_PROTOCOL_TCP); + th->checksum = ip4_tcp_udp_compute_checksum (tm->vlib_main, b, ih); + } + else + { + ip6_header_t *ih; + int bogus = ~0; + + ih = vlib_buffer_push_ip6 (tm->vlib_main, b, &tc->c_lcl_ip6, + &tc->c_rmt_ip6, IP_PROTOCOL_TCP); + th->checksum = ip6_tcp_udp_icmp_compute_checksum (tm->vlib_main, b, ih, + &bogus); + ASSERT (!bogus); + } +} + +/** + * Send SYN + * + * Builds a SYN packet for a half-open connection and sends it to ipx_lookup. + * The packet is not forwarded through tcpx_output to avoid doing lookups + * in the half_open pool. 
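+ *
+ * (Illustrative aside, not part of the original patch: like the other send
+ * routines here the pattern is tcp_get_free_buffer_index() to take a buffer
+ * from the per-thread cache -- returning early on shortage --
+ * vlib_buffer_make_headroom (b, MAX_HDRS_LEN), push the TCP and then the
+ * IP header, and hand the buffer to a one-packet frame for the next node.)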
+ */ +void +tcp_send_syn (tcp_connection_t * tc) +{ + vlib_buffer_t *b; + u32 bi; + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + u8 tcp_hdr_opts_len, tcp_opts_len; + tcp_header_t *th; + u32 time_now; + u16 initial_wnd; + tcp_options_t snd_opts; + + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (vm, bi); + + /* Leave enough space for headers */ + vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + + /* Set random initial sequence */ + time_now = tcp_time_now (); + + tc->iss = random_u32 (&time_now); + tc->snd_una = tc->iss; + tc->snd_una_max = tc->snd_nxt = tc->iss + 1; + + initial_wnd = tcp_initial_window_to_advertise (tc); + + /* Make and write options */ + memset (&snd_opts, 0, sizeof (snd_opts)); + tcp_opts_len = tcp_make_syn_options (&snd_opts, initial_wnd); + tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t); + + th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss, + tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN, + initial_wnd); + + tcp_options_write ((u8 *) (th + 1), &snd_opts); + + /* Measure RTT with this */ + tc->rtt_ts = tcp_time_now (); + tc->rtt_seq = tc->snd_nxt; + + /* Start retransmit trimer */ + tcp_timer_set (tc, TCP_TIMER_RETRANSMIT_SYN, tc->rto * TCP_TO_TIMER_TICK); + tc->rto_boff = 0; + + /* Set the connection establishment timer */ + tcp_timer_set (tc, TCP_TIMER_ESTABLISH, TCP_ESTABLISH_TIME); + + tcp_push_ip_hdr (tm, tc, b); + tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4); +} + +always_inline void +tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4) +{ + u32 *to_next, next_index; + vlib_frame_t *f; + + b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; + b->error = 0; + + /* Decide where to send the packet */ + next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index; + f = vlib_get_frame_to_node (vm, next_index); + + /* Enqueue the packet */ + to_next = vlib_frame_vector_args (f); + to_next[0] = bi; + f->n_vectors = 1; + vlib_put_frame_to_node (vm, next_index, f); +} + +/** + * Send FIN + */ +void +tcp_send_fin (tcp_connection_t * tc) +{ + vlib_buffer_t *b; + u32 bi; + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (vm, bi); + + /* Leave enough space for headers */ + vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + + tcp_make_finack (tc, b); + + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); +} + +always_inline u8 +tcp_make_state_flags (tcp_state_t next_state) +{ + switch (next_state) + { + case TCP_STATE_ESTABLISHED: + return TCP_FLAG_ACK; + case TCP_STATE_SYN_RCVD: + return TCP_FLAG_SYN | TCP_FLAG_ACK; + case TCP_STATE_SYN_SENT: + return TCP_FLAG_SYN; + case TCP_STATE_LAST_ACK: + case TCP_STATE_FIN_WAIT_1: + return TCP_FLAG_FIN; + default: + clib_warning ("Shouldn't be here!"); + } + return 0; +} + +/** + * Push TCP header and update connection variables + */ +static void +tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, + tcp_state_t next_state) +{ + u32 advertise_wnd, data_len; + u8 tcp_opts_len, tcp_hdr_opts_len, opts_write_len, flags; + tcp_options_t _snd_opts, *snd_opts = &_snd_opts; + tcp_header_t *th; + + data_len = b->current_length; + vnet_buffer (b)->tcp.flags = 0; + + /* Make and write options */ + memset (snd_opts, 0, sizeof (*snd_opts)); + tcp_opts_len = tcp_make_options (tc, snd_opts, next_state); + tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t); + + /* Get rcv window to advertise */ + advertise_wnd = tcp_window_to_advertise (tc, next_state); + 
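+
+  /* Illustrative example, not in the original patch: for a hypothetical
+   * 1448 byte payload in ESTABLISHED state with timestamps negotiated and
+   * no SACK blocks pending, tcp_make_options() returns 12 (10 byte
+   * timestamp option padded to a u32 boundary), i.e. a 20 + 12 = 32 byte
+   * TCP header; the state flags resolve to TCP_FLAG_ACK and snd_nxt is
+   * advanced by the 1448 bytes of data below. */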
flags = tcp_make_state_flags (next_state); + + /* Push header and options */ + th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt, + tc->rcv_nxt, tcp_hdr_opts_len, flags, + advertise_wnd); + + opts_write_len = tcp_options_write ((u8 *) (th + 1), snd_opts); + + ASSERT (opts_write_len == tcp_opts_len); + + /* Tag the buffer with the connection index */ + vnet_buffer (b)->tcp.connection_index = tc->c_c_index; + + tc->snd_nxt += data_len; +} + +/* Send delayed ACK when timer expires */ +void +tcp_timer_delack_handler (u32 index) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + u32 thread_index = os_get_cpu_number (); + tcp_connection_t *tc; + vlib_buffer_t *b; + u32 bi; + + tc = tcp_connection_get (index, thread_index); + + /* Get buffer */ + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (vm, bi); + + /* Fill in the ACK */ + tcp_make_ack (tc, b); + + tc->timers[TCP_TIMER_DELACK] = TCP_TIMER_HANDLE_INVALID; + tc->flags &= ~TCP_CONN_DELACK; + + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); +} + +/** Build a retransmit segment + * + * @return the number of bytes in the segment or 0 if there's nothing to + * retransmit + * */ +u32 +tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, + u32 max_bytes) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + u32 n_bytes, offset = 0; + sack_scoreboard_hole_t *hole; + u32 hole_size; + + tcp_reuse_buffer (vm, b); + + ASSERT (tc->state == TCP_STATE_ESTABLISHED); + ASSERT (max_bytes != 0); + + if (tcp_opts_sack_permitted (&tc->opt)) + { + /* XXX get first hole not retransmitted yet */ + hole = scoreboard_first_hole (&tc->sack_sb); + if (!hole) + return 0; + + offset = hole->start - tc->snd_una; + hole_size = hole->end - hole->start; + + ASSERT (hole_size); + + if (hole_size < max_bytes) + max_bytes = hole_size; + } + else + { + if (seq_geq (tc->snd_nxt, tc->snd_una_max)) + return 0; + } + + n_bytes = stream_session_peek_bytes (&tc->connection, + vlib_buffer_get_current (b), offset, + max_bytes); + ASSERT (n_bytes != 0); + + tc->snd_nxt += n_bytes; + tcp_push_hdr_i (tc, b, tc->state); + + return n_bytes; +} + +static void +tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = tm->vlib_main; + u32 thread_index = os_get_cpu_number (); + tcp_connection_t *tc; + vlib_buffer_t *b; + u32 bi, max_bytes, snd_space; + + if (is_syn) + { + tc = tcp_half_open_connection_get (index); + } + else + { + tc = tcp_connection_get (index, thread_index); + } + + /* Make sure timer handle is set to invalid */ + tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID; + + /* Increment RTO backoff (also equal to number of retries) */ + tc->rto_boff += 1; + + /* Go back to first un-acked byte */ + tc->snd_nxt = tc->snd_una; + + /* Get buffer */ + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (vm, bi); + + if (tc->state == TCP_STATE_ESTABLISHED) + { + tcp_fastrecovery_off (tc); + + /* Exponential backoff */ + tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); + + /* Figure out what and how many bytes we can send */ + snd_space = tcp_available_snd_space (tc); + max_bytes = clib_min (tc->snd_mss, snd_space); + tcp_prepare_retransmit_segment (tc, b, max_bytes); + + tc->rtx_bytes += max_bytes; + + /* No fancy recovery for now! 
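+       * -- on a retransmit timeout any SACK hole state is simply discarded
+       * and we resend from snd_una, doubling the RTO on every further
+       * timeout (capped at TCP_RTO_MAX).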
*/ + scoreboard_clear (&tc->sack_sb); + } + else + { + /* Retransmit for SYN/SYNACK */ + ASSERT (tc->state == TCP_STATE_SYN_RCVD + || tc->state == TCP_STATE_SYN_SENT); + + /* Try without increasing RTO a number of times. If this fails, + * start growing RTO exponentially */ + if (tc->rto_boff > TCP_RTO_SYN_RETRIES) + tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); + + vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + tcp_push_hdr_i (tc, b, tc->state); + } + + if (!is_syn) + { + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + + /* Re-enable retransmit timer */ + tcp_retransmit_timer_set (tm, tc); + } + else + { + ASSERT (tc->state == TCP_STATE_SYN_SENT); + + /* This goes straight to ipx_lookup */ + tcp_push_ip_hdr (tm, tc, b); + tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4); + + /* Re-enable retransmit timer */ + tcp_timer_set (tc, TCP_TIMER_RETRANSMIT_SYN, + tc->rto * TCP_TO_TIMER_TICK); + } +} + +void +tcp_timer_retransmit_handler (u32 index) +{ + tcp_timer_retransmit_handler_i (index, 0); +} + +void +tcp_timer_retransmit_syn_handler (u32 index) +{ + tcp_timer_retransmit_handler_i (index, 1); +} + +/** + * Retansmit first unacked segment */ +void +tcp_retransmit_first_unacked (tcp_connection_t * tc) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + u32 snd_nxt = tc->snd_nxt; + vlib_buffer_t *b; + u32 bi; + + tc->snd_nxt = tc->snd_una; + + /* Get buffer */ + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (tm->vlib_main, bi); + + tcp_prepare_retransmit_segment (tc, b, tc->snd_mss); + tcp_enqueue_to_output (tm->vlib_main, b, bi, tc->c_is_ip4); + + tc->snd_nxt = snd_nxt; + tc->rtx_bytes += tc->snd_mss; +} + +void +tcp_fast_retransmit (tcp_connection_t * tc) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + u32 snd_space, max_bytes, n_bytes, bi; + vlib_buffer_t *b; + + ASSERT (tcp_in_fastrecovery (tc)); + + clib_warning ("fast retransmit!"); + + /* Start resending from first un-acked segment */ + tc->snd_nxt = tc->snd_una; + + snd_space = tcp_available_snd_space (tc); + + while (snd_space) + { + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (tm->vlib_main, bi); + + max_bytes = clib_min (tc->snd_mss, snd_space); + n_bytes = tcp_prepare_retransmit_segment (tc, b, max_bytes); + + /* Nothing left to retransmit */ + if (n_bytes == 0) + return; + + tcp_enqueue_to_output (tm->vlib_main, b, bi, tc->c_is_ip4); + + snd_space -= n_bytes; + } + + /* If window allows, send new data */ + tc->snd_nxt = tc->snd_una_max; +} + +always_inline u32 +tcp_session_has_ooo_data (tcp_connection_t * tc) +{ + stream_session_t *s = + stream_session_get (tc->c_s_index, tc->c_thread_index); + return svm_fifo_has_ooo_data (s->server_rx_fifo); +} + +always_inline uword +tcp46_output_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip4) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + u32 n_left_from, next_index, *from, *to_next; + u32 my_thread_index = vm->cpu_index; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + tcp_connection_t *tc0; + tcp_header_t *th0; + u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_IP_LOOKUP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + 
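+	  /* Descriptive note, not part of the original patch: per packet this
+	   * node looks up the connection tagged in the buffer metadata, pushes
+	   * the IP header and checksum, filters DUPACKs once no OOO data is
+	   * left, updates snd_una_max / RTT tracking, arms the retransmit
+	   * timer when data is in flight, then forwards to ip4/ip6-lookup. */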
tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, + my_thread_index); + th0 = vlib_buffer_get_current (b0); + + if (is_ip4) + { + ip4_header_t *ih0; + ih0 = vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, + &tc0->c_rmt_ip4, IP_PROTOCOL_TCP); + th0->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ih0); + } + else + { + ip6_header_t *ih0; + int bogus = ~0; + + ih0 = vlib_buffer_push_ip6 (vm, b0, &tc0->c_lcl_ip6, + &tc0->c_rmt_ip6, IP_PROTOCOL_TCP); + th0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ih0, + &bogus); + ASSERT (!bogus); + } + + /* Filter out DUPACKs if there are no OOO segments left */ + if (PREDICT_FALSE + (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK)) + { + tc0->snt_dupacks--; + ASSERT (tc0->snt_dupacks >= 0); + if (!tcp_session_has_ooo_data (tc0)) + { + error0 = TCP_ERROR_FILTERED_DUPACKS; + next0 = TCP_OUTPUT_NEXT_DROP; + goto done; + } + } + + /* Retransmitted SYNs do reach this but it should be harmless */ + tc0->rcv_las = tc0->rcv_nxt; + + /* Stop DELACK timer and fix flags */ + tc0->flags &= + ~(TCP_CONN_SNDACK | TCP_CONN_DELACK | TCP_CONN_BURSTACK); + if (tcp_timer_is_active (tc0, TCP_TIMER_DELACK)) + { + tcp_timer_reset (tc0, TCP_TIMER_DELACK); + } + + /* If not retransmitting + * 1) update snd_una_max (SYN, SYNACK, new data, FIN) + * 2) If we're not tracking an ACK, start tracking */ + if (seq_lt (tc0->snd_una_max, tc0->snd_nxt)) + { + tc0->snd_una_max = tc0->snd_nxt; + if (tc0->rtt_ts == 0) + { + tc0->rtt_ts = tcp_time_now (); + tc0->rtt_seq = tc0->snd_nxt; + } + } + + /* Set the retransmit timer if not set already and not + * doing a pure ACK */ + if (!tcp_timer_is_active (tc0, TCP_TIMER_RETRANSMIT) + && tc0->snd_nxt != tc0->snd_una) + { + tcp_retransmit_timer_set (tm, tc0); + tc0->rto_boff = 0; + } + + /* set fib index to default and lookup node */ + /* XXX network virtualization (vrf/vni) */ + vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + + b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; + + done: + b0->error = error0 != 0 ? node->errors[error0] : 0; + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return from_frame->n_vectors; +} + +static uword +tcp4_output (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ ); +} + +static uword +tcp6_output (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ ); +} + +VLIB_REGISTER_NODE (tcp4_output_node) = +{ + .function = tcp4_output,.name = "tcp4-output", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32),.n_errors = TCP_N_ERROR,.error_strings = + tcp_error_strings,.n_next_nodes = TCP_OUTPUT_N_NEXT,.next_nodes = + { +#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n, + foreach_tcp4_output_next +#undef _ + } +,.format_buffer = format_tcp_header,.format_trace = format_tcp_tx_trace,}; + +VLIB_NODE_FUNCTION_MULTIARCH (tcp4_output_node, tcp4_output) +VLIB_REGISTER_NODE (tcp6_output_node) = +{ + .function = tcp6_output,.name = "tcp6-output", + /* Takes a vector of packets. 
*/ + .vector_size = sizeof (u32),.n_errors = TCP_N_ERROR,.error_strings = + tcp_error_strings,.n_next_nodes = TCP_OUTPUT_N_NEXT,.next_nodes = + { +#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n, + foreach_tcp6_output_next +#undef _ + } +,.format_buffer = format_tcp_header,.format_trace = format_tcp_tx_trace,}; + +VLIB_NODE_FUNCTION_MULTIARCH (tcp6_output_node, tcp6_output) u32 +tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b) +{ + tcp_connection_t *tc; + + tc = (tcp_connection_t *) tconn; + tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED); + return 0; +} + +typedef enum _tcp_reset_next +{ + TCP_RESET_NEXT_DROP, + TCP_RESET_NEXT_IP_LOOKUP, + TCP_RESET_N_NEXT +} tcp_reset_next_t; + +#define foreach_tcp4_reset_next \ + _(DROP, "error-drop") \ + _(IP_LOOKUP, "ip4-lookup") + +#define foreach_tcp6_reset_next \ + _(DROP, "error-drop") \ + _(IP_LOOKUP, "ip6-lookup") + +static uword +tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame, u8 is_ip4) +{ + u32 n_left_from, next_index, *from, *to_next; + u32 my_thread_index = vm->cpu_index; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + if (tcp_make_reset_in_place (vm, b0, vnet_buffer (b0)->tcp.flags, + my_thread_index, is_ip4)) + { + error0 = TCP_ERROR_LOOKUP_DROPS; + next0 = TCP_RESET_NEXT_DROP; + goto done; + } + + /* Prepare to send to IP lookup */ + vnet_buffer (b0)->sw_if_index[VLIB_TX] = 0; + next0 = TCP_RESET_NEXT_IP_LOOKUP; + + done: + b0->error = error0 != 0 ? 
node->errors[error0] : 0; + b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return from_frame->n_vectors; +} + +static uword +tcp4_send_reset (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_send_reset_inline (vm, node, from_frame, 1); +} + +static uword +tcp6_send_reset (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return tcp46_send_reset_inline (vm, node, from_frame, 0); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp4_reset_node) = { + .function = tcp4_send_reset, + .name = "tcp4-reset", + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_RESET_N_NEXT, + .next_nodes = { +#define _(s,n) [TCP_RESET_NEXT_##s] = n, + foreach_tcp4_reset_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (tcp6_reset_node) = { + .function = tcp6_send_reset, + .name = "tcp6-reset", + .vector_size = sizeof (u32), + .n_errors = TCP_N_ERROR, + .error_strings = tcp_error_strings, + .n_next_nodes = TCP_RESET_N_NEXT, + .next_nodes = { +#define _(s,n) [TCP_RESET_NEXT_##s] = n, + foreach_tcp6_reset_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp_packet.h b/src/vnet/tcp/tcp_packet.h new file mode 100644 index 00000000..866c5fd6 --- /dev/null +++ b/src/vnet/tcp/tcp_packet.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef included_tcp_packet_h +#define included_tcp_packet_h + +#include + +/* TCP flags bit 0 first. */ +#define foreach_tcp_flag \ + _ (FIN) /**< No more data from sender. */ \ + _ (SYN) /**< Synchronize sequence numbers. */ \ + _ (RST) /**< Reset the connection. */ \ + _ (PSH) /**< Push function. */ \ + _ (ACK) /**< Ack field significant. */ \ + _ (URG) /**< Urgent pointer field significant. */ \ + _ (ECE) /**< ECN-echo. Receiver got CE packet */ \ + _ (CWR) /**< Sender reduced congestion window */ + +enum +{ +#define _(f) TCP_FLAG_BIT_##f, + foreach_tcp_flag +#undef _ + TCP_N_FLAG_BITS, +}; + +enum +{ +#define _(f) TCP_FLAG_##f = 1 << TCP_FLAG_BIT_##f, + foreach_tcp_flag +#undef _ +}; + +typedef struct _tcp_header +{ + union + { + struct + { + u16 src_port; /**< Source port. */ + u16 dst_port; /**< Destination port. */ + }; + struct + { + u16 src, dst; + }; + }; + + u32 seq_number; /**< Sequence number of the first data octet in this + * segment, except when SYN is present. If SYN + * is present the seq number is is the ISN and the + * first data octet is ISN+1 */ + u32 ack_number; /**< Acknowledgement number if ACK is set. 
It contains + * the value of the next sequence number the sender + * of the segment is expecting to receive. */ + u8 data_offset_and_reserved; + u8 flags; /**< Flags: see the macro above */ + u16 window; /**< Number of bytes sender is willing to receive. */ + + u16 checksum; /**< Checksum of TCP pseudo header and data. */ + u16 urgent_pointer; /**< Seq number of the byte after the urgent data. */ +} __attribute__ ((packed)) tcp_header_t; + +/* Flag tests that return 0 or !0 */ +#define tcp_doff(_th) ((_th)->data_offset_and_reserved >> 4) +#define tcp_fin(_th) ((_th)->flags & TCP_FLAG_FIN) +#define tcp_syn(_th) ((_th)->flags & TCP_FLAG_SYN) +#define tcp_rst(_th) ((_th)->flags & TCP_FLAG_RST) +#define tcp_psh(_th) ((_th)->flags & TCP_FLAG_PSH) +#define tcp_ack(_th) ((_th)->flags & TCP_FLAG_ACK) +#define tcp_urg(_th) ((_th)->flags & TCP_FLAG_URG) +#define tcp_ece(_th) ((_th)->flags & TCP_FLAG_ECE) +#define tcp_cwr(_th) ((_th)->flags & TCP_FLAG_CWR) + +/* Flag tests that return 0 or 1 */ +#define tcp_is_syn(_th) !!((_th)->flags & TCP_FLAG_SYN) +#define tcp_is_fin(_th) !!((_th)->flags & TCP_FLAG_FIN) + +always_inline int +tcp_header_bytes (tcp_header_t * t) +{ + return tcp_doff (t) * sizeof (u32); +} + +/* + * TCP options. + */ + +typedef enum tcp_option_type +{ + TCP_OPTION_EOL = 0, /**< End of options. */ + TCP_OPTION_NOOP = 1, /**< No operation. */ + TCP_OPTION_MSS = 2, /**< Limit MSS. */ + TCP_OPTION_WINDOW_SCALE = 3, /**< Window scale. */ + TCP_OPTION_SACK_PERMITTED = 4, /**< Selective Ack permitted. */ + TCP_OPTION_SACK_BLOCK = 5, /**< Selective Ack block. */ + TCP_OPTION_TIMESTAMP = 8, /**< Timestamps. */ + TCP_OPTION_UTO = 28, /**< User timeout. */ + TCP_OPTION_AO = 29, /**< Authentication Option. */ +} tcp_option_type_t; + +#define foreach_tcp_options_flag \ + _ (MSS) /**< MSS advertised in SYN */ \ + _ (TSTAMP) /**< Timestamp capability advertised in SYN */ \ + _ (WSCALE) /**< Wnd scale capability advertised in SYN */ \ + _ (SACK_PERMITTED) /**< SACK capability advertised in SYN */ \ + _ (SACK) /**< SACK present */ + +enum +{ +#define _(f) TCP_OPTS_FLAG_BIT_##f, + foreach_tcp_options_flag +#undef _ + TCP_OPTIONS_N_FLAG_BITS, +}; + +enum +{ +#define _(f) TCP_OPTS_FLAG_##f = 1 << TCP_OPTS_FLAG_BIT_##f, + foreach_tcp_options_flag +#undef _ +}; + +typedef struct _sack_block +{ + u32 start; /**< Start sequence number */ + u32 end; /**< End sequence number */ +} sack_block_t; + +typedef struct +{ + u8 flags; /** Option flags, see above */ + + /* Received options */ + u16 mss; /**< Maximum segment size advertised by peer */ + u8 wscale; /**< Window scale advertised by peer */ + u32 tsval; /**< Peer's timestamp value */ + u32 tsecr; /**< Echoed/reflected time stamp */ + sack_block_t *sacks; /**< SACK blocks received */ + u8 n_sack_blocks; /**< Number of SACKs blocks */ +} tcp_options_t; + +/* Flag tests that return 0 or !0 */ +#define tcp_opts_mss(_to) ((_to)->flags & TCP_OPTS_FLAG_MSS) +#define tcp_opts_tstamp(_to) ((_to)->flags & TCP_OPTS_FLAG_TSTAMP) +#define tcp_opts_wscale(_to) ((_to)->flags & TCP_OPTS_FLAG_WSCALE) +#define tcp_opts_sack(_to) ((_to)->flags & TCP_OPTS_FLAG_SACK) +#define tcp_opts_sack_permitted(_to) ((_to)->flags & TCP_OPTS_FLAG_SACK_PERMITTED) + +/* TCP option lengths */ +#define TCP_OPTION_LEN_EOL 1 +#define TCP_OPTION_LEN_NOOP 1 +#define TCP_OPTION_LEN_MSS 4 +#define TCP_OPTION_LEN_WINDOW_SCALE 3 +#define TCP_OPTION_LEN_SACK_PERMITTED 2 +#define TCP_OPTION_LEN_TIMESTAMP 10 +#define TCP_OPTION_LEN_SACK_BLOCK 8 + +#define TCP_WND_MAX 65535U +#define TCP_MAX_WND_SCALE 14 
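+/* Illustrative note, not part of the original patch: with the maximum scale
+ * of 14 the largest advertisable window is 65535 << 14, just under 1 GiB;
+ * tcp_window_compute_scale() in tcp_output.c picks the smallest scale that
+ * brings the available fifo space under TCP_WND_MAX. */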
/* See RFC 1323 */ +#define TCP_OPTS_ALIGN 4 +#define TCP_OPTS_MAX_SACK_BLOCKS 3 +#endif /* included_tcp_packet_h */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp_pg.c b/src/vnet/tcp/tcp_pg.c new file mode 100644 index 00000000..dc324049 --- /dev/null +++ b/src/vnet/tcp/tcp_pg.c @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * ip/tcp_pg: TCP packet-generator interface + * + * Copyright (c) 2008 Eliot Dresselhaus + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +/* TCP flags bit 0 first. */ +#define foreach_tcp_flag \ + _ (FIN) \ + _ (SYN) \ + _ (RST) \ + _ (PSH) \ + _ (ACK) \ + _ (URG) \ + _ (ECE) \ + _ (CWR) + +static void +tcp_pg_edit_function (pg_main_t * pg, + pg_stream_t * s, + pg_edit_group_t * g, + u32 * packets, + u32 n_packets) +{ + vlib_main_t * vm = vlib_get_main(); + u32 ip_offset, tcp_offset; + + tcp_offset = g->start_byte_offset; + ip_offset = (g-1)->start_byte_offset; + + while (n_packets >= 1) + { + vlib_buffer_t * p0; + ip4_header_t * ip0; + tcp_header_t * tcp0; + ip_csum_t sum0; + u32 tcp_len0; + + p0 = vlib_get_buffer (vm, packets[0]); + n_packets -= 1; + packets += 1; + + ASSERT (p0->current_data == 0); + ip0 = (void *) (p0->data + ip_offset); + tcp0 = (void *) (p0->data + tcp_offset); + tcp_len0 = clib_net_to_host_u16 (ip0->length) - sizeof (ip0[0]); + + /* Initialize checksum with header. */ + if (BITS (sum0) == 32) + { + sum0 = clib_mem_unaligned (&ip0->src_address, u32); + sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&ip0->dst_address, u32)); + } + else + sum0 = clib_mem_unaligned (&ip0->src_address, u64); + + sum0 = ip_csum_with_carry + (sum0, clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16))); + + /* Invalidate possibly old checksum. 
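+       * The pseudo-header (addresses, protocol and TCP length) has already
+       * been summed above, so the stale checksum field is zeroed before the
+       * TCP header and payload are folded into the incremental checksum.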
*/ + tcp0->checksum = 0; + + sum0 = ip_incremental_checksum_buffer (vm, p0, tcp_offset, tcp_len0, sum0); + + tcp0->checksum = ~ ip_csum_fold (sum0); + } +} + +typedef struct { + pg_edit_t src, dst; + pg_edit_t seq_number, ack_number; + pg_edit_t data_offset_and_reserved; +#define _(f) pg_edit_t f##_flag; + foreach_tcp_flag +#undef _ + pg_edit_t window; + pg_edit_t checksum; + pg_edit_t urgent_pointer; +} pg_tcp_header_t; + +static inline void +pg_tcp_header_init (pg_tcp_header_t * p) +{ + /* Initialize fields that are not bit fields in the IP header. */ +#define _(f) pg_edit_init (&p->f, tcp_header_t, f); + _ (src); + _ (dst); + _ (seq_number); + _ (ack_number); + _ (window); + _ (checksum); + _ (urgent_pointer); +#undef _ + + /* Initialize bit fields. */ +#define _(f) \ + pg_edit_init_bitfield (&p->f##_flag, tcp_header_t, \ + flags, \ + TCP_FLAG_BIT_##f, 1); + + foreach_tcp_flag +#undef _ + + pg_edit_init_bitfield (&p->data_offset_and_reserved, tcp_header_t, + data_offset_and_reserved, + 4, 4); +} + +uword +unformat_pg_tcp_header (unformat_input_t * input, va_list * args) +{ + pg_stream_t * s = va_arg (*args, pg_stream_t *); + pg_tcp_header_t * p; + u32 group_index; + + p = pg_create_edit_group (s, sizeof (p[0]), sizeof (tcp_header_t), + &group_index); + pg_tcp_header_init (p); + + /* Defaults. */ + pg_edit_set_fixed (&p->seq_number, 0); + pg_edit_set_fixed (&p->ack_number, 0); + + pg_edit_set_fixed (&p->data_offset_and_reserved, + sizeof (tcp_header_t) / sizeof (u32)); + + pg_edit_set_fixed (&p->window, 4096); + pg_edit_set_fixed (&p->urgent_pointer, 0); + +#define _(f) pg_edit_set_fixed (&p->f##_flag, 0); + foreach_tcp_flag +#undef _ + + p->checksum.type = PG_EDIT_UNSPECIFIED; + + if (! unformat (input, "TCP: %U -> %U", + unformat_pg_edit, + unformat_tcp_udp_port, &p->src, + unformat_pg_edit, + unformat_tcp_udp_port, &p->dst)) + goto error; + + /* Parse options. */ + while (1) + { + if (unformat (input, "window %U", + unformat_pg_edit, + unformat_pg_number, &p->window)) + ; + + else if (unformat (input, "checksum %U", + unformat_pg_edit, + unformat_pg_number, &p->checksum)) + ; + + /* Flags. */ +#define _(f) else if (unformat (input, #f)) pg_edit_set_fixed (&p->f##_flag, 1); + foreach_tcp_flag +#undef _ + + /* Can't parse input: try next protocol level. */ + else + break; + } + + { + ip_main_t * im = &ip_main; + u16 dst_port; + tcp_udp_port_info_t * pi; + + pi = 0; + if (p->dst.type == PG_EDIT_FIXED) + { + dst_port = pg_edit_get_value (&p->dst, PG_EDIT_LO); + pi = ip_get_tcp_udp_port_info (im, dst_port); + } + + if (pi && pi->unformat_pg_edit + && unformat_user (input, pi->unformat_pg_edit, s)) + ; + + else if (! unformat_user (input, unformat_pg_payload, s)) + goto error; + + if (p->checksum.type == PG_EDIT_UNSPECIFIED) + { + pg_edit_group_t * g = pg_stream_get_group (s, group_index); + g->edit_function = tcp_pg_edit_function; + g->edit_function_opaque = 0; + } + + return 1; + } + + error: + /* Free up any edits we may have added. */ + pg_free_edit_group (s); + return 0; +} + diff --git a/src/vnet/tcp/tcp_syn_filter4.c b/src/vnet/tcp/tcp_syn_filter4.c new file mode 100644 index 00000000..c7605a30 --- /dev/null +++ b/src/vnet/tcp/tcp_syn_filter4.c @@ -0,0 +1,542 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include + +typedef struct +{ + f64 next_reset; + f64 reset_interval; + u8 *syn_counts; +} syn_filter4_runtime_t; + +typedef struct +{ + u32 next_index; + int not_a_syn; + u8 filter_value; +} syn_filter4_trace_t; + +/* packet trace format function */ +static u8 * +format_syn_filter4_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + syn_filter4_trace_t *t = va_arg (*args, syn_filter4_trace_t *); + + s = format (s, "SYN_FILTER4: next index %d, %s", + t->next_index, t->not_a_syn ? "not a syn" : "syn"); + if (t->not_a_syn == 0) + s = format (s, ", filter value %d\n", t->filter_value); + else + s = format (s, "\n"); + return s; +} + +static vlib_node_registration_t syn_filter4_node; + +#define foreach_syn_filter_error \ +_(THROTTLED, "TCP SYN packet throttle drops") \ +_(OK, "TCP SYN packets passed") + +typedef enum +{ +#define _(sym,str) SYN_FILTER_ERROR_##sym, + foreach_syn_filter_error +#undef _ + SYN_FILTER_N_ERROR, +} syn_filter_error_t; + +static char *syn_filter4_error_strings[] = { +#define _(sym,string) string, + foreach_syn_filter_error +#undef _ +}; + +typedef enum +{ + SYN_FILTER_NEXT_DROP, + SYN_FILTER_N_NEXT, +} syn_filter_next_t; + +extern vnet_feature_arc_registration_t vnet_feat_arc_ip4_local; + +static uword +syn_filter4_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, *to_next; + syn_filter_next_t next_index; + u32 ok_syn_packets = 0; + vnet_feature_main_t *fm = &feature_main; + u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index; + vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index]; + syn_filter4_runtime_t *rt = (syn_filter4_runtime_t *) node->runtime_data; + f64 now = vlib_time_now (vm); + /* Shut up spurious gcc warnings. */ + u8 *c0 = 0, *c1 = 0, *c2 = 0, *c3 = 0; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + if (now > rt->next_reset) + { + memset (rt->syn_counts, 0, vec_len (rt->syn_counts)); + rt->next_reset = now + rt->reset_interval; + } + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + u32 next0, next1, next2, next3; + ip4_header_t *ip0, *ip1, *ip2, *ip3; + tcp_header_t *tcp0, *tcp1, *tcp2, *tcp3; + u32 not_a_syn0 = 1, not_a_syn1 = 1, not_a_syn2 = 1, not_a_syn3 = 1; + u64 hash0, hash1, hash2, hash3; + + /* Prefetch next iteration. 
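+	   * While b0-b3 are processed, the headers and first data cache line
+	   * of buffers 4-7 are prefetched so the next quad is already warm.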
*/ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + /* speculatively enqueue b0 and b1 to the current next frame */ + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + vnet_get_config_data + (&cm->config_main, &b0->current_config_index, + &next0, 0 /* sizeof (c0[0]) */ ); + vnet_get_config_data + (&cm->config_main, &b1->current_config_index, + &next1, 0 /* sizeof (c0[0]) */ ); + vnet_get_config_data + (&cm->config_main, &b2->current_config_index, + &next2, 0 /* sizeof (c0[0]) */ ); + vnet_get_config_data + (&cm->config_main, &b3->current_config_index, + &next3, 0 /* sizeof (c0[0]) */ ); + + /* Not TCP? */ + ip0 = vlib_buffer_get_current (b0); + if (ip0->protocol != IP_PROTOCOL_TCP) + goto trace00; + + tcp0 = ip4_next_header (ip0); + /* + * Not a SYN? + * $$$$ hack: the TCP bitfield flags seem not to compile + * correct code. + */ + if (PREDICT_TRUE (!(tcp0->flags & 0x2))) + goto trace00; + + not_a_syn0 = 0; + hash0 = clib_xxhash ((u64) ip0->src_address.as_u32); + c0 = &rt->syn_counts[hash0 & (_vec_len (rt->syn_counts) - 1)]; + if (PREDICT_FALSE (*c0 >= 0x80)) + { + next0 = SYN_FILTER_NEXT_DROP; + b0->error = node->errors[SYN_FILTER_ERROR_THROTTLED]; + goto trace00; + } + *c0 += 1; + ok_syn_packets++; + + trace00: + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) + && (b0->flags & VLIB_BUFFER_IS_TRACED))) + { + syn_filter4_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->not_a_syn = not_a_syn0; + t->next_index = next0; + t->filter_value = not_a_syn0 ? 0 : *c0; + } + + /* Not TCP? */ + ip1 = vlib_buffer_get_current (b1); + if (ip1->protocol != IP_PROTOCOL_TCP) + goto trace01; + + tcp1 = ip4_next_header (ip1); + /* + * Not a SYN? + * $$$$ hack: the TCP bitfield flags seem not to compile + * correct code. + */ + if (PREDICT_TRUE (!(tcp1->flags & 0x2))) + goto trace01; + + not_a_syn1 = 0; + hash1 = clib_xxhash ((u64) ip1->src_address.as_u32); + c1 = &rt->syn_counts[hash1 & (_vec_len (rt->syn_counts) - 1)]; + if (PREDICT_FALSE (*c1 >= 0x80)) + { + next1 = SYN_FILTER_NEXT_DROP; + b1->error = node->errors[SYN_FILTER_ERROR_THROTTLED]; + goto trace01; + } + *c1 += 1; + ok_syn_packets++; + + trace01: + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) + && (b1->flags & VLIB_BUFFER_IS_TRACED))) + { + syn_filter4_trace_t *t = + vlib_add_trace (vm, node, b1, sizeof (*t)); + t->not_a_syn = not_a_syn1; + t->next_index = next1; + t->filter_value = not_a_syn1 ? 0 : *c1; + } + + /* Not TCP? */ + ip2 = vlib_buffer_get_current (b2); + if (ip2->protocol != IP_PROTOCOL_TCP) + goto trace02; + + tcp2 = ip4_next_header (ip2); + /* + * Not a SYN? + * $$$$ hack: the TCP bitfield flags seem not to compile + * correct code. 
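+	   * (0x2 is the literal value of TCP_FLAG_SYN, i.e.
+	   * 1 << TCP_FLAG_BIT_SYN.)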
+ */ + if (PREDICT_TRUE (!(tcp2->flags & 0x2))) + goto trace02; + + not_a_syn2 = 0; + hash2 = clib_xxhash ((u64) ip2->src_address.as_u32); + c2 = &rt->syn_counts[hash2 & (_vec_len (rt->syn_counts) - 1)]; + if (PREDICT_FALSE (*c2 >= 0x80)) + { + next2 = SYN_FILTER_NEXT_DROP; + b2->error = node->errors[SYN_FILTER_ERROR_THROTTLED]; + goto trace02; + } + *c2 += 1; + ok_syn_packets++; + + trace02: + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) + && (b2->flags & VLIB_BUFFER_IS_TRACED))) + { + syn_filter4_trace_t *t = + vlib_add_trace (vm, node, b2, sizeof (*t)); + t->not_a_syn = not_a_syn2; + t->next_index = next2; + t->filter_value = not_a_syn2 ? 0 : *c2; + } + + /* Not TCP? */ + ip3 = vlib_buffer_get_current (b3); + if (ip3->protocol != IP_PROTOCOL_TCP) + goto trace03; + + tcp3 = ip4_next_header (ip3); + /* + * Not a SYN? + * $$$$ hack: the TCP bitfield flags seem not to compile + * correct code. + */ + if (PREDICT_TRUE (!(tcp3->flags & 0x2))) + goto trace03; + + not_a_syn3 = 0; + hash3 = clib_xxhash ((u64) ip3->src_address.as_u32); + c3 = &rt->syn_counts[hash3 & (_vec_len (rt->syn_counts) - 1)]; + if (PREDICT_FALSE (*c3 >= 0x80)) + { + next3 = SYN_FILTER_NEXT_DROP; + b3->error = node->errors[SYN_FILTER_ERROR_THROTTLED]; + goto trace03; + } + *c3 += 1; + ok_syn_packets++; + + trace03: + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) + && (b3->flags & VLIB_BUFFER_IS_TRACED))) + { + syn_filter4_trace_t *t = + vlib_add_trace (vm, node, b3, sizeof (*t)); + t->not_a_syn = not_a_syn3; + t->next_index = next3; + t->filter_value = not_a_syn3 ? 0 : *c3; + } + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + u32 next0; + ip4_header_t *ip0; + tcp_header_t *tcp0; + u32 not_a_syn0 = 1; + u32 hash0; + u8 *c0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + vnet_get_config_data + (&cm->config_main, &b0->current_config_index, + &next0, 0 /* sizeof (c0[0]) */ ); + + /* Not TCP? */ + ip0 = vlib_buffer_get_current (b0); + if (ip0->protocol != IP_PROTOCOL_TCP) + goto trace0; + + tcp0 = ip4_next_header (ip0); + /* + * Not a SYN? + * $$$$ hack: the TCP bitfield flags seem not to compile + * correct code. + */ + if (PREDICT_TRUE (!(tcp0->flags & 0x2))) + goto trace0; + + not_a_syn0 = 0; + hash0 = clib_xxhash ((u64) ip0->src_address.as_u32); + c0 = &rt->syn_counts[hash0 & (_vec_len (rt->syn_counts) - 1)]; + if (PREDICT_FALSE (*c0 >= 0x80)) + { + next0 = SYN_FILTER_NEXT_DROP; + b0->error = node->errors[SYN_FILTER_ERROR_THROTTLED]; + goto trace0; + } + *c0 += 1; + ok_syn_packets++; + + trace0: + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) + && (b0->flags & VLIB_BUFFER_IS_TRACED))) + { + syn_filter4_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->not_a_syn = not_a_syn0; + t->next_index = next0; + t->filter_value = not_a_syn0 ? 
0 : *c0; + } + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, syn_filter4_node.index, + SYN_FILTER_ERROR_OK, ok_syn_packets); + return frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (syn_filter4_node, static) = +{ + .function = syn_filter4_node_fn, + .name = "syn-filter-4", + .vector_size = sizeof (u32), + .format_trace = format_syn_filter4_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .runtime_data_bytes = sizeof (syn_filter4_runtime_t), + .n_errors = ARRAY_LEN(syn_filter4_error_strings), + .error_strings = syn_filter4_error_strings, + + .n_next_nodes = SYN_FILTER_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [SYN_FILTER_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (syn_filter4_node, syn_filter4_node_fn); + +/* *INDENT-OFF* */ +VNET_FEATURE_INIT (syn_filter_4, static) = +{ + .arc_name = "ip4-local", + .node_name = "syn-filter-4", + .runs_before = VNET_FEATURES("ip4-local-end-of-arc"), +}; +/* *INDENT-ON* */ + +int +syn_filter_enable_disable (u32 sw_if_index, int enable_disable) +{ + vnet_main_t *vnm = vnet_get_main (); + vnet_sw_interface_t *sw; + int rv = 0; + + /* Utterly wrong? */ + if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index)) + return VNET_API_ERROR_INVALID_SW_IF_INDEX; + + /* Not a physical port? */ + sw = vnet_get_sw_interface (vnm, sw_if_index); + if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE) + return VNET_API_ERROR_INVALID_SW_IF_INDEX; + + if (enable_disable) + { + vlib_main_t *vm = vlib_get_main (); + syn_filter4_runtime_t *rt; + + rt = vlib_node_get_runtime_data (vm, syn_filter4_node.index); + vec_validate (rt->syn_counts, 1023); + /* + * Given perfect disperson / optimal hashing results: + * Allow 128k (successful) syns/sec. 1024, buckets each of which + * absorb 128 syns before filtering. Reset table once a second. + * Reality bites, lets try resetting once every 100ms. 
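+       * Back-of-the-envelope: 1024 buckets * 128 syns is ~128K admitted
+       * per reset interval, so the 100ms interval used below allows
+       * roughly 1.3M syns/sec in aggregate, assuming perfect hashing.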
+ */ + rt->reset_interval = 0.1; /* reset interval in seconds */ + } + + rv = vnet_feature_enable_disable ("ip4-local", "syn-filter-4", + sw_if_index, enable_disable, 0, 0); + + return rv; +} + +static clib_error_t * +syn_filter_enable_disable_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + vnet_main_t *vnm = vnet_get_main (); + u32 sw_if_index = ~0; + int enable_disable = 1; + int rv; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "disable")) + enable_disable = 0; + else if (unformat (input, "%U", unformat_vnet_sw_interface, + vnm, &sw_if_index)) + ; + else + break; + } + + if (sw_if_index == ~0) + return clib_error_return (0, "Please specify an interface..."); + + rv = syn_filter_enable_disable (sw_if_index, enable_disable); + + switch (rv) + { + case 0: + break; + + case VNET_API_ERROR_INVALID_SW_IF_INDEX: + return clib_error_return + (0, "Invalid interface, only works on physical ports"); + break; + + case VNET_API_ERROR_UNIMPLEMENTED: + return clib_error_return (0, + "Device driver doesn't support redirection"); + break; + + case VNET_API_ERROR_INVALID_VALUE: + return clib_error_return (0, "feature arc not found"); + + case VNET_API_ERROR_INVALID_VALUE_2: + return clib_error_return (0, "feature node not found"); + + default: + return clib_error_return (0, "syn_filter_enable_disable returned %d", + rv); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (sr_content_command, static) = +{ + .path = "ip syn filter", + .short_help = "ip syn filter [disable]", + .function = syn_filter_enable_disable_command_fn, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/tcp_timer.h b/src/vnet/tcp/tcp_timer.h new file mode 100644 index 00000000..fa25268c --- /dev/null +++ b/src/vnet/tcp/tcp_timer.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __included_tcp_timer_h__ +#define __included_tcp_timer_h__ + +#include +#include + +#endif /* __included_tcp_timer_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c new file mode 100644 index 00000000..afa66ba4 --- /dev/null +++ b/src/vnet/udp/builtin_server.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** @file + udp builtin server +*/ + +#include +#include +#include + +/** per-worker built-in server copy buffers */ +u8 **copy_buffers; + +static int +builtin_session_create_callback (stream_session_t * s) +{ + /* Simple version: declare session ready-to-go... */ + s->session_state = SESSION_STATE_READY; + return 0; +} + +static void +builtin_session_disconnect_callback (stream_session_t * s) +{ + stream_session_disconnect (s); +} + +static int +builtin_server_rx_callback (stream_session_t * s) +{ + svm_fifo_t *rx_fifo, *tx_fifo; + u32 this_transfer; + int actual_transfer; + u8 *my_copy_buffer; + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + + my_copy_buffer = copy_buffers[s->thread_index]; + rx_fifo = s->server_rx_fifo; + tx_fifo = s->server_tx_fifo; + + this_transfer = svm_fifo_max_enqueue (tx_fifo) + < svm_fifo_max_dequeue (rx_fifo) ? + svm_fifo_max_enqueue (tx_fifo) : svm_fifo_max_dequeue (rx_fifo); + + vec_validate (my_copy_buffer, this_transfer - 1); + _vec_len (my_copy_buffer) = this_transfer; + + actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, 0, this_transfer, + my_copy_buffer); + ASSERT (actual_transfer == this_transfer); + actual_transfer = svm_fifo_enqueue_nowait (tx_fifo, 0, this_transfer, + my_copy_buffer); + + copy_buffers[s->thread_index] = my_copy_buffer; + + /* Fabricate TX event, send to ourselves */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = actual_transfer; + evt.event_id = 0; + q = session_manager_get_vpp_event_queue (s->thread_index); + unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); + + return 0; +} + +/* *INDENT-OFF* */ +static session_cb_vft_t builtin_server = { + .session_accept_callback = builtin_session_create_callback, + .session_disconnect_callback = builtin_session_disconnect_callback, + .builtin_server_rx_callback = builtin_server_rx_callback +}; +/* *INDENT-ON* */ + +static int +bind_builtin_uri_server (u8 * uri) +{ + vnet_bind_args_t _a, *a = &_a; + char segment_name[128]; + u32 segment_name_length; + int rv; + u64 options[16]; + + segment_name_length = ARRAY_LEN (segment_name); + + memset (a, 0, sizeof (*a)); + memset (options, 0, sizeof (options)); + + a->uri = (char *) uri; + a->api_client_index = ~0; /* built-in server */ + a->segment_name = segment_name; + a->segment_name_length = segment_name_length; + a->session_cb_vft = &builtin_server; + + options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; + options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ + a->options = options; + + rv = vnet_bind_uri (a); + + return rv; +} + +static int +unbind_builtin_uri_server (u8 * uri) +{ + int rv; + + rv = vnet_unbind_uri ((char *) uri, ~0 /* client_index */ ); + + return rv; +} + +static clib_error_t * +builtin_server_init (vlib_main_t * vm) +{ + vlib_thread_main_t *vtm = vlib_get_thread_main (); + u32 num_threads; + + num_threads = 1 /* main thread */ + vtm->n_threads; + + vec_validate (copy_buffers, num_threads - 1); + return 0; +} + +VLIB_INIT_FUNCTION (builtin_server_init); + +static clib_error_t * +builtin_uri_bind_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + u8 *uri = 0; + int rv; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "uri %s", &uri)) + ; + else + break; + } + + if (uri == 0) + return clib_error_return (0, 
"uri to bind not specified..."); + + rv = bind_builtin_uri_server (uri); + + vec_free (uri); + + switch (rv) + { + case 0: + break; + + default: + return clib_error_return (0, "bind_uri_server returned %d", rv); + break; + } + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (builtin_uri_bind_command, static) = +{ + .path = "builtin uri bind", + .short_help = "builtin uri bind", + .function = builtin_uri_bind_command_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +builtin_uri_unbind_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + u8 *uri = 0; + int rv; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "uri %s", &uri)) + ; + else + break; + } + + if (uri == 0) + return clib_error_return (0, "uri to unbind not specified..."); + + rv = unbind_builtin_uri_server (uri); + + vec_free (uri); + + switch (rv) + { + case 0: + break; + + default: + return clib_error_return (0, "unbind_uri_server returned %d", rv); + break; + } + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (builtin_uri_unbind_command, static) = +{ + .path = "builtin uri unbind", + .short_help = "builtin uri unbind", + .function = builtin_uri_unbind_command_fn, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/udp.c b/src/vnet/udp/udp.c new file mode 100644 index 00000000..9e740466 --- /dev/null +++ b/src/vnet/udp/udp.c @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** @file + udp state machine, etc. 
+*/ + +#include +#include +#include +#include + +udp_uri_main_t udp_uri_main; + +u32 +udp_session_bind_ip4 (vlib_main_t * vm, u32 session_index, + ip46_address_t * ip, u16 port_number_host_byte_order) +{ + udp_uri_main_t *um = vnet_get_udp_main (); + udp_connection_t *listener; + + pool_get (um->udp_listeners, listener); + memset (listener, 0, sizeof (udp_connection_t)); + listener->c_lcl_port = clib_host_to_net_u16 (port_number_host_byte_order); + listener->c_lcl_ip4.as_u32 = ip->ip4.as_u32; + listener->c_proto = SESSION_TYPE_IP4_UDP; + udp_register_dst_port (um->vlib_main, port_number_host_byte_order, + udp4_uri_input_node.index, 1 /* is_ipv4 */ ); + return 0; +} + +u32 +udp_session_bind_ip6 (vlib_main_t * vm, u32 session_index, + ip46_address_t * ip, u16 port_number_host_byte_order) +{ + udp_uri_main_t *um = vnet_get_udp_main (); + udp_connection_t *listener; + + pool_get (um->udp_listeners, listener); + listener->c_lcl_port = clib_host_to_net_u16 (port_number_host_byte_order); + clib_memcpy (&listener->c_lcl_ip6, &ip->ip6, sizeof (ip6_address_t)); + listener->c_proto = SESSION_TYPE_IP6_UDP; + udp_register_dst_port (um->vlib_main, port_number_host_byte_order, + udp4_uri_input_node.index, 0 /* is_ipv4 */ ); + return 0; +} + +u32 +udp_session_unbind_ip4 (vlib_main_t * vm, u32 listener_index) +{ + udp_connection_t *listener; + listener = udp_listener_get (listener_index); + + /* deregister the udp_local mapping */ + udp_unregister_dst_port (vm, listener->c_lcl_port, 1 /* is_ipv4 */ ); + return 0; +} + +u32 +udp_session_unbind_ip6 (vlib_main_t * vm, u32 listener_index) +{ + udp_connection_t *listener; + + listener = udp_listener_get (listener_index); + + /* deregister the udp_local mapping */ + udp_unregister_dst_port (vm, listener->c_lcl_port, 0 /* is_ipv4 */ ); + return 0; +} + +transport_connection_t * +udp_session_get_listener (u32 listener_index) +{ + udp_connection_t *us; + + us = udp_listener_get (listener_index); + return &us->connection; +} + +u32 +udp_push_header (transport_connection_t * tconn, vlib_buffer_t * b) +{ + udp_connection_t *us; + u8 *data; + udp_header_t *udp; + + us = (udp_connection_t *) tconn; + + if (tconn->is_ip4) + { + ip4_header_t *ip; + + data = vlib_buffer_get_current (b); + udp = (udp_header_t *) (data - sizeof (udp_header_t)); + ip = (ip4_header_t *) ((u8 *) udp - sizeof (ip4_header_t)); + + /* Build packet header, swap rx key src + dst fields */ + ip->src_address.as_u32 = us->c_lcl_ip4.as_u32; + ip->dst_address.as_u32 = us->c_rmt_ip4.as_u32; + ip->ip_version_and_header_length = 0x45; + ip->ttl = 254; + ip->protocol = IP_PROTOCOL_UDP; + ip->length = clib_host_to_net_u16 (b->current_length + sizeof (*udp)); + ip->checksum = ip4_header_checksum (ip); + + udp->src_port = us->c_lcl_port; + udp->dst_port = us->c_rmt_port; + udp->length = clib_host_to_net_u16 (b->current_length); + udp->checksum = 0; + + b->current_length = sizeof (*ip) + sizeof (*udp); + return SESSION_QUEUE_NEXT_IP4_LOOKUP; + } + else + { + vlib_main_t *vm = vlib_get_main (); + ip6_header_t *ip; + u16 payload_length; + int bogus = ~0; + + data = vlib_buffer_get_current (b); + udp = (udp_header_t *) (data - sizeof (udp_header_t)); + ip = (ip6_header_t *) ((u8 *) udp - sizeof (ip6_header_t)); + + /* Build packet header, swap rx key src + dst fields */ + clib_memcpy (&ip->src_address, &us->c_lcl_ip6, sizeof (ip6_address_t)); + clib_memcpy (&ip->dst_address, &us->c_rmt_ip6, sizeof (ip6_address_t)); + + ip->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 (0x6 << 28); + + 
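+      /* Fixed hop limit of 255; next header is UDP */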
ip->hop_limit = 0xff; + ip->protocol = IP_PROTOCOL_UDP; + + payload_length = vlib_buffer_length_in_chain (vm, b); + payload_length -= sizeof (*ip); + + ip->payload_length = clib_host_to_net_u16 (payload_length); + + udp->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ip, &bogus); + ASSERT (!bogus); + + udp->src_port = us->c_lcl_port; + udp->dst_port = us->c_rmt_port; + udp->length = clib_host_to_net_u16 (b->current_length); + udp->checksum = 0; + + b->current_length = sizeof (*ip) + sizeof (*udp); + + return SESSION_QUEUE_NEXT_IP6_LOOKUP; + } +} + +transport_connection_t * +udp_session_get (u32 connection_index, u32 my_thread_index) +{ + udp_uri_main_t *um = vnet_get_udp_main (); + + udp_connection_t *us; + us = + pool_elt_at_index (um->udp_sessions[my_thread_index], connection_index); + return &us->connection; +} + +void +udp_session_close (u32 connection_index, u32 my_thread_index) +{ + udp_uri_main_t *um = vnet_get_udp_main (); + pool_put_index (um->udp_sessions[my_thread_index], connection_index); +} + +u8 * +format_udp_session_ip4 (u8 * s, va_list * args) +{ + u32 uci = va_arg (*args, u32); + u32 thread_index = va_arg (*args, u32); + udp_connection_t *u4; + + u4 = udp_connection_get (uci, thread_index); + + s = format (s, "[%s] %U:%d->%U:%d", "udp", format_ip4_address, + &u4->c_lcl_ip4, clib_net_to_host_u16 (u4->c_lcl_port), + format_ip4_address, &u4->c_rmt_ip4, + clib_net_to_host_u16 (u4->c_rmt_port)); + return s; +} + +u8 * +format_udp_session_ip6 (u8 * s, va_list * args) +{ + u32 uci = va_arg (*args, u32); + u32 thread_index = va_arg (*args, u32); + udp_connection_t *tc = udp_connection_get (uci, thread_index); + s = format (s, "[%s] %U:%d->%U:%d", "udp", format_ip6_address, + &tc->c_lcl_ip6, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip6_address, &tc->c_rmt_ip6, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +u8 * +format_udp_listener_session_ip4 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + udp_connection_t *tc = udp_listener_get (tci); + s = format (s, "[%s] %U:%d->%U:%d", "udp", format_ip4_address, + &tc->c_lcl_ip4, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip4_address, &tc->c_rmt_ip4, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +u8 * +format_udp_listener_session_ip6 (u8 * s, va_list * args) +{ + u32 tci = va_arg (*args, u32); + udp_connection_t *tc = udp_listener_get (tci); + s = format (s, "[%s] %U:%d->%U:%d", "udp", format_ip6_address, + &tc->c_lcl_ip6, clib_net_to_host_u16 (tc->c_lcl_port), + format_ip6_address, &tc->c_rmt_ip6, + clib_net_to_host_u16 (tc->c_rmt_port)); + return s; +} + +u16 +udp_send_mss_uri (transport_connection_t * t) +{ + /* TODO figure out MTU of output interface */ + return 400; +} + +u32 +udp_send_space_uri (transport_connection_t * t) +{ + /* No constraint on TX window */ + return ~0; +} + +int +udp_open_connection (ip46_address_t * addr, u16 port) +{ + clib_warning ("Not implemented"); + return 0; +} + +/* *INDENT-OFF* */ +const static transport_proto_vft_t udp4_proto = { + .bind = udp_session_bind_ip4, + .open = udp_open_connection, + .unbind = udp_session_unbind_ip4, + .push_header = udp_push_header, + .get_connection = udp_session_get, + .get_listener = udp_session_get_listener, + .close = udp_session_close, + .send_mss = udp_send_mss_uri, + .send_space = udp_send_space_uri, + .format_connection = format_udp_session_ip4, + .format_listener = format_udp_listener_session_ip4 +}; + +const static transport_proto_vft_t udp6_proto = { + .bind = udp_session_bind_ip6, + .open = 
udp_open_connection, + .unbind = udp_session_unbind_ip6, + .push_header = udp_push_header, + .get_connection = udp_session_get, + .get_listener = udp_session_get_listener, + .close = udp_session_close, + .send_mss = udp_send_mss_uri, + .send_space = udp_send_space_uri, + .format_connection = format_udp_session_ip6, + .format_listener = format_udp_listener_session_ip6 +}; +/* *INDENT-ON* */ + +static clib_error_t * +udp_init (vlib_main_t * vm) +{ + udp_uri_main_t *um = vnet_get_udp_main (); + ip_main_t *im = &ip_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + u32 num_threads; + clib_error_t *error = 0; + ip_protocol_info_t *pi; + + um->vlib_main = vm; + um->vnet_main = vnet_get_main (); + + if ((error = vlib_call_init_function (vm, ip_main_init))) + return error; + if ((error = vlib_call_init_function (vm, ip4_lookup_init))) + return error; + if ((error = vlib_call_init_function (vm, ip6_lookup_init))) + return error; + + /* + * Registrations + */ + + /* IP registration */ + pi = ip_get_protocol_info (im, IP_PROTOCOL_UDP); + if (pi == 0) + return clib_error_return (0, "UDP protocol info AWOL"); + pi->format_header = format_udp_header; + pi->unformat_pg_edit = unformat_pg_udp_header; + + + /* Register as transport with URI */ + session_register_transport (SESSION_TYPE_IP4_UDP, &udp4_proto); + session_register_transport (SESSION_TYPE_IP6_UDP, &udp6_proto); + + /* + * Initialize data structures + */ + + num_threads = 1 /* main thread */ + tm->n_threads; + vec_validate (um->udp_sessions, num_threads - 1); + + return error; +} + +VLIB_INIT_FUNCTION (udp_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/udp.h b/src/vnet/udp/udp.h new file mode 100644 index 00000000..7ab26ce9 --- /dev/null +++ b/src/vnet/udp/udp.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef __included_udp_h__ +#define __included_udp_h__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +typedef struct +{ + transport_connection_t connection; /** must be first */ + + /** ersatz MTU to limit fifo pushes to test data size */ + u32 mtu; +} udp_connection_t; + +typedef struct _udp_uri_main +{ + /* Per-worker thread udp connection pools */ + udp_connection_t **udp_sessions; + udp_connection_t *udp_listeners; + + /* convenience */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + ip4_main_t *ip4_main; + ip6_main_t *ip6_main; +} udp_uri_main_t; + +extern udp_uri_main_t udp_uri_main; +extern vlib_node_registration_t udp4_uri_input_node; + +always_inline udp_uri_main_t * +vnet_get_udp_main () +{ + return &udp_uri_main; +} + +always_inline udp_connection_t * +udp_connection_get (u32 conn_index, u32 thread_index) +{ + return pool_elt_at_index (udp_uri_main.udp_sessions[thread_index], + conn_index); +} + +always_inline udp_connection_t * +udp_listener_get (u32 conn_index) +{ + return pool_elt_at_index (udp_uri_main.udp_listeners, conn_index); +} + +typedef enum +{ +#define udp_error(n,s) UDP_ERROR_##n, +#include +#undef udp_error + UDP_N_ERROR, +} udp_error_t; + +#define foreach_udp4_dst_port \ +_ (67, dhcp_to_server) \ +_ (68, dhcp_to_client) \ +_ (500, ikev2) \ +_ (3784, bfd4) \ +_ (3785, bfd_echo4) \ +_ (4341, lisp_gpe) \ +_ (4342, lisp_cp) \ +_ (4739, ipfix) \ +_ (4789, vxlan) \ +_ (4789, vxlan6) \ +_ (4790, vxlan_gpe) \ +_ (6633, vpath_3) + + +#define foreach_udp6_dst_port \ +_ (547, dhcpv6_to_server) \ +_ (546, dhcpv6_to_client) \ +_ (3784, bfd6) \ +_ (3785, bfd_echo6) \ +_ (4341, lisp_gpe6) \ +_ (4342, lisp_cp6) \ +_ (4790, vxlan6_gpe) \ +_ (6633, vpath6_3) + +typedef enum +{ +#define _(n,f) UDP_DST_PORT_##f = n, + foreach_udp4_dst_port foreach_udp6_dst_port +#undef _ +} udp_dst_port_t; + +typedef enum +{ +#define _(n,f) UDP6_DST_PORT_##f = n, + foreach_udp6_dst_port +#undef _ +} udp6_dst_port_t; + +typedef struct +{ + /* Name (a c string). */ + char *name; + + /* GRE protocol type in host byte order. */ + udp_dst_port_t dst_port; + + /* Node which handles this type. */ + u32 node_index; + + /* Next index for this type. */ + u32 next_index; +} udp_dst_port_info_t; + +typedef enum +{ + UDP_IP6 = 0, + UDP_IP4, /* the code is full of is_ip4... */ + N_UDP_AF, +} udp_af_t; + +typedef struct +{ + udp_dst_port_info_t *dst_port_infos[N_UDP_AF]; + + /* Hash tables mapping name/protocol to protocol info index. */ + uword *dst_port_info_by_name[N_UDP_AF]; + uword *dst_port_info_by_dst_port[N_UDP_AF]; + + /* convenience */ + vlib_main_t *vlib_main; +} udp_main_t; + +always_inline udp_dst_port_info_t * +udp_get_dst_port_info (udp_main_t * um, udp_dst_port_t dst_port, u8 is_ip4) +{ + uword *p = hash_get (um->dst_port_info_by_dst_port[is_ip4], dst_port); + return p ? 
vec_elt_at_index (um->dst_port_infos[is_ip4], p[0]) : 0; +} + +format_function_t format_udp_header; +format_function_t format_udp_rx_trace; + +unformat_function_t unformat_udp_header; + +void udp_register_dst_port (vlib_main_t * vm, + udp_dst_port_t dst_port, + u32 node_index, u8 is_ip4); + +void +udp_unregister_dst_port (vlib_main_t * vm, + udp_dst_port_t dst_port, u8 is_ip4); + +void udp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add); + +always_inline void +ip_udp_fixup_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 is_ip4) +{ + u16 new_l0; + udp_header_t *udp0; + + if (is_ip4) + { + ip4_header_t *ip0; + ip_csum_t sum0; + u16 old_l0 = 0; + + ip0 = vlib_buffer_get_current (b0); + + /* fix the ing outer-IP checksum */ + sum0 = ip0->checksum; + /* old_l0 always 0, see the rewrite setup */ + new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); + + sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, + length /* changed member */ ); + ip0->checksum = ip_csum_fold (sum0); + ip0->length = new_l0; + + /* Fix UDP length */ + udp0 = (udp_header_t *) (ip0 + 1); + new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) + - sizeof (*ip0)); + udp0->length = new_l0; + } + else + { + ip6_header_t *ip0; + int bogus0; + + ip0 = vlib_buffer_get_current (b0); + + new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) + - sizeof (*ip0)); + ip0->payload_length = new_l0; + + /* Fix UDP length */ + udp0 = (udp_header_t *) (ip0 + 1); + udp0->length = new_l0; + + udp0->checksum = + ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0, &bogus0); + ASSERT (bogus0 == 0); + + if (udp0->checksum == 0) + udp0->checksum = 0xffff; + } +} + +always_inline void +ip_udp_encap_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 * ec0, word ec_len, + u8 is_ip4) +{ + vlib_buffer_advance (b0, -ec_len); + + if (is_ip4) + { + ip4_header_t *ip0; + + ip0 = vlib_buffer_get_current (b0); + + /* Apply the encap string. */ + clib_memcpy (ip0, ec0, ec_len); + ip_udp_fixup_one (vm, b0, 1); + } + else + { + ip6_header_t *ip0; + + ip0 = vlib_buffer_get_current (b0); + + /* Apply the encap string. 
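+         ec0 is a pre-built IPv6 + UDP rewrite; ip_udp_fixup_one() then
+         patches the IPv6 payload length and UDP length and recomputes
+         the UDP checksum.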
*/ + clib_memcpy (ip0, ec0, ec_len); + ip_udp_fixup_one (vm, b0, 0); + } +} + +always_inline void +ip_udp_encap_two (vlib_main_t * vm, vlib_buffer_t * b0, vlib_buffer_t * b1, + u8 * ec0, u8 * ec1, word ec_len, u8 is_v4) +{ + u16 new_l0, new_l1; + udp_header_t *udp0, *udp1; + + ASSERT (_vec_len (ec0) == _vec_len (ec1)); + + vlib_buffer_advance (b0, -ec_len); + vlib_buffer_advance (b1, -ec_len); + + if (is_v4) + { + ip4_header_t *ip0, *ip1; + ip_csum_t sum0, sum1; + u16 old_l0 = 0, old_l1 = 0; + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + + /* Apply the encap string */ + clib_memcpy (ip0, ec0, ec_len); + clib_memcpy (ip1, ec1, ec_len); + + /* fix the ing outer-IP checksum */ + sum0 = ip0->checksum; + sum1 = ip1->checksum; + + /* old_l0 always 0, see the rewrite setup */ + new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); + new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)); + + sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t, + length /* changed member */ ); + sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t, + length /* changed member */ ); + + ip0->checksum = ip_csum_fold (sum0); + ip1->checksum = ip_csum_fold (sum1); + + ip0->length = new_l0; + ip1->length = new_l1; + + /* Fix UDP length */ + udp0 = (udp_header_t *) (ip0 + 1); + udp1 = (udp_header_t *) (ip1 + 1); + + new_l0 = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - + sizeof (*ip0)); + new_l1 = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) - + sizeof (*ip1)); + udp0->length = new_l0; + udp1->length = new_l1; + } + else + { + ip6_header_t *ip0, *ip1; + int bogus0, bogus1; + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + + /* Apply the encap string. */ + clib_memcpy (ip0, ec0, ec_len); + clib_memcpy (ip1, ec1, ec_len); + + new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) + - sizeof (*ip0)); + new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) + - sizeof (*ip1)); + ip0->payload_length = new_l0; + ip1->payload_length = new_l1; + + /* Fix UDP length */ + udp0 = (udp_header_t *) (ip0 + 1); + udp1 = (udp_header_t *) (ip1 + 1); + + udp0->length = new_l0; + udp1->length = new_l1; + + udp0->checksum = + ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0, &bogus0); + udp1->checksum = + ip6_tcp_udp_icmp_compute_checksum (vm, b1, ip1, &bogus1); + ASSERT (bogus0 == 0); + ASSERT (bogus1 == 0); + + if (udp0->checksum == 0) + udp0->checksum = 0xffff; + if (udp1->checksum == 0) + udp1->checksum = 0xffff; + } +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ + +#endif /* __included_udp_h__ */ diff --git a/src/vnet/udp/udp_error.def b/src/vnet/udp/udp_error.def new file mode 100644 index 00000000..bfdae0ac --- /dev/null +++ b/src/vnet/udp/udp_error.def @@ -0,0 +1,21 @@ +/* + * udp_error.def: udp errors + * + * Copyright (c) 2013-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +udp_error (NONE, "no error") +udp_error (NO_LISTENER, "no listener for dst port") +udp_error (LENGTH_ERROR, "UDP packets with length errors") +udp_error (PUNT, "no listener punt") diff --git a/src/vnet/udp/udp_format.c b/src/vnet/udp/udp_format.c new file mode 100644 index 00000000..abdf561e --- /dev/null +++ b/src/vnet/udp/udp_format.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * ip/udp_format.c: udp formatting + * + * Copyright (c) 2008 Eliot Dresselhaus + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include + +/* Format UDP header. */ +u8 * +format_udp_header (u8 * s, va_list * args) +{ + udp_header_t *udp = va_arg (*args, udp_header_t *); + u32 max_header_bytes = va_arg (*args, u32); + uword indent; + u32 header_bytes = sizeof (udp[0]); + + /* Nothing to do. */ + if (max_header_bytes < sizeof (udp[0])) + return format (s, "UDP header truncated"); + + indent = format_get_indent (s); + indent += 2; + + s = format (s, "UDP: %d -> %d", + clib_net_to_host_u16 (udp->src_port), + clib_net_to_host_u16 (udp->dst_port)); + + s = format (s, "\n%Ulength %d, checksum 0x%04x", + format_white_space, indent, + clib_net_to_host_u16 (udp->length), + clib_net_to_host_u16 (udp->checksum)); + + /* Recurse into next protocol layer. 
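+     using the format_header registered for the destination port via
+     ip_get_tcp_udp_port_info(), when one exists.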
*/ + if (max_header_bytes != 0 && header_bytes < max_header_bytes) + { + ip_main_t *im = &ip_main; + tcp_udp_port_info_t *pi; + + pi = ip_get_tcp_udp_port_info (im, udp->dst_port); + + if (pi && pi->format_header) + s = format (s, "\n%U%U", + format_white_space, indent - 2, pi->format_header, + /* next protocol header */ (udp + 1), + max_header_bytes - sizeof (udp[0])); + } + + return s; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c new file mode 100644 index 00000000..4d509335 --- /dev/null +++ b/src/vnet/udp/udp_input.c @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include +#include "../session/application_interface.h" + +vlib_node_registration_t udp4_uri_input_node; + +typedef struct +{ + u32 session; + u32 disposition; + u32 thread_index; +} udp4_uri_input_trace_t; + +/* packet trace format function */ +static u8 * +format_udp4_uri_input_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + udp4_uri_input_trace_t *t = va_arg (*args, udp4_uri_input_trace_t *); + + s = format (s, "UDP4_URI_INPUT: session %d, disposition %d, thread %d", + t->session, t->disposition, t->thread_index); + return s; +} + +typedef enum +{ + UDP4_URI_INPUT_NEXT_DROP, + UDP4_URI_INPUT_N_NEXT, +} udp4_uri_input_next_t; + +static char *udp4_uri_input_error_strings[] = { +#define _(sym,string) string, + foreach_session_input_error +#undef _ +}; + +static uword +udp4_uri_input_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, *to_next; + udp4_uri_input_next_t next_index; + udp_uri_main_t *um = vnet_get_udp_main (); + session_manager_main_t *smm = vnet_get_session_manager_main (); + u32 my_thread_index = vm->cpu_index; + u8 my_enqueue_epoch; + u32 *session_indices_to_enqueue; + static u32 serial_number; + int i; + + my_enqueue_epoch = ++smm->current_enqueue_epoch[my_thread_index]; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + u32 next0 = UDP4_URI_INPUT_NEXT_DROP; + u32 error0 = SESSION_ERROR_ENQUEUED; + udp_header_t *udp0; + ip4_header_t *ip0; + stream_session_t *s0; + svm_fifo_t *f0; + u16 udp_len0; + u8 *data0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* udp_local hands us a 
pointer to the udp data */ + + data0 = vlib_buffer_get_current (b0); + udp0 = (udp_header_t *) (data0 - sizeof (*udp0)); + + /* $$$$ fixme: udp_local doesn't do ip options correctly anyhow */ + ip0 = (ip4_header_t *) (((u8 *) udp0) - sizeof (*ip0)); + s0 = 0; + + /* lookup session */ + s0 = stream_session_lookup4 (&ip0->dst_address, &ip0->src_address, + udp0->dst_port, udp0->src_port, + SESSION_TYPE_IP4_UDP, my_thread_index); + + /* no listener */ + if (PREDICT_FALSE (s0 == 0)) + { + error0 = SESSION_ERROR_NO_LISTENER; + goto trace0; + } + + f0 = s0->server_rx_fifo; + + /* established hit */ + if (PREDICT_TRUE (s0->session_state == SESSION_STATE_READY)) + { + udp_len0 = clib_net_to_host_u16 (udp0->length); + + if (PREDICT_FALSE (udp_len0 > svm_fifo_max_enqueue (f0))) + { + error0 = SESSION_ERROR_FIFO_FULL; + goto trace0; + } + + svm_fifo_enqueue_nowait (f0, 0 /* pid */ , + udp_len0 - sizeof (*udp0), + (u8 *) (udp0 + 1)); + + b0->error = node->errors[SESSION_ERROR_ENQUEUED]; + + /* We need to send an RX event on this fifo */ + if (s0->enqueue_epoch != my_enqueue_epoch) + { + s0->enqueue_epoch = my_enqueue_epoch; + + vec_add1 (smm->session_indices_to_enqueue_by_thread + [my_thread_index], + s0 - smm->sessions[my_thread_index]); + } + } + /* listener hit */ + else if (s0->session_state == SESSION_STATE_LISTENING) + { + udp_connection_t *us; + int rv; + + error0 = SESSION_ERROR_NOT_READY; + + /* + * create udp transport session + */ + pool_get (um->udp_sessions[my_thread_index], us); + + us->mtu = 1024; /* $$$$ policy */ + + us->c_lcl_ip4.as_u32 = ip0->dst_address.as_u32; + us->c_rmt_ip4.as_u32 = ip0->src_address.as_u32; + us->c_lcl_port = udp0->dst_port; + us->c_rmt_port = udp0->src_port; + us->c_proto = SESSION_TYPE_IP4_UDP; + us->c_c_index = us - um->udp_sessions[my_thread_index]; + + /* + * create stream session and attach the udp session to it + */ + rv = stream_session_accept (&us->connection, s0->session_index, + SESSION_TYPE_IP4_UDP, + 1 /*notify */ ); + if (rv) + error0 = rv; + + } + else + { + + error0 = SESSION_ERROR_NOT_READY; + goto trace0; + } + + trace0: + b0->error = node->errors[error0]; + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) + && (b0->flags & VLIB_BUFFER_IS_TRACED))) + { + udp4_uri_input_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + + t->session = ~0; + if (s0) + t->session = s0 - smm->sessions[my_thread_index]; + t->disposition = error0; + t->thread_index = my_thread_index; + } + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* Send enqueue events */ + + session_indices_to_enqueue = + smm->session_indices_to_enqueue_by_thread[my_thread_index]; + + for (i = 0; i < vec_len (session_indices_to_enqueue); i++) + { + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + stream_session_t *s0; + application_t *server0; + + /* Get session */ + s0 = pool_elt_at_index (smm->sessions[my_thread_index], + session_indices_to_enqueue[i]); + + /* Get session's server */ + server0 = application_get (s0->app_index); + + /* Built-in server? Deliver the goods... 
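+	   (invoke the application's rx callback inline instead of queueing
+	   a FIFO_EVENT_SERVER_RX event to its event queue)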
*/ + if (server0->cb_fns.builtin_server_rx_callback) + { + server0->cb_fns.builtin_server_rx_callback (s0); + continue; + } + + /* Fabricate event */ + evt.fifo = s0->server_rx_fifo; + evt.event_type = FIFO_EVENT_SERVER_RX; + evt.event_id = serial_number++; + evt.enqueue_length = svm_fifo_max_dequeue (s0->server_rx_fifo); + + /* Add event to server's event queue */ + q = server0->event_queue; + + /* Don't block for lack of space */ + if (PREDICT_TRUE (q->cursize < q->maxsize)) + unix_shared_memory_queue_add (server0->event_queue, (u8 *) & evt, + 0 /* do wait for mutex */ ); + else + { + vlib_node_increment_counter (vm, udp4_uri_input_node.index, + SESSION_ERROR_FIFO_FULL, 1); + } + if (1) + { + ELOG_TYPE_DECLARE (e) = + { + .format = "evt-enqueue: id %d length %d",.format_args = "i4i4",}; + struct + { + u32 data[2]; + } *ed; + ed = ELOG_DATA (&vlib_global_main.elog_main, e); + ed->data[0] = evt.event_id; + ed->data[1] = evt.enqueue_length; + } + } + + vec_reset_length (session_indices_to_enqueue); + + smm->session_indices_to_enqueue_by_thread[my_thread_index] = + session_indices_to_enqueue; + + return frame->n_vectors; +} + +VLIB_REGISTER_NODE (udp4_uri_input_node) = +{ + .function = udp4_uri_input_node_fn,.name = "udp4-uri-input",.vector_size = + sizeof (u32),.format_trace = format_udp4_uri_input_trace,.type = + VLIB_NODE_TYPE_INTERNAL,.n_errors = + ARRAY_LEN (udp4_uri_input_error_strings),.error_strings = + udp4_uri_input_error_strings,.n_next_nodes = UDP4_URI_INPUT_N_NEXT, + /* edit / add dispositions here */ + .next_nodes = + { + [UDP4_URI_INPUT_NEXT_DROP] = "error-drop",} +,}; + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/udp_local.c b/src/vnet/udp/udp_local.c new file mode 100644 index 00000000..6b239f73 --- /dev/null +++ b/src/vnet/udp/udp_local.c @@ -0,0 +1,666 @@ +/* + * node.c: udp packet processing + * + * Copyright (c) 2013 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +udp_main_t udp_main; + +#define foreach_udp_input_next \ + _ (PUNT, "error-punt") \ + _ (DROP, "error-drop") \ + _ (ICMP4_ERROR, "ip4-icmp-error") \ + _ (ICMP6_ERROR, "ip6-icmp-error") + +typedef enum +{ +#define _(s,n) UDP_INPUT_NEXT_##s, + foreach_udp_input_next +#undef _ + UDP_INPUT_N_NEXT, +} udp_input_next_t; + +typedef struct +{ + u16 src_port; + u16 dst_port; + u8 bound; +} udp_rx_trace_t; + +u8 * +format_udp_rx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + udp_rx_trace_t *t = va_arg (*args, udp_rx_trace_t *); + + s = format (s, "UDP: src-port %d dst-port %d%s", + clib_net_to_host_u16 (t->src_port), + clib_net_to_host_u16 (t->dst_port), + t->bound ? "" : " (no listener)"); + return s; +} + +typedef struct +{ + /* Sparse vector mapping udp dst_port in network byte order + to next index. 
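+     Unregistered ports map to SPARSE_VEC_INVALID_INDEX, which steers
+     packets to the punt / ICMP port-unreachable path in
+     udp46_input_inline().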
*/ + u16 *next_by_dst_port; + u8 punt_unknown; +} udp_input_runtime_t; + +vlib_node_registration_t udp4_input_node; +vlib_node_registration_t udp6_input_node; + +always_inline uword +udp46_input_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip4) +{ + udp_input_runtime_t *rt = is_ip4 ? + (void *) vlib_node_get_runtime_data (vm, udp4_input_node.index) + : (void *) vlib_node_get_runtime_data (vm, udp6_input_node.index); + __attribute__ ((unused)) u32 n_left_from, next_index, *from, *to_next; + word n_no_listener = 0; + u8 punt_unknown = rt->punt_unknown; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t *b0, *b1; + udp_header_t *h0 = 0, *h1 = 0; + u32 i0, i1, dst_port0, dst_port1; + u32 advance0, advance1; + u32 error0, next0, error1, next1; + + /* Prefetch next iteration. */ + { + vlib_buffer_t *p2, *p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD); + CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD); + } + + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + /* ip4/6_local hands us the ip header, not the udp header */ + if (is_ip4) + { + advance0 = sizeof (ip4_header_t); + advance1 = sizeof (ip4_header_t); + } + else + { + advance0 = sizeof (ip6_header_t); + advance1 = sizeof (ip6_header_t); + } + + if (PREDICT_FALSE (b0->current_length < advance0 + sizeof (*h0))) + { + error0 = UDP_ERROR_LENGTH_ERROR; + next0 = UDP_INPUT_NEXT_DROP; + } + else + { + vlib_buffer_advance (b0, advance0); + h0 = vlib_buffer_get_current (b0); + error0 = next0 = 0; + if (PREDICT_FALSE (clib_net_to_host_u16 (h0->length) > + vlib_buffer_length_in_chain (vm, b0))) + { + error0 = UDP_ERROR_LENGTH_ERROR; + next0 = UDP_INPUT_NEXT_DROP; + } + } + + if (PREDICT_FALSE (b1->current_length < advance1 + sizeof (*h1))) + { + error1 = UDP_ERROR_LENGTH_ERROR; + next1 = UDP_INPUT_NEXT_DROP; + } + else + { + vlib_buffer_advance (b1, advance1); + h1 = vlib_buffer_get_current (b1); + error1 = next1 = 0; + if (PREDICT_FALSE (clib_net_to_host_u16 (h1->length) > + vlib_buffer_length_in_chain (vm, b1))) + { + error1 = UDP_ERROR_LENGTH_ERROR; + next1 = UDP_INPUT_NEXT_DROP; + } + } + + /* Index sparse array with network byte order. */ + dst_port0 = (error0 == 0) ? h0->dst_port : 0; + dst_port1 = (error1 == 0) ? h1->dst_port : 0; + sparse_vec_index2 (rt->next_by_dst_port, dst_port0, dst_port1, + &i0, &i1); + next0 = (error0 == 0) ? vec_elt (rt->next_by_dst_port, i0) : next0; + next1 = (error1 == 0) ? 
vec_elt (rt->next_by_dst_port, i1) : next1; + + if (PREDICT_FALSE (i0 == SPARSE_VEC_INVALID_INDEX)) + { + // move the pointer back so icmp-error can find the + // ip packet header + vlib_buffer_advance (b0, -(word) advance0); + + if (PREDICT_FALSE (punt_unknown)) + { + b0->error = node->errors[UDP_ERROR_PUNT]; + next0 = UDP_INPUT_NEXT_PUNT; + } + else if (is_ip4) + { + icmp4_error_set_vnet_buffer (b0, + ICMP4_destination_unreachable, + ICMP4_destination_unreachable_port_unreachable, + 0); + next0 = UDP_INPUT_NEXT_ICMP4_ERROR; + n_no_listener++; + } + else + { + icmp6_error_set_vnet_buffer (b0, + ICMP6_destination_unreachable, + ICMP6_destination_unreachable_port_unreachable, + 0); + next0 = UDP_INPUT_NEXT_ICMP6_ERROR; + n_no_listener++; + } + } + else + { + b0->error = node->errors[UDP_ERROR_NONE]; + // advance to the payload + vlib_buffer_advance (b0, sizeof (*h0)); + } + + if (PREDICT_FALSE (i1 == SPARSE_VEC_INVALID_INDEX)) + { + // move the pointer back so icmp-error can find the + // ip packet header + vlib_buffer_advance (b1, -(word) advance1); + + if (PREDICT_FALSE (punt_unknown)) + { + b1->error = node->errors[UDP_ERROR_PUNT]; + next1 = UDP_INPUT_NEXT_PUNT; + } + else if (is_ip4) + { + icmp4_error_set_vnet_buffer (b1, + ICMP4_destination_unreachable, + ICMP4_destination_unreachable_port_unreachable, + 0); + next1 = UDP_INPUT_NEXT_ICMP4_ERROR; + n_no_listener++; + } + else + { + icmp6_error_set_vnet_buffer (b1, + ICMP6_destination_unreachable, + ICMP6_destination_unreachable_port_unreachable, + 0); + next1 = UDP_INPUT_NEXT_ICMP6_ERROR; + n_no_listener++; + } + } + else + { + b1->error = node->errors[UDP_ERROR_NONE]; + // advance to the payload + vlib_buffer_advance (b1, sizeof (*h1)); + } + + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + udp_rx_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + if (b0->error != node->errors[UDP_ERROR_LENGTH_ERROR]) + { + tr->src_port = h0 ? h0->src_port : 0; + tr->dst_port = h0 ? h0->dst_port : 0; + tr->bound = (next0 != UDP_INPUT_NEXT_ICMP4_ERROR && + next0 != UDP_INPUT_NEXT_ICMP6_ERROR); + } + } + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + udp_rx_trace_t *tr = vlib_add_trace (vm, node, + b1, sizeof (*tr)); + if (b1->error != node->errors[UDP_ERROR_LENGTH_ERROR]) + { + tr->src_port = h1 ? h1->src_port : 0; + tr->dst_port = h1 ? 
h1->dst_port : 0; + tr->bound = (next1 != UDP_INPUT_NEXT_ICMP4_ERROR && + next1 != UDP_INPUT_NEXT_ICMP6_ERROR); + } + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + udp_header_t *h0 = 0; + u32 i0, next0; + u32 advance0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* ip4/6_local hands us the ip header, not the udp header */ + if (is_ip4) + advance0 = sizeof (ip4_header_t); + else + advance0 = sizeof (ip6_header_t); + + if (PREDICT_FALSE (b0->current_length < advance0 + sizeof (*h0))) + { + b0->error = node->errors[UDP_ERROR_LENGTH_ERROR]; + next0 = UDP_INPUT_NEXT_DROP; + goto trace_x1; + } + + vlib_buffer_advance (b0, advance0); + + h0 = vlib_buffer_get_current (b0); + + if (PREDICT_TRUE (clib_net_to_host_u16 (h0->length) <= + vlib_buffer_length_in_chain (vm, b0))) + { + i0 = sparse_vec_index (rt->next_by_dst_port, h0->dst_port); + next0 = vec_elt (rt->next_by_dst_port, i0); + + if (PREDICT_FALSE (i0 == SPARSE_VEC_INVALID_INDEX)) + { + // move the pointer back so icmp-error can find the + // ip packet header + vlib_buffer_advance (b0, -(word) advance0); + + if (PREDICT_FALSE (punt_unknown)) + { + b0->error = node->errors[UDP_ERROR_PUNT]; + next0 = UDP_INPUT_NEXT_PUNT; + } + else if (is_ip4) + { + icmp4_error_set_vnet_buffer (b0, + ICMP4_destination_unreachable, + ICMP4_destination_unreachable_port_unreachable, + 0); + next0 = UDP_INPUT_NEXT_ICMP4_ERROR; + n_no_listener++; + } + else + { + icmp6_error_set_vnet_buffer (b0, + ICMP6_destination_unreachable, + ICMP6_destination_unreachable_port_unreachable, + 0); + next0 = UDP_INPUT_NEXT_ICMP6_ERROR; + n_no_listener++; + } + } + else + { + b0->error = node->errors[UDP_ERROR_NONE]; + // advance to the payload + vlib_buffer_advance (b0, sizeof (*h0)); + } + } + else + { + b0->error = node->errors[UDP_ERROR_LENGTH_ERROR]; + next0 = UDP_INPUT_NEXT_DROP; + } + + trace_x1: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + udp_rx_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + if (b0->error != node->errors[UDP_ERROR_LENGTH_ERROR]) + { + tr->src_port = h0->src_port; + tr->dst_port = h0->dst_port; + tr->bound = (next0 != UDP_INPUT_NEXT_ICMP4_ERROR && + next0 != UDP_INPUT_NEXT_ICMP6_ERROR); + } + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_error_count (vm, node->node_index, UDP_ERROR_NO_LISTENER, + n_no_listener); + return from_frame->n_vectors; +} + +static char *udp_error_strings[] = { +#define udp_error(n,s) s, +#include "udp_error.def" +#undef udp_error +}; + +static uword +udp4_input (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return udp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ); +} + +static uword +udp6_input (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return udp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ); +} + + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (udp4_input_node) = { + .function = udp4_input, + .name = "ip4-udp-lookup", + /* Takes a vector of packets. 
*/ + .vector_size = sizeof (u32), + + .runtime_data_bytes = sizeof (udp_input_runtime_t), + + .n_errors = UDP_N_ERROR, + .error_strings = udp_error_strings, + + .n_next_nodes = UDP_INPUT_N_NEXT, + .next_nodes = { +#define _(s,n) [UDP_INPUT_NEXT_##s] = n, + foreach_udp_input_next +#undef _ + }, + + .format_buffer = format_udp_header, + .format_trace = format_udp_rx_trace, + .unformat_buffer = unformat_udp_header, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (udp4_input_node, udp4_input); + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (udp6_input_node) = { + .function = udp6_input, + .name = "ip6-udp-lookup", + /* Takes a vector of packets. */ + .vector_size = sizeof (u32), + + .runtime_data_bytes = sizeof (udp_input_runtime_t), + + .n_errors = UDP_N_ERROR, + .error_strings = udp_error_strings, + + .n_next_nodes = UDP_INPUT_N_NEXT, + .next_nodes = { +#define _(s,n) [UDP_INPUT_NEXT_##s] = n, + foreach_udp_input_next +#undef _ + }, + + .format_buffer = format_udp_header, + .format_trace = format_udp_rx_trace, + .unformat_buffer = unformat_udp_header, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (udp6_input_node, udp6_input); + +static void +add_dst_port (udp_main_t * um, + udp_dst_port_t dst_port, char *dst_port_name, u8 is_ip4) +{ + udp_dst_port_info_t *pi; + u32 i; + + vec_add2 (um->dst_port_infos[is_ip4], pi, 1); + i = pi - um->dst_port_infos[is_ip4]; + + pi->name = dst_port_name; + pi->dst_port = dst_port; + pi->next_index = pi->node_index = ~0; + + hash_set (um->dst_port_info_by_dst_port[is_ip4], dst_port, i); + + if (pi->name) + hash_set_mem (um->dst_port_info_by_name[is_ip4], pi->name, i); +} + +void +udp_register_dst_port (vlib_main_t * vm, + udp_dst_port_t dst_port, u32 node_index, u8 is_ip4) +{ + udp_main_t *um = &udp_main; + udp_dst_port_info_t *pi; + udp_input_runtime_t *rt; + u16 *n; + + { + clib_error_t *error = vlib_call_init_function (vm, udp_local_init); + if (error) + clib_error_report (error); + } + + pi = udp_get_dst_port_info (um, dst_port, is_ip4); + if (!pi) + { + add_dst_port (um, dst_port, 0, is_ip4); + pi = udp_get_dst_port_info (um, dst_port, is_ip4); + ASSERT (pi); + } + + pi->node_index = node_index; + pi->next_index = vlib_node_add_next (vm, + is_ip4 ? udp4_input_node.index + : udp6_input_node.index, node_index); + + /* Setup udp protocol -> next index sparse vector mapping. */ + rt = vlib_node_get_runtime_data + (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index); + n = sparse_vec_validate (rt->next_by_dst_port, + clib_host_to_net_u16 (dst_port)); + n[0] = pi->next_index; +} + +void +udp_unregister_dst_port (vlib_main_t * vm, udp_dst_port_t dst_port, u8 is_ip4) +{ + udp_main_t *um = &udp_main; + udp_dst_port_info_t *pi; + udp_input_runtime_t *rt; + u16 *n; + + pi = udp_get_dst_port_info (um, dst_port, is_ip4); + /* Not registered? Fagedaboudit */ + if (!pi) + return; + + /* Kill the mapping. Don't bother killing the pi, it may be back. */ + rt = vlib_node_get_runtime_data + (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index); + n = sparse_vec_validate (rt->next_by_dst_port, + clib_host_to_net_u16 (dst_port)); + n[0] = SPARSE_VEC_INVALID_INDEX; +} + +void +udp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add) +{ + udp_input_runtime_t *rt; + + { + clib_error_t *error = vlib_call_init_function (vm, udp_local_init); + if (error) + clib_error_report (error); + } + + rt = vlib_node_get_runtime_data + (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index); + + rt->punt_unknown = is_add; +} + +/* Parse a UDP header. 
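+   Parses "src-port %d dst-port %d" into a udp_header_t appended to the
+   caller's vector; registered as the input nodes' unformat_buffer by
+   udp_setup_node() below.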
*/ +uword +unformat_udp_header (unformat_input_t * input, va_list * args) +{ + u8 **result = va_arg (*args, u8 **); + udp_header_t *udp; + __attribute__ ((unused)) int old_length; + u16 src_port, dst_port; + + /* Allocate space for IP header. */ + { + void *p; + + old_length = vec_len (*result); + vec_add2 (*result, p, sizeof (ip4_header_t)); + udp = p; + } + + memset (udp, 0, sizeof (udp[0])); + if (unformat (input, "src-port %d dst-port %d", &src_port, &dst_port)) + { + udp->src_port = clib_host_to_net_u16 (src_port); + udp->dst_port = clib_host_to_net_u16 (dst_port); + return 1; + } + return 0; +} + +static void +udp_setup_node (vlib_main_t * vm, u32 node_index) +{ + vlib_node_t *n = vlib_get_node (vm, node_index); + pg_node_t *pn = pg_get_node (node_index); + + n->format_buffer = format_udp_header; + n->unformat_buffer = unformat_udp_header; + pn->unformat_edit = unformat_pg_udp_header; +} + +clib_error_t * +udp_local_init (vlib_main_t * vm) +{ + udp_input_runtime_t *rt; + udp_main_t *um = &udp_main; + int i; + + { + clib_error_t *error; + error = vlib_call_init_function (vm, udp_init); + if (error) + clib_error_report (error); + } + + + for (i = 0; i < 2; i++) + { + um->dst_port_info_by_name[i] = hash_create_string (0, sizeof (uword)); + um->dst_port_info_by_dst_port[i] = hash_create (0, sizeof (uword)); + } + + udp_setup_node (vm, udp4_input_node.index); + udp_setup_node (vm, udp6_input_node.index); + + rt = vlib_node_get_runtime_data (vm, udp4_input_node.index); + + rt->next_by_dst_port = sparse_vec_new + ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]), + /* bits in index */ BITS (((udp_header_t *) 0)->dst_port)); + + rt->punt_unknown = 0; + +#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 1 /* is_ip4 */); + foreach_udp4_dst_port +#undef _ + rt = vlib_node_get_runtime_data (vm, udp6_input_node.index); + + rt->next_by_dst_port = sparse_vec_new + ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]), + /* bits in index */ BITS (((udp_header_t *) 0)->dst_port)); + + rt->punt_unknown = 0; + +#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 0 /* is_ip4 */); + foreach_udp6_dst_port +#undef _ + ip4_register_protocol (IP_PROTOCOL_UDP, udp4_input_node.index); + /* Note: ip6 differs from ip4, UDP is hotwired to ip6-udp-lookup */ + return 0; +} + +VLIB_INIT_FUNCTION (udp_local_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/udp_packet.h b/src/vnet/udp/udp_packet.h new file mode 100644 index 00000000..beea3059 --- /dev/null +++ b/src/vnet/udp/udp_packet.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* + * ip4/udp_packet.h: UDP packet format + * + * Copyright (c) 2008 Eliot Dresselhaus + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef included_udp_packet_h +#define included_udp_packet_h + +typedef struct +{ + /* Source and destination port. */ + u16 src_port, dst_port; + + /* Length of UDP header plus payload. */ + u16 length; + + /* Checksum of UDP pseudo-header and data or + zero if checksum is disabled. */ + u16 checksum; +} udp_header_t; + +#endif /* included_udp_packet_h */ + + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/udp_pg.c b/src/vnet/udp/udp_pg.c new file mode 100644 index 00000000..c9d8d38c --- /dev/null +++ b/src/vnet/udp/udp_pg.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * ip/udp_pg: UDP packet-generator interface + * + * Copyright (c) 2008 Eliot Dresselhaus + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include /* for unformat_udp_udp_port */ + +#define UDP_PG_EDIT_LENGTH (1 << 0) +#define UDP_PG_EDIT_CHECKSUM (1 << 1) + +always_inline void +udp_pg_edit_function_inline (pg_main_t * pg, + pg_stream_t * s, + pg_edit_group_t * g, + u32 * packets, u32 n_packets, u32 flags) +{ + vlib_main_t *vm = vlib_get_main (); + u32 ip_offset, udp_offset; + + udp_offset = g->start_byte_offset; + ip_offset = (g - 1)->start_byte_offset; + + while (n_packets >= 1) + { + vlib_buffer_t *p0; + ip4_header_t *ip0; + udp_header_t *udp0; + u32 udp_len0; + + p0 = vlib_get_buffer (vm, packets[0]); + n_packets -= 1; + packets += 1; + + ip0 = (void *) (p0->data + ip_offset); + udp0 = (void *) (p0->data + udp_offset); + udp_len0 = clib_net_to_host_u16 (ip0->length) - sizeof (ip0[0]); + + if (flags & UDP_PG_EDIT_LENGTH) + udp0->length = + clib_net_to_host_u16 (vlib_buffer_length_in_chain (vm, p0) + - ip_offset); + + /* Initialize checksum with header. */ + if (flags & UDP_PG_EDIT_CHECKSUM) + { + ip_csum_t sum0; + + sum0 = clib_mem_unaligned (&ip0->src_address, u64); + + sum0 = ip_csum_with_carry + (sum0, clib_host_to_net_u32 (udp_len0 + (ip0->protocol << 16))); + + /* Invalidate possibly old checksum. */ + udp0->checksum = 0; + + sum0 = + ip_incremental_checksum_buffer (vm, p0, udp_offset, udp_len0, + sum0); + + sum0 = ~ip_csum_fold (sum0); + + /* Zero checksum means checksumming disabled. */ + sum0 = sum0 != 0 ? sum0 : 0xffff; + + udp0->checksum = sum0; + } + } +} + +static void +udp_pg_edit_function (pg_main_t * pg, + pg_stream_t * s, + pg_edit_group_t * g, u32 * packets, u32 n_packets) +{ + switch (g->edit_function_opaque) + { + case UDP_PG_EDIT_LENGTH: + udp_pg_edit_function_inline (pg, s, g, packets, n_packets, + UDP_PG_EDIT_LENGTH); + break; + + case UDP_PG_EDIT_CHECKSUM: + udp_pg_edit_function_inline (pg, s, g, packets, n_packets, + UDP_PG_EDIT_CHECKSUM); + break; + + case UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH: + udp_pg_edit_function_inline (pg, s, g, packets, n_packets, + UDP_PG_EDIT_CHECKSUM | UDP_PG_EDIT_LENGTH); + break; + + default: + ASSERT (0); + break; + } +} + +typedef struct +{ + pg_edit_t src_port, dst_port; + pg_edit_t length; + pg_edit_t checksum; +} pg_udp_header_t; + +static inline void +pg_udp_header_init (pg_udp_header_t * p) +{ + /* Initialize fields that are not bit fields in the IP header. */ +#define _(f) pg_edit_init (&p->f, udp_header_t, f); + _(src_port); + _(dst_port); + _(length); + _(checksum); +#undef _ +} + +uword +unformat_pg_udp_header (unformat_input_t * input, va_list * args) +{ + pg_stream_t *s = va_arg (*args, pg_stream_t *); + pg_udp_header_t *p; + u32 group_index; + + p = pg_create_edit_group (s, sizeof (p[0]), sizeof (udp_header_t), + &group_index); + pg_udp_header_init (p); + + /* Defaults. */ + p->checksum.type = PG_EDIT_UNSPECIFIED; + p->length.type = PG_EDIT_UNSPECIFIED; + + if (!unformat (input, "UDP: %U -> %U", + unformat_pg_edit, + unformat_tcp_udp_port, &p->src_port, + unformat_pg_edit, unformat_tcp_udp_port, &p->dst_port)) + goto error; + + /* Parse options. 
*/ + while (1) + { + if (unformat (input, "length %U", + unformat_pg_edit, unformat_pg_number, &p->length)) + ; + + else if (unformat (input, "checksum %U", + unformat_pg_edit, unformat_pg_number, &p->checksum)) + ; + + /* Can't parse input: try next protocol level. */ + else + break; + } + + { + ip_main_t *im = &ip_main; + u16 dst_port; + tcp_udp_port_info_t *pi; + + pi = 0; + if (p->dst_port.type == PG_EDIT_FIXED) + { + dst_port = pg_edit_get_value (&p->dst_port, PG_EDIT_LO); + pi = ip_get_tcp_udp_port_info (im, dst_port); + } + + if (pi && pi->unformat_pg_edit + && unformat_user (input, pi->unformat_pg_edit, s)) + ; + + else if (!unformat_user (input, unformat_pg_payload, s)) + goto error; + + p = pg_get_edit_group (s, group_index); + if (p->checksum.type == PG_EDIT_UNSPECIFIED + || p->length.type == PG_EDIT_UNSPECIFIED) + { + pg_edit_group_t *g = pg_stream_get_group (s, group_index); + g->edit_function = udp_pg_edit_function; + g->edit_function_opaque = 0; + if (p->checksum.type == PG_EDIT_UNSPECIFIED) + g->edit_function_opaque |= UDP_PG_EDIT_CHECKSUM; + if (p->length.type == PG_EDIT_UNSPECIFIED) + g->edit_function_opaque |= UDP_PG_EDIT_LENGTH; + } + + return 1; + } + +error: + /* Free up any edits we may have added. */ + pg_free_edit_group (s); + return 0; +} + + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/vnet_all_api_h.h b/src/vnet/vnet_all_api_h.h index 142acedc..c4075db6 100644 --- a/src/vnet/vnet_all_api_h.h +++ b/src/vnet/vnet_all_api_h.h @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.h b/src/vnet/vxlan-gpe/vxlan_gpe.h index 1b4bc44e..e768d230 100644 --- a/src/vnet/vxlan-gpe/vxlan_gpe.h +++ b/src/vnet/vxlan-gpe/vxlan_gpe.h @@ -29,7 +29,7 @@ #include #include #include -#include +#include /** * @brief VXLAN GPE header struct diff --git a/src/vnet/vxlan/vxlan.h b/src/vnet/vxlan/vxlan.h index adfa3a8e..dca1cd12 100644 --- a/src/vnet/vxlan/vxlan.h +++ b/src/vnet/vxlan/vxlan.h @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index 24f48293..2d6e4f37 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -38,6 +38,7 @@ * IPSEC-GRE APIs: see .../src/vnet/ipsec-gre/{ipsec_gre.api, ipsec_gre_api.c} * LISP APIs: see .../src/vnet/lisp/{lisp.api, lisp_api.c} * LISP-GPE APIs: see .../src/vnet/lisp-gpe/{lisp_gpe.api, lisp_gpe_api.c} + * SESSION APIs: .../vnet/session/{session.api session_api.c} * MPLS APIs: see .../src/vnet/mpls/{mpls.api, mpls_api.c} * SR APIs: see .../src/vnet/sr/{sr.api, sr_api.c} * DPDK APIs: see ... 
/src/vnet/devices/dpdk/{dpdk.api, dpdk_api.c} diff --git a/src/vppinfra.am b/src/vppinfra.am index 8d375958..4b9f0c29 100644 --- a/src/vppinfra.am +++ b/src/vppinfra.am @@ -157,7 +157,9 @@ nobase_include_HEADERS = \ vppinfra/asm_mips.h \ vppinfra/asm_x86.h \ vppinfra/bihash_8_8.h \ + vppinfra/bihash_16_8.h \ vppinfra/bihash_24_8.h \ + vppinfra/bihash_48_8.h \ vppinfra/bihash_template.h \ vppinfra/bihash_template.c \ vppinfra/bitmap.h \ @@ -206,6 +208,7 @@ nobase_include_HEADERS = \ vppinfra/timer.h \ vppinfra/tw_timer_2t_1w_2048sl.h \ vppinfra/tw_timer_16t_2w_512sl.h \ + vppinfra/tw_timer_16t_1w_2048sl.h \ vppinfra/tw_timer_template.h \ vppinfra/tw_timer_template.c \ vppinfra/types.h \ @@ -261,6 +264,8 @@ CLIB_CORE = \ vppinfra/tw_timer_2t_1w_2048sl.c \ vppinfra/tw_timer_16t_2w_512sl.h \ vppinfra/tw_timer_16t_2w_512sl.c \ + vppinfra/tw_timer_16t_1w_2048sl.h \ + vppinfra/tw_timer_16t_1w_2048sl.c \ vppinfra/unformat.c \ vppinfra/vec.c \ vppinfra/vector.c \ diff --git a/src/vppinfra/bihash_16_8.h b/src/vppinfra/bihash_16_8.h new file mode 100644 index 00000000..ce80f70e --- /dev/null +++ b/src/vppinfra/bihash_16_8.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#undef BIHASH_TYPE + +#define BIHASH_TYPE _16_8 +#define BIHASH_KVP_PER_PAGE 4 + +#ifndef __included_bihash_16_8_h__ +#define __included_bihash_16_8_h__ + +#include +#include +#include +#include + +typedef struct +{ + u64 key[2]; + u64 value; +} clib_bihash_kv_16_8_t; + +static inline int +clib_bihash_is_free_16_8 (clib_bihash_kv_16_8_t * v) +{ + /* Free values are memset to 0xff, check a bit... 
*/ + if (v->key[0] == ~0ULL && v->value == ~0ULL) + return 1; + return 0; +} + +#if __SSE4_2__ +#ifndef __defined_crc_u32__ +#define __defined_crc_u32__ +static inline u32 +crc_u32 (u32 data, u32 value) +{ + __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data] + "rm" (data)); + return value; +} +#endif /* __defined_crc_u32__ */ + +static inline u64 +clib_bihash_hash_16_8 (clib_bihash_kv_16_8_t * v) +{ + u32 *dp = (u32 *) & v->key[0]; + u32 value = 0; + + value = crc_u32 (dp[0], value); + value = crc_u32 (dp[1], value); + value = crc_u32 (dp[2], value); + value = crc_u32 (dp[3], value); + + return value; +} +#else +static inline u64 +clib_bihash_hash_16_8 (clib_bihash_kv_16_8_t * v) +{ + u64 tmp = v->key[0] ^ v->key[1]; + return clib_xxhash (tmp); +} +#endif + +static inline u8 * +format_bihash_kvp_16_8 (u8 * s, va_list * args) +{ + clib_bihash_kv_16_8_t *v = va_arg (*args, clib_bihash_kv_16_8_t *); + + s = format (s, "key %llu %llu value %llu", v->key[0], v->key[1], v->value); + return s; +} + +static inline int +clib_bihash_key_compare_16_8 (u64 * a, u64 * b) +{ + return ((a[0] ^ b[0]) | (a[1] ^ b[1])) == 0; +} + +#undef __included_bihash_template_h__ +#include + +#endif /* __included_bihash_16_8_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra/bihash_48_8.h b/src/vppinfra/bihash_48_8.h new file mode 100644 index 00000000..1a6e7691 --- /dev/null +++ b/src/vppinfra/bihash_48_8.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#undef BIHASH_TYPE + +#define BIHASH_TYPE _48_8 +#define BIHASH_KVP_PER_PAGE 4 + +#ifndef __included_bihash_48_8_h__ +#define __included_bihash_48_8_h__ + +#include +#include +#include +#include + +typedef struct +{ + u64 key[6]; + u64 value; +} clib_bihash_kv_48_8_t; + +static inline int +clib_bihash_is_free_48_8 (const clib_bihash_kv_48_8_t * v) +{ + /* Free values are memset to 0xff, check a bit... 
*/ + if (v->key[0] == ~0ULL && v->value == ~0ULL) + return 1; + return 0; +} + +#if __SSE4_2__ +#ifndef __defined_crc_u32__ +#define __defined_crc_u32__ +static inline u32 +crc_u32 (u32 data, u32 value) +{ + __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data] + "rm" (data)); + return value; +} +#endif /* __defined_crc_u32__ */ + +static inline u64 +clib_bihash_hash_48_8 (const clib_bihash_kv_48_8_t * v) +{ + const u32 *dp = (const u32 *) &v->key[0]; + u32 value = 0; + + value = crc_u32 (dp[0], value); + value = crc_u32 (dp[1], value); + value = crc_u32 (dp[2], value); + value = crc_u32 (dp[3], value); + value = crc_u32 (dp[4], value); + value = crc_u32 (dp[5], value); + value = crc_u32 (dp[6], value); + value = crc_u32 (dp[7], value); + value = crc_u32 (dp[8], value); + value = crc_u32 (dp[9], value); + value = crc_u32 (dp[10], value); + value = crc_u32 (dp[11], value); + + return value; +} +#else +static inline u64 +clib_bihash_hash_48_8 (const clib_bihash_kv_48_8_t * v) +{ + u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2] ^ v->key[3] ^ v->key[4] + ^ v->key[5]; + return clib_xxhash (tmp); +} +#endif + +static inline u8 * +format_bihash_kvp_48_8 (u8 * s, va_list * args) +{ + clib_bihash_kv_48_8_t *v = va_arg (*args, clib_bihash_kv_48_8_t *); + + s = format (s, "key %llu %llu %llu %llu %llu %llu value %llu", v->key[0], + v->key[1], v->key[2], v->key[3], v->key[4], v->key[5], + v->value); + return s; +} + +static inline int +clib_bihash_key_compare_48_8 (const u64 * a, const u64 * b) +{ + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3]) + | (a[4] ^ b[4]) | (a[5] ^ b[5])) == 0; +} + +#undef __included_bihash_template_h__ +#include + +#endif /* __included_bihash_48_8_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra/tw_timer_16t_1w_2048sl.c b/src/vppinfra/tw_timer_16t_1w_2048sl.c new file mode 100644 index 00000000..3f342045 --- /dev/null +++ b/src/vppinfra/tw_timer_16t_1w_2048sl.c @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "tw_timer_16t_1w_2048sl.h" +#include "tw_timer_template.c" + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra/tw_timer_16t_1w_2048sl.h b/src/vppinfra/tw_timer_16t_1w_2048sl.h new file mode 100644 index 00000000..685ac31e --- /dev/null +++ b/src/vppinfra/tw_timer_16t_1w_2048sl.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __included_tw_timer_16t_2w_512sl_h__ +#define __included_tw_timer_16t_2w_512sl_h__ + +/* ... So that a client app can create multiple wheel geometries */ +#undef TW_TIMER_WHEELS +#undef TW_SLOTS_PER_RING +#undef TW_RING_SHIFT +#undef TW_RING_MASK +#undef TW_TIMERS_PER_OBJECT +#undef LOG2_TW_TIMERS_PER_OBJECT +#undef TW_SUFFIX + +#define TW_TIMER_WHEELS 1 +#define TW_SLOTS_PER_RING 2048 +#define TW_RING_SHIFT 11 +#define TW_RING_MASK (TW_SLOTS_PER_RING -1) +#define TW_TIMERS_PER_OBJECT 16 +#define LOG2_TW_TIMERS_PER_OBJECT 4 +#define TW_SUFFIX _16t_1w_2048sl + +#include + +#endif /* __included_tw_timer_16t_2w_512sl_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg From e04c29942af6a130591059679531c9ffa3d7237a Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 1 Mar 2017 08:17:34 -0800 Subject: Cleanup URI code and TCP bugfixing - Add CLI/API to enable session layer, by default it's disabled - Improve rcv wnd computation - Improvements to tx path - URI code cleanup - Builtin test tcp server - Improve src port allocation Change-Id: I2ace498e76a0771d4c31a8075cc14fe33d7dfa38 Signed-off-by: Florin Coras --- src/scripts/vnet/uri/dummy_app.py | 65 +++ src/scripts/vnet/uri/tcp_server | 1 + src/svm/svm_fifo.c | 6 +- src/uri.am | 10 +- src/uri/uri_tcp_test.c | 792 +++++++++++++------------ src/uri/uri_udp_test.c | 442 +++++++++++++- src/uri/uri_udp_test2.c | 954 ------------------------------- src/uri/uritest.c | 484 ---------------- src/vnet.am | 1 + src/vnet/api_errno.h | 3 +- src/vnet/session/application.c | 27 +- src/vnet/session/application.h | 1 + src/vnet/session/application_interface.c | 6 +- src/vnet/session/node.c | 57 +- src/vnet/session/session.api | 22 + src/vnet/session/session.c | 86 ++- src/vnet/session/session.h | 23 +- src/vnet/session/session_api.c | 59 +- src/vnet/session/session_cli.c | 63 +- src/vnet/tcp/builtin_server.c | 135 +++++ src/vnet/tcp/tcp.c | 48 +- src/vnet/tcp/tcp.h | 4 +- src/vnet/tcp/tcp_input.c | 56 +- src/vnet/tcp/tcp_output.c | 90 ++- 24 files changed, 1460 insertions(+), 1975 deletions(-) create mode 100644 src/scripts/vnet/uri/dummy_app.py delete mode 100644 src/uri/uri_udp_test2.c delete mode 100644 src/uri/uritest.c create mode 100644 src/vnet/tcp/builtin_server.c (limited to 'src/svm') diff --git a/src/scripts/vnet/uri/dummy_app.py b/src/scripts/vnet/uri/dummy_app.py new file mode 100644 index 00000000..b80fbb28 --- /dev/null +++ b/src/scripts/vnet/uri/dummy_app.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +import socket +import sys +import bitstring + +# action can be reflect or drop +action = "drop" + +def handle_connection (connection, client_address): + print("Received connection from {}".format(repr(client_address))) + try: + while True: + data = connection.recv(4096) + if not data: + break; + if (action != "drop"): + connection.sendall(data) + finally: + connection.close() + +def run_server(ip, port): + print("Starting server {}:{}".format(repr(ip), repr(port))) + sock = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM) + server_address = (ip, int(port)) + sock.bind(server_address) + sock.listen(1) + + while True: + connection, client_address = sock.accept() + handle_connection (connection, client_address) + +def prepare_data(): + buf = [] + for i in range (0, pow(2, 16)): + buf.append(i & 0xff) + return bytearray(buf) + +def run_client(ip, port): + print("Starting client {}:{}".format(repr(ip), repr(port))) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server_address = ("6.0.1.1", 1234) + sock.connect(server_address) + + data = prepare_data() + try: + sock.sendall(data) + finally: + sock.close() + +def run(mode, ip, port): + if (mode == "server"): + run_server (ip, port) + elif (mode == "client"): + run_client (ip, port) + else: + raise Exception("Unknown mode. Only client and server supported") + +if __name__ == "__main__": + if (len(sys.argv)) < 4: + raise Exception("Usage: ./dummy_app []") + if (len(sys.argv) == 5): + action = sys.argv[4] + + run (sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/src/scripts/vnet/uri/tcp_server b/src/scripts/vnet/uri/tcp_server index 7f5a86de..c29afc6f 100644 --- a/src/scripts/vnet/uri/tcp_server +++ b/src/scripts/vnet/uri/tcp_server @@ -2,3 +2,4 @@ create host-interface name vpp1 set int state host-vpp1 up set int ip address host-vpp1 6.0.1.1/24 trace add af-packet-input 10 +session enable diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 11f90193..e3f534b1 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -508,9 +508,9 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, { /* Number of bytes in first copy segment */ first_copy_bytes = - ((nitems - f->head) < total_copy_bytes) ? - (nitems - f->head) : total_copy_bytes; - clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes); + ((nitems - f->head + offset) < total_copy_bytes) ? + (nitems - f->head + offset) : total_copy_bytes; + clib_memcpy (copy_here, &f->data[f->head + offset], first_copy_bytes); /* Number of bytes in second copy segment, if any */ second_copy_bytes = total_copy_bytes - first_copy_bytes; diff --git a/src/uri.am b/src/uri.am index 8cdd77c6..09b5b15b 100644 --- a/src/uri.am +++ b/src/uri.am @@ -11,12 +11,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
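The svm_fifo.c hunk above changes svm_fifo_peek so that the caller-supplied dequeue offset is applied both to the copy source index and to the first-segment length computation. For context, a circular-buffer peek at an offset is normally done as a copy in at most two segments around the wrap point; the following is a minimal, self-contained sketch of that pattern with hypothetical names (demo_fifo_t, demo_fifo_peek), not the svm_fifo_t API itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative ring buffer; field names are hypothetical, not svm_fifo_t. */
typedef struct
{
  uint8_t data[16];		/* nitems = 16 */
  uint32_t head;		/* dequeue (read) index */
  uint32_t cursize;		/* bytes currently in the fifo */
} demo_fifo_t;

/* Copy up to max_bytes starting 'offset' bytes past the head, without
 * moving the head.  At most two memcpy calls are needed: one up to the
 * wrap point, one from the start of the array. */
static int
demo_fifo_peek (demo_fifo_t * f, uint32_t offset, uint32_t max_bytes,
		uint8_t * copy_here)
{
  uint32_t nitems = sizeof (f->data);
  uint32_t total, first, second, start;

  if (offset >= f->cursize)
    return 0;
  total = f->cursize - offset;
  if (total > max_bytes)
    total = max_bytes;

  start = (f->head + offset) % nitems;
  first = nitems - start;	/* room left before the wrap point */
  if (first > total)
    first = total;
  second = total - first;

  memcpy (copy_here, &f->data[start], first);
  if (second)
    memcpy (copy_here + first, &f->data[0], second);
  return total;
}

int
main (void)
{
  demo_fifo_t f = {.head = 12,.cursize = 8 };
  uint8_t out[8];
  int i, n;

  /* Bytes land at indexes 12..15 and then 0..3, holding 'A'..'H' */
  for (i = 0; i < 8; i++)
    f.data[(f.head + i) % sizeof (f.data)] = 'A' + i;

  n = demo_fifo_peek (&f, 3, sizeof (out), out);
  printf ("peeked %d bytes: %.*s\n", n, n, out);	/* DEFGH */
  return 0;
}

Running the sketch prints "peeked 5 bytes: DEFGH": the five bytes sitting three positions past the head are returned in two pieces, one byte before the wrap and four from the start of the array.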
-noinst_PROGRAMS += uri_udp_test2 uri_tcp_test +noinst_PROGRAMS += uri_udp_test uri_tcp_test -uri_udp_test2_SOURCES = uri/uri_udp_test2.c -uri_udp_test2_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ - libvppinfra.la -lpthread -lm -lrt +uri_udp_test_SOURCES = uri/uri_udp_test.c +uri_udp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ + libvppinfra.la -lpthread -lm -lrt uri_tcp_test_SOURCES = uri/uri_tcp_test.c uri_tcp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ - libvppinfra.la -lpthread -lm -lrt + libvppinfra.la -lpthread -lm -lrt diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index ed5a37d8..6c9cf1db 100644 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -20,16 +20,15 @@ #include #include #include +#include -#include "../vnet/session/application_interface.h" - -#define vl_typedefs /* define message structures */ +#define vl_typedefs /* define message structures */ #include #undef vl_typedefs /* declare message handlers for each api */ -#define vl_endianfun /* define message structures */ +#define vl_endianfun /* define message structures */ #include #undef vl_endianfun @@ -45,8 +44,8 @@ vlib_main_t **vlib_mains; typedef struct { - svm_fifo_t * server_rx_fifo; - svm_fifo_t * server_tx_fifo; + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; u32 vpp_session_index; u32 vpp_session_thread; @@ -69,19 +68,19 @@ typedef struct u32 my_client_index; /* The URI we're playing with */ - u8 * uri; + u8 *uri; /* Session pool */ - session_t * sessions; + session_t *sessions; /* Hash table for disconnect processing */ - uword * session_index_by_vpp_handles; + uword *session_index_by_vpp_handles; /* intermediate rx buffer */ - u8 * rx_buf; + u8 *rx_buf; /* URI for slave's connect */ - u8 * connect_uri; + u8 *connect_uri; u32 connected_session_index; @@ -91,10 +90,10 @@ typedef struct int drop_packets; /* Our event queue */ - unix_shared_memory_queue_t * our_event_queue; + unix_shared_memory_queue_t *our_event_queue; /* $$$ single thread only for the moment */ - unix_shared_memory_queue_t * vpp_event_queue; + unix_shared_memory_queue_t *vpp_event_queue; pid_t my_pid; @@ -111,12 +110,15 @@ typedef struct u32 configured_segment_size; /* VNET_API_ERROR_FOO -> "Foo" hash table */ - uword * error_string_by_error_number; - - /* convenience */ - svm_fifo_segment_main_t * segment_main; + uword *error_string_by_error_number; u8 *connect_test_data; + pthread_t client_rx_thread_handle; + u32 client_bytes_received; + u8 test_return_packets; + + /* convenience */ + svm_fifo_segment_main_t *segment_main; } uri_tcp_test_main_t; uri_tcp_test_main_t uri_tcp_test_main; @@ -141,7 +143,7 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) while (clib_time_now (&utm->clib_time) < timeout) { if (utm->state == state) - return 0; + return 0; if (utm->state == STATE_FAILED) return -1; } @@ -209,7 +211,7 @@ connect_to_vpp (char *name) } static void -vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t *mp) +vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) { svm_fifo_segment_create_args_t _a, *a = &_a; int rv; @@ -221,24 +223,24 @@ vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t *mp) if (rv) { clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); + mp->segment_name); return; } clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, - mp->segment_size); + mp->segment_size); } static void vl_api_disconnect_session_t_handler 
(vl_api_disconnect_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - session_t * session; - vl_api_disconnect_session_reply_t * rmp; - uword * p; + session_t *session; + vl_api_disconnect_session_reply_t *rmp; + uword *p; int rv = 0; u64 key; - key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; p = hash_get (utm->session_index_by_vpp_handles, key); @@ -254,6 +256,8 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rv = -11; } + utm->time_to_stop = 1; + rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); @@ -261,32 +265,32 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rmp->retval = rv; rmp->session_index = mp->session_index; rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } static void vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - session_t * session; - vl_api_reset_session_reply_t * rmp; - uword * p; + session_t *session; + vl_api_reset_session_reply_t *rmp; + uword *p; int rv = 0; u64 key; - key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - p = hash_get(utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, key); if (p) { - session = pool_elt_at_index(utm->sessions, p[0]); - hash_unset(utm->session_index_by_vpp_handles, key); - pool_put(utm->sessions, session); + session = pool_elt_at_index (utm->sessions, p[0]); + hash_unset (utm->session_index_by_vpp_handles, key); + pool_put (utm->sessions, session); } else { - clib_warning("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", key); rv = -11; } @@ -296,301 +300,95 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) rmp->retval = rv; rmp->session_index = mp->session_index; rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } void -handle_fifo_event_connect_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e) +client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, + session_fifo_event_t * e) { - svm_fifo_t * rx_fifo; - int n_read, bytes; + svm_fifo_t *rx_fifo; + int n_read, bytes, i; rx_fifo = e->fifo; bytes = e->enqueue_length; do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf), - utm->rx_buf); + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf), + utm->rx_buf); if (n_read > 0) - bytes -= n_read; + { + bytes -= n_read; + for (i = 0; i < n_read; i++) + { + if (utm->rx_buf[i] != ((utm->client_bytes_received + i) & 0xff)) + { + clib_warning ("error at byte %lld, 0x%x not 0x%x", + utm->client_bytes_received + i, + utm->rx_buf[i], + ((utm->client_bytes_received + i) & 0xff)); + } + } + utm->client_bytes_received += n_read; + } + } while (n_read < 0 || bytes > 0); - - // bytes_to_read = svm_fifo_max_dequeue (rx_fifo); - // - // bytes_to_read = vec_len(utm->rx_buf) > bytes_to_read ? 
- // bytes_to_read : vec_len(utm->rx_buf); - // - // buffer_offset = 0; - // while (bytes_to_read > 0) - // { - // rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid, - // bytes_to_read, - // utm->rx_buf + buffer_offset); - // if (rv > 0) - // { - // bytes_to_read -= rv; - // buffer_offset += rv; - // bytes_received += rv; - // } - // } - - - // while (bytes_received < bytes_sent) - // { - // rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid, - // vec_len (utm->rx_buf), - // utm->rx_buf); - // if (rv > 0) - // { - //#if CLIB_DEBUG > 0 - // int j; - // for (j = 0; j < rv; j++) - // { - // if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) - // { - // clib_warning ("error at byte %lld, 0x%x not 0x%x", - // bytes_received + j, - // utm->rx_buf[j], - // ((bytes_received + j )&0xff)); - // } - // } - //#endif - // bytes_received += (u64) rv; - // } - // } } void -handle_connect_event_queue (uri_tcp_test_main_t * utm) +client_handle_event_queue (uri_tcp_test_main_t * utm) { session_fifo_event_t _e, *e = &_e;; - unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, 0 /* nowait */); + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); switch (e->event_type) { case FIFO_EVENT_SERVER_RX: - handle_fifo_event_connect_rx (utm, e); + client_handle_fifo_event_rx (utm, e); break; case FIFO_EVENT_SERVER_EXIT: return; default: - clib_warning("unknown event type %d", e->event_type); + clib_warning ("unknown event type %d", e->event_type); break; } } -void -uri_tcp_connect_send (uri_tcp_test_main_t *utm) -{ - u8 *test_data = utm->connect_test_data; - u64 bytes_sent = 0; - int rv; - int mypid = getpid(); - session_t * session; - svm_fifo_t *tx_fifo; - int buffer_offset, bytes_to_send = 0; - session_fifo_event_t evt; - static int serial_number = 0; - int i; - u32 max_chunk = 64 << 10, write; - - session = pool_elt_at_index (utm->sessions, utm->connected_session_index); - tx_fifo = session->server_tx_fifo; - - vec_validate (utm->rx_buf, vec_len (test_data) - 1); - - for (i = 0; i < 10; i++) - { - bytes_to_send = vec_len (test_data); - buffer_offset = 0; - while (bytes_to_send > 0) - { - write = bytes_to_send > max_chunk ? 
max_chunk : bytes_to_send; - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write, - test_data + buffer_offset); - - if (rv > 0) - { - bytes_to_send -= rv; - buffer_offset += rv; - bytes_sent += rv; - - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = rv; - evt.event_id = serial_number++; - - unix_shared_memory_queue_add (utm->vpp_event_queue, (u8 *) &evt, - 0 /* do wait for mutex */); - } - } - } -} - -static void -uri_tcp_client_test (uri_tcp_test_main_t * utm) -{ - vl_api_connect_uri_t * cmp; - vl_api_disconnect_session_t *dmp; - session_t *connected_session; - int i; - - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl(0xfeedface); - memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - return; - } - - /* Init test data */ - vec_validate (utm->connect_test_data, 64 * 1024 - 1); - for (i = 0; i < vec_len (utm->connect_test_data); i++) - utm->connect_test_data[i] = i & 0xff; - - /* Start reader thread */ - /* handle_connect_event_queue (utm); */ - - /* Start send */ - uri_tcp_connect_send (utm); - - /* Disconnect */ - connected_session = pool_elt_at_index(utm->sessions, - utm->connected_session_index); - dmp = vl_msg_api_alloc (sizeof (*dmp)); - memset (dmp, 0, sizeof (*dmp)); - dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); - dmp->client_index = utm->my_client_index; - dmp->session_index = connected_session->vpp_session_index; - dmp->session_thread_index = connected_session->vpp_session_thread; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&dmp); -} - -void -handle_fifo_event_server_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e) -{ - svm_fifo_t * rx_fifo, * tx_fifo; - int n_read; - - session_fifo_event_t evt; - unix_shared_memory_queue_t *q; - int rv, bytes; - - rx_fifo = e->fifo; - tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; - - bytes = e->enqueue_length; - do - { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf), - utm->rx_buf); - - /* Reflect if a non-drop session */ - if (!utm->drop_packets && n_read > 0) - { - do - { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); - } - while (rv == -2); - - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = n_read; - evt.event_id = e->event_id; - q = utm->vpp_event_queue; - unix_shared_memory_queue_add (q, (u8 *) &evt, 0 /* do wait for mutex */); - } - - if (n_read > 0) - bytes -= n_read; - } - while (n_read < 0 || bytes > 0); -} - -void -handle_event_queue (uri_tcp_test_main_t * utm) +static void * +client_rx_thread_fn (void *arg) { - session_fifo_event_t _e, *e = &_e;; + session_fifo_event_t _e, *e = &_e; + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + utm->client_bytes_received = 0; while (1) { - unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *)e, - 0 /* nowait */); + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); switch (e->event_type) - { - case FIFO_EVENT_SERVER_RX: - handle_fifo_event_server_rx (utm, e); - break; - - case FIFO_EVENT_SERVER_EXIT: - return; - - default: - clib_warning ("unknown event type %d", e->event_type); - break; - } - if 
(PREDICT_FALSE(utm->time_to_stop == 1)) - break; - if (PREDICT_FALSE(utm->time_to_print_stats == 1)) - { - utm->time_to_print_stats = 0; - fformat(stdout, "%d connections\n", pool_elts (utm->sessions)); - } + { + case FIFO_EVENT_SERVER_RX: + client_handle_fifo_event_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return 0; + default: + clib_warning ("unknown event type %d", e->event_type); + break; + } + + if (PREDICT_FALSE (utm->time_to_stop == 1)) + break; } + pthread_exit (0); } -static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) -{ - uri_tcp_test_main_t *utm = &uri_tcp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - if (mp->retval) - { - clib_warning("bind failed: %d", mp->retval); - return; - } - - if (mp->segment_name_length == 0) - { - clib_warning("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT(mp->server_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning("svm_fifo_segment_attach ('%s') failed", mp->segment_name); - return; - } - - utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->server_event_queue_address; - - utm->state = STATE_READY; -} static void vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) @@ -601,6 +399,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) u32 session_index; svm_fifo_t *rx_fifo, *tx_fifo; int rv; + u64 key; if (mp->retval) { @@ -608,6 +407,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) utm->state = STATE_FAILED; return; } + /* * Attatch to segment */ @@ -622,14 +422,14 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) a->segment_name = (char *) mp->segment_name; a->segment_size = mp->segment_size; - ASSERT(mp->client_event_queue_address); + ASSERT (mp->client_event_queue_address); /* Attach to the segment vpp created */ rv = svm_fifo_segment_attach (a); if (rv) { clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); + mp->segment_name); return; } @@ -650,9 +450,9 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *)mp->server_rx_fifo; + rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *)mp->server_tx_fifo; + tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; @@ -662,54 +462,193 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) /* Save handle */ utm->connected_session_index = session_index; - utm->state = STATE_READY; + + /* Add it to lookup table */ + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; + hash_set (utm->session_index_by_vpp_handles, key, session_index); + + /* Start RX thread */ + rv = pthread_create (&utm->client_rx_thread_handle, + NULL /*attr */ , client_rx_thread_fn, 0); + if (rv) + { + clib_warning ("pthread_create returned %d", rv); + rv = VNET_API_ERROR_SYSCALL_ERROR_1; + } } void -uri_tcp_bind (uri_tcp_test_main_t *utm) +client_send_data (uri_tcp_test_main_t * utm) { - vl_api_bind_uri_t * bmp; - u32 fifo_size = 3 << 20; - bmp = vl_msg_api_alloc (sizeof (*bmp)); - memset (bmp, 0, sizeof (*bmp)); + u8 *test_data = utm->connect_test_data; + u64 bytes_sent = 0; + int rv; + int mypid = 
getpid (); + session_t *session; + svm_fifo_t *tx_fifo; + int buffer_offset, bytes_to_send = 0; + session_fifo_event_t evt; + static int serial_number = 0; + int i; + u32 max_chunk = 64 << 10, write; - bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); - bmp->client_index = utm->my_client_index; - bmp->context = ntohl(0xfeedface); - bmp->initial_segment_size = 256<<20; /* size of initial segment */ - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128<<20; - memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&bmp); + session = pool_elt_at_index (utm->sessions, utm->connected_session_index); + tx_fifo = session->server_tx_fifo; + + vec_validate (utm->rx_buf, vec_len (test_data) - 1); + + for (i = 0; i < 1; i++) + { + bytes_to_send = vec_len (test_data); + buffer_offset = 0; + while (bytes_to_send > 0) + { + write = bytes_to_send > max_chunk ? max_chunk : bytes_to_send; + rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write, + test_data + buffer_offset); + + if (rv > 0) + { + bytes_to_send -= rv; + buffer_offset += rv; + bytes_sent += rv; + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = rv; + evt.event_id = serial_number++; + + unix_shared_memory_queue_add (utm->vpp_event_queue, + (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + } + } + + if (utm->test_return_packets) + { + f64 timeout = clib_time_now (&utm->clib_time) + 2; + + /* Wait for the outstanding packets */ + while (utm->client_bytes_received < vec_len (test_data)) + { + if (clib_time_now (&utm->clib_time) > timeout) + { + clib_warning ("timed out waiting for the missing packets"); + break; + } + } + + utm->time_to_stop = 1; + } +} + +void +client_connect (uri_tcp_test_main_t * utm) +{ + vl_api_connect_uri_t *cmp; + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); +} + +void +client_disconnect (uri_tcp_test_main_t * utm) +{ + session_t *connected_session; + vl_api_disconnect_session_t *dmp; + connected_session = pool_elt_at_index (utm->sessions, + utm->connected_session_index); + dmp = vl_msg_api_alloc (sizeof (*dmp)); + memset (dmp, 0, sizeof (*dmp)); + dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); + dmp->client_index = utm->my_client_index; + dmp->session_index = connected_session->vpp_session_index; + dmp->session_thread_index = connected_session->vpp_session_thread; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & dmp); +} + +static void +client_test (uri_tcp_test_main_t * utm) +{ + int i; + + client_connect (utm); + + if (wait_for_state_change (utm, STATE_READY)) + { + return; + } + + /* Init test data */ + vec_validate (utm->connect_test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (utm->connect_test_data); i++) + utm->connect_test_data[i] = i & 0xff; + + /* Start send */ + client_send_data (utm); + + /* Disconnect */ + client_disconnect (utm); } static void -vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t *mp) +vl_api_bind_uri_reply_t_handler 
(vl_api_bind_uri_reply_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; - if (mp->retval != 0) - clib_warning ("returned %d", ntohl(mp->retval)); + if (mp->retval) + { + clib_warning ("bind failed: %d", mp->retval); + utm->state = STATE_FAILED; + return; + } - utm->state = STATE_START; + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->server_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + utm->our_event_queue = + (unix_shared_memory_queue_t *) mp->server_event_queue_address; + + utm->state = STATE_READY; } -void -uri_tcp_unbind (uri_tcp_test_main_t *utm) +static void +vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) { - vl_api_unbind_uri_t * ump; + uri_tcp_test_main_t *utm = &uri_tcp_test_main; - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); + if (mp->retval != 0) + clib_warning ("returned %d", ntohl (mp->retval)); - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&ump); + utm->state = STATE_START; } static void @@ -717,14 +656,14 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; vl_api_accept_session_reply_t *rmp; - svm_fifo_t * rx_fifo, * tx_fifo; - session_t * session; + svm_fifo_t *rx_fifo, *tx_fifo; + session_t *session; static f64 start_time; u64 key; u32 session_index; if (start_time == 0.0) - start_time = clib_time_now (&utm->clib_time); + start_time = clib_time_now (&utm->clib_time); utm->vpp_event_queue = (unix_shared_memory_queue_t *) mp->vpp_event_queue_address; @@ -733,45 +672,159 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *)mp->server_rx_fifo; + rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *)mp->server_tx_fifo; + tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; session->server_tx_fifo = tx_fifo; /* Add it to lookup table */ - key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; hash_set (utm->session_index_by_vpp_handles, key, session_index); utm->state = STATE_READY; /* Stats printing */ - if (pool_elts (utm->sessions) && (pool_elts(utm->sessions) % 20000) == 0) + if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0) { f64 now = clib_time_now (&utm->clib_time); fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n", - pool_elts(utm->sessions), now - start_time, - (f64)pool_elts(utm->sessions) / (now - start_time)); + pool_elts (utm->sessions), now - start_time, + (f64) pool_elts (utm->sessions) / (now - start_time)); } - /* Send accept reply to vpp */ + /* + * Send accept reply to vpp + */ rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); rmp->session_type = mp->session_type; 
rmp->session_index = mp->session_index; rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } void -uri_tcp_server_test (uri_tcp_test_main_t * utm) +server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, + session_fifo_event_t * e) { + svm_fifo_t *rx_fifo, *tx_fifo; + int n_read; + + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + int rv, bytes; + + rx_fifo = e->fifo; + tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; + + bytes = e->enqueue_length; + do + { + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf), + utm->rx_buf); + + /* Reflect if a non-drop session */ + if (!utm->drop_packets && n_read > 0) + { + do + { + rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); + } + while (rv == -2); + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = n_read; + evt.event_id = e->event_id; + q = utm->vpp_event_queue; + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + + if (n_read > 0) + bytes -= n_read; + } + while (n_read < 0 || bytes > 0); +} + +void +server_handle_event_queue (uri_tcp_test_main_t * utm) +{ + session_fifo_event_t _e, *e = &_e;; + while (1) + { + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); + switch (e->event_type) + { + case FIFO_EVENT_SERVER_RX: + server_handle_fifo_event_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return; + + default: + clib_warning ("unknown event type %d", e->event_type); + break; + } + if (PREDICT_FALSE (utm->time_to_stop == 1)) + break; + if (PREDICT_FALSE (utm->time_to_print_stats == 1)) + { + utm->time_to_print_stats = 0; + fformat (stdout, "%d connections\n", pool_elts (utm->sessions)); + } + } +} + +void +server_bind (uri_tcp_test_main_t * utm) +{ + vl_api_bind_uri_t *bmp; + u32 fifo_size = 3 << 20; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->initial_segment_size = 256 << 20; /* size of initial segment */ + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; + memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +server_unbind (uri_tcp_test_main_t * utm) +{ + vl_api_unbind_uri_t *ump; + + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); +} + +void +server_test (uri_tcp_test_main_t * utm) +{ /* Bind to uri */ - uri_tcp_bind (utm); + server_bind (utm); if (wait_for_state_change (utm, STATE_READY)) { @@ -780,10 +833,10 @@ uri_tcp_server_test (uri_tcp_test_main_t * utm) } /* Enter handle event loop */ - handle_event_queue (utm); + server_handle_event_queue (utm); /* Cleanup */ - uri_tcp_unbind (utm); + server_unbind (utm); if (wait_for_state_change (utm, STATE_START)) { @@ -824,12 +877,12 @@ main 
(int argc, char **argv) unformat_input_t _argv, *a = &_argv; u8 *chroot_prefix; u8 *heap; - u8 * bind_name = (u8 *) "tcp://0.0.0.0/1234"; + u8 *bind_name = (u8 *) "tcp://0.0.0.0/1234"; u32 tmp; mheap_t *h; - session_t * session; + session_t *session; int i; - int i_am_master = 1, drop_packets = 0; + int i_am_master = 1, drop_packets = 0, test_return_packets = 0; clib_mem_init (0, 256 << 20); @@ -841,53 +894,54 @@ main (int argc, char **argv) vec_validate (utm->rx_buf, 65536); - utm->session_index_by_vpp_handles = - hash_create (0, sizeof(uword)); + utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - utm->my_pid = getpid(); - utm->configured_segment_size = 1<<20; + utm->my_pid = getpid (); + utm->configured_segment_size = 1 << 20; clib_time_init (&utm->clib_time); init_error_string_table (utm); - svm_fifo_segment_init(0x200000000ULL, 20); + svm_fifo_segment_init (0x200000000ULL, 20); unformat_init_command_line (a, argv); while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) { if (unformat (a, "chroot prefix %s", &chroot_prefix)) - { - vl_set_memory_root_path ((char *) chroot_prefix); - } + { + vl_set_memory_root_path ((char *) chroot_prefix); + } else if (unformat (a, "uri %s", &bind_name)) - ; + ; else if (unformat (a, "segment-size %dM", &tmp)) - utm->configured_segment_size = tmp<<20; + utm->configured_segment_size = tmp << 20; else if (unformat (a, "segment-size %dG", &tmp)) - utm->configured_segment_size = tmp<<30; + utm->configured_segment_size = tmp << 30; else if (unformat (a, "master")) - i_am_master = 1; + i_am_master = 1; else if (unformat (a, "slave")) - i_am_master = 0; + i_am_master = 0; else if (unformat (a, "drop")) - drop_packets = 1; + drop_packets = 1; + else if (unformat (a, "test")) + test_return_packets = 1; else - { - fformat (stderr, "%s: usage [master|slave]\n"); - exit (1); - } + { + fformat (stderr, "%s: usage [master|slave]\n"); + exit (1); + } } utm->uri = format (0, "%s%c", bind_name, 0); utm->i_am_master = i_am_master; utm->segment_main = &svm_fifo_segment_main; utm->drop_packets = drop_packets; - + utm->test_return_packets = test_return_packets; utm->connect_uri = format (0, "tcp://6.0.1.2/1234%c", 0); - setup_signal_handlers(); + setup_signal_handlers (); uri_api_hookup (utm); - if (connect_to_vpp (i_am_master? "uri_tcp_server":"uri_tcp_client") < 0) + if (connect_to_vpp (i_am_master ? 
"uri_tcp_server" : "uri_tcp_client") < 0) { svm_region_exit (); fformat (stderr, "Couldn't connect to vpe, exiting...\n"); @@ -896,7 +950,7 @@ main (int argc, char **argv) if (i_am_master == 0) { - uri_tcp_client_test (utm); + client_test (utm); exit (0); } @@ -909,8 +963,16 @@ main (int argc, char **argv) for (i = 0; i < 200000; i++) pool_put_index (utm->sessions, i); - uri_tcp_server_test (utm); + server_test (utm); vl_client_disconnect_from_vlib (); exit (0); } + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 6f5284c9..54625d64 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -26,25 +26,25 @@ #include #include #include -#include -#include - -#include +#include +#include +#include +#include #define vl_typedefs /* define message structures */ -#include +#include #undef vl_typedefs /* declare message handlers for each api */ #define vl_endianfun /* define message structures */ -#include +#include #undef vl_endianfun /* instantiate all the print functions we know about */ #define vl_print(handle, ...) #define vl_printfun -#include +#include #undef vl_printfun /* Satisfy external references when not linking with -lvlib */ @@ -87,12 +87,28 @@ typedef struct /* intermediate rx buffer */ u8 *rx_buf; + /* URI for connect */ + u8 *connect_uri; + + int i_am_master; + /* Our event queue */ unix_shared_memory_queue_t *our_event_queue; /* $$$ single thread only for the moment */ unix_shared_memory_queue_t *vpp_event_queue; + /* $$$$ hack: cut-through session index */ + volatile u32 cut_through_session_index; + + /* unique segment name counter */ + u32 unique_segment_index; + + pid_t my_pid; + + /* pthread handle */ + pthread_t cut_through_thread_handle; + /* For deadman timers */ clib_time_t clib_time; @@ -102,14 +118,20 @@ typedef struct volatile int time_to_stop; volatile int time_to_print_stats; + u32 configured_segment_size; + /* VNET_API_ERROR_FOO -> "Foo" hash table */ uword *error_string_by_error_number; + + /* convenience */ + svm_fifo_segment_main_t *segment_main; + } uri_udp_test_main_t; #if CLIB_DEBUG > 0 -#define NITER 1000 +#define NITER 10000 #else -#define NITER 1000000 +#define NITER 4000000 #endif uri_udp_test_main_t uri_udp_test_main; @@ -159,7 +181,13 @@ format_api_error (u8 * s, va_list * args) int wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) { - f64 timeout = clib_time_now (&utm->clib_time) + 5.0; +#if CLIB_DEBUG > 0 +#define TIMEOUT 600.0 +#else +#define TIMEOUT 600.0 +#endif + + f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT; while (clib_time_now (&utm->clib_time) < timeout) { @@ -169,6 +197,183 @@ wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) return -1; } +u64 server_bytes_received, server_bytes_sent; + +static void * +cut_through_thread_fn (void *arg) +{ + session_t *s; + svm_fifo_t *rx_fifo; + svm_fifo_t *tx_fifo; + u8 *my_copy_buffer = 0; + uri_udp_test_main_t *utm = &uri_udp_test_main; + i32 actual_transfer; + int rv; + u32 buffer_offset; + + while (utm->cut_through_session_index == ~0) + ; + + s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); + + rx_fifo = s->server_rx_fifo; + tx_fifo = s->server_tx_fifo; + + vec_validate (my_copy_buffer, 64 * 1024 - 1); + + while (true) + { + /* We read from the tx fifo and write to the rx fifo */ + do + { + actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0, + vec_len (my_copy_buffer), + 
my_copy_buffer); + } + while (actual_transfer <= 0); + + server_bytes_received += actual_transfer; + + buffer_offset = 0; + while (actual_transfer > 0) + { + rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer, + my_copy_buffer + buffer_offset); + if (rv > 0) + { + actual_transfer -= rv; + buffer_offset += rv; + server_bytes_sent += rv; + } + + } + if (PREDICT_FALSE (utm->time_to_stop)) + break; + } + + pthread_exit (0); +} + +static void +uri_udp_slave_test (uri_udp_test_main_t * utm) +{ + vl_api_connect_uri_t *cmp; + int i; + u8 *test_data = 0; + u64 bytes_received = 0, bytes_sent = 0; + i32 bytes_to_read; + int rv; + int mypid = getpid (); + f64 before, after, delta, bytes_per_second; + session_t *session; + svm_fifo_t *rx_fifo, *tx_fifo; + int buffer_offset, bytes_to_send = 0; + + vec_validate (test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (test_data); i++) + test_data[i] = i & 0xff; + + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); + rx_fifo = session->server_rx_fifo; + tx_fifo = session->server_tx_fifo; + + before = clib_time_now (&utm->clib_time); + + vec_validate (utm->rx_buf, vec_len (test_data) - 1); + + for (i = 0; i < NITER; i++) + { + bytes_to_send = vec_len (test_data); + buffer_offset = 0; + while (bytes_to_send > 0) + { + rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, + bytes_to_send, + test_data + buffer_offset); + + if (rv > 0) + { + bytes_to_send -= rv; + buffer_offset += rv; + bytes_sent += rv; + } + } + + bytes_to_read = svm_fifo_max_dequeue (rx_fifo); + + bytes_to_read = vec_len (utm->rx_buf) > bytes_to_read ? 
+ bytes_to_read : vec_len (utm->rx_buf); + + buffer_offset = 0; + while (bytes_to_read > 0) + { + rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + bytes_to_read, + utm->rx_buf + buffer_offset); + if (rv > 0) + { + bytes_to_read -= rv; + buffer_offset += rv; + bytes_received += rv; + } + } + } + while (bytes_received < bytes_sent) + { + rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + vec_len (utm->rx_buf), utm->rx_buf); + if (rv > 0) + { +#if CLIB_DEBUG > 0 + int j; + for (j = 0; j < rv; j++) + { + if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) + { + clib_warning ("error at byte %lld, 0x%x not 0x%x", + bytes_received + j, + utm->rx_buf[j], + ((bytes_received + j) & 0xff)); + } + } +#endif + bytes_received += (u64) rv; + } + } + + after = clib_time_now (&utm->clib_time); + delta = after - before; + bytes_per_second = 0.0; + + if (delta > 0.0) + bytes_per_second = (f64) bytes_received / delta; + + fformat (stdout, + "Done: %lld recv bytes in %.2f seconds, %.2f bytes/sec...\n\n", + bytes_received, delta, bytes_per_second); + fformat (stdout, + "Done: %lld sent bytes in %.2f seconds, %.2f bytes/sec...\n\n", + bytes_sent, delta, bytes_per_second); + fformat (stdout, + "client -> server -> client round trip: %.2f Gbit/sec \n\n", + (bytes_per_second * 8.0) / 1e9); +} + static void vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) { @@ -183,12 +388,16 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) } a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->server_event_queue_address); /* Attach to the segment vpp created */ rv = svm_fifo_segment_attach (a); if (rv) { - clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); return; } @@ -198,6 +407,101 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) utm->state = STATE_READY; } +static void +vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, + mp->segment_size); +} + +static void +vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) +{ + u32 segment_index; + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *seg; + unix_shared_memory_queue_t *client_q; + vl_api_connect_uri_reply_t *rmp; + session_t *session; + int rv = 0; + + /* Create the segment */ + a->segment_name = (char *) format (0, "%d:segment%d%c", utm->my_pid, + utm->unique_segment_index++, 0); + a->segment_size = utm->configured_segment_size; + + rv = svm_fifo_segment_create (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%s') failed", a->segment_name); + rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + goto send_reply; + } + + vec_add2 (utm->seg, seg, 1); + + segment_index = vec_len (sm->segments) - 1; + + memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0])); + + pool_get (utm->sessions, session); + + /* + * By construction the master's idea of the rx fifo ends up in + * fsh->fifos[0], and the master's idea of the tx fifo ends up in + 
* fsh->fifos[1]. + */ + session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, + 128 * 1024); + ASSERT (session->server_rx_fifo); + + session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, + 128 * 1024); + ASSERT (session->server_tx_fifo); + + session->server_rx_fifo->server_session_index = session - utm->sessions; + session->server_tx_fifo->server_session_index = session - utm->sessions; + utm->cut_through_session_index = session - utm->sessions; + + rv = pthread_create (&utm->cut_through_thread_handle, + NULL /*attr */ , cut_through_thread_fn, 0); + if (rv) + { + clib_warning ("pthread_create returned %d", rv); + rv = VNET_API_ERROR_SYSCALL_ERROR_1; + } + +send_reply: + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + + rmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI_REPLY); + rmp->context = mp->context; + rmp->retval = ntohl (rv); + rmp->segment_name_length = vec_len (a->segment_name); + memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); + + vec_free (a->segment_name); + + client_q = (unix_shared_memory_queue_t *) mp->client_queue_address; + vl_msg_api_send_shmem (client_q, (u8 *) & rmp); +} + static void vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) { @@ -293,18 +597,79 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } +static void +vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) +{ + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *seg; + svm_fifo_segment_header_t *fsh; + session_t *session; + u32 segment_index; + int rv; + + ASSERT (utm->i_am_master == 0); + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + memset (a, 0, sizeof (*a)); + + a->segment_name = (char *) mp->segment_name; + + sleep (1); + + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name); + return; + } + + segment_index = vec_len (sm->segments) - 1; + + vec_add2 (utm->seg, seg, 1); + + memcpy (seg, sm->segments + segment_index, sizeof (*seg)); + sh = seg->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + while (vec_len (fsh->fifos) < 2) + sleep (1); + + pool_get (utm->sessions, session); + utm->cut_through_session_index = session - utm->sessions; + + session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; + ASSERT (session->server_rx_fifo); + session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; + ASSERT (session->server_tx_fifo); + + /* security: could unlink /dev/shm/segment_name> here, maybe */ + + utm->state = STATE_READY; +} + #define foreach_uri_msg \ _(BIND_URI_REPLY, bind_uri_reply) \ +_(CONNECT_URI, connect_uri) \ +_(CONNECT_URI_REPLY, connect_uri_reply) \ _(UNBIND_URI_REPLY, unbind_uri_reply) \ _(ACCEPT_SESSION, accept_session) \ -_(DISCONNECT_SESSION, disconnect_session) +_(DISCONNECT_SESSION, disconnect_session) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) void uri_api_hookup (uri_udp_test_main_t * utm) { #define _(N,n) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ + vl_api_##n##_t_handler, \ vl_noop_handler, \ vl_api_##n##_t_endian, \ vl_api_##n##_t_print, \ @@ -349,7 +714,7 @@ init_error_string_table (uri_udp_test_main_t * utm) } void -handle_fifo_event_server_rx (uri_udp_test_main_t * utm, 
+server_handle_fifo_event_rx (uri_udp_test_main_t * utm, session_fifo_event_t * e) { svm_fifo_t *rx_fifo, *tx_fifo; @@ -385,7 +750,7 @@ handle_fifo_event_server_rx (uri_udp_test_main_t * utm, } void -handle_event_queue (uri_udp_test_main_t * utm) +server_handle_event_queue (uri_udp_test_main_t * utm) { session_fifo_event_t _e, *e = &_e;; @@ -396,7 +761,7 @@ handle_event_queue (uri_udp_test_main_t * utm) switch (e->event_type) { case FIFO_EVENT_SERVER_RX: - handle_fifo_event_server_rx (utm, e); + server_handle_fifo_event_rx (utm, e); break; case FIFO_EVENT_SERVER_EXIT: @@ -428,7 +793,12 @@ uri_udp_test (uri_udp_test_main_t * utm) bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->segment_size = 2 << 30; + bmp->initial_segment_size = 256 << 20; /* size of initial segment */ + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); @@ -438,7 +808,7 @@ uri_udp_test (uri_udp_test_main_t * utm) return; } - handle_event_queue (utm); + server_handle_event_queue (utm); ump = vl_msg_api_alloc (sizeof (*ump)); memset (ump, 0, sizeof (*ump)); @@ -464,10 +834,12 @@ main (int argc, char **argv) unformat_input_t _argv, *a = &_argv; u8 *chroot_prefix; u8 *heap; - u8 *bind_name = (u8 *) "udp4:1234"; + u8 *bind_name = (u8 *) "udp://0.0.0.0/1234"; + u32 tmp; mheap_t *h; session_t *session; int i; + int i_am_master = 1; clib_mem_init (0, 256 << 20); @@ -481,6 +853,9 @@ main (int argc, char **argv) utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); + utm->my_pid = getpid (); + utm->configured_segment_size = 1 << 20; + clib_time_init (&utm->clib_time); init_error_string_table (utm); svm_fifo_segment_init (0x200000000ULL, 20); @@ -494,6 +869,14 @@ main (int argc, char **argv) } else if (unformat (a, "uri %s", &bind_name)) ; + else if (unformat (a, "segment-size %dM", &tmp)) + utm->configured_segment_size = tmp << 20; + else if (unformat (a, "segment-size %dG", &tmp)) + utm->configured_segment_size = tmp << 30; + else if (unformat (a, "master")) + i_am_master = 1; + else if (unformat (a, "slave")) + i_am_master = 0; else { fformat (stderr, "%s: usage [master|slave]\n"); @@ -501,19 +884,30 @@ main (int argc, char **argv) } } + utm->cut_through_session_index = ~0; utm->uri = format (0, "%s%c", bind_name, 0); + utm->i_am_master = i_am_master; + utm->segment_main = &svm_fifo_segment_main; + + utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0); setup_signal_handlers (); uri_api_hookup (utm); - if (connect_to_vpp ("uri_udp_test") < 0) + if (connect_to_vpp (i_am_master ? 
"uri_udp_master" : "uri_udp_slave") < 0) { svm_region_exit (); fformat (stderr, "Couldn't connect to vpe, exiting...\n"); exit (1); } + if (i_am_master == 0) + { + uri_udp_slave_test (utm); + exit (0); + } + /* $$$$ hack preallocation */ for (i = 0; i < 200000; i++) { @@ -531,7 +925,7 @@ main (int argc, char **argv) #undef vl_api_version #define vl_api_version(n,v) static u32 vpe_api_version = v; -#include +#include #undef vl_api_version void @@ -544,6 +938,12 @@ vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); } +u32 +vl (void *p) +{ + return vec_len (p); +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/uri/uri_udp_test2.c b/src/uri/uri_udp_test2.c deleted file mode 100644 index ddfffaa6..00000000 --- a/src/uri/uri_udp_test2.c +++ /dev/null @@ -1,954 +0,0 @@ -/* - * Copyright (c) 2016 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../vnet/session/application_interface.h" - -#define vl_typedefs /* define message structures */ -#include -#undef vl_typedefs - -/* declare message handlers for each api */ - -#define vl_endianfun /* define message structures */ -#include -#undef vl_endianfun - -/* instantiate all the print functions we know about */ -#define vl_print(handle, ...) 
-#define vl_printfun -#include -#undef vl_printfun - -/* Satisfy external references when not linking with -lvlib */ -vlib_main_t vlib_global_main; -vlib_main_t **vlib_mains; - -typedef enum -{ - STATE_START, - STATE_READY, - STATE_DISCONNECTING, -} connection_state_t; - -typedef struct -{ - svm_fifo_t *server_rx_fifo; - svm_fifo_t *server_tx_fifo; -} session_t; - -typedef struct -{ - /* vpe input queue */ - unix_shared_memory_queue_t *vl_input_queue; - - /* API client handle */ - u32 my_client_index; - - /* The URI we're playing with */ - u8 *uri; - - /* Session pool */ - session_t *sessions; - - /* Hash table for disconnect processing */ - uword *session_index_by_vpp_handles; - - /* fifo segment */ - svm_fifo_segment_private_t *seg; - - /* intermediate rx buffer */ - u8 *rx_buf; - - /* URI for connect */ - u8 *connect_uri; - - int i_am_master; - - /* Our event queue */ - unix_shared_memory_queue_t *our_event_queue; - - /* $$$ single thread only for the moment */ - unix_shared_memory_queue_t *vpp_event_queue; - - /* $$$$ hack: cut-through session index */ - volatile u32 cut_through_session_index; - - /* unique segment name counter */ - u32 unique_segment_index; - - pid_t my_pid; - - /* pthread handle */ - pthread_t cut_through_thread_handle; - - /* For deadman timers */ - clib_time_t clib_time; - - /* State of the connection, shared between msg RX thread and main thread */ - volatile connection_state_t state; - - volatile int time_to_stop; - volatile int time_to_print_stats; - - u32 configured_segment_size; - - /* VNET_API_ERROR_FOO -> "Foo" hash table */ - uword *error_string_by_error_number; - - /* convenience */ - svm_fifo_segment_main_t *segment_main; - -} uri_udp_test_main_t; - -#if CLIB_DEBUG > 0 -#define NITER 10000 -#else -#define NITER 4000000 -#endif - -uri_udp_test_main_t uri_udp_test_main; - -static void -stop_signal (int signum) -{ - uri_udp_test_main_t *um = &uri_udp_test_main; - - um->time_to_stop = 1; -} - -static void -stats_signal (int signum) -{ - uri_udp_test_main_t *um = &uri_udp_test_main; - - um->time_to_print_stats = 1; -} - -static clib_error_t * -setup_signal_handlers (void) -{ - signal (SIGINT, stats_signal); - signal (SIGQUIT, stop_signal); - signal (SIGTERM, stop_signal); - - return 0; -} - -u8 * -format_api_error (u8 * s, va_list * args) -{ - uri_udp_test_main_t *utm = va_arg (*args, uri_udp_test_main_t *); - i32 error = va_arg (*args, u32); - uword *p; - - p = hash_get (utm->error_string_by_error_number, -error); - - if (p) - s = format (s, "%s", p[0]); - else - s = format (s, "%d", error); - return s; -} - -int -wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) -{ -#if CLIB_DEBUG > 0 -#define TIMEOUT 600.0 -#else -#define TIMEOUT 600.0 -#endif - - f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT; - - while (clib_time_now (&utm->clib_time) < timeout) - { - if (utm->state == state) - return 0; - } - return -1; -} - -u64 server_bytes_received, server_bytes_sent; - -static void * -cut_through_thread_fn (void *arg) -{ - session_t *s; - svm_fifo_t *rx_fifo; - svm_fifo_t *tx_fifo; - u8 *my_copy_buffer = 0; - uri_udp_test_main_t *utm = &uri_udp_test_main; - i32 actual_transfer; - int rv; - u32 buffer_offset; - - while (utm->cut_through_session_index == ~0) - ; - - s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); - - rx_fifo = s->server_rx_fifo; - tx_fifo = s->server_tx_fifo; - - vec_validate (my_copy_buffer, 64 * 1024 - 1); - - while (true) - { - /* We read from the tx fifo and write to the rx fifo */ - do - { 
- actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0, - vec_len (my_copy_buffer), - my_copy_buffer); - } - while (actual_transfer <= 0); - - server_bytes_received += actual_transfer; - - buffer_offset = 0; - while (actual_transfer > 0) - { - rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer, - my_copy_buffer + buffer_offset); - if (rv > 0) - { - actual_transfer -= rv; - buffer_offset += rv; - server_bytes_sent += rv; - } - - } - if (PREDICT_FALSE (utm->time_to_stop)) - break; - } - - pthread_exit (0); -} - -static void -uri_udp_slave_test (uri_udp_test_main_t * utm) -{ - vl_api_connect_uri_t *cmp; - int i; - u8 *test_data = 0; - u64 bytes_received = 0, bytes_sent = 0; - i32 bytes_to_read; - int rv; - int mypid = getpid (); - f64 before, after, delta, bytes_per_second; - session_t *session; - svm_fifo_t *rx_fifo, *tx_fifo; - int buffer_offset, bytes_to_send = 0; - - vec_validate (test_data, 64 * 1024 - 1); - for (i = 0; i < vec_len (test_data); i++) - test_data[i] = i & 0xff; - - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl (0xfeedface); - memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); - rx_fifo = session->server_rx_fifo; - tx_fifo = session->server_tx_fifo; - - before = clib_time_now (&utm->clib_time); - - vec_validate (utm->rx_buf, vec_len (test_data) - 1); - - for (i = 0; i < NITER; i++) - { - bytes_to_send = vec_len (test_data); - buffer_offset = 0; - while (bytes_to_send > 0) - { - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, - bytes_to_send, - test_data + buffer_offset); - - if (rv > 0) - { - bytes_to_send -= rv; - buffer_offset += rv; - bytes_sent += rv; - } - } - - bytes_to_read = svm_fifo_max_dequeue (rx_fifo); - - bytes_to_read = vec_len (utm->rx_buf) > bytes_to_read ? 
- bytes_to_read : vec_len (utm->rx_buf); - - buffer_offset = 0; - while (bytes_to_read > 0) - { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, - bytes_to_read, - utm->rx_buf + buffer_offset); - if (rv > 0) - { - bytes_to_read -= rv; - buffer_offset += rv; - bytes_received += rv; - } - } - } - while (bytes_received < bytes_sent) - { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, - vec_len (utm->rx_buf), utm->rx_buf); - if (rv > 0) - { -#if CLIB_DEBUG > 0 - int j; - for (j = 0; j < rv; j++) - { - if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) - { - clib_warning ("error at byte %lld, 0x%x not 0x%x", - bytes_received + j, - utm->rx_buf[j], - ((bytes_received + j) & 0xff)); - } - } -#endif - bytes_received += (u64) rv; - } - } - - after = clib_time_now (&utm->clib_time); - delta = after - before; - bytes_per_second = 0.0; - - if (delta > 0.0) - bytes_per_second = (f64) bytes_received / delta; - - fformat (stdout, - "Done: %lld recv bytes in %.2f seconds, %.2f bytes/sec...\n\n", - bytes_received, delta, bytes_per_second); - fformat (stdout, - "Done: %lld sent bytes in %.2f seconds, %.2f bytes/sec...\n\n", - bytes_sent, delta, bytes_per_second); - fformat (stdout, - "client -> server -> client round trip: %.2f Gbit/sec \n\n", - (bytes_per_second * 8.0) / 1e9); -} - -static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT (mp->server_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - - utm->our_event_queue = (unix_shared_memory_queue_t *) - mp->server_event_queue_address; - - utm->state = STATE_READY; -} - -static void -vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) -{ - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, - mp->segment_size); -} - -static void -vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) -{ - u32 segment_index; - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - svm_fifo_segment_private_t *seg; - unix_shared_memory_queue_t *client_q; - vl_api_connect_uri_reply_t *rmp; - session_t *session; - int rv = 0; - - /* Create the segment */ - a->segment_name = (char *) format (0, "%d:segment%d%c", utm->my_pid, - utm->unique_segment_index++, 0); - a->segment_size = utm->configured_segment_size; - - rv = svm_fifo_segment_create (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%s') failed", a->segment_name); - rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED; - goto send_reply; - } - - vec_add2 (utm->seg, seg, 1); - - segment_index = vec_len (sm->segments) - 1; - - memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0])); - - pool_get (utm->sessions, session); - - /* - * By construction the master's idea of the rx fifo 
ends up in - * fsh->fifos[0], and the master's idea of the tx fifo ends up in - * fsh->fifos[1]. - */ - session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); - ASSERT (session->server_rx_fifo); - - session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); - ASSERT (session->server_tx_fifo); - - session->server_rx_fifo->server_session_index = session - utm->sessions; - session->server_tx_fifo->server_session_index = session - utm->sessions; - utm->cut_through_session_index = session - utm->sessions; - - rv = pthread_create (&utm->cut_through_thread_handle, - NULL /*attr */ , cut_through_thread_fn, 0); - if (rv) - { - clib_warning ("pthread_create returned %d", rv); - rv = VNET_API_ERROR_SYSCALL_ERROR_1; - } - -send_reply: - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - - rmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI_REPLY); - rmp->context = mp->context; - rmp->retval = ntohl (rv); - rmp->segment_name_length = vec_len (a->segment_name); - memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); - - vec_free (a->segment_name); - - client_q = (unix_shared_memory_queue_t *) mp->client_queue_address; - vl_msg_api_send_shmem (client_q, (u8 *) & rmp); -} - -static void -vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - - if (mp->retval != 0) - clib_warning ("returned %d", ntohl (mp->retval)); - - utm->state = STATE_START; -} - -static void -vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - vl_api_accept_session_reply_t *rmp; - svm_fifo_t *rx_fifo, *tx_fifo; - session_t *session; - static f64 start_time; - u64 key; - - if (start_time == 0.0) - start_time = clib_time_now (&utm->clib_time); - - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; - - pool_get (utm->sessions, session); - - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; - rx_fifo->client_session_index = session - utm->sessions; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; - tx_fifo->client_session_index = session - utm->sessions; - - session->server_rx_fifo = rx_fifo; - session->server_tx_fifo = tx_fifo; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - hash_set (utm->session_index_by_vpp_handles, key, session - utm->sessions); - - utm->state = STATE_READY; - - if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0) - { - f64 now = clib_time_now (&utm->clib_time); - fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n", - pool_elts (utm->sessions), now - start_time, - (f64) pool_elts (utm->sessions) / (now - start_time)); - } - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); - rmp->session_type = mp->session_type; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); -} - -static void -vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - session_t *session; - vl_api_disconnect_session_reply_t *rmp; - uword *p; - int rv = 0; - u64 key; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - p = hash_get (utm->session_index_by_vpp_handles, key); - - if (p) - { - session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset 
(utm->session_index_by_vpp_handles, key); - pool_put (utm->sessions, session); - } - else - { - clib_warning ("couldn't find session key %llx", key); - rv = -11; - } - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); - rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); -} - -static void -vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) -{ - svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - ssvm_shared_header_t *sh; - svm_fifo_segment_private_t *seg; - svm_fifo_segment_header_t *fsh; - session_t *session; - u32 segment_index; - int rv; - - ASSERT (utm->i_am_master == 0); - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - memset (a, 0, sizeof (*a)); - - a->segment_name = (char *) mp->segment_name; - - sleep (1); - - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name); - return; - } - - segment_index = vec_len (sm->segments) - 1; - - vec_add2 (utm->seg, seg, 1); - - memcpy (seg, sm->segments + segment_index, sizeof (*seg)); - sh = seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - - while (vec_len (fsh->fifos) < 2) - sleep (1); - - pool_get (utm->sessions, session); - utm->cut_through_session_index = session - utm->sessions; - - session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; - ASSERT (session->server_rx_fifo); - session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; - ASSERT (session->server_tx_fifo); - - /* security: could unlink /dev/shm/segment_name> here, maybe */ - - utm->state = STATE_READY; -} - -#define foreach_uri_msg \ -_(BIND_URI_REPLY, bind_uri_reply) \ -_(CONNECT_URI, connect_uri) \ -_(CONNECT_URI_REPLY, connect_uri_reply) \ -_(UNBIND_URI_REPLY, unbind_uri_reply) \ -_(ACCEPT_SESSION, accept_session) \ -_(DISCONNECT_SESSION, disconnect_session) \ -_(MAP_ANOTHER_SEGMENT, map_another_segment) - -void -uri_api_hookup (uri_udp_test_main_t * utm) -{ -#define _(N,n) \ - vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ - vl_noop_handler, \ - vl_api_##n##_t_endian, \ - vl_api_##n##_t_print, \ - sizeof(vl_api_##n##_t), 1); - foreach_uri_msg; -#undef _ - -} - - -int -connect_to_vpp (char *name) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - api_main_t *am = &api_main; - - if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) - return -1; - - utm->vl_input_queue = am->shmem_hdr->vl_input_queue; - utm->my_client_index = am->my_client_index; - - return 0; -} - -void -vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
-{ - clib_warning ("BUG"); -} - -static void -init_error_string_table (uri_udp_test_main_t * utm) -{ - utm->error_string_by_error_number = hash_create (0, sizeof (uword)); - -#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); - foreach_vnet_api_error; -#undef _ - - hash_set (utm->error_string_by_error_number, 99, "Misc"); -} - -void -handle_fifo_event_server_rx (uri_udp_test_main_t * utm, - session_fifo_event_t * e) -{ - svm_fifo_t *rx_fifo, *tx_fifo; - int nbytes; - - session_fifo_event_t evt; - unix_shared_memory_queue_t *q; - int rv; - - rx_fifo = e->fifo; - tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; - - do - { - nbytes = svm_fifo_dequeue_nowait (rx_fifo, 0, - vec_len (utm->rx_buf), utm->rx_buf); - } - while (nbytes <= 0); - do - { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, nbytes, utm->rx_buf); - } - while (rv == -2); - - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = nbytes; - evt.event_id = e->event_id; - q = utm->vpp_event_queue; - unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); -} - -void -handle_event_queue (uri_udp_test_main_t * utm) -{ - session_fifo_event_t _e, *e = &_e;; - - while (1) - { - unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, - 0 /* nowait */ ); - switch (e->event_type) - { - case FIFO_EVENT_SERVER_RX: - handle_fifo_event_server_rx (utm, e); - break; - - case FIFO_EVENT_SERVER_EXIT: - return; - - default: - clib_warning ("unknown event type %d", e->event_type); - break; - } - if (PREDICT_FALSE (utm->time_to_stop == 1)) - break; - if (PREDICT_FALSE (utm->time_to_print_stats == 1)) - { - utm->time_to_print_stats = 0; - fformat (stdout, "%d connections\n", pool_elts (utm->sessions)); - } - } -} - -void -uri_udp_test (uri_udp_test_main_t * utm) -{ - vl_api_bind_uri_t *bmp; - vl_api_unbind_uri_t *ump; - - bmp = vl_msg_api_alloc (sizeof (*bmp)); - memset (bmp, 0, sizeof (*bmp)); - - bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); - bmp->client_index = utm->my_client_index; - bmp->context = ntohl (0xfeedface); - bmp->initial_segment_size = 256 << 20; /* size of initial segment */ - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; - memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - handle_event_queue (utm); - - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); - - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); - - if (wait_for_state_change (utm, STATE_START)) - { - clib_warning ("timeout waiting for STATE_START"); - return; - } - - fformat (stdout, "Test complete...\n"); -} - -int -main (int argc, char **argv) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - unformat_input_t _argv, *a = &_argv; - u8 *chroot_prefix; - u8 *heap; - u8 *bind_name = (u8 *) "udp://0.0.0.0/1234"; - u32 tmp; - mheap_t *h; - session_t *session; - int i; - int i_am_master = 1; - - clib_mem_init (0, 256 << 20); - - heap 
= clib_mem_get_per_cpu_heap (); - h = mheap_header (heap); - - /* make the main heap thread-safe */ - h->flags |= MHEAP_FLAG_THREAD_SAFE; - - vec_validate (utm->rx_buf, 8192); - - utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - - utm->my_pid = getpid (); - utm->configured_segment_size = 1 << 20; - - clib_time_init (&utm->clib_time); - init_error_string_table (utm); - svm_fifo_segment_init (0x200000000ULL, 20); - unformat_init_command_line (a, argv); - - while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) - { - if (unformat (a, "chroot prefix %s", &chroot_prefix)) - { - vl_set_memory_root_path ((char *) chroot_prefix); - } - else if (unformat (a, "uri %s", &bind_name)) - ; - else if (unformat (a, "segment-size %dM", &tmp)) - utm->configured_segment_size = tmp << 20; - else if (unformat (a, "segment-size %dG", &tmp)) - utm->configured_segment_size = tmp << 30; - else if (unformat (a, "master")) - i_am_master = 1; - else if (unformat (a, "slave")) - i_am_master = 0; - else - { - fformat (stderr, "%s: usage [master|slave]\n"); - exit (1); - } - } - - utm->cut_through_session_index = ~0; - utm->uri = format (0, "%s%c", bind_name, 0); - utm->i_am_master = i_am_master; - utm->segment_main = &svm_fifo_segment_main; - - utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0); - - setup_signal_handlers (); - - uri_api_hookup (utm); - - if (connect_to_vpp (i_am_master ? "uri_udp_master" : "uri_udp_slave") < 0) - { - svm_region_exit (); - fformat (stderr, "Couldn't connect to vpe, exiting...\n"); - exit (1); - } - - if (i_am_master == 0) - { - uri_udp_slave_test (utm); - exit (0); - } - - /* $$$$ hack preallocation */ - for (i = 0; i < 200000; i++) - { - pool_get (utm->sessions, session); - memset (session, 0, sizeof (*session)); - } - for (i = 0; i < 200000; i++) - pool_put_index (utm->sessions, i); - - uri_udp_test (utm); - - vl_client_disconnect_from_vlib (); - exit (0); -} - -#undef vl_api_version -#define vl_api_version(n,v) static u32 vpe_api_version = v; -#include -#undef vl_api_version - -void -vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) -{ - /* - * Send the main API signature in slot 0. This bit of code must - * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). - */ - mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); -} - -u32 -vl (void *p) -{ - return vec_len (p); -} - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/uri/uritest.c b/src/uri/uritest.c deleted file mode 100644 index edcdb3ad..00000000 --- a/src/uri/uritest.c +++ /dev/null @@ -1,484 +0,0 @@ -/* - * Copyright (c) 2016 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define vl_typedefs /* define message structures */ -#include -#undef vl_typedefs - -/* declare message handlers for each api */ - -#define vl_endianfun /* define message structures */ -#include -#undef vl_endianfun - -/* instantiate all the print functions we know about */ -#define vl_print(handle, ...) -#define vl_printfun -#include -#undef vl_printfun - -typedef enum -{ - STATE_START, - STATE_READY, - STATE_DISCONNECTING, -} connection_state_t; - -typedef struct -{ - /* vpe input queue */ - unix_shared_memory_queue_t *vl_input_queue; - - /* API client handle */ - u32 my_client_index; - - /* role */ - int i_am_master; - - /* The URI we're playing with */ - u8 *uri; - - /* fifo segment */ - svm_fifo_segment_private_t *seg; - - svm_fifo_t *rx_fifo; - svm_fifo_t *tx_fifo; - - /* For deadman timers */ - clib_time_t clib_time; - - /* State of the connection, shared between msg RX thread and main thread */ - volatile connection_state_t state; - - /* VNET_API_ERROR_FOO -> "Foo" hash table */ - uword *error_string_by_error_number; -} uritest_main_t; - -#if CLIB_DEBUG > 0 -#define NITER 1000 -#else -#define NITER 1000000 -#endif - -uritest_main_t uritest_main; - -u8 * -format_api_error (u8 * s, va_list * args) -{ - uritest_main_t *utm = va_arg (*args, uritest_main_t *); - i32 error = va_arg (*args, u32); - uword *p; - - p = hash_get (utm->error_string_by_error_number, -error); - - if (p) - s = format (s, "%s", p[0]); - else - s = format (s, "%d", error); - return s; -} - -int -wait_for_state_change (uritest_main_t * utm, connection_state_t state) -{ - f64 timeout = clib_time_now (&utm->clib_time) + 1.0; - - while (clib_time_now (&utm->clib_time) < timeout) - { - if (utm->state == state) - return 0; - } - return -1; -} - -static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) -{ - uritest_main_t *utm = &uritest_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - ASSERT (utm->i_am_master); - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - /* Create the segment */ - rv = svm_fifo_segment_create (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); - return; - } - - vec_validate (utm->seg, 0); - - memcpy (utm->seg, a->rv, sizeof (*utm->seg)); - - /* - * By construction the master's idea of the rx fifo ends up in - * fsh->fifos[0], and the master's idea of the tx fifo ends up in - * fsh->fifos[1]. 
- */ - utm->rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240); - ASSERT (utm->rx_fifo); - - utm->tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240); - ASSERT (utm->tx_fifo); - - utm->state = STATE_READY; -} - -static void -vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) -{ - uritest_main_t *utm = &uritest_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - ssvm_shared_header_t *sh; - svm_fifo_segment_header_t *fsh; - int rv; - - ASSERT (utm->i_am_master == 0); - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - memset (a, 0, sizeof (*a)); - - a->segment_name = (char *) mp->segment_name; - - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); - return; - } - - vec_validate (utm->seg, 0); - - memcpy (utm->seg, a->rv, sizeof (*utm->seg)); - sh = utm->seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - - while (vec_len (fsh->fifos) < 2) - sleep (1); - - utm->rx_fifo = (svm_fifo_t *) fsh->fifos[1]; - ASSERT (utm->rx_fifo); - utm->tx_fifo = (svm_fifo_t *) fsh->fifos[0]; - ASSERT (utm->tx_fifo); - - /* security: could unlink /dev/shm/segment_name> here, maybe */ - - utm->state = STATE_READY; -} - -static void -vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) -{ - uritest_main_t *utm = &uritest_main; - - if (mp->retval != 0) - clib_warning ("returned %d", ntohl (mp->retval)); - - utm->state = STATE_START; -} - -#define foreach_uri_msg \ -_(BIND_URI_REPLY, bind_uri_reply) \ -_(CONNECT_URI_REPLY, connect_uri_reply) \ -_(UNBIND_URI_REPLY, unbind_uri_reply) - -void -uri_api_hookup (uritest_main_t * utm) -{ -#define _(N,n) \ - vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ - vl_noop_handler, \ - vl_api_##n##_t_endian, \ - vl_api_##n##_t_print, \ - sizeof(vl_api_##n##_t), 1); - foreach_uri_msg; -#undef _ - -} - - -int -connect_to_vpp (char *name) -{ - uritest_main_t *utm = &uritest_main; - api_main_t *am = &api_main; - - if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) - return -1; - - utm->vl_input_queue = am->shmem_hdr->vl_input_queue; - utm->my_client_index = am->my_client_index; - - return 0; -} - -void -vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
-{ - clib_warning ("BUG"); -} - -static void -init_error_string_table (uritest_main_t * utm) -{ - utm->error_string_by_error_number = hash_create (0, sizeof (uword)); - -#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); - foreach_vnet_api_error; -#undef _ - - hash_set (utm->error_string_by_error_number, 99, "Misc"); -} - -void -uritest_master (uritest_main_t * utm) -{ - vl_api_bind_uri_t *bmp; - vl_api_unbind_uri_t *ump; - int i; - u8 *test_data = 0; - u8 *reply = 0; - u32 reply_len; - int mypid = getpid (); - - for (i = 0; i < 2048; i++) - vec_add1 (test_data, 'a' + (i % 32)); - - bmp = vl_msg_api_alloc (sizeof (*bmp)); - memset (bmp, 0, sizeof (*bmp)); - - bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); - bmp->client_index = utm->my_client_index; - bmp->context = ntohl (0xfeedface); - bmp->segment_size = 256 << 10; - memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - for (i = 0; i < NITER; i++) - svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (test_data), test_data); - - vec_validate (reply, 0); - - reply_len = svm_fifo_dequeue (utm->rx_fifo, mypid, vec_len (reply), reply); - - if (reply_len != 1) - clib_warning ("reply length %d", reply_len); - - if (reply[0] == 1) - fformat (stdout, "Test OK..."); - - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); - - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); - - if (wait_for_state_change (utm, STATE_START)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - fformat (stdout, "Master done...\n"); -} - -void -uritest_slave (uritest_main_t * utm) -{ - vl_api_connect_uri_t *cmp; - int i, j; - u8 *test_data = 0; - u8 *reply = 0; - u32 bytes_received = 0; - u32 actual_bytes; - int mypid = getpid (); - u8 ok; - f64 before, after, delta, bytes_per_second; - - vec_validate (test_data, 4095); - - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl (0xfeedface); - memcpy (cmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - ok = 1; - before = clib_time_now (&utm->clib_time); - for (i = 0; i < NITER; i++) - { - actual_bytes = svm_fifo_dequeue (utm->rx_fifo, mypid, - vec_len (test_data), test_data); - j = 0; - while (j < actual_bytes) - { - if (test_data[j] != ('a' + (bytes_received % 32))) - ok = 0; - bytes_received++; - j++; - } - if (bytes_received == NITER * 2048) - break; - } - - vec_add1 (reply, ok); - - svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (reply), reply); - after = clib_time_now (&utm->clib_time); - delta = after - before; - bytes_per_second = 0.0; - - if (delta > 0.0) - bytes_per_second = (f64) bytes_received / delta; - - fformat (stdout, - "Slave done, %d bytes in %.2f seconds, %.2f bytes/sec...\n", - bytes_received, delta, bytes_per_second); -} - -int -main (int argc, char **argv) -{ - uritest_main_t *utm = &uritest_main; - unformat_input_t _argv, *a = &_argv; - u8 *chroot_prefix; - u8 *heap; - char *bind_name = "fifo:uritest"; - mheap_t 
*h; - int i_am_master = 0; - - clib_mem_init (0, 128 << 20); - - heap = clib_mem_get_per_cpu_heap (); - h = mheap_header (heap); - - /* make the main heap thread-safe */ - h->flags |= MHEAP_FLAG_THREAD_SAFE; - - clib_time_init (&utm->clib_time); - init_error_string_table (utm); - svm_fifo_segment_init (0x200000000ULL, 20); - unformat_init_command_line (a, argv); - - utm->uri = format (0, "%s%c", bind_name, 0); - - while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) - { - if (unformat (a, "master")) - i_am_master = 1; - else if (unformat (a, "slave")) - i_am_master = 0; - else if (unformat (a, "chroot prefix %s", &chroot_prefix)) - { - vl_set_memory_root_path ((char *) chroot_prefix); - } - else - { - fformat (stderr, "%s: usage [master|slave]\n"); - exit (1); - } - } - - uri_api_hookup (utm); - - if (connect_to_vpp (i_am_master ? "uritest_master" : "uritest_slave") < 0) - { - svm_region_exit (); - fformat (stderr, "Couldn't connect to vpe, exiting...\n"); - exit (1); - } - - utm->i_am_master = i_am_master; - - if (i_am_master) - uritest_master (utm); - else - uritest_slave (utm); - - vl_client_disconnect_from_vlib (); - exit (0); -} - -#undef vl_api_version -#define vl_api_version(n,v) static u32 vpe_api_version = v; -#include -#undef vl_api_version - -void -vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) -{ - /* - * Send the main API signature in slot 0. This bit of code must - * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). - */ - mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); -} - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet.am b/src/vnet.am index 7125a122..4e30ee92 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -461,6 +461,7 @@ libvnet_la_SOURCES += \ vnet/tcp/tcp_output.c \ vnet/tcp/tcp_input.c \ vnet/tcp/tcp_newreno.c \ + vnet/tcp/builtin_server.c \ vnet/tcp/tcp.c nobase_include_HEADERS += \ diff --git a/src/vnet/api_errno.h b/src/vnet/api_errno.h index 5e65ac7b..74d39bdb 100644 --- a/src/vnet/api_errno.h +++ b/src/vnet/api_errno.h @@ -103,7 +103,8 @@ _(LISP_RLOC_LOCAL, -110, "RLOC address is local") \ _(BFD_EAGAIN, -111, "BFD object cannot be manipulated at this time") \ _(INVALID_GPE_MODE, -112, "Invalid GPE mode") \ _(LISP_GPE_ENTRIES_PRESENT, -113, "LISP GPE entries are present") \ -_(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface") +_(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface") \ +_(SESSION_CONNECT_FAIL, -115, "Session failed to connect") typedef enum { diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index a561e7d1..a542eebe 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -154,6 +154,15 @@ application_get (u32 index) return pool_elt_at_index (app_pool, index); } +application_t * +application_get_if_valid (u32 index) +{ + if (pool_is_free_index (app_pool, index)) + return 0; + + return pool_elt_at_index (app_pool, index); +} + u32 application_get_index (application_t * app) { @@ -209,7 +218,7 @@ format_application_server (u8 * s, va_list * args) regp = vl_api_client_index_to_registration (srv->api_client_index); if (!regp) - server_name = format (0, "%s%c", regp->name, 0); + server_name = format (0, "builtin-%d%c", srv->index, 0); else server_name = regp->name; @@ -269,11 +278,17 @@ static clib_error_t * show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { + session_manager_main_t *smm = 
&session_manager_main; application_t *app; int do_server = 0; int do_client = 0; int verbose = 0; + if (!smm->is_enabled) + { + clib_error_return (0, "session layer is not enabled"); + } + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "server")) @@ -323,16 +338,20 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, /* *INDENT-ON* */ } else - vlib_cli_output (vm, "No active server bindings"); + vlib_cli_output (vm, "No active client bindings"); } return 0; } +/* *INDENT-OFF* */ VLIB_CLI_COMMAND (show_app_command, static) = { -.path = "show app",.short_help = - "show app [server|client] [verbose]",.function = show_app_command_fn,}; + .path = "show app", + .short_help = "show app [server|client] [verbose]", + .function = show_app_command_fn, +}; +/* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 027d6967..480828f7 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -100,6 +100,7 @@ application_t *application_new (application_type_t type, session_type_t sst, session_cb_vft_t * cb_fns); void application_del (application_t * app); application_t *application_get (u32 index); +application_t *application_get_if_valid (u32 index); application_t *application_lookup (u32 api_client_index); u32 application_get_index (application_t * app); diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 0ea77fd8..6ddfb70f 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -51,7 +51,7 @@ ip_is_local (ip46_address_t * ip46_address, u8 is_ip4) prefix.fp_proto = FIB_PROTOCOL_IP6; } - clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address)); + clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address_t)); fei = fib_table_lookup (0, &prefix); flags = fib_entry_get_flags (fei); @@ -186,9 +186,7 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, /* * Not connecting to a local server. 
Create regular session */ - stream_session_open (sst, ip46, port, app->index); - - return 0; + return stream_session_open (sst, ip46, port, app->index); } /** diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index e467f4e9..399077de 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -104,9 +104,13 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, snd_space0 = transport_vft->send_space (tc0); snd_mss0 = transport_vft->send_mss (tc0); + /* Can't make any progress */ if (snd_space0 == 0 || svm_fifo_max_dequeue (s0->server_tx_fifo) == 0 || snd_mss0 == 0) - return 0; + { + vec_add1 (smm->evts_partially_read[thread_index], *e0); + return 0; + } ASSERT (e0->enqueue_length > 0); @@ -143,7 +147,12 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (n_bufs < 0.9 * VLIB_FRAME_SIZE)) { /* Keep track of how much we've dequeued and exit */ - e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; + if (left_to_snd0 != max_len_to_snd0) + { + e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; + vec_add1 (smm->evts_partially_read[thread_index], *e0); + } + return -1; } @@ -185,12 +194,13 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, t0->server_thread_index = s0->thread_index; } + /* *INDENT-OFF* */ if (1) { - ELOG_TYPE_DECLARE (e) = - { - .format = "evt-dequeue: id %d length %d",.format_args = - "i4i4",}; + ELOG_TYPE_DECLARE (e) = { + .format = "evt-dequeue: id %d length %d", + .format_args = "i4i4", + }; struct { u32 data[2]; @@ -199,6 +209,7 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, ed->data[0] = e0->event_id; ed->data[1] = e0->enqueue_length; } + /* *INDENT-ON* */ len_to_deq0 = (left_to_snd0 < snd_mss0) ? left_to_snd0 : snd_mss0; @@ -289,7 +300,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { session_manager_main_t *smm = vnet_get_session_manager_main (); session_fifo_event_t *my_fifo_events, *e; - u32 n_to_dequeue; + u32 n_to_dequeue, n_events; unix_shared_memory_queue_t *q; int n_tx_packets = 0; u32 my_thread_index = vm->cpu_index; @@ -309,14 +320,16 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, /* min number of events we can dequeue without blocking */ n_to_dequeue = q->cursize; - if (n_to_dequeue == 0) - return 0; - my_fifo_events = smm->fifo_events[my_thread_index]; - /* If we didn't manage to process previous events try going + if (n_to_dequeue == 0 && vec_len (my_fifo_events) == 0) + return 0; + + /* + * If we didn't manage to process previous events try going * over them again without dequeuing new ones. 
- * XXX: Block senders to sessions that can't keep up */ + */ + /* XXX: Block senders to sessions that can't keep up */ if (vec_len (my_fifo_events) >= 100) goto skip_dequeue; @@ -338,8 +351,8 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, smm->fifo_events[my_thread_index] = my_fifo_events; skip_dequeue: - - for (i = 0; i < n_to_dequeue; i++) + n_events = vec_len (my_fifo_events); + for (i = 0; i < n_events; i++) { svm_fifo_t *f0; /* $$$ prefetch 1 ahead maybe */ stream_session_t *s0; @@ -354,8 +367,13 @@ skip_dequeue: /* $$$ add multiple event queues, per vpp worker thread */ ASSERT (server_thread_index0 == my_thread_index); - s0 = pool_elt_at_index (smm->sessions[my_thread_index], - server_session_index0); + s0 = stream_session_get_if_valid (server_session_index0, + my_thread_index); + if (!s0) + { + clib_warning ("It's dead Jim!"); + continue; + } ASSERT (s0->thread_index == my_thread_index); @@ -380,11 +398,11 @@ skip_dequeue: done: /* Couldn't process all events. Probably out of buffers */ - if (PREDICT_FALSE (i < n_to_dequeue)) + if (PREDICT_FALSE (i < n_events)) { session_fifo_event_t *partially_read = smm->evts_partially_read[my_thread_index]; - vec_add (partially_read, &my_fifo_events[i], n_to_dequeue - i); + vec_add (partially_read, &my_fifo_events[i], n_events - i); vec_free (my_fifo_events); smm->fifo_events[my_thread_index] = partially_read; smm->evts_partially_read[my_thread_index] = 0; @@ -413,8 +431,7 @@ VLIB_REGISTER_NODE (session_queue_node) = .n_errors = ARRAY_LEN (session_queue_error_strings), .error_strings = session_queue_error_strings, .n_next_nodes = SESSION_QUEUE_N_NEXT, - /* .state = VLIB_NODE_STATE_DISABLED, enable on-demand? */ - /* edit / add dispositions here */ + .state = VLIB_NODE_STATE_DISABLED, .next_nodes = { [SESSION_QUEUE_NEXT_DROP] = "error-drop", diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api index a7b28c1d..582765b5 100644 --- a/src/vnet/session/session.api +++ b/src/vnet/session/session.api @@ -422,6 +422,28 @@ define reset_sock_reply { i32 retval; u64 handle; }; + +/** \brief enable/disable session layer + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param is_enable - disable session layer if 0, enable otherwise +*/ +define session_enable_disable { + u32 client_index; + u32 context; + u8 is_enable; +}; + +/** \brief Reply for session enable/disable + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define session_enable_disable_reply { + u32 context; + i32 retval; +}; + /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 539da613..422527e0 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -311,11 +311,11 @@ stream_session_half_open_lookup (session_manager_main_t * smm, } transport_connection_t * -stream_session_lookup_transport4 (session_manager_main_t * smm, - ip4_address_t * lcl, ip4_address_t * rmt, +stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 my_thread_index) { + session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; stream_session_t *s; int rv; @@ -345,11 +345,11 @@ stream_session_lookup_transport4 (session_manager_main_t * smm, } transport_connection_t * -stream_session_lookup_transport6 (session_manager_main_t * smm, - ip6_address_t * lcl, 
ip6_address_t * rmt, +stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 my_thread_index) { + session_manager_main_t *smm = &session_manager_main; stream_session_t *s; session_kv6_t kv6; int rv; @@ -554,7 +554,7 @@ session_manager_allocate_session_fifos (session_manager_main_t * smm, u8 * added_a_segment) { svm_fifo_segment_private_t *fifo_segment; - u32 fifo_size, default_fifo_size = 8192 /* TODO config */ ; + u32 fifo_size, default_fifo_size = 128 << 10; /* TODO config */ int i; *added_a_segment = 0; @@ -948,7 +948,7 @@ void connects_session_manager_init (session_manager_main_t * smm, u8 session_type) { session_manager_t *sm; - u32 connect_fifo_size = 8 << 10; /* Config? */ + u32 connect_fifo_size = 256 << 10; /* Config? */ u32 default_segment_size = 1 << 20; pool_get (smm->session_managers, sm); @@ -1055,10 +1055,15 @@ stream_session_delete (stream_session_t * s) svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo); svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo); - /* Cleanup app if client */ - app = application_get (s->app_index); + app = application_get_if_valid (s->app_index); + + /* No app. A possibility: after disconnect application called unbind */ + if (!app) + return; + if (app->mode == APP_CLIENT) { + /* Cleanup app if client */ application_del (app); } else if (app->mode == APP_SERVER) @@ -1068,6 +1073,7 @@ stream_session_delete (stream_session_t * s) svm_fifo_t **fifos; u32 fifo_index; + /* For server, see if any segments can be removed */ sm = session_manager_get (app->session_manager_index); /* Delete fifo */ @@ -1096,10 +1102,10 @@ stream_session_delete_notify (transport_connection_t * tc) { stream_session_t *s; + /* App might've been removed already */ s = stream_session_get_if_valid (tc->s_index, tc->thread_index); if (!s) { - clib_warning ("Surprised!"); return; } stream_session_delete (s); @@ -1151,16 +1157,24 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, return 0; } -void +int stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, u32 app_index) { transport_connection_t *tc; u32 tci; u64 value; + int rv; /* Ask transport to open connection */ - tci = tp_vfts[sst].open (addr, port_host_byte_order); + rv = tp_vfts[sst].open (addr, port_host_byte_order); + if (rv < 0) + { + clib_warning ("Transport failed to open connection."); + return VNET_API_ERROR_SESSION_CONNECT_FAIL; + } + + tci = rv; /* Get transport connection */ tc = tp_vfts[sst].get_half_open (tci); @@ -1170,6 +1184,8 @@ stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, /* Add to the half-open lookup table */ stream_session_half_open_table_add (sst, tc, value); + + return 0; } /** @@ -1216,16 +1232,13 @@ session_get_transport_vft (u8 type) } static clib_error_t * -session_manager_main_init (vlib_main_t * vm) +session_manager_main_enable (vlib_main_t * vm) { - u32 num_threads; - vlib_thread_main_t *vtm = vlib_get_thread_main (); session_manager_main_t *smm = &session_manager_main; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + u32 num_threads; int i; - smm->vlib_main = vm; - smm->vnet_main = vnet_get_main (); - num_threads = 1 /* main thread */ + vtm->n_threads; if (num_threads < 1) @@ -1272,11 +1285,48 @@ session_manager_main_init (vlib_main_t * vm) for (i = 0; i < SESSION_N_TYPES; i++) smm->connect_manager_index[i] = INVALID_INDEX; + smm->is_enabled = 1; + return 0; } -VLIB_INIT_FUNCTION (session_manager_main_init); +clib_error_t * 
+vnet_session_enable_disable (vlib_main_t * vm, u8 is_en) +{ + if (is_en) + { + if (session_manager_main.is_enabled) + return 0; + + vlib_node_set_state (vm, session_queue_node.index, + VLIB_NODE_STATE_POLLING); + + return session_manager_main_enable (vm); + } + else + { + session_manager_main.is_enabled = 0; + vlib_node_set_state (vm, session_queue_node.index, + VLIB_NODE_STATE_DISABLED); + } + + return 0; +} + + +clib_error_t * +session_manager_main_init (vlib_main_t * vm) +{ + session_manager_main_t *smm = &session_manager_main; + + smm->vlib_main = vm; + smm->vnet_main = vnet_get_main (); + smm->is_enabled = 0; + + return 0; +} +VLIB_INIT_FUNCTION (session_manager_main_init) /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index cf14cca9..46e5ce2c 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -213,12 +213,15 @@ struct _session_manager_main /** Per transport rx function that can either dequeue or peek */ session_fifo_rx_fn *session_rx_fns[SESSION_N_TYPES]; + u8 is_enabled; + /* Convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; }; extern session_manager_main_t session_manager_main; +extern vlib_node_registration_t session_queue_node; /* * Session manager function @@ -276,14 +279,12 @@ stream_session_t *stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, u16 rmt_port, u8, u32 thread_index); transport_connection_t - * stream_session_lookup_transport4 (session_manager_main_t * smm, - ip4_address_t * lcl, + * stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index); transport_connection_t - * stream_session_lookup_transport6 (session_manager_main_t * smm, - ip6_address_t * lcl, + * stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index); @@ -338,6 +339,14 @@ stream_session_max_enqueue (transport_connection_t * tc) return svm_fifo_max_enqueue (s->server_rx_fifo); } +always_inline u32 +stream_session_fifo_size (transport_connection_t * tc) +{ + stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); + return s->server_rx_fifo->nitems; +} + + int stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, u8 queue_event); @@ -356,8 +365,8 @@ void stream_session_reset_notify (transport_connection_t * tc); int stream_session_accept (transport_connection_t * tc, u32 listener_index, u8 sst, u8 notify); -void stream_session_open (u8 sst, ip46_address_t * addr, - u16 port_host_byte_order, u32 api_client_index); +int stream_session_open (u8 sst, ip46_address_t * addr, + u16 port_host_byte_order, u32 api_client_index); void stream_session_disconnect (stream_session_t * s); void stream_session_cleanup (stream_session_t * s); int @@ -369,6 +378,8 @@ u8 *format_stream_session (u8 * s, va_list * args); void session_register_transport (u8 type, const transport_proto_vft_t * vft); transport_proto_vft_t *session_get_transport_vft (u8 type); +clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en); + #endif /* __included_session_h__ */ /* diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 9d068684..8852fc6e 100644 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -52,6 +52,8 @@ _(DISCONNECT_SOCK, disconnect_sock) \ _(DISCONNECT_SOCK_REPLY, disconnect_sock_reply) \ _(ACCEPT_SOCK_REPLY, accept_sock_reply) \ 
_(RESET_SOCK_REPLY, reset_sock_reply) \ +_(SESSION_ENABLE_DISABLE, session_enable_disable) \ + static int send_add_segment_callback (u32 api_client_index, const u8 * segment_name, @@ -146,7 +148,6 @@ send_session_connected_uri_callback (u32 api_client_index, mp = vl_msg_api_alloc (sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); mp->context = app->api_context; - mp->retval = is_fail; if (!is_fail) { vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); @@ -157,6 +158,7 @@ send_session_connected_uri_callback (u32 api_client_index, mp->session_type = s->session_type; mp->vpp_event_queue_address = (u64) vpp_queue; mp->client_event_queue_address = (u64) app->event_queue; + mp->retval = 0; session_manager_get_segment_info (s->server_segment_index, &seg_name, &mp->segment_size); @@ -164,12 +166,22 @@ send_session_connected_uri_callback (u32 api_client_index, if (mp->segment_name_length) clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); } + else + { + mp->retval = VNET_API_ERROR_SESSION_CONNECT_FAIL; + } vl_msg_api_send_shmem (q, (u8 *) & mp); /* Remove client if connect failed */ if (is_fail) - application_del (app); + { + application_del (app); + } + else + { + s->session_state = SESSION_STATE_READY; + } return 0; } @@ -431,6 +443,17 @@ api_session_not_valid (u32 session_index, u32 thread_index) return 0; } +static void +vl_api_session_enable_disable_t_handler (vl_api_session_enable_disable_t * mp) +{ + vl_api_session_enable_disable_reply_t *rmp; + vlib_main_t *vm = vlib_get_main (); + int rv = 0; + + vnet_session_enable_disable (vm, mp->is_enable); + REPLY_MACRO (VL_API_SESSION_ENABLE_DISABLE_REPLY); +} + static void vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) { @@ -476,7 +499,6 @@ vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) } })); /* *INDENT-ON* */ - } static void @@ -493,7 +515,9 @@ vl_api_unbind_uri_t_handler (vl_api_unbind_uri_t * mp) static void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { + vl_api_connect_uri_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + int rv; a->uri = (char *) mp->uri; a->api_client_index = mp->client_index; @@ -501,7 +525,19 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) a->options = mp->options; a->session_cb_vft = &uri_session_cb_vft; a->mp = mp; - vnet_connect_uri (a); + + rv = vnet_connect_uri (a); + + if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) + return; + + /* Got some error, relay it */ + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({ + rmp->retval = rv; + })); + /* *INDENT-ON* */ } static void @@ -662,7 +698,9 @@ vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp) static void vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) { + vl_api_connect_sock_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + int rv; clib_memcpy (&a->tep.ip, mp->ip, (mp->is_ip4 ? 
sizeof (ip4_address_t) : @@ -675,7 +713,18 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) a->api_context = mp->context; a->mp = mp; - vnet_connect (a); + rv = vnet_connect (a); + + if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) + return; + + /* Got some error, relay it */ + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({ + rmp->retval = rv; + })); + /* *INDENT-ON* */ } static void diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index b2943a1c..b029ee65 100644 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -60,7 +60,7 @@ format_stream_session (u8 * s, va_list * args) } else { - clib_warning ("Session in unknown state!"); + clib_warning ("Session in state: %d!", ss->session_state); } vec_free (str); @@ -78,6 +78,11 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, stream_session_t *s; u8 *str = 0; + if (!smm->is_enabled) + { + clib_error_return (0, "session layer is not enabled"); + } + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "verbose")) @@ -126,11 +131,14 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, return 0; } -VLIB_CLI_COMMAND (show_uri_command, static) = +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_session_command, static) = { -.path = "show session",.short_help = "show session [verbose]",.function = - show_session_command_fn,}; - + .path = "show session", + .short_help = "show session [verbose]", + .function = show_session_command_fn, +}; +/* *INDENT-ON* */ static clib_error_t * clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, @@ -142,6 +150,11 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, stream_session_t *pool, *session; application_t *server; + if (!smm->is_enabled) + { + clib_error_return (0, "session layer is not enabled"); + } + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "thread %d", &thread_index)) @@ -174,11 +187,43 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, return 0; } -VLIB_CLI_COMMAND (clear_uri_session_command, static) = +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (clear_session_command, static) = +{ + .path = "clear session", + .short_help = "clear session thread session ", + .function = clear_session_command_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +session_enable_disable_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + u8 is_en = 1; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "enable")) + is_en = 1; + else if (unformat (input, "disable")) + is_en = 0; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + return vnet_session_enable_disable (vm, is_en); +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (session_enable_disable_command, static) = { -.path = "clear session",.short_help = - "clear session thread session ",.function = - clear_session_command_fn,}; + .path = "session", + .short_help = "session [enable|disable]", + .function = session_enable_disable_fn, +}; +/* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c new file mode 100644 index 00000000..be65642a --- /dev/null +++ b/src/vnet/tcp/builtin_server.c @@ -0,0 +1,135 @@ +/* +* Copyright (c) 2015-2017 Cisco and/or its affiliates. 
+* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at: +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#include +#include +#include +#include + +int +builtin_session_accept_callback (stream_session_t * s) +{ + clib_warning ("called..."); + s->session_state = SESSION_STATE_READY; + return 0; +} + +void +builtin_session_disconnect_callback (stream_session_t * s) +{ + clib_warning ("called..."); +} + +int +builtin_session_connected_callback (u32 client_index, + stream_session_t * s, u8 is_fail) +{ + clib_warning ("called..."); + return -1; +} + +int +builtin_add_segment_callback (u32 client_index, + const u8 * seg_name, u32 seg_size) +{ + clib_warning ("called..."); + return -1; +} + +int +builtin_redirect_connect_callback (u32 client_index, void *mp) +{ + clib_warning ("called..."); + return -1; +} + +int +builtin_server_rx_callback (stream_session_t * s) +{ + clib_warning ("called..."); + return 0; +} + +static session_cb_vft_t builtin_session_cb_vft = { + .session_accept_callback = builtin_session_accept_callback, + .session_disconnect_callback = builtin_session_disconnect_callback, + .session_connected_callback = builtin_session_connected_callback, + .add_segment_callback = builtin_add_segment_callback, + .redirect_connect_callback = builtin_redirect_connect_callback, + .builtin_server_rx_callback = builtin_server_rx_callback +}; + +static int +server_create (vlib_main_t * vm) +{ + vnet_bind_args_t _a, *a = &_a; + u64 options[SESSION_OPTIONS_N_OPTIONS]; + char segment_name[128]; + + memset (a, 0, sizeof (*a)); + memset (options, 0, sizeof (options)); + + a->uri = "tcp://0.0.0.0/80"; + a->api_client_index = ~0; + a->session_cb_vft = &builtin_session_cb_vft; + a->options = options; + a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 10; + a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10; + a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10; + a->segment_name = segment_name; + a->segment_name_length = ARRAY_LEN (segment_name); + + return vnet_bind_uri (a); +} + +static clib_error_t * +server_create_command_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + int rv; +#if 0 + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "whatever %d", &whatever)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } +#endif + + rv = server_create (vm); + switch (rv) + { + case 0: + break; + default: + return clib_error_return (0, "server_create returned %d", rv); + } + return 0; +} + +VLIB_CLI_COMMAND (server_create_command, static) = +{ +.path = "test server",.short_help = "test server",.function = + server_create_command_fn,}; + +/* +* fd.io coding-style-patch-verification: ON +* +* Local Variables: +* eval: (c-set-style "gnu") +* End: +*/ diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 0f9b7097..e5feaeb1 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -217,6 +217,7 @@ ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4) return 0; } +#define PORT_MASK ((1 << 16)- 1) /** * Allocate local port and add if successful 
add entry to local endpoint * table to mark the pair as used. @@ -224,7 +225,6 @@ ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4) u16 tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) { - u8 unique = 0; transport_endpoint_t *tep; u32 time_now, tei; u16 min = 1024, max = 65535, tries; /* XXX configurable ? */ @@ -235,37 +235,34 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) /* Start at random point or max */ pool_get (tm->local_endpoints, tep); clib_memcpy (&tep->ip, ip, sizeof (*ip)); - tep->port = random_u32 (&time_now) << 16; - tep->port = tep->port < min ? max : tep->port; /* Search for first free slot */ - while (tries) + for (; tries >= 0; tries--) { + u16 port = 0; + + /* Find a port in the specified range */ + while (1) + { + port = random_u32 (&time_now) & PORT_MASK; + if (PREDICT_TRUE (port >= min && port < max)) + break; + } + + tep->port = port; + + /* Look it up */ tei = transport_endpoint_lookup (&tm->local_endpoints_table, &tep->ip, tep->port); + /* If not found, we're done */ if (tei == TRANSPORT_ENDPOINT_INVALID_INDEX) { - unique = 1; - break; + transport_endpoint_table_add (&tm->local_endpoints_table, tep, + tep - tm->local_endpoints); + return tep->port; } - - tep->port--; - - if (tep->port < min) - tep->port = max; - - tries--; } - - if (unique) - { - transport_endpoint_table_add (&tm->local_endpoints_table, tep, - tep - tm->local_endpoints); - - return tep->port; - } - - /* Failed */ + /* No free ports */ pool_put (tm->local_endpoints, tep); return -1; } @@ -360,7 +357,10 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) /* Allocate source port */ lcl_port = tcp_allocate_local_port (tm, &lcl_addr); if (lcl_port < 1) - return -1; + { + clib_warning ("Failed to allocate src port"); + return -1; + } /* * Create connection and send SYN diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 22f00a63..3560509d 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -30,7 +30,8 @@ #define TCP_MAX_OPTION_SPACE 40 #define TCP_DUPACK_THRESHOLD 3 -#define TCP_DEFAULT_RX_FIFO_SIZE 64 << 10 +#define TCP_MAX_RX_FIFO_SIZE 2 << 20 +#define TCP_IW_N_SEGMENTS 10 /** TCP FSM state definitions as per RFC793. 
*/ #define foreach_tcp_fsm_state \ @@ -590,7 +591,6 @@ vlib_buffer_push_tcp_net_order (vlib_buffer_t * b, u16 sp, u16 dp, u32 seq, /** * Push TCP header to buffer * - * @param vm - vlib_main * @param b - buffer to write the header to * @param sp_net - source port net order * @param dp_net - destination port net order diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index daa0683b..0a907d0a 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -711,7 +711,7 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, if (tcp_opts_sack_permitted (&tc->opt)) tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number); - new_snd_wnd = clib_net_to_host_u32 (th->window) << tc->snd_wscale; + new_snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale; if (tcp_ack_is_dupack (tc, b, new_snd_wnd)) { @@ -1320,7 +1320,6 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Parse options */ tcp_options_parse (tcp0, &new_tc0->opt); - tcp_connection_init_vars (new_tc0); if (tcp_opts_tstamp (&new_tc0->opt)) { @@ -1331,11 +1330,13 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (tcp_opts_wscale (&new_tc0->opt)) new_tc0->snd_wscale = new_tc0->opt.wscale; - new_tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window) - << new_tc0->snd_wscale; + /* No scaling */ + new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window); new_tc0->snd_wl1 = seq0; new_tc0->snd_wl2 = ack0; + tcp_connection_init_vars (new_tc0); + /* SYN-ACK: See if we can switch to ESTABLISHED state */ if (tcp_ack (tcp0)) { @@ -1345,6 +1346,9 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, new_tc0->snd_una = ack0; new_tc0->state = TCP_STATE_ESTABLISHED; + /* Make sure las is initialized for the wnd computation */ + new_tc0->rcv_las = new_tc0->rcv_nxt; + /* Notify app that we have connection */ stream_session_connect_notify (&new_tc0->connection, sst, 0); @@ -1575,7 +1579,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Initialize session variables */ tc0->snd_una = vnet_buffer (b0)->tcp.ack_number; - tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window) + tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window) << tc0->opt.wscale; tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; @@ -1899,7 +1903,6 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } tcp_options_parse (th0, &child0->opt); - tcp_connection_init_vars (child0); child0->irs = vnet_buffer (b0)->tcp.seq_number; child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1; @@ -1913,6 +1916,16 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, child0->tsval_recent_age = tcp_time_now (); } + if (tcp_opts_wscale (&child0->opt)) + child0->snd_wscale = child0->opt.wscale; + + /* No scaling */ + child0->snd_wnd = clib_net_to_host_u16 (th0->window); + child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; + child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; + + tcp_connection_init_vars (child0); + /* Reuse buffer to make syn-ack and send */ tcp_make_synack (child0, b0); next0 = tcp_next_output (is_ip4); @@ -1923,7 +1936,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } - b0->error = error0 ? 
node->errors[error0] : 0; + b0->error = node->errors[error0]; vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); @@ -2069,7 +2082,6 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_left_from, next_index, *from, *to_next; u32 my_thread_index = vm->cpu_index; tcp_main_t *tm = vnet_get_tcp_main (); - session_manager_main_t *ssm = vnet_get_session_manager_main (); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -2109,26 +2121,26 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* lookup session */ tc0 = - (tcp_connection_t *) stream_session_lookup_transport4 (ssm, - &ip40->dst_address, - &ip40->src_address, - tcp0->dst_port, - tcp0->src_port, - SESSION_TYPE_IP4_TCP, - my_thread_index); + (tcp_connection_t *) + stream_session_lookup_transport4 (&ip40->dst_address, + &ip40->src_address, + tcp0->dst_port, + tcp0->src_port, + SESSION_TYPE_IP4_TCP, + my_thread_index); } else { ip60 = vlib_buffer_get_current (b0); tcp0 = ip6_next_header (ip60); tc0 = - (tcp_connection_t *) stream_session_lookup_transport6 (ssm, - &ip60->src_address, - &ip60->dst_address, - tcp0->src_port, - tcp0->dst_port, - SESSION_TYPE_IP6_TCP, - my_thread_index); + (tcp_connection_t *) + stream_session_lookup_transport6 (&ip60->src_address, + &ip60->dst_address, + tcp0->src_port, + tcp0->dst_port, + SESSION_TYPE_IP6_TCP, + my_thread_index); } /* Session exists */ diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index dbcf1f74..7e431cd0 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -90,6 +90,15 @@ tcp_window_compute_scale (u32 available_space) return wnd_scale; } +/** + * TCP's IW as recommended by RFC6928 + */ +always_inline u32 +tcp_initial_wnd_unscaled (tcp_connection_t * tc) +{ + return TCP_IW_N_SEGMENTS * dummy_mtu; +} + /** * Compute initial window and scale factor. As per RFC1323, window field in * SYN and SYN-ACK segments is never scaled. @@ -97,18 +106,15 @@ tcp_window_compute_scale (u32 available_space) u32 tcp_initial_window_to_advertise (tcp_connection_t * tc) { - u32 available_space; + u32 max_fifo; /* Initial wnd for SYN. Fifos are not allocated yet. - * Use some predefined value */ - if (tc->state != TCP_STATE_SYN_RCVD) - { - return TCP_DEFAULT_RX_FIFO_SIZE; - } + * Use some predefined value. 
For SYN-ACK we still want the + * scale to be computed in the same way */ + max_fifo = TCP_MAX_RX_FIFO_SIZE; - available_space = stream_session_max_enqueue (&tc->connection); - tc->rcv_wscale = tcp_window_compute_scale (available_space); - tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); + tc->rcv_wscale = tcp_window_compute_scale (max_fifo); + tc->rcv_wnd = tcp_initial_wnd_unscaled (tc); return clib_min (tc->rcv_wnd, TCP_WND_MAX); } @@ -119,23 +125,43 @@ tcp_initial_window_to_advertise (tcp_connection_t * tc) u32 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state) { - u32 available_space, wnd, scaled_space; + u32 available_space, max_fifo, observed_wnd; - if (state != TCP_STATE_ESTABLISHED) + if (state < TCP_STATE_ESTABLISHED) return tcp_initial_window_to_advertise (tc); + /* + * Figure out how much space we have available + */ available_space = stream_session_max_enqueue (&tc->connection); - scaled_space = available_space >> tc->rcv_wscale; + max_fifo = stream_session_fifo_size (&tc->connection); + + ASSERT (tc->opt.mss < max_fifo); + + if (available_space < tc->opt.mss && available_space < max_fifo / 8) + available_space = 0; - /* Need to update scale */ - if (PREDICT_FALSE ((scaled_space == 0 && available_space != 0)) - || (scaled_space >= TCP_WND_MAX)) - tc->rcv_wscale = tcp_window_compute_scale (available_space); + /* + * Use the above and what we know about what we've previously advertised + * to compute the new window + */ + observed_wnd = tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las); - wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); - tc->rcv_wnd = wnd; + /* Bad. Thou shalt not shrink */ + if (available_space < observed_wnd) + { + if (available_space == 0) + clib_warning ("Didn't shrink rcv window despite not having space"); + } + + tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); + + if (tc->rcv_wnd == 0) + { + tc->flags |= TCP_CONN_SENT_RCV_WND0; + } - return wnd >> tc->rcv_wscale; + return tc->rcv_wnd >> tc->rcv_wscale; } /** @@ -225,7 +251,7 @@ tcp_options_write (u8 * data, tcp_options_t * opts) } always_inline int -tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd) +tcp_make_syn_options (tcp_options_t * opts, u8 wnd_scale) { u8 len = 0; @@ -234,7 +260,7 @@ tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd) len += TCP_OPTION_LEN_MSS; opts->flags |= TCP_OPTS_FLAG_WSCALE; - opts->wscale = tcp_window_compute_scale (initial_wnd); + opts->wscale = wnd_scale; len += TCP_OPTION_LEN_WINDOW_SCALE; opts->flags |= TCP_OPTS_FLAG_TSTAMP; @@ -327,8 +353,7 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts, case TCP_STATE_SYN_RCVD: return tcp_make_synack_options (tc, opts); case TCP_STATE_SYN_SENT: - return tcp_make_syn_options (opts, - tcp_initial_window_to_advertise (tc)); + return tcp_make_syn_options (opts, tc->rcv_wscale); default: clib_warning ("Not handled!"); return 0; @@ -732,7 +757,7 @@ tcp_send_syn (tcp_connection_t * tc) /* Make and write options */ memset (&snd_opts, 0, sizeof (snd_opts)); - tcp_opts_len = tcp_make_syn_options (&snd_opts, initial_wnd); + tcp_opts_len = tcp_make_syn_options (&snd_opts, tc->rcv_wscale); tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t); th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss, @@ -900,7 +925,7 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, tcp_reuse_buffer (vm, b); - ASSERT (tc->state == TCP_STATE_ESTABLISHED); + ASSERT (tc->state >= TCP_STATE_ESTABLISHED); ASSERT 
(max_bytes != 0); if (tcp_opts_sack_permitted (&tc->opt)) @@ -929,7 +954,6 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, max_bytes); ASSERT (n_bytes != 0); - tc->snd_nxt += n_bytes; tcp_push_hdr_i (tc, b, tc->state); return n_bytes; @@ -967,7 +991,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) tcp_get_free_buffer_index (tm, &bi); b = vlib_get_buffer (vm, bi); - if (tc->state == TCP_STATE_ESTABLISHED) + if (tc->state >= TCP_STATE_ESTABLISHED) { tcp_fastrecovery_off (tc); @@ -977,6 +1001,12 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) /* Figure out what and how many bytes we can send */ snd_space = tcp_available_snd_space (tc); max_bytes = clib_min (tc->snd_mss, snd_space); + + if (max_bytes == 0) + { + clib_warning ("no wnd to retransmit"); + return; + } tcp_prepare_retransmit_segment (tc, b, max_bytes); tc->rtx_bytes += max_bytes; @@ -996,7 +1026,11 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + tcp_push_hdr_i (tc, b, tc->state); + + /* Account for the SYN */ + tc->snd_nxt += 1; } if (!is_syn) @@ -1163,8 +1197,8 @@ tcp46_output_inline (vlib_main_t * vm, if (PREDICT_FALSE (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK)) { + ASSERT (tc0->snt_dupacks > 0); tc0->snt_dupacks--; - ASSERT (tc0->snt_dupacks >= 0); if (!tcp_session_has_ooo_data (tc0)) { error0 = TCP_ERROR_FILTERED_DUPACKS; -- cgit 1.2.3-korg From 6792ec059696a358b6c98d8d86e9740b34c01e24 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Mon, 13 Mar 2017 03:49:51 -0700 Subject: TCP/session improvements - Added svm fifo flag for tracking fifo dequeue events (replaces event length). Updated all code to switch to the new scheme. - More session debugging - Fix peek index wrap - Add a trivial socket test client - Fast retransmit/cc fixes - tx and rx SACK fixes and unit testing - SRTT computation fix - remove dupack/ack burst filters - improve ack rx - improved segment rx - builtin client test code Change-Id: Ic4eb2d5ca446eb2260ccd3ccbcdaa73c64e7f4e1 Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/svm/svm_fifo.c | 35 +-- src/svm/svm_fifo.h | 28 ++- src/svm/svm_fifo_segment.h | 4 +- src/uri.am | 5 +- src/uri/uri_socket_test.c | 126 ++++++++++ src/uri/uri_tcp_test.c | 161 +++++++++---- src/uri/uri_udp_test.c | 13 +- src/vnet.am | 2 + src/vnet/session/application.h | 3 +- src/vnet/session/node.c | 127 ++++++---- src/vnet/session/session.c | 63 +++-- src/vnet/session/session.h | 19 +- src/vnet/session/session_cli.c | 2 +- src/vnet/session/session_debug.h | 38 ++- src/vnet/session/transport.h | 2 +- src/vnet/tcp/builtin_client.c | 411 +++++++++++++++++++++++++++++++ src/vnet/tcp/builtin_client.h | 131 ++++++++++ src/vnet/tcp/builtin_server.c | 91 +++++-- src/vnet/tcp/tcp.c | 37 ++- src/vnet/tcp/tcp.h | 111 +++++++-- src/vnet/tcp/tcp_debug.h | 252 ++++++++++++++++--- src/vnet/tcp/tcp_error.def | 7 +- src/vnet/tcp/tcp_input.c | 507 +++++++++++++++++++++++++-------------- src/vnet/tcp/tcp_output.c | 295 ++++++++++++++++------- src/vnet/tcp/tcp_packet.h | 2 +- src/vnet/tcp/tcp_test.c | 216 +++++++++++++++++ src/vnet/udp/builtin_server.c | 29 ++- src/vnet/udp/udp_input.c | 47 ++-- 28 files changed, 2201 insertions(+), 563 deletions(-) create mode 100644 src/uri/uri_socket_test.c create mode 100644 src/vnet/tcp/builtin_client.c create mode 100644 src/vnet/tcp/builtin_client.h create mode 100644 src/vnet/tcp/tcp_test.c (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c 
b/src/svm/svm_fifo.c index e3f534b1..07b0d2df 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -13,7 +13,7 @@ * limitations under the License. */ -#include "svm_fifo.h" +#include /** create an svm fifo, in the current heap. Fails vs blow up the process */ svm_fifo_t * @@ -362,18 +362,19 @@ svm_fifo_enqueue_nowait (svm_fifo_t * f, return svm_fifo_enqueue_internal (f, pid, max_bytes, copy_from_here); } -/** Enqueue a future segment. +/** + * Enqueue a future segment. + * * Two choices: either copies the entire segment, or copies nothing * Returns 0 of the entire segment was copied * Returns -1 if none of the segment was copied due to lack of space */ - static int -svm_fifo_enqueue_with_offset_internal2 (svm_fifo_t * f, - int pid, - u32 offset, - u32 required_bytes, - u8 * copy_from_here) +svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, + int pid, + u32 offset, + u32 required_bytes, + u8 * copy_from_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; @@ -424,14 +425,14 @@ svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 required_bytes, u8 * copy_from_here) { - return svm_fifo_enqueue_with_offset_internal2 + return svm_fifo_enqueue_with_offset_internal (f, pid, offset, required_bytes, copy_from_here); } static int -svm_fifo_dequeue_internal2 (svm_fifo_t * f, - int pid, u32 max_bytes, u8 * copy_here) +svm_fifo_dequeue_internal (svm_fifo_t * f, + int pid, u32 max_bytes, u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; @@ -484,7 +485,7 @@ int svm_fifo_dequeue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, u8 * copy_here) { - return svm_fifo_dequeue_internal2 (f, pid, max_bytes, copy_here); + return svm_fifo_dequeue_internal (f, pid, max_bytes, copy_here); } int @@ -492,7 +493,7 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; - u32 cursize, nitems; + u32 cursize, nitems, real_head; if (PREDICT_FALSE (f->cursize == 0)) return -2; /* nothing in the fifo */ @@ -500,6 +501,8 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, /* read cursize, which can only increase while we're working */ cursize = f->cursize; nitems = f->nitems; + real_head = f->head + offset; + real_head = real_head >= nitems ? real_head - nitems : real_head; /* Number of bytes we're going to copy */ total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes; @@ -508,9 +511,9 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, { /* Number of bytes in first copy segment */ first_copy_bytes = - ((nitems - f->head + offset) < total_copy_bytes) ? - (nitems - f->head + offset) : total_copy_bytes; - clib_memcpy (copy_here, &f->data[f->head + offset], first_copy_bytes); + ((nitems - real_head) < total_copy_bytes) ? 
+ (nitems - real_head) : total_copy_bytes; + clib_memcpy (copy_here, &f->data[real_head], first_copy_bytes); /* Number of bytes in second copy segment, if any */ second_copy_bytes = total_copy_bytes - first_copy_bytes; diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 70624b74..39556173 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -46,9 +46,11 @@ typedef struct { pthread_mutex_t mutex; /* 8 bytes */ pthread_cond_t condvar; /* 8 bytes */ - u32 owner_pid; svm_lock_tag_t tag; - volatile u32 cursize; + + volatile u32 cursize; /**< current fifo size */ + volatile u8 has_event; /**< non-zero if deq event exists */ + u32 owner_pid; u32 nitems; /* Backpointers */ @@ -112,6 +114,28 @@ svm_fifo_has_ooo_data (svm_fifo_t * f) return f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX; } +/** + * Sets fifo event flag. + * + * @return 1 if flag was not set. + */ +always_inline u8 +svm_fifo_set_event (svm_fifo_t * f) +{ + /* Probably doesn't need to be atomic. Still, better avoid surprises */ + return __sync_lock_test_and_set (&f->has_event, 1) == 0; +} + +/** + * Unsets fifo event flag. + */ +always_inline void +svm_fifo_unset_event (svm_fifo_t * f) +{ + /* Probably doesn't need to be atomic. Still, better avoid surprises */ + __sync_lock_test_and_set (&f->has_event, 0); +} + svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes); int svm_fifo_enqueue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 793fa7c8..ecb5653a 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -15,8 +15,8 @@ #ifndef __included_ssvm_fifo_segment_h__ #define __included_ssvm_fifo_segment_h__ -#include "svm_fifo.h" -#include "ssvm.h" +#include +#include typedef struct { diff --git a/src/uri.am b/src/uri.am index 09b5b15b..ad4d65d8 100644 --- a/src/uri.am +++ b/src/uri.am @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -noinst_PROGRAMS += uri_udp_test uri_tcp_test +noinst_PROGRAMS += uri_udp_test uri_tcp_test uri_socket_test uri_udp_test_SOURCES = uri/uri_udp_test.c uri_udp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ @@ -20,3 +20,6 @@ uri_udp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ uri_tcp_test_SOURCES = uri/uri_tcp_test.c uri_tcp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ libvppinfra.la -lpthread -lm -lrt + +uri_socket_test_SOURCES = uri/uri_socket_test.c +uri_socket_test_LDADD = libvppinfra.la -lpthread -lm -lrt diff --git a/src/uri/uri_socket_test.c b/src/uri/uri_socket_test.c new file mode 100644 index 00000000..9f049bda --- /dev/null +++ b/src/uri/uri_socket_test.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include + +int +main (int argc, char *argv[]) +{ + int sockfd, portno, n; + struct sockaddr_in serv_addr; + struct hostent *server; + u8 *rx_buffer = 0, *tx_buffer = 0; + u32 offset; + int iter, i; + if (0 && argc < 3) + { + fformat (stderr, "usage %s hostname port\n", argv[0]); + exit (0); + } + + portno = 1234; // atoi(argv[2]); + sockfd = socket (AF_INET, SOCK_STREAM, 0); + if (sockfd < 0) + { + clib_unix_error ("socket"); + exit (1); + } + server = gethostbyname ("6.0.1.1" /* argv[1] */ ); + if (server == NULL) + { + clib_unix_warning ("gethostbyname"); + exit (1); + } + bzero ((char *) &serv_addr, sizeof (serv_addr)); + serv_addr.sin_family = AF_INET; + bcopy ((char *) server->h_addr, + (char *) &serv_addr.sin_addr.s_addr, server->h_length); + serv_addr.sin_port = htons (portno); + if (connect (sockfd, (const void *) &serv_addr, sizeof (serv_addr)) < 0) + { + clib_unix_warning ("connect"); + exit (1); + } + + vec_validate (rx_buffer, 1400); + vec_validate (tx_buffer, 1400); + + for (i = 0; i < vec_len (tx_buffer); i++) + tx_buffer[i] = (i + 1) % 0xff; + + /* + * Send one packet to warm up the RX pipeline + */ + n = send (sockfd, tx_buffer, vec_len (tx_buffer), 0 /* flags */ ); + if (n != vec_len (tx_buffer)) + { + clib_unix_warning ("write"); + exit (0); + } + + for (iter = 0; iter < 100000; iter++) + { + if (iter < 99999) + { + n = send (sockfd, tx_buffer, vec_len (tx_buffer), 0 /* flags */ ); + if (n != vec_len (tx_buffer)) + { + clib_unix_warning ("write"); + exit (0); + } + } + offset = 0; + + do + { + n = recv (sockfd, rx_buffer + offset, + vec_len (rx_buffer) - offset, 0 /* flags */ ); + if (n < 0) + { + clib_unix_warning ("read"); + exit (0); + } + offset += n; + } + while (offset < vec_len (rx_buffer)); + + for (i = 0; i < vec_len (rx_buffer); i++) + { + if (rx_buffer[i] != tx_buffer[i]) + { + clib_warning ("[%d] read 0x%x not 0x%x", + rx_buffer[i], tx_buffer[i]); + exit (1); + } + } + + } + close (sockfd); + return 0; +} + + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index 406a5f4e..e2834817 100644 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -116,6 +116,7 @@ typedef struct pthread_t client_rx_thread_handle; u32 client_bytes_received; u8 test_return_packets; + u32 bytes_to_send; /* convenience */ svm_fifo_segment_main_t *segment_main; @@ -313,11 +314,16 @@ client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, rx_fifo = e->fifo; - bytes = e->enqueue_length; + bytes = svm_fifo_max_dequeue (rx_fifo); + /* Allow enqueuing of new event */ + svm_fifo_unset_event (rx_fifo); + + /* Read the bytes */ do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf), - utm->rx_buf); + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, + clib_min (vec_len (utm->rx_buf), + bytes), utm->rx_buf); if (n_read > 0) { bytes -= n_read; @@ -333,9 +339,17 @@ client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, } utm->client_bytes_received += n_read; } + else + { + if (n_read == -2) + { + clib_warning ("weird!"); + break; + } + } } - while (n_read < 0 || bytes > 0); + while (bytes > 0); } void @@ -479,47 +493,41 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) } } -void -client_send_data (uri_tcp_test_main_t * utm) +static void +send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, + u32 bytes) { u8 *test_data = utm->connect_test_data; 
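/*
 * Editor's note: a minimal sketch (not part of the original patch) of the
 * producer/consumer handshake behind the new has_event flag that the commit
 * message above introduces and that the surrounding uri_tcp_test.c changes
 * adopt. It assumes the svm_fifo_t, session_fifo_event_t and
 * unix_shared_memory_queue_t definitions from this patch are in scope; the
 * *_sketch function names and the evt_q parameter are hypothetical.
 */
static void
tx_side_sketch (svm_fifo_t * tx_fifo, unix_shared_memory_queue_t * evt_q,
		u8 * data, u32 len)
{
  session_fifo_event_t evt;
  int rv;

  rv = svm_fifo_enqueue_nowait (tx_fifo, 0 /* pid */ , len, data);

  /* svm_fifo_set_event() returns 1 only if the flag was previously clear,
   * so at most one event per burst of enqueues reaches the event queue. */
  if (rv > 0 && svm_fifo_set_event (tx_fifo))
    {
      evt.fifo = tx_fifo;
      evt.event_type = FIFO_EVENT_SERVER_TX;
      evt.event_id = 0;
      unix_shared_memory_queue_add (evt_q, (u8 *) & evt,
				    0 /* do wait for mutex */ );
    }
}

static void
rx_side_sketch (svm_fifo_t * rx_fifo, u8 * buf, u32 buf_len)
{
  int n_read;

  /* Clear the flag before draining; a producer that enqueues while we read
   * sets it again and posts a fresh event, so no data is left unannounced. */
  svm_fifo_unset_event (rx_fifo);

  do
    {
      n_read = svm_fifo_dequeue_nowait (rx_fifo, 0 /* pid */ , buf_len, buf);
    }
  while (n_read > 0);
}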
u64 bytes_sent = 0; - int rv; - int mypid = getpid (); - session_t *session; - svm_fifo_t *tx_fifo; - int buffer_offset, bytes_to_send = 0; + int test_buf_offset = 0; + u32 bytes_to_snd; + u32 queue_max_chunk = 64 << 10, actual_write; session_fifo_event_t evt; static int serial_number = 0; - int i; - u32 max_chunk = 64 << 10, write; - - session = pool_elt_at_index (utm->sessions, utm->connected_session_index); - tx_fifo = session->server_tx_fifo; + int rv; - vec_validate (utm->rx_buf, vec_len (test_data) - 1); + bytes_to_snd = (bytes == 0) ? vec_len (test_data) : bytes; + if (bytes_to_snd > vec_len (test_data)) + bytes_to_snd = vec_len (test_data); - for (i = 0; i < 1; i++) + while (bytes_to_snd > 0) { - bytes_to_send = vec_len (test_data); - buffer_offset = 0; - while (bytes_to_send > 0) + actual_write = + bytes_to_snd > queue_max_chunk ? queue_max_chunk : bytes_to_snd; + rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, actual_write, + test_data + test_buf_offset); + + if (rv > 0) { - write = bytes_to_send > max_chunk ? max_chunk : bytes_to_send; - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write, - test_data + buffer_offset); + bytes_to_snd -= rv; + test_buf_offset += rv; + bytes_sent += rv; - if (rv > 0) + if (svm_fifo_set_event (tx_fifo)) { - bytes_to_send -= rv; - buffer_offset += rv; - bytes_sent += rv; - /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = rv; evt.event_id = serial_number++; unix_shared_memory_queue_add (utm->vpp_event_queue, @@ -528,13 +536,40 @@ client_send_data (uri_tcp_test_main_t * utm) } } } +} + +void +client_send_data (uri_tcp_test_main_t * utm) +{ + u8 *test_data = utm->connect_test_data; + int mypid = getpid (); + session_t *session; + svm_fifo_t *tx_fifo; + u32 n_iterations, leftover; + int i; + + session = pool_elt_at_index (utm->sessions, utm->connected_session_index); + tx_fifo = session->server_tx_fifo; + + vec_validate (utm->rx_buf, vec_len (test_data) - 1); + n_iterations = utm->bytes_to_send / vec_len (test_data); + + for (i = 0; i < n_iterations; i++) + { + send_test_chunk (utm, tx_fifo, mypid, 0); + } + + leftover = utm->bytes_to_send % vec_len (test_data); + if (leftover) + send_test_chunk (utm, tx_fifo, mypid, leftover); if (utm->test_return_packets) { f64 timeout = clib_time_now (&utm->clib_time) + 2; /* Wait for the outstanding packets */ - while (utm->client_bytes_received < vec_len (test_data)) + while (utm->client_bytes_received < + vec_len (test_data) * n_iterations + leftover) { if (clib_time_now (&utm->clib_time) > timeout) { @@ -542,9 +577,8 @@ client_send_data (uri_tcp_test_main_t * utm) break; } } - - utm->time_to_stop = 1; } + utm->time_to_stop = 1; } void @@ -599,6 +633,11 @@ client_test (uri_tcp_test_main_t * utm) /* Disconnect */ client_disconnect (utm); + + if (wait_for_state_change (utm, STATE_START)) + { + return; + } } static void @@ -714,7 +753,6 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { svm_fifo_t *rx_fifo, *tx_fifo; int n_read; - session_fifo_event_t evt; unix_shared_memory_queue_t *q; int rv, bytes; @@ -722,34 +760,46 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, rx_fifo = e->fifo; tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; - bytes = e->enqueue_length; + bytes = svm_fifo_max_dequeue (rx_fifo); + /* Allow enqueuing of a new event */ + svm_fifo_unset_event (rx_fifo); + + if (bytes == 0) + return; + + /* Read the bytes */ do { n_read = svm_fifo_dequeue_nowait (rx_fifo, 
0, vec_len (utm->rx_buf), utm->rx_buf); + if (n_read > 0) + bytes -= n_read; + + if (utm->drop_packets) + continue; /* Reflect if a non-drop session */ - if (!utm->drop_packets && n_read > 0) + if (n_read > 0) { do { rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); } - while (rv == -2 && !utm->time_to_stop); - - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = n_read; - evt.event_id = e->event_id; - q = utm->vpp_event_queue; - unix_shared_memory_queue_add (q, (u8 *) & evt, - 0 /* do wait for mutex */ ); - } + while (rv <= 0 && !utm->time_to_stop); - if (n_read > 0) - bytes -= n_read; + /* If event wasn't set, add one */ + if (svm_fifo_set_event (tx_fifo)) + { + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_id = e->event_id; + + q = utm->vpp_event_queue; + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + } } while ((n_read < 0 || bytes > 0) && !utm->time_to_stop); } @@ -852,7 +902,10 @@ static void vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * mp) { + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + clib_warning ("retval %d", ntohl (mp->retval)); + utm->state = STATE_START; } #define foreach_uri_msg \ @@ -888,6 +941,7 @@ main (int argc, char **argv) u8 *heap, *uri = 0; u8 *bind_uri = (u8 *) "tcp://0.0.0.0/1234"; u8 *connect_uri = (u8 *) "tcp://6.0.1.2/1234"; + u32 bytes_to_send = 64 << 10, mbytes; u32 tmp; mheap_t *h; session_t *session; @@ -934,6 +988,10 @@ main (int argc, char **argv) drop_packets = 1; else if (unformat (a, "test")) test_return_packets = 1; + else if (unformat (a, "mbytes %d", &mbytes)) + { + bytes_to_send = mbytes << 20; + } else { fformat (stderr, "%s: usage [master|slave]\n"); @@ -956,6 +1014,7 @@ main (int argc, char **argv) utm->segment_main = &svm_fifo_segment_main; utm->drop_packets = drop_packets; utm->test_return_packets = test_return_packets; + utm->bytes_to_send = bytes_to_send; setup_signal_handlers (); uri_api_hookup (utm); diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 54625d64..e6c239c1 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -742,17 +742,20 @@ server_handle_fifo_event_rx (uri_udp_test_main_t * utm, /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = nbytes; evt.event_id = e->event_id; - q = utm->vpp_event_queue; - unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); + + if (svm_fifo_set_event (tx_fifo)) + { + q = utm->vpp_event_queue; + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* do wait for mutex */ ); + } } void server_handle_event_queue (uri_udp_test_main_t * utm) { - session_fifo_event_t _e, *e = &_e;; + session_fifo_event_t _e, *e = &_e; while (1) { diff --git a/src/vnet.am b/src/vnet.am index 3e73de8f..9c55e336 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -462,7 +462,9 @@ libvnet_la_SOURCES += \ vnet/tcp/tcp_output.c \ vnet/tcp/tcp_input.c \ vnet/tcp/tcp_newreno.c \ + vnet/tcp/builtin_client.c \ vnet/tcp/builtin_server.c \ + vnet/tcp/tcp_test.c \ vnet/tcp/tcp.c nobase_include_HEADERS += \ diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index a60a8b8b..480828f7 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -45,8 +45,7 @@ typedef struct 
_stream_session_cb_vft void (*session_reset_callback) (stream_session_t * s); /* Direct RX callback, for built-in servers */ - int (*builtin_server_rx_callback) (stream_session_t * session, - session_fifo_event_t * ep); + int (*builtin_server_rx_callback) (stream_session_t * session); /* Redirect connection to local server */ int (*redirect_connect_callback) (u32 api_client_index, void *mp); diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index 822afebd..8681105c 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -13,21 +13,14 @@ * limitations under the License. */ +#include #include #include -#include -#include - #include - -#include -#include #include -#include - -#include -#include +#include #include +#include vlib_node_registration_t session_queue_node; @@ -52,8 +45,8 @@ format_session_queue_trace (u8 * s, va_list * args) vlib_node_registration_t session_queue_node; -#define foreach_session_queue_error \ -_(TX, "Packets transmitted") \ +#define foreach_session_queue_error \ +_(TX, "Packets transmitted") \ _(TIMER, "Timer events") typedef enum @@ -91,10 +84,10 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, transport_proto_vft_t *transport_vft; u32 next_index, next0, *to_next, n_left_to_next, bi0; vlib_buffer_t *b0; - u32 rx_offset; + u32 rx_offset = 0, max_dequeue0; u16 snd_mss0; u8 *data0; - int i; + int i, n_bytes_read; next_index = next0 = session_type_to_next[s0->session_type]; @@ -106,24 +99,33 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, snd_mss0 = transport_vft->send_mss (tc0); /* Can't make any progress */ - if (snd_space0 == 0 || svm_fifo_max_dequeue (s0->server_tx_fifo) == 0 - || snd_mss0 == 0) + if (snd_space0 == 0 || snd_mss0 == 0) { vec_add1 (smm->evts_partially_read[thread_index], *e0); return 0; } - ASSERT (e0->enqueue_length > 0); - - /* Ensure we're not writing more than transport window allows */ - max_len_to_snd0 = clib_min (e0->enqueue_length, snd_space0); - if (peek_data) { /* Offset in rx fifo from where to peek data */ rx_offset = transport_vft->tx_fifo_offset (tc0); } + /* Check how much we can pull. If buffering, subtract the offset */ + max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - rx_offset; + + /* Allow enqueuing of a new event */ + svm_fifo_unset_event (s0->server_tx_fifo); + + /* Nothing to read return */ + if (max_dequeue0 == 0) + { + return 0; + } + + /* Ensure we're not writing more than transport window allows */ + max_len_to_snd0 = clib_min (max_dequeue0, snd_space0); + /* TODO check if transport is willing to send len_to_snd0 * bytes (Nagle) */ @@ -147,13 +149,10 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, * XXX 0.9 because when debugging we might not get a full frame */ if (PREDICT_FALSE (n_bufs < 0.9 * VLIB_FRAME_SIZE)) { - /* Keep track of how much we've dequeued and exit */ - if (left_to_snd0 != max_len_to_snd0) + if (svm_fifo_set_event (s0->server_tx_fifo)) { - e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; vec_add1 (smm->evts_partially_read[thread_index], *e0); } - return -1; } @@ -198,9 +197,9 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, len_to_deq0 = (left_to_snd0 < snd_mss0) ? 
left_to_snd0 : snd_mss0; /* *INDENT-OFF* */ - SESSION_EVT_DBG(s0, SESSION_EVT_DEQ, ({ + SESSION_EVT_DBG(SESSION_EVT_DEQ, s0, ({ ed->data[0] = e0->event_id; - ed->data[1] = e0->enqueue_length; + ed->data[1] = max_dequeue0; ed->data[2] = len_to_deq0; ed->data[3] = left_to_snd0; })); @@ -214,29 +213,30 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, * 2) buffer chains */ if (peek_data) { - int n_bytes_read; n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, s0->pid, rx_offset, len_to_deq0, data0); - if (n_bytes_read < 0) + if (n_bytes_read <= 0) goto dequeue_fail; /* Keep track of progress locally, transport is also supposed to - * increment it independently when pushing header */ + * increment it independently when pushing the header */ rx_offset += n_bytes_read; } else { - if (svm_fifo_dequeue_nowait (s0->server_tx_fifo, s0->pid, - len_to_deq0, data0) < 0) + n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo, + s0->pid, len_to_deq0, + data0); + if (n_bytes_read <= 0) goto dequeue_fail; } - b0->current_length = len_to_deq0; + b0->current_length = n_bytes_read; /* Ask transport to push header */ transport_vft->push_header (tc0, b0); - left_to_snd0 -= len_to_deq0; + left_to_snd0 -= n_bytes_read; *n_tx_packets = *n_tx_packets + 1; vlib_validate_buffer_enqueue_x1 (vm, node, next_index, @@ -246,25 +246,31 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - /* If we couldn't dequeue all bytes store progress */ - if (max_len_to_snd0 < e0->enqueue_length) + /* If we couldn't dequeue all bytes mark as partially read */ + if (max_len_to_snd0 < max_dequeue0) { - e0->enqueue_length -= max_len_to_snd0; - vec_add1 (smm->evts_partially_read[thread_index], *e0); + /* If we don't already have new event */ + if (svm_fifo_set_event (s0->server_tx_fifo)) + { + vec_add1 (smm->evts_partially_read[thread_index], *e0); + } } return 0; dequeue_fail: - /* Can't read from fifo. Store event rx progress, save as partially read, - * return buff to free list and return */ - e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; - vec_add1 (smm->evts_partially_read[thread_index], *e0); + /* + * Can't read from fifo. If we don't already have an event, save as partially + * read, return buff to free list and return + */ + clib_warning ("dequeue fail"); - to_next -= 1; - n_left_to_next += 1; + if (svm_fifo_set_event (s0->server_tx_fifo)) + { + vec_add1 (smm->evts_partially_read[thread_index], *e0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); _vec_len (smm->tx_buffers[thread_index]) += 1; - clib_warning ("dequeue fail"); return 0; } @@ -298,6 +304,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, session_fifo_event_t *my_fifo_events, *e; u32 n_to_dequeue, n_events; unix_shared_memory_queue_t *q; + application_t *app; int n_tx_packets = 0; u32 my_thread_index = vm->cpu_index; int i, rv; @@ -321,13 +328,18 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, if (n_to_dequeue == 0 && vec_len (my_fifo_events) == 0) return 0; + SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 0); + /* * If we didn't manage to process previous events try going * over them again without dequeuing new ones. 
*/ /* XXX: Block senders to sessions that can't keep up */ if (vec_len (my_fifo_events) >= 100) - goto skip_dequeue; + { + clib_warning ("too many fifo events unsolved"); + goto skip_dequeue; + } /* See you in the next life, don't be late */ if (pthread_mutex_trylock (&q->mutex)) @@ -352,19 +364,17 @@ skip_dequeue: { svm_fifo_t *f0; /* $$$ prefetch 1 ahead maybe */ stream_session_t *s0; - u32 server_session_index0, server_thread_index0; + u32 session_index0; session_fifo_event_t *e0; e0 = &my_fifo_events[i]; f0 = e0->fifo; - server_session_index0 = f0->server_session_index; - server_thread_index0 = f0->server_thread_index; + session_index0 = f0->server_session_index; /* $$$ add multiple event queues, per vpp worker thread */ - ASSERT (server_thread_index0 == my_thread_index); + ASSERT (f0->server_thread_index == my_thread_index); - s0 = stream_session_get_if_valid (server_session_index0, - my_thread_index); + s0 = stream_session_get_if_valid (session_index0, my_thread_index); if (CLIB_DEBUG && !s0) { @@ -385,11 +395,20 @@ skip_dequeue: rv = (smm->session_tx_fns[s0->session_type]) (vm, node, smm, e0, s0, my_thread_index, &n_tx_packets); + /* Out of buffers */ if (rv < 0) goto done; break; - + case FIFO_EVENT_SERVER_EXIT: + stream_session_disconnect (s0); + break; + case FIFO_EVENT_BUILTIN_RX: + svm_fifo_unset_event (s0->server_rx_fifo); + /* Get session's server */ + app = application_get (s0->app_index); + app->cb_fns.builtin_server_rx_callback (s0); + break; default: clib_warning ("unhandled event type %d", e0->event_type); } @@ -418,6 +437,8 @@ done: vlib_node_increment_counter (vm, session_queue_node.index, SESSION_QUEUE_ERROR_TX, n_tx_packets); + SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 1); + return n_tx_packets; } diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 06e2a09a..f10918aa 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -804,30 +804,36 @@ stream_session_enqueue_notify (stream_session_t * s, u8 block) /* Get session's server */ app = application_get (s->app_index); - /* Fabricate event */ - evt.fifo = s->server_rx_fifo; - evt.event_type = FIFO_EVENT_SERVER_RX; - evt.event_id = serial_number++; - evt.enqueue_length = svm_fifo_max_dequeue (s->server_rx_fifo); - /* Built-in server? Hand event to the callback... 
*/ if (app->cb_fns.builtin_server_rx_callback) - return app->cb_fns.builtin_server_rx_callback (s, &evt); - - /* Add event to server's event queue */ - q = app->event_queue; + return app->cb_fns.builtin_server_rx_callback (s); - /* Based on request block (or not) for lack of space */ - if (block || PREDICT_TRUE (q->cursize < q->maxsize)) - unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt, - 0 /* do wait for mutex */ ); - else - return -1; + /* If no event, send one */ + if (svm_fifo_set_event (s->server_rx_fifo)) + { + /* Fabricate event */ + evt.fifo = s->server_rx_fifo; + evt.event_type = FIFO_EVENT_SERVER_RX; + evt.event_id = serial_number++; + + /* Add event to server's event queue */ + q = app->event_queue; + + /* Based on request block (or not) for lack of space */ + if (block || PREDICT_TRUE (q->cursize < q->maxsize)) + unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt, + 0 /* do wait for mutex */ ); + else + { + clib_warning ("fifo full"); + return -1; + } + } /* *INDENT-OFF* */ - SESSION_EVT_DBG(s, SESSION_EVT_ENQ, ({ + SESSION_EVT_DBG(SESSION_EVT_ENQ, s, ({ ed->data[0] = evt.event_id; - ed->data[1] = evt.enqueue_length; + ed->data[1] = svm_fifo_max_dequeue (s->server_rx_fifo); })); /* *INDENT-ON* */ @@ -1192,8 +1198,29 @@ stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, void stream_session_disconnect (stream_session_t * s) { +// session_fifo_event_t evt; + s->session_state = SESSION_STATE_CLOSED; + /* RPC to vpp evt queue in the right thread */ + tp_vfts[s->session_type].close (s->connection_index, s->thread_index); + +// { +// /* Fabricate event */ +// evt.fifo = s->server_rx_fifo; +// evt.event_type = FIFO_EVENT_SERVER_RX; +// evt.event_id = serial_number++; +// +// /* Based on request block (or not) for lack of space */ +// if (PREDICT_TRUE(q->cursize < q->maxsize)) +// unix_shared_memory_queue_add (app->event_queue, (u8 *) &evt, +// 0 /* do wait for mutex */); +// else +// { +// clib_warning("fifo full"); +// return -1; +// } +// } } /** diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 96c00d87..a39bc06f 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -33,6 +33,7 @@ typedef enum FIFO_EVENT_SERVER_TX, FIFO_EVENT_TIMEOUT, FIFO_EVENT_SERVER_EXIT, + FIFO_EVENT_BUILTIN_RX } fifo_event_type_t; #define foreach_session_input_error \ @@ -91,14 +92,13 @@ typedef enum SESSION_STATE_N_STATES, } stream_session_state_t; -typedef CLIB_PACKED (struct - { - svm_fifo_t * fifo; - u8 event_type; - /* $$$$ for event logging */ - u16 event_id; - u32 enqueue_length; - }) session_fifo_event_t; +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + svm_fifo_t * fifo; + u8 event_type; + u16 event_id; +}) session_fifo_event_t; +/* *INDENT-ON* */ typedef struct _stream_session_t { @@ -333,7 +333,7 @@ stream_session_get_index (stream_session_t * s) } always_inline u32 -stream_session_max_enqueue (transport_connection_t * tc) +stream_session_max_rx_enqueue (transport_connection_t * tc) { stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); return svm_fifo_max_enqueue (s->server_rx_fifo); @@ -346,7 +346,6 @@ stream_session_fifo_size (transport_connection_t * tc) return s->server_rx_fifo->nitems; } - int stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, u8 queue_event); diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index b029ee65..38762afc 100644 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ 
-107,7 +107,7 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, { if (once_per_pool) { - str = format (str, "%-40s%-20s%-20s%-15s", + str = format (str, "%-50s%-20s%-20s%-15s", "Connection", "Rx fifo", "Tx fifo", "Session Index"); vlib_cli_output (vm, "%v", str); diff --git a/src/vnet/session/session_debug.h b/src/vnet/session/session_debug.h index 858f12e0..80a97cd5 100644 --- a/src/vnet/session/session_debug.h +++ b/src/vnet/session/session_debug.h @@ -21,7 +21,8 @@ #define foreach_session_dbg_evt \ _(ENQ, "enqueue") \ - _(DEQ, "dequeue") + _(DEQ, "dequeue") \ + _(DEQ_NODE, "dequeue") typedef enum _session_evt_dbg { @@ -30,7 +31,10 @@ typedef enum _session_evt_dbg #undef _ } session_evt_dbg_e; -#if TRANSPORT_DEBUG +#define SESSION_DBG (0) +#define SESSION_DEQ_NODE_EVTS (0) + +#if TRANSPORT_DEBUG && SESSION_DBG #define DEC_SESSION_ETD(_s, _e, _size) \ struct \ @@ -44,6 +48,12 @@ typedef enum _session_evt_dbg ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, \ _e, _tc->elog_track) +#define DEC_SESSION_ED(_e, _size) \ + struct \ + { \ + u32 data[_size]; \ + } * ed; \ + ed = ELOG_DATA (&vlib_global_main.elog_main, _e) #define SESSION_EVT_DEQ_HANDLER(_s, _body) \ { \ @@ -67,13 +77,33 @@ typedef enum _session_evt_dbg do { _body; } while (0); \ } +#if SESSION_DEQ_NODE_EVTS +#define SESSION_EVT_DEQ_NODE_HANDLER(_node_evt) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "deq-node: %s", \ + .format_args = "t4", \ + .n_enum_strings = 2, \ + .enum_strings = { \ + "start", \ + "end", \ + }, \ + }; \ + DEC_SESSION_ED(_e, 1); \ + ed->data[0] = _node_evt; \ +} +#else +#define SESSION_EVT_DEQ_NODE_HANDLER(_node_evt) +#endif + #define CONCAT_HELPER(_a, _b) _a##_b #define CC(_a, _b) CONCAT_HELPER(_a, _b) -#define SESSION_EVT_DBG(_s, _evt, _body) CC(_evt, _HANDLER)(_s, _body) +#define SESSION_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args) #else -#define SESSION_EVT_DBG(_s, _evt, _body) +#define SESSION_EVT_DBG(_evt, _args...) #endif #endif /* SRC_VNET_SESSION_SESSION_DEBUG_H_ */ diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index 421121d2..2f912cbc 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -38,7 +38,7 @@ typedef struct _transport_connection u32 thread_index; /**< Worker-thread index */ #if TRANSPORT_DEBUG - elog_track_t elog_track; /**< Debug purposes */ + elog_track_t elog_track; /**< Event logging */ #endif /** Macros for 'derived classes' where base is named "connection" */ diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c new file mode 100644 index 00000000..a6eeb775 --- /dev/null +++ b/src/vnet/tcp/builtin_client.c @@ -0,0 +1,411 @@ +/* + * builtin_client.c - vpp built-in tcp client/connect code + * + * Copyright (c) 2017 by Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include + +/* define message IDs */ +#include + +/* define message structures */ +#define vl_typedefs +#include +#undef vl_typedefs + +/* define generated endian-swappers */ +#define vl_endianfun +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +static void +send_test_chunk (tclient_main_t * tm, session_t * s) +{ + u8 *test_data = tm->connect_test_data; + int test_buf_offset = 0; + u32 bytes_this_chunk; + session_fifo_event_t evt; + static int serial_number = 0; + int rv; + + while (s->bytes_to_send > 0) + { + bytes_this_chunk = vec_len (test_data) < s->bytes_to_send + ? vec_len (test_data) : s->bytes_to_send; + + rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, 0 /*pid */ , + bytes_this_chunk, + test_data + test_buf_offset); + + if (rv > 0) + { + s->bytes_to_send -= rv; + test_buf_offset += rv; + + if (svm_fifo_set_event (s->server_tx_fifo)) + { + /* Fabricate TX event, send to vpp */ + evt.fifo = s->server_tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_id = serial_number++; + + unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + } + } +} + +static void +receive_test_chunk (tclient_main_t * tm, session_t * s) +{ + svm_fifo_t *rx_fifo = s->server_rx_fifo; + int n_read, bytes, i; + + bytes = svm_fifo_max_dequeue (rx_fifo); + /* Allow enqueuing of new event */ + svm_fifo_unset_event (rx_fifo); + + /* Read the bytes */ + do + { + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (tm->rx_buf), + tm->rx_buf); + if (n_read > 0) + { + bytes -= n_read; + for (i = 0; i < n_read; i++) + { + if (tm->rx_buf[i] != ((s->bytes_received + i) & 0xff)) + { + clib_warning ("read %d error at byte %lld, 0x%x not 0x%x", + n_read, s->bytes_received + i, + tm->rx_buf[i], + ((s->bytes_received + i) & 0xff)); + } + } + s->bytes_to_receive -= n_read; + s->bytes_received += n_read; + } + + } + while (n_read < 0 || bytes > 0); +} + +static void * +tclient_thread_fn (void *arg) +{ + tclient_main_t *tm = &tclient_main; + vl_api_disconnect_session_t *dmp; + session_t *sp; + struct timespec ts, tsrem; + int i; + int try_tx, try_rx; + u32 *session_indices = 0; + + /* stats thread wants no signals. */ + { + sigset_t s; + sigfillset (&s); + pthread_sigmask (SIG_SETMASK, &s, 0); + } + + while (1) + { + /* Wait until we're told to get busy */ + while (tm->run_test == 0 + || (tm->ready_connections != tm->expected_connections)) + { + ts.tv_sec = 0; + ts.tv_nsec = 100000000; + while (nanosleep (&ts, &tsrem) < 0) + ts = tsrem; + } + tm->run_test = 0; + + clib_warning ("Run %d iterations", tm->n_iterations); + + for (i = 0; i < tm->n_iterations; i++) + { + session_t *sp; + + do + { + try_tx = try_rx = 0; + + /* *INDENT-OFF* */ + pool_foreach (sp, tm->sessions, ({ + if (sp->bytes_to_send > 0) + { + send_test_chunk (tm, sp); + try_tx = 1; + } + })); + pool_foreach (sp, tm->sessions, ({ + if (sp->bytes_to_receive > 0) + { + receive_test_chunk (tm, sp); + try_rx = 1; + } + })); + /* *INDENT-ON* */ + + } + while (try_tx || try_rx); + } + clib_warning ("Done %d iterations", tm->n_iterations); + + /* Disconnect sessions... 
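receive_test_chunk validates the echoed stream against a rolling pattern: byte N of the stream is simply N & 0xff, so the checker only needs a running byte count. A tiny stand-alone version of that generator and checker is below; fill_pattern and check_pattern are hypothetical names, not part of the patch.

#include <stdio.h>

static void
fill_pattern (unsigned char *buf, int len, unsigned long long start_offset)
{
  int i;
  for (i = 0; i < len; i++)
    buf[i] = (start_offset + i) & 0xff;
}

/* Returns the number of mismatching bytes; 0 means the chunk is intact. */
static int
check_pattern (const unsigned char *buf, int len,
	       unsigned long long start_offset)
{
  int i, errors = 0;
  for (i = 0; i < len; i++)
    if (buf[i] != ((start_offset + i) & 0xff))
      errors++;
  return errors;
}

int
main (void)
{
  unsigned char chunk[300];
  fill_pattern (chunk, sizeof (chunk), 1000);	/* bytes 1000..1299 of the stream */
  printf ("mismatches: %d\n", check_pattern (chunk, sizeof (chunk), 1000));
  return 0;
}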
*/ + vec_reset_length (session_indices); + pool_foreach (sp, tm->sessions, ( + { + vec_add1 (session_indices, + sp - tm->sessions); + } + )); + + for (i = 0; i < vec_len (session_indices); i++) + { + sp = pool_elt_at_index (tm->sessions, session_indices[i]); + dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); + memset (dmp, 0, sizeof (*dmp)); + dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); + dmp->client_index = tm->my_client_index; + dmp->session_index = sp->vpp_session_index; + dmp->session_thread_index = sp->vpp_session_thread; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); + pool_put (tm->sessions, sp); + } + } + /* NOTREACHED */ + return 0; +} + +/* So we don't get "no handler for... " msgs */ +static void +vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) +{ + tclient_main_t *tm = &tclient_main; + + tm->my_client_index = mp->index; +} + +static void +vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) +{ + tclient_main_t *tm = &tclient_main; + session_t *session; + u32 session_index; + u64 key; + i32 retval = /* clib_net_to_host_u32 ( */ mp->retval /*) */ ; + + if (retval < 0) + { + clib_warning ("connection failed: retval %d", retval); + return; + } + + tm->our_event_queue = (unix_shared_memory_queue_t *) + mp->vpp_event_queue_address; + + tm->vpp_event_queue = (unix_shared_memory_queue_t *) + mp->vpp_event_queue_address; + + /* + * Setup session + */ + pool_get (tm->sessions, session); + memset (session, 0, sizeof (*session)); + session_index = session - tm->sessions; + session->bytes_to_receive = session->bytes_to_send = tm->bytes_to_send; + + session->server_rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + session->server_rx_fifo->client_session_index = session_index; + session->server_tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + session->server_tx_fifo->client_session_index = session_index; + + session->vpp_session_index = mp->session_index; + session->vpp_session_thread = mp->session_thread_index; + + /* Add it to the session lookup table */ + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; + hash_set (tm->session_index_by_vpp_handles, key, session_index); + + tm->ready_connections++; +} + +static void +create_api_loopback (tclient_main_t * tm) +{ + vl_api_memclnt_create_t _m, *mp = &_m; + extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *); + api_main_t *am = &api_main; + vl_shmem_hdr_t *shmem_hdr; + + /* + * Create a "loopback" API client connection + * Don't do things like this unless you know what you're doing... 
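The connect_uri reply handler keys its disconnect hash with a single u64 that packs the vpp thread index into the upper 32 bits and the session index into the lower 32. A small sketch of the packing and unpacking; make_session_handle and split_session_handle are illustrative names only.

#include <stdint.h>
#include <stdio.h>

/* Pack (thread_index, session_index) into one hash key, as the
 * reply handler above does. */
static uint64_t
make_session_handle (uint32_t thread_index, uint32_t session_index)
{
  return ((uint64_t) thread_index << 32) | (uint64_t) session_index;
}

static void
split_session_handle (uint64_t handle, uint32_t * thread_index,
		      uint32_t * session_index)
{
  *thread_index = handle >> 32;
  *session_index = handle & 0xffffffff;
}

int
main (void)
{
  uint32_t t, s;
  uint64_t h = make_session_handle (2, 117);
  split_session_handle (h, &t, &s);
  printf ("thread %u session %u\n", t, s);
  return 0;
}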
+ */ + + shmem_hdr = am->shmem_hdr; + tm->vl_input_queue = shmem_hdr->vl_input_queue; + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; + mp->context = 0xFEEDFACE; + mp->input_queue = (u64) tm->vl_input_queue; + strncpy ((char *) mp->name, "tcp_tester", sizeof (mp->name) - 1); + + vl_api_memclnt_create_t_handler (mp); +} + +#define foreach_tclient_static_api_msg \ +_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ +_(CONNECT_URI_REPLY, connect_uri_reply) + +static clib_error_t * +tclient_api_hookup (vlib_main_t * vm) +{ + tclient_main_t *tm = &tclient_main; + vl_msg_api_msg_config_t _c, *c = &_c; + int i; + + /* Init test data */ + vec_validate (tm->connect_test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (tm->connect_test_data); i++) + tm->connect_test_data[i] = i & 0xff; + + tm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); + vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); + + /* Hook up client-side static APIs to our handlers */ +#define _(N,n) do { \ + c->id = VL_API_##N; \ + c->name = #n; \ + c->handler = vl_api_##n##_t_handler; \ + c->cleanup = vl_noop_handler; \ + c->endian = vl_api_##n##_t_endian; \ + c->print = vl_api_##n##_t_print; \ + c->size = sizeof(vl_api_##n##_t); \ + c->traced = 1; /* trace, so these msgs print */ \ + c->replay = 0; /* don't replay client create/delete msgs */ \ + c->message_bounce = 0; /* don't bounce this message */ \ + vl_msg_api_config(c);} while (0); + + foreach_tclient_static_api_msg; +#undef _ + + return 0; +} + +VLIB_API_INIT_FUNCTION (tclient_api_hookup); + +static clib_error_t * +test_tcp_clients_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + u8 *connect_uri = (u8 *) "tcp://6.0.1.2/1234"; + u8 *uri; + tclient_main_t *tm = &tclient_main; + int i; + u32 n_clients = 1; + + tm->bytes_to_send = 8192; + tm->n_iterations = 1; + vec_free (tm->connect_uri); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "nclients %d", &n_clients)) + ; + else if (unformat (input, "iterations %d", &tm->n_iterations)) + ; + else if (unformat (input, "bytes %d", &tm->bytes_to_send)) + ; + else if (unformat (input, "uri %s", &tm->connect_uri)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + tm->ready_connections = 0; + tm->expected_connections = n_clients; + uri = connect_uri; + if (tm->connect_uri) + uri = tm->connect_uri; + + create_api_loopback (tm); + + /* Start a transmit thread */ + if (tm->client_thread_handle == 0) + { + int rv = pthread_create (&tm->client_thread_handle, + NULL /*attr */ , tclient_thread_fn, 0); + if (rv) + { + tm->client_thread_handle = 0; + return clib_error_return (0, "pthread_create returned %d", rv); + } + } + + /* Fire off connect requests, in something approaching a normal manner */ + for (i = 0; i < n_clients; i++) + { + vl_api_connect_uri_t *cmp; + cmp = vl_msg_api_alloc_as_if_client (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = tm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, uri, strlen ((char *) uri) + 1); + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & cmp); + } + + tm->run_test = 1; + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (test_clients_command, static) = +{ + .path = "test tcp clients", + .short_help = "test tcp clients", + .function = test_tcp_clients_command_fn, +}; +/* *INDENT-ON* */ + +/* + * fd.io 
coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h new file mode 100644 index 00000000..64030302 --- /dev/null +++ b/src/vnet/tcp/builtin_client.h @@ -0,0 +1,131 @@ + +/* + * tclient.h - skeleton vpp engine plug-in header file + * + * Copyright (c) + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __included_tclient_h__ +#define __included_tclient_h__ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +typedef struct +{ + u32 bytes_to_send; + u32 bytes_sent; + u32 bytes_to_receive; + u32 bytes_received; + + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; + + u32 vpp_session_index; + u32 vpp_session_thread; +} session_t; + +typedef struct +{ + /* API message ID base */ + u16 msg_id_base; + + /* vpe input queue */ + unix_shared_memory_queue_t *vl_input_queue; + + /* API client handle */ + u32 my_client_index; + + /* The URI we're playing with */ + u8 *uri; + + /* Session pool */ + session_t *sessions; + + /* Hash table for disconnect processing */ + uword *session_index_by_vpp_handles; + + /* intermediate rx buffer */ + u8 *rx_buf; + + /* URI for slave's connect */ + u8 *connect_uri; + + u32 connected_session_index; + + int i_am_master; + + /* drop all packets */ + int drop_packets; + + /* Our event queue */ + unix_shared_memory_queue_t *our_event_queue; + + /* $$$ single thread only for the moment */ + unix_shared_memory_queue_t *vpp_event_queue; + + pid_t my_pid; + + /* For deadman timers */ + clib_time_t clib_time; + + /* Connection counts */ + u32 expected_connections; + volatile u32 ready_connections; + + /* Signal variables */ + volatile int run_test; + + /* Number of iterations */ + int n_iterations; + + /* Bytes to send */ + u32 bytes_to_send; + + u32 configured_segment_size; + + /* VNET_API_ERROR_FOO -> "Foo" hash table */ + uword *error_string_by_error_number; + + u8 *connect_test_data; + pthread_t client_thread_handle; + u32 client_bytes_received; + u8 test_return_packets; + + /* convenience */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + ethernet_main_t *ethernet_main; +} tclient_main_t; + +tclient_main_t tclient_main; + +vlib_node_registration_t tclient_node; + +#endif /* __included_tclient_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index dd6759c5..efd26e91 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -22,6 +22,7 @@ typedef struct { u8 *rx_buf; unix_shared_memory_queue_t **vpp_queue; + u32 byte_index; vlib_main_t *vlib_main; } builtin_server_main_t; @@ -37,6 +38,7 @@ builtin_session_accept_callback (stream_session_t * s) bsm->vpp_queue[s->thread_index] = session_manager_get_vpp_event_queue (s->thread_index); s->session_state = SESSION_STATE_READY; + bsm->byte_index = 0; return 0; 
} @@ -80,57 +82,94 @@ builtin_redirect_connect_callback (u32 client_index, void *mp) return -1; } +void +test_bytes (builtin_server_main_t * bsm, int actual_transfer) +{ + int i; + + for (i = 0; i < actual_transfer; i++) + { + if (bsm->rx_buf[i] != ((bsm->byte_index + i) & 0xff)) + { + clib_warning ("at %d expected %d got %d", bsm->byte_index + i, + (bsm->byte_index + i) & 0xff, bsm->rx_buf[i]); + } + } + bsm->byte_index += actual_transfer; +} + int -builtin_server_rx_callback (stream_session_t * s, session_fifo_event_t * e) +builtin_server_rx_callback (stream_session_t * s) { - int n_written, bytes, total_copy_bytes; - int n_read; - svm_fifo_t *tx_fifo; + u32 n_written, max_dequeue, max_enqueue, max_transfer; + int actual_transfer; + svm_fifo_t *tx_fifo, *rx_fifo; builtin_server_main_t *bsm = &builtin_server_main; session_fifo_event_t evt; static int serial_number = 0; - bytes = e->enqueue_length; - if (PREDICT_FALSE (bytes <= 0)) + max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); + max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo); + + if (PREDICT_FALSE (max_dequeue == 0)) { - clib_warning ("bizarre rx callback: bytes %d", bytes); return 0; } tx_fifo = s->server_tx_fifo; + rx_fifo = s->server_rx_fifo; /* Number of bytes we're going to copy */ - total_copy_bytes = (bytes < (tx_fifo->nitems - tx_fifo->cursize)) ? bytes : - tx_fifo->nitems - tx_fifo->cursize; + max_transfer = (max_dequeue < max_enqueue) ? max_dequeue : max_enqueue; - if (PREDICT_FALSE (total_copy_bytes <= 0)) + /* No space in tx fifo */ + if (PREDICT_FALSE (max_transfer == 0)) { - clib_warning ("no space in tx fifo, event had %d bytes", bytes); + /* XXX timeout for session that are stuck */ + + /* Program self-tap to retry */ + if (svm_fifo_set_event (rx_fifo)) + { + evt.fifo = rx_fifo; + evt.event_type = FIFO_EVENT_BUILTIN_RX; + evt.event_id = 0; + unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], + (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + return 0; } - vec_validate (bsm->rx_buf, total_copy_bytes - 1); - _vec_len (bsm->rx_buf) = total_copy_bytes; + svm_fifo_unset_event (rx_fifo); + + vec_validate (bsm->rx_buf, max_transfer - 1); + _vec_len (bsm->rx_buf) = max_transfer; - n_read = svm_fifo_dequeue_nowait (s->server_rx_fifo, 0, total_copy_bytes, - bsm->rx_buf); - ASSERT (n_read == total_copy_bytes); + actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, 0, max_transfer, + bsm->rx_buf); + ASSERT (actual_transfer == max_transfer); + +// test_bytes (bsm, actual_transfer); /* * Echo back */ - n_written = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, bsm->rx_buf); - ASSERT (n_written == total_copy_bytes); + n_written = + svm_fifo_enqueue_nowait (tx_fifo, 0, actual_transfer, bsm->rx_buf); + ASSERT (n_written == max_transfer); - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - evt.enqueue_length = total_copy_bytes; - evt.event_id = serial_number++; + if (svm_fifo_set_event (tx_fifo)) + { + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_id = serial_number++; - unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], (u8 *) & evt, - 0 /* do wait for mutex */ ); + unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], + (u8 *) & evt, 0 /* do wait for mutex */ ); + } return 0; } @@ -164,7 +203,7 @@ server_create (vlib_main_t * vm) a->api_client_index = ~0; a->session_cb_vft = &builtin_session_cb_vft; a->options = options; - a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 
<< 10; + a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10; a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10; a->segment_name = segment_name; diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 0d2e6d0e..c3df5bc1 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -328,7 +328,7 @@ tcp_connection_init_vars (tcp_connection_t * tc) { tcp_connection_timers_init (tc); tcp_set_snd_mss (tc); - tc->sack_sb.head = TCP_INVALID_SACK_HOLE_INDEX; + scoreboard_init (&tc->sack_sb); tcp_cc_init (tc); } @@ -558,17 +558,48 @@ tcp_session_send_mss (transport_connection_t * trans_conn) return tc->snd_mss; } +/** + * Compute tx window session is allowed to fill. + */ u32 tcp_session_send_space (transport_connection_t * trans_conn) { + u32 snd_space; tcp_connection_t *tc = (tcp_connection_t *) trans_conn; - return tcp_available_snd_space (tc); + + /* If we haven't gotten dupacks or if we did and have gotten sacked bytes + * then we can still send */ + if (PREDICT_TRUE (tcp_in_fastrecovery (tc) == 0 + && (tc->rcv_dupacks == 0 + || tc->sack_sb.last_sacked_bytes))) + { + snd_space = tcp_available_snd_space (tc); + + /* If we can't write at least a segment, don't try at all */ + if (snd_space < tc->snd_mss) + return 0; + return snd_space; + } + + /* If in fast recovery, send 1 SMSS if wnd allows */ + if (tcp_in_fastrecovery (tc) && tcp_available_snd_space (tc) + && tcp_fastrecovery_sent_1_smss (tc)) + { + tcp_fastrecovery_1_smss_on (tc); + return tc->snd_mss; + } + + return 0; } u32 tcp_session_tx_fifo_offset (transport_connection_t * trans_conn) { tcp_connection_t *tc = (tcp_connection_t *) trans_conn; + + ASSERT (seq_geq (tc->snd_nxt, tc->snd_una)); + + /* This still works if fast retransmit is on */ return (tc->snd_nxt - tc->snd_una); } @@ -762,7 +793,7 @@ tcp_main_enable (vlib_main_t * vm) vec_validate (tm->timer_wheels, num_threads - 1); tcp_initialize_timer_wheels (tm); - vec_validate (tm->delack_connections, num_threads - 1); +// vec_validate (tm->delack_connections, num_threads - 1); /* Initialize clocks per tick for TCP timestamp. Used to compute * monotonically increasing timestamps. */ diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 082ab1d8..b4286bc4 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -30,9 +30,10 @@ #define TCP_PAWS_IDLE 24 * 24 * 60 * 60 * THZ /**< 24 days */ #define TCP_MAX_OPTION_SPACE 40 -#define TCP_DUPACK_THRESHOLD 3 -#define TCP_MAX_RX_FIFO_SIZE 2 << 20 -#define TCP_IW_N_SEGMENTS 10 +#define TCP_DUPACK_THRESHOLD 3 +#define TCP_MAX_RX_FIFO_SIZE 2 << 20 +#define TCP_IW_N_SEGMENTS 10 +#define TCP_ALWAYS_ACK 0 /**< If on, we always ack */ /** TCP FSM state definitions as per RFC793. 
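The reworked echo callback in builtin_server.c above never copies more than both fifos can take: it moves min(rx bytes pending, tx space free), and when the tx fifo is completely full it re-arms the rx event so the session node calls it back later (the "self-tap") instead of dropping data. Below is a compressed sketch of that control flow, with toy fifo state standing in for the svm_fifo calls; none of these names are from the patch.

#include <stdio.h>

typedef struct
{
  int pending;			/* bytes queued */
  int capacity;			/* total size */
  int event_set;		/* notification flag */
} toy_fifo_t;

#define min_int(a, b) ((a) < (b) ? (a) : (b))

/* Returns bytes echoed; 0 with the rx event left armed means
 * "try again later" (the self-tap). */
static int
echo_once (toy_fifo_t * rx, toy_fifo_t * tx)
{
  int max_dequeue = rx->pending;
  int max_enqueue = tx->capacity - tx->pending;
  int max_transfer = min_int (max_dequeue, max_enqueue);

  if (max_dequeue == 0)
    return 0;			/* spurious event, nothing to do */

  if (max_transfer == 0)
    {
      rx->event_set = 1;	/* program self-tap: keep the RX event pending */
      return 0;
    }

  rx->event_set = 0;		/* allow a fresh RX notification */
  rx->pending -= max_transfer;	/* "dequeue" */
  tx->pending += max_transfer;	/* "enqueue" the echoed bytes */

  if (!tx->event_set)
    tx->event_set = 1;		/* would post FIFO_EVENT_SERVER_TX to vpp */

  return max_transfer;
}

int
main (void)
{
  toy_fifo_t rx = { 500, 1000, 1 }, tx = { 900, 1000, 0 };
  printf ("echoed %d bytes\n", echo_once (&rx, &tx));	/* 100: tx space limits */
  printf ("echoed %d bytes\n", echo_once (&rx, &tx));	/* 0: tx full, self-tap */
  return 0;
}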
*/ #define foreach_tcp_fsm_state \ @@ -102,13 +103,12 @@ void tcp_update_time (f64 now, u32 thread_index); /** TCP connection flags */ #define foreach_tcp_connection_flag \ - _(DELACK, "Delay ACK") \ _(SNDACK, "Send ACK") \ - _(BURSTACK, "Burst ACK set") \ _(FINSNT, "FIN sent") \ _(SENT_RCV_WND0, "Sent 0 receive window") \ _(RECOVERY, "Recovery on") \ - _(FAST_RECOVERY, "Fast Recovery on") + _(FAST_RECOVERY, "Fast Recovery on") \ + _(FR_1_SMSS, "Sent 1 SMSS") typedef enum _tcp_connection_flag_bits { @@ -160,8 +160,12 @@ typedef struct _sack_scoreboard_hole typedef struct _sack_scoreboard { sack_scoreboard_hole_t *holes; /**< Pool of holes */ - u32 head; /**< Index to first entry */ + u32 head; /**< Index of first entry */ + u32 tail; /**< Index of last entry */ u32 sacked_bytes; /**< Number of bytes sacked in sb */ + u32 last_sacked_bytes; /**< Number of bytes last sacked */ + u32 snd_una_adv; /**< Bytes to add to snd_una */ + u32 max_byte_sacked; /**< Highest byte acked */ } sack_scoreboard_t; typedef enum _tcp_cc_algorithm_type @@ -214,7 +218,7 @@ typedef struct _tcp_connection sack_block_t *snd_sacks; /**< Vector of SACKs to send. XXX Fixed size? */ sack_scoreboard_t sack_sb; /**< SACK "scoreboard" that tracks holes */ - u8 rcv_dupacks; /**< Number of DUPACKs received */ + u16 rcv_dupacks; /**< Number of DUPACKs received */ u8 snt_dupacks; /**< Number of DUPACKs sent in a burst */ /* Congestion control */ @@ -224,6 +228,7 @@ typedef struct _tcp_connection u32 bytes_acked; /**< Bytes acknowledged by current segment */ u32 rtx_bytes; /**< Retransmitted bytes */ u32 tsecr_last_ack; /**< Timestamp echoed to us in last healthy ACK */ + u32 snd_congestion; /**< snd_una_max when congestion is detected */ tcp_cc_algorithm_t *cc_algo; /**< Congestion control algorithm */ /* RTT and RTO */ @@ -250,8 +255,10 @@ struct _tcp_cc_algorithm #define tcp_fastrecovery_off(tc) (tc)->flags &= ~TCP_CONN_FAST_RECOVERY #define tcp_in_fastrecovery(tc) ((tc)->flags & TCP_CONN_FAST_RECOVERY) #define tcp_in_recovery(tc) ((tc)->flags & (TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY)) -#define tcp_recovery_off(tc) ((tc)->flags &= ~(TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY)) #define tcp_in_slowstart(tc) (tc->cwnd < tc->ssthresh) +#define tcp_fastrecovery_sent_1_smss(tc) ((tc)->flags & TCP_CONN_FR_1_SMSS) +#define tcp_fastrecovery_1_smss_on(tc) ((tc)->flags |= TCP_CONN_FR_1_SMSS) +#define tcp_fastrecovery_1_smss_off(tc) ((tc)->flags &= ~TCP_CONN_FR_1_SMSS) typedef enum { @@ -293,8 +300,8 @@ typedef struct _tcp_main /* Per worker-thread timer wheel for connections timers */ tw_timer_wheel_16t_2w_512sl_t *timer_wheels; - /* Convenience per worker-thread vector of connections to DELACK */ - u32 **delack_connections; +// /* Convenience per worker-thread vector of connections to DELACK */ +// u32 **delack_connections; /* Pool of half-open connections on which we've sent a SYN */ tcp_connection_t *half_open_connections; @@ -397,8 +404,16 @@ tcp_end_seq (tcp_header_t * th, u32 len) always_inline u32 tcp_flight_size (const tcp_connection_t * tc) { - return tc->snd_una_max - tc->snd_una - tc->sack_sb.sacked_bytes - + tc->rtx_bytes; + int flight_size; + + flight_size = (int) ((tc->snd_una_max - tc->snd_una) + tc->rtx_bytes) + - (tc->rcv_dupacks * tc->snd_mss) /* - tc->sack_sb.sacked_bytes */ ; + + /* Happens if we don't clear sacked bytes */ + if (flight_size < 0) + return 0; + + return flight_size; } /** @@ -439,9 +454,13 @@ tcp_available_snd_space (const tcp_connection_t * tc) return available_wnd - flight_size; } +void 
tcp_update_rcv_wnd (tcp_connection_t * tc); + void tcp_retransmit_first_unacked (tcp_connection_t * tc); void tcp_fast_retransmit (tcp_connection_t * tc); +void tcp_cc_congestion (tcp_connection_t * tc); +void tcp_cc_recover (tcp_connection_t * tc); always_inline u32 tcp_time_now (void) @@ -453,7 +472,7 @@ u32 tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b); u32 tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, - u32 max_bytes); + u32 offset, u32 max_bytes); void tcp_connection_timers_init (tcp_connection_t * tc); void tcp_connection_timers_reset (tcp_connection_t * tc); @@ -476,14 +495,6 @@ tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval) tc->c_c_index, timer_id, interval); } -always_inline void -tcp_retransmit_timer_set (tcp_connection_t * tc) -{ - /* XXX Switch to faster TW */ - tcp_timer_set (tc, TCP_TIMER_RETRANSMIT, - clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); -} - always_inline void tcp_timer_reset (tcp_connection_t * tc, u8 timer_id) { @@ -506,6 +517,27 @@ tcp_timer_update (tcp_connection_t * tc, u8 timer_id, u32 interval) tc->c_c_index, timer_id, interval); } +/* XXX Switch retransmit to faster TW */ +always_inline void +tcp_retransmit_timer_set (tcp_connection_t * tc) +{ + tcp_timer_set (tc, TCP_TIMER_RETRANSMIT, + clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); +} + +always_inline void +tcp_retransmit_timer_update (tcp_connection_t * tc) +{ + tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, + clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); +} + +always_inline void +tcp_retransmit_timer_reset (tcp_connection_t * tc) +{ + tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT); +} + always_inline u8 tcp_timer_is_active (tcp_connection_t * tc, tcp_timers_e timer) { @@ -516,6 +548,14 @@ void scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole); +always_inline sack_scoreboard_hole_t * +scoreboard_get_hole (sack_scoreboard_t * sb, u32 index) +{ + if (index != TCP_INVALID_SACK_HOLE_INDEX) + return pool_elt_at_index (sb->holes, index); + return 0; +} + always_inline sack_scoreboard_hole_t * scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) { @@ -532,6 +572,14 @@ scoreboard_first_hole (sack_scoreboard_t * sb) return 0; } +always_inline sack_scoreboard_hole_t * +scoreboard_last_hole (sack_scoreboard_t * sb) +{ + if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX) + return pool_elt_at_index (sb->holes, sb->tail); + return 0; +} + always_inline void scoreboard_clear (sack_scoreboard_t * sb) { @@ -540,6 +588,10 @@ scoreboard_clear (sack_scoreboard_t * sb) { scoreboard_remove_hole (sb, hole); } + sb->sacked_bytes = 0; + sb->last_sacked_bytes = 0; + sb->snd_una_adv = 0; + sb->max_byte_sacked = 0; } always_inline u32 @@ -548,6 +600,21 @@ scoreboard_hole_bytes (sack_scoreboard_hole_t * hole) return hole->end - hole->start; } +always_inline u32 +scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) +{ + return hole - sb->holes; +} + +always_inline void +scoreboard_init (sack_scoreboard_t * sb) +{ + sb->head = TCP_INVALID_SACK_HOLE_INDEX; + sb->tail = TCP_INVALID_SACK_HOLE_INDEX; +} + +void tcp_rcv_sacks (tcp_connection_t * tc, u32 ack); + always_inline void tcp_cc_algo_register (tcp_cc_algorithm_type_e type, const tcp_cc_algorithm_t * vft) diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index 069c512d..5a71694e 100644 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -19,6 +19,8 @@ #include #define TCP_DEBUG (1) +#define TCP_DEBUG_CC (1) +#define 
TCP_DEBUG_VERBOSE (0) #define foreach_tcp_dbg_evt \ _(INIT, "") \ @@ -30,14 +32,24 @@ _(DELETE, "delete") \ _(SYN_SENT, "SYN sent") \ _(FIN_SENT, "FIN sent") \ + _(ACK_SENT, "ACK sent") \ + _(DUPACK_SENT, "DUPACK sent") \ _(RST_SENT, "RST sent") \ _(SYN_RCVD, "SYN rcvd") \ _(ACK_RCVD, "ACK rcvd") \ + _(DUPACK_RCVD, "DUPACK rcvd") \ _(FIN_RCVD, "FIN rcvd") \ _(RST_RCVD, "RST rcvd") \ _(PKTIZE, "packetize") \ _(INPUT, "in") \ - _(TIMER_POP, "timer pop") + _(SND_WND, "snd_wnd update") \ + _(OUTPUT, "output") \ + _(TIMER_POP, "timer pop") \ + _(CC_RTX, "retransmit") \ + _(CC_EVT, "cc event") \ + _(CC_PACK, "cc partial ack") \ + _(SEG_INVALID, "invalid segment") \ + _(ACK_RCV_ERR, "invalid ack") \ typedef enum _tcp_dbg { @@ -73,10 +85,10 @@ typedef enum _tcp_dbg_evt ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, \ _e, _tc->c_elog_track) -#define TCP_EVT_INIT_HANDLER(_tc, ...) \ +#define TCP_EVT_INIT_HANDLER(_tc, _fmt, ...) \ { \ _tc->c_elog_track.name = \ - (char *) format (0, "%d%c", _tc->c_c_index, 0); \ + (char *) format (0, _fmt, _tc->c_c_index, 0); \ elog_track_register (&vlib_global_main.elog_main, &_tc->c_elog_track);\ } @@ -87,7 +99,7 @@ typedef enum _tcp_dbg_evt #define TCP_EVT_OPEN_HANDLER(_tc, ...) \ { \ - TCP_EVT_INIT_HANDLER(_tc); \ + TCP_EVT_INIT_HANDLER(_tc, "s%d%c"); \ ELOG_TYPE_DECLARE (_e) = \ { \ .format = "open: index %d", \ @@ -110,7 +122,7 @@ typedef enum _tcp_dbg_evt #define TCP_EVT_BIND_HANDLER(_tc, ...) \ { \ - TCP_EVT_INIT_HANDLER(_tc); \ + TCP_EVT_INIT_HANDLER(_tc, "l%d%c"); \ ELOG_TYPE_DECLARE (_e) = \ { \ .format = "bind: listener %d", \ @@ -138,16 +150,44 @@ typedef enum _tcp_dbg_evt .format = "delete: %d", \ .format_args = "i4", \ }; \ - DECLARE_ETD(_tc, _e, 0); \ + DECLARE_ETD(_tc, _e, 1); \ ed->data[0] = _tc->c_c_index; \ TCP_EVT_DEALLOC_HANDLER(_tc); \ } +#define TCP_EVT_ACK_SENT_HANDLER(_tc, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "ack_prep: acked %u rcv_nxt %u rcv_wnd %u snd_nxt %u", \ + .format_args = "i4i4i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 4); \ + ed->data[0] = _tc->rcv_nxt - _tc->rcv_las; \ + ed->data[1] = _tc->rcv_nxt - _tc->irs; \ + ed->data[2] = _tc->rcv_wnd; \ + ed->data[3] = _tc->snd_nxt - _tc->iss; \ +} + +#define TCP_EVT_DUPACK_SENT_HANDLER(_tc, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "dack_tx: rcv_nxt %u rcv_wnd %u snd_nxt %u av-wnd %u", \ + .format_args = "i4i4i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 4); \ + ed->data[0] = _tc->rcv_nxt - _tc->irs; \ + ed->data[1] = _tc->rcv_wnd; \ + ed->data[2] = _tc->snd_nxt - _tc->iss; \ + ed->data[3] = tcp_available_wnd(_tc); \ +} + #define TCP_EVT_SYN_SENT_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "SYN: iss %d", \ + .format = "SYNtx: iss %u", \ .format_args = "i4", \ }; \ DECLARE_ETD(_tc, _e, 1); \ @@ -158,7 +198,7 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "FIN: snd_nxt %d rcv_nxt %d", \ + .format = "FINtx: snd_nxt %d rcv_nxt %d", \ .format_args = "i4i4", \ }; \ DECLARE_ETD(_tc, _e, 2); \ @@ -170,7 +210,7 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "RST: snd_nxt %d rcv_nxt %d", \ + .format = "RSTtx: snd_nxt %d rcv_nxt %d", \ .format_args = "i4i4", \ }; \ DECLARE_ETD(_tc, _e, 2); \ @@ -180,10 +220,10 @@ typedef enum _tcp_dbg_evt #define TCP_EVT_SYN_RCVD_HANDLER(_tc, ...) 
\ { \ - TCP_EVT_INIT_HANDLER(_tc); \ + TCP_EVT_INIT_HANDLER(_tc, "s%d%c"); \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "SYN rcvd: irs %d", \ + .format = "SYNrx: irs %u", \ .format_args = "i4", \ }; \ DECLARE_ETD(_tc, _e, 1); \ @@ -194,7 +234,7 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "FIN rcvd: snd_nxt %d rcv_nxt %d", \ + .format = "FINrx: snd_nxt %d rcv_nxt %d", \ .format_args = "i4i4", \ }; \ DECLARE_ETD(_tc, _e, 2); \ @@ -206,7 +246,7 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "RST rcvd: snd_nxt %d rcv_nxt %d", \ + .format = "RSTrx: snd_nxt %d rcv_nxt %d", \ .format_args = "i4i4", \ }; \ DECLARE_ETD(_tc, _e, 2); \ @@ -214,54 +254,68 @@ typedef enum _tcp_dbg_evt ed->data[1] = _tc->rcv_nxt - _tc->irs; \ } -#define TCP_EVT_ACK_RCVD_HANDLER(_tc, ...) \ +#define TCP_EVT_ACK_RCVD_HANDLER(_tc, _ack, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "ACK: acked %u cwnd %u inflight %u", \ - .format_args = "i4i4i4", \ + .format = "acked: %u snd_una %u ack %u cwnd %u inflight %u", \ + .format_args = "i4i4i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 3); \ + DECLARE_ETD(_tc, _e, 5); \ ed->data[0] = _tc->bytes_acked; \ - ed->data[1] = _tc->cwnd; \ - ed->data[2] = tcp_flight_size(_tc); \ + ed->data[1] = _tc->snd_una - _tc->iss; \ + ed->data[2] = _ack - _tc->iss; \ + ed->data[3] = _tc->cwnd; \ + ed->data[4] = tcp_flight_size(_tc); \ } -#define TCP_EVT_PKTIZE_HANDLER(_tc, ...) \ +#define TCP_EVT_DUPACK_RCVD_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "pktize: snd_una %u snd_nxt %u una_max %u", \ - .format_args = "i4i4i4", \ + .format = "dack_rx: snd_una %u cwnd %u snd_wnd %u inflight %u", \ + .format_args = "i4i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 3); \ + DECLARE_ETD(_tc, _e, 4); \ ed->data[0] = _tc->snd_una - _tc->iss; \ - ed->data[1] = _tc->snd_nxt - _tc->iss; \ - ed->data[2] = _tc->snd_una_max - _tc->iss; \ + ed->data[1] = _tc->cwnd; \ + ed->data[2] = _tc->snd_wnd; \ + ed->data[3] = tcp_flight_size(_tc); \ } -#define TCP_EVT_OUTPUT_HANDLER(_tc, flags, n_bytes,...) \ +#define TCP_EVT_PKTIZE_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "out: flags %x, bytes %u", \ - .format_args = "i4i4", \ + .format = "pktize: una %u snd_nxt %u space %u flight %u rcv_wnd %u",\ + .format_args = "i4i4i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 2); \ - ed->data[0] = flags; \ - ed->data[1] = n_bytes; \ + DECLARE_ETD(_tc, _e, 5); \ + ed->data[0] = _tc->snd_una - _tc->iss; \ + ed->data[1] = _tc->snd_nxt - _tc->iss; \ + ed->data[2] = tcp_available_snd_space (_tc); \ + ed->data[3] = tcp_flight_size (_tc); \ + ed->data[4] = _tc->rcv_wnd; \ } -#define TCP_EVT_INPUT_HANDLER(_tc, n_bytes, ...) \ +#define TCP_EVT_INPUT_HANDLER(_tc, _type, _len, _written, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "in: bytes %u rcv_nxt %u", \ - .format_args = "i4i4", \ + .format = "in: %s len %u written %d rcv_nxt %u free wnd %d", \ + .format_args = "t4i4i4i4i4", \ + .n_enum_strings = 2, \ + .enum_strings = { \ + "order", \ + "ooo", \ + }, \ }; \ - DECLARE_ETD(_tc, _e, 2); \ - ed->data[0] = n_bytes; \ - ed->data[1] = _tc->rcv_nxt - _tc->irs; \ + DECLARE_ETD(_tc, _e, 5); \ + ed->data[0] = _type; \ + ed->data[1] = _len; \ + ed->data[2] = _written; \ + ed->data[3] = (_tc->rcv_nxt - _tc->irs) + _written; \ + ed->data[4] = _tc->rcv_wnd - (_tc->rcv_nxt - _tc->rcv_las); \ } #define TCP_EVT_TIMER_POP_HANDLER(_tc_index, _timer_id, ...) 
\ @@ -296,9 +350,131 @@ typedef enum _tcp_dbg_evt ed->data[1] = _timer_id; \ } +#define TCP_EVT_SEG_INVALID_HANDLER(_tc, _seq, _end, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "seg-inv: seq %u end %u rcv_las %u rcv_nxt %u wnd %u", \ + .format_args = "i4i4i4i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 5); \ + ed->data[0] = _seq - _tc->irs; \ + ed->data[1] = _end - _tc->irs; \ + ed->data[2] = _tc->rcv_las - _tc->irs; \ + ed->data[3] = _tc->rcv_nxt - _tc->irs; \ + ed->data[4] = _tc->rcv_wnd; \ +} + +#define TCP_EVT_ACK_RCV_ERR_HANDLER(_tc, _type, _ack, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "ack-err: %s ack %u snd_una %u snd_nxt %u una_max %u", \ + .format_args = "t4i4i4i4i4", \ + .n_enum_strings = 3, \ + .enum_strings = { \ + "invalid", \ + "old", \ + "future", \ + }, \ + }; \ + DECLARE_ETD(_tc, _e, 5); \ + ed->data[0] = _type; \ + ed->data[1] = _ack - _tc->iss; \ + ed->data[2] = _tc->snd_una - _tc->iss; \ + ed->data[3] = _tc->snd_nxt - _tc->iss; \ + ed->data[4] = _tc->snd_una_max - _tc->iss; \ +} + +/* + * Congestion Control + */ + +#if TCP_DEBUG_CC +#define TCP_EVT_CC_RTX_HANDLER(_tc, offset, n_bytes, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "rtx: snd_nxt %u offset %u snd %u rtx %u", \ + .format_args = "i4i4i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 4); \ + ed->data[0] = _tc->snd_nxt - _tc->iss; \ + ed->data[1] = offset; \ + ed->data[2] = n_bytes; \ + ed->data[3] = _tc->rtx_bytes; \ +} + +#define TCP_EVT_CC_EVT_HANDLER(_tc, _sub_evt, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "cc: %s wnd %u snd_cong %u rtx_bytes %u", \ + .format_args = "t4i4i4i4", \ + .n_enum_strings = 5, \ + .enum_strings = { \ + "fast-rtx", \ + "rtx-timeout", \ + "first-rtx", \ + "recovered", \ + "congestion", \ + }, \ + }; \ + DECLARE_ETD(_tc, _e, 4); \ + ed->data[0] = _sub_evt; \ + ed->data[1] = tcp_available_snd_space (_tc); \ + ed->data[2] = _tc->snd_congestion - _tc->iss; \ + ed->data[3] = _tc->rtx_bytes; \ +} + +#define TCP_EVT_CC_PACK_HANDLER(_tc, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "pack: snd_una %u snd_una_max %u", \ + .format_args = "i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 2); \ + ed->data[0] = _tc->snd_una - _tc->iss; \ + ed->data[1] = _tc->snd_una_max - _tc->iss; \ +} + +#else +#define TCP_EVT_CC_RTX_HANDLER(_tc, offset, n_bytes, ...) +#define TCP_EVT_CC_EVT_HANDLER(_tc, _sub_evt, _snd_space, ...) +#define TCP_EVT_CC_PACK_HANDLER(_tc, ...) +#endif + +#if TCP_DBG_VERBOSE +#define TCP_EVT_SND_WND_HANDLER(_tc, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "snd_wnd update: %u ", \ + .format_args = "i4", \ + }; \ + DECLARE_ETD(_tc, _e, 1); \ + ed->data[0] = _tc->snd_wnd; \ +} + +#define TCP_EVT_OUTPUT_HANDLER(_tc, flags, n_bytes,...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "out: flags %x, bytes %u", \ + .format_args = "i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 2); \ + ed->data[0] = flags; \ + ed->data[1] = n_bytes; \ +} +#else +#define TCP_EVT_SND_WND_HANDLER(_tc, ...) +#define TCP_EVT_OUTPUT_HANDLER(_tc, flags, n_bytes,...) +#endif + #define CONCAT_HELPER(_a, _b) _a##_b #define CC(_a, _b) CONCAT_HELPER(_a, _b) - #define TCP_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args) #else diff --git a/src/vnet/tcp/tcp_error.def b/src/vnet/tcp/tcp_error.def index 2dbdd9b3..b91a08c0 100644 --- a/src/vnet/tcp/tcp_error.def +++ b/src/vnet/tcp/tcp_error.def @@ -12,12 +12,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
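The debug plumbing above hinges on token pasting: TCP_EVT_DBG (TCP_EVT_FOO, args) expands to TCP_EVT_FOO_HANDLER (args), and each handler either records an elog entry or, when its gate (TCP_DEBUG_CC, the verbose gate, SESSION_DBG) is off, expands to nothing so the call sites disappear entirely. A trimmed, self-contained illustration of the same dispatch pattern; MY_DEBUG and the DEMO_EVT_* names are invented for the example.

#include <stdio.h>

#define MY_DEBUG 1		/* flip to 0 and every call site compiles away */

#define CONCAT_HELPER(_a, _b) _a##_b
#define CC(_a, _b) CONCAT_HELPER(_a, _b)

#if MY_DEBUG
/* Each event name gets a _HANDLER macro; the dispatcher pastes the suffix on. */
#define DEMO_EVT_OPEN_HANDLER(_idx) \
  printf ("open: index %d\n", (_idx))
#define DEMO_EVT_CLOSE_HANDLER(_idx, _reason) \
  printf ("close: index %d reason %d\n", (_idx), (_reason))
#define DEMO_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args)
#else
#define DEMO_EVT_DBG(_evt, _args...)
#endif

int
main (void)
{
  /* Expands to DEMO_EVT_OPEN_HANDLER (3) when MY_DEBUG is on,
   * and to nothing at all when it is off. */
  DEMO_EVT_DBG (DEMO_EVT_OPEN, 3);
  DEMO_EVT_DBG (DEMO_EVT_CLOSE, 3, 7);
  return 0;
}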
*/ - tcp_error (NONE, "no error") tcp_error (NO_LISTENER, "no listener for dst port") tcp_error (LOOKUP_DROPS, "lookup drops") tcp_error (DISPATCH, "Dispatch error") tcp_error (ENQUEUED, "Packets pushed into rx fifo") +tcp_error (PARTIALLY_ENQUEUED, "Packets partially pushed into rx fifo") tcp_error (PURE_ACK, "Pure acks") tcp_error (SYNS_RCVD, "SYNs received") tcp_error (SYN_ACKS_RCVD, "SYN-ACKs received") @@ -26,11 +26,14 @@ tcp_error (FIFO_FULL, "Packets dropped for lack of rx fifo space") tcp_error (EVENT_FIFO_FULL, "Events not sent for lack of event fifo space") tcp_error (API_QUEUE_FULL, "Sessions not created for lack of API queue space") tcp_error (CREATE_SESSION_FAIL, "Sessions couldn't be allocated") -tcp_error (SEGMENT_INVALID, "Invalid segment") +tcp_error (SEGMENT_INVALID, "Invalid segments") +tcp_error (SEGMENT_OLD, "Old segment") tcp_error (ACK_INVALID, "Invalid ACK") tcp_error (ACK_DUP, "Duplicate ACK") tcp_error (ACK_OLD, "Old ACK") +tcp_error (ACK_FUTURE, "Future ACK") tcp_error (PKTS_SENT, "Packets sent") tcp_error (FILTERED_DUPACKS, "Filtered duplicate ACKs") tcp_error (RST_SENT, "Resets sent") tcp_error (INVALID_CONNECTION, "Invalid connection") +tcp_error (NO_WND, "No window") \ No newline at end of file diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 67af4321..5d11985f 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -95,13 +95,21 @@ vlib_node_registration_t tcp6_established_node; * or the rcv_nxt at last ack sent instead of rcv_nxt since that's the * peer's reference when computing our receive window. * - * This accepts only segments within the window. + * This: + * seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las) + * however, is too strict when we have retransmits. Instead we just check that + * the seq is not beyond the right edge and that the end of the segment is not + * less than the left edge. + * + * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so + * use rcv_nxt in the right edge window test instead of rcv_las. + * */ always_inline u8 tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq) { - return seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) - && seq_geq (seq, tc->rcv_nxt); + return (seq_geq (end_seq, tc->rcv_las) + && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd)); } void @@ -253,6 +261,7 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, { tcp_make_ack (tc0, b0); *next0 = tcp_next_output (tc0->c_is_ip4); + TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0); return -1; } } @@ -262,13 +271,25 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number, vnet_buffer (b0)->tcp.seq_end)) { - if (!tcp_rst (th0)) + /* If our window is 0 and the packet is in sequence, let it pass + * through for ack processing. 
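The relaxed acceptance test introduced above only rejects segments that end before the last acked byte or start beyond the advertised window, so retransmits overlapping already-acked data still reach ACK processing. A worked example follows, assuming seq_lt/seq_leq/seq_geq are the usual wrap-safe signed 32-bit comparisons (their definitions are not shown in this hunk).

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe sequence comparisons via signed 32-bit difference, the
 * common TCP idiom assumed to match seq_leq()/seq_geq() here. */
#define seq_leq_(a, b) ((int32_t)((a) - (b)) <= 0)
#define seq_geq_(a, b) ((int32_t)((a) - (b)) >= 0)

/* The relaxed in-window test from this patch: accept anything that
 * overlaps [rcv_las, rcv_nxt + rcv_wnd). */
static int
segment_in_rcv_wnd (uint32_t rcv_las, uint32_t rcv_nxt, uint32_t rcv_wnd,
		    uint32_t seq, uint32_t end_seq)
{
  return seq_geq_ (end_seq, rcv_las)
    && seq_leq_ (seq, rcv_nxt + rcv_wnd);
}

int
main (void)
{
  uint32_t rcv_las = 1000, rcv_nxt = 1500, rcv_wnd = 4000;

  /* A retransmit overlapping already-acked data is let through ... */
  printf ("[900,1200): %d\n",
	  segment_in_rcv_wnd (rcv_las, rcv_nxt, rcv_wnd, 900, 1200));
  /* ... while data entirely beyond the advertised window is not. */
  printf ("[6000,6100): %d\n",
	  segment_in_rcv_wnd (rcv_las, rcv_nxt, rcv_wnd, 6000, 6100));
  return 0;
}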
It should be dropped later.*/ + if (tc0->rcv_wnd == 0 + && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number) { - /* Send dup ack */ - tcp_make_ack (tc0, b0); - *next0 = tcp_next_output (tc0->c_is_ip4); + /* Make it look as if there's nothing to dequeue */ + vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number; + } + else + { + /* If not RST, send dup ack */ + if (!tcp_rst (th0)) + { + tcp_make_ack (tc0, b0); + *next0 = tcp_next_output (tc0->c_is_ip4); + TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0); + } + return -1; } - return -1; } /* 2nd: check the RST bit */ @@ -326,13 +347,13 @@ tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt) /* XXX Drop in RTT results in RTTVAR increase and bigger RTO. * The increase should be bound */ - tc->rttvar += (clib_abs (err) - tc->rttvar) >> 2; + tc->rttvar += ((int) clib_abs (err) - (int) tc->rttvar) >> 2; } else { /* First measurement. */ tc->srtt = mrtt; - tc->rttvar = mrtt << 1; + tc->rttvar = mrtt >> 1; } } @@ -394,7 +415,11 @@ tcp_dequeue_acked (tcp_connection_t * tc, u32 ack) } } -/** Check if dupack as per RFC5681 Sec. 2 */ +/** + * Check if dupack as per RFC5681 Sec. 2 + * + * This works only if called before updating snd_wnd. + * */ always_inline u8 tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 new_snd_wnd) { @@ -429,10 +454,10 @@ scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) } sack_scoreboard_hole_t * -scoreboard_insert_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * prev, +scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index, u32 start, u32 end) { - sack_scoreboard_hole_t *hole, *next; + sack_scoreboard_hole_t *hole, *next, *prev; u32 hole_index; pool_get (sb->holes, hole); @@ -442,6 +467,7 @@ scoreboard_insert_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * prev, hole->end = end; hole_index = hole - sb->holes; + prev = scoreboard_get_hole (sb, prev_index); if (prev) { hole->prev = prev - sb->holes; @@ -462,28 +488,35 @@ scoreboard_insert_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * prev, return hole; } -static void +void tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { sack_scoreboard_t *sb = &tc->sack_sb; sack_block_t *blk, tmp; - sack_scoreboard_hole_t *hole, *next_hole; - u32 blk_index = 0; + sack_scoreboard_hole_t *hole, *next_hole, *last_hole, *new_hole; + u32 blk_index = 0, old_sacked_bytes, hole_index; int i, j; - if (!tcp_opts_sack (tc) && sb->head == TCP_INVALID_SACK_HOLE_INDEX) + sb->last_sacked_bytes = 0; + sb->snd_una_adv = 0; + old_sacked_bytes = sb->sacked_bytes; + + if (!tcp_opts_sack (&tc->opt) && sb->head == TCP_INVALID_SACK_HOLE_INDEX) return; /* Remove invalid blocks */ - vec_foreach (blk, tc->opt.sacks) - { - if (seq_lt (blk->start, blk->end) - && seq_gt (blk->start, tc->snd_una) - && seq_gt (blk->start, ack) && seq_lt (blk->end, tc->snd_nxt)) - continue; - - vec_del1 (tc->opt.sacks, blk - tc->opt.sacks); - } + blk = tc->opt.sacks; + while (blk < vec_end (tc->opt.sacks)) + { + if (seq_lt (blk->start, blk->end) + && seq_gt (blk->start, tc->snd_una) + && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_nxt)) + { + blk++; + continue; + } + vec_del1 (tc->opt.sacks, blk - tc->opt.sacks); + } /* Add block for cumulative ack */ if (seq_gt (ack, tc->snd_una)) @@ -498,7 +531,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) /* Make sure blocks are ordered */ for (i = 0; i < vec_len (tc->opt.sacks); i++) - for (j = i; j < vec_len (tc->opt.sacks); j++) + for (j = i + 1; j < vec_len (tc->opt.sacks); j++) if (seq_lt 
(tc->opt.sacks[j].start, tc->opt.sacks[i].start)) { tmp = tc->opt.sacks[i]; @@ -506,10 +539,22 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) tc->opt.sacks[j] = tmp; } - /* If no holes, insert the first that covers all outstanding bytes */ if (sb->head == TCP_INVALID_SACK_HOLE_INDEX) { - scoreboard_insert_hole (sb, 0, tc->snd_una, tc->snd_una_max); + /* If no holes, insert the first that covers all outstanding bytes */ + last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX, + tc->snd_una, tc->snd_una_max); + sb->tail = scoreboard_hole_index (sb, last_hole); + } + else + { + /* If we have holes but snd_una_max is beyond the last hole, update + * last hole end */ + tmp = tc->opt.sacks[vec_len (tc->opt.sacks) - 1]; + last_hole = scoreboard_last_hole (sb); + if (seq_gt (tc->snd_una_max, sb->max_byte_sacked) + && seq_gt (tc->snd_una_max, last_hole->end)) + last_hole->end = tc->snd_una_max; } /* Walk the holes with the SACK blocks */ @@ -526,10 +571,10 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) next_hole = scoreboard_next_hole (sb, hole); /* Byte accounting */ - if (seq_lt (hole->end, ack)) + if (seq_leq (hole->end, ack)) { - /* Bytes lost because snd wnd left edge advances */ - if (seq_lt (next_hole->start, ack)) + /* Bytes lost because snd_wnd left edge advances */ + if (next_hole && seq_leq (next_hole->start, ack)) sb->sacked_bytes -= next_hole->start - hole->end; else sb->sacked_bytes -= ack - hole->end; @@ -539,35 +584,78 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) sb->sacked_bytes += scoreboard_hole_bytes (hole); } + /* snd_una needs to be advanced */ + if (seq_geq (ack, hole->end)) + { + if (next_hole && seq_lt (ack, next_hole->start)) + sb->snd_una_adv = next_hole->start - ack; + else + sb->snd_una_adv = sb->max_byte_sacked - ack; + + /* all these can be delivered */ + sb->sacked_bytes -= sb->snd_una_adv; + } + + /* About to remove last hole */ + if (hole == last_hole) + { + sb->tail = hole->prev; + last_hole = scoreboard_last_hole (sb); + /* keep track of max byte sacked in case the last hole + * is acked */ + if (seq_gt (hole->end, sb->max_byte_sacked)) + sb->max_byte_sacked = hole->end; + } scoreboard_remove_hole (sb, hole); hole = next_hole; } - /* Partial overlap */ + /* Partial 'head' overlap */ else { - sb->sacked_bytes += blk->end - hole->start; - hole->start = blk->end; + if (seq_gt (blk->end, hole->start)) + { + sb->sacked_bytes += blk->end - hole->start; + hole->start = blk->end; + } blk_index++; } } else { /* Hole must be split */ - if (seq_leq (blk->end, hole->end)) + if (seq_lt (blk->end, hole->end)) { sb->sacked_bytes += blk->end - blk->start; - scoreboard_insert_hole (sb, hole, blk->end, hole->end); - hole->end = blk->start - 1; + hole_index = scoreboard_hole_index (sb, hole); + new_hole = scoreboard_insert_hole (sb, hole_index, blk->end, + hole->end); + + /* Pool might've moved */ + hole = scoreboard_get_hole (sb, hole_index); + hole->end = blk->start; + + /* New or split of tail */ + if ((last_hole->end == new_hole->end) + || seq_lt (last_hole->end, new_hole->start)) + { + last_hole = new_hole; + sb->tail = scoreboard_hole_index (sb, new_hole); + } + blk_index++; + hole = scoreboard_next_hole (sb, hole); } else { - sb->sacked_bytes += hole->end - blk->start + 1; - hole->end = blk->start - 1; + sb->sacked_bytes += hole->end - blk->start; + hole->end = blk->start; hole = scoreboard_next_hole (sb, hole); } } } + + sb->last_sacked_bytes = sb->sacked_bytes + sb->snd_una_adv + - old_sacked_bytes; } /** Update snd_wnd @@ -577,72 +665,94 @@ 
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) static void tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd) { - if (tc->snd_wl1 < seq || (tc->snd_wl1 == seq && tc->snd_wl2 <= ack)) + if (seq_lt (tc->snd_wl1, seq) + || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack))) { tc->snd_wnd = snd_wnd; tc->snd_wl1 = seq; tc->snd_wl2 = ack; + TCP_EVT_DBG (TCP_EVT_SND_WND, tc); } } -static void +void tcp_cc_congestion (tcp_connection_t * tc) { + tc->snd_congestion = tc->snd_nxt; tc->cc_algo->congestion (tc); + TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4); } -static void +void tcp_cc_recover (tcp_connection_t * tc) { - if (tcp_in_fastrecovery (tc)) - { - tc->cc_algo->recovered (tc); - tcp_recovery_off (tc); - } - else if (tcp_in_recovery (tc)) - { - tcp_recovery_off (tc); - tc->cwnd = tcp_loss_wnd (tc); - } + tc->cc_algo->recovered (tc); + + tc->rtx_bytes = 0; + tc->rcv_dupacks = 0; + tc->snd_nxt = tc->snd_una; + + tc->cc_algo->rcv_ack (tc); + tc->tsecr_last_ack = tc->opt.tsecr; + + tcp_fastrecovery_1_smss_off (tc); + tcp_fastrecovery_off (tc); + + TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3); } static void -tcp_cc_rcv_ack (tcp_connection_t * tc) +tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b) { u8 partial_ack; if (tcp_in_recovery (tc)) { - partial_ack = seq_lt (tc->snd_una, tc->snd_una_max); + partial_ack = seq_lt (tc->snd_una, tc->snd_congestion); if (!partial_ack) { /* Clear retransmitted bytes. */ - tc->rtx_bytes = 0; tcp_cc_recover (tc); } else { + TCP_EVT_DBG (TCP_EVT_CC_PACK, tc); + /* Clear retransmitted bytes. XXX should we clear all? */ tc->rtx_bytes = 0; tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK); - /* Retransmit first unacked segment */ - tcp_retransmit_first_unacked (tc); + /* In case snd_nxt is still in the past and output tries to + * shove some new bytes */ + tc->snd_nxt = tc->snd_una; + + /* XXX need proper RFC6675 support */ + if (tc->sack_sb.last_sacked_bytes) + { + tcp_fast_retransmit (tc); + } + else + { + /* Retransmit first unacked segment */ + tcp_retransmit_first_unacked (tc); + /* If window allows, send 1 SMSS of new data */ + if (seq_lt (tc->snd_nxt, tc->snd_congestion)) + tc->snd_nxt = tc->snd_congestion; + } } } else { tc->cc_algo->rcv_ack (tc); + tc->tsecr_last_ack = tc->opt.tsecr; + tc->rcv_dupacks = 0; } - - tc->rcv_dupacks = 0; - tc->tsecr_last_ack = tc->opt.tsecr; } static void tcp_cc_rcv_dupack (tcp_connection_t * tc, u32 ack) { - ASSERT (tc->snd_una == ack); +// ASSERT (seq_geq(tc->snd_una, ack)); tc->rcv_dupacks++; if (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD) @@ -688,20 +798,39 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, { u32 new_snd_wnd; - /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) then send an - * ACK, drop the segment, and return */ + /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */ if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)) { - tcp_make_ack (tc, b); - *next = tcp_next_output (tc->c_is_ip4); - *error = TCP_ERROR_ACK_INVALID; - return -1; + /* If we have outstanding data and this is within the window, accept it, + * probably retransmit has timed out. 
Otherwise ACK segment and then + * drop it */ + if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)) + { + tcp_make_ack (tc, b); + *next = tcp_next_output (tc->c_is_ip4); + *error = TCP_ERROR_ACK_INVALID; + TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0, + vnet_buffer (b)->tcp.ack_number); + return -1; + } + + tc->snd_nxt = vnet_buffer (b)->tcp.ack_number; + *error = TCP_ERROR_ACK_FUTURE; + TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2, + vnet_buffer (b)->tcp.ack_number); } - /* If old ACK, discard */ + /* If old ACK, probably it's an old dupack */ if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)) { *error = TCP_ERROR_ACK_OLD; + TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1, + vnet_buffer (b)->tcp.ack_number); + if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD) + { + TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc); + tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number); + } return -1; } @@ -712,32 +841,40 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, if (tcp_ack_is_dupack (tc, b, new_snd_wnd)) { + TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1); tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number); *error = TCP_ERROR_ACK_DUP; return -1; } - /* Valid ACK */ + /* + * Valid ACK + */ + tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una; - tc->snd_una = vnet_buffer (b)->tcp.ack_number; + tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv; - /* Dequeue ACKed packet and update RTT */ + /* Dequeue ACKed data and update RTT */ tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number); - tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number, vnet_buffer (b)->tcp.ack_number, new_snd_wnd); - /* Updates congestion control (slow start/congestion avoidance) */ - tcp_cc_rcv_ack (tc); + /* If some of our sent bytes have been acked, update cc and retransmit + * timer. */ + if (tc->bytes_acked) + { + TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc, vnet_buffer (b)->tcp.ack_number); - TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc); + /* Updates congestion control (slow start/congestion avoidance) */ + tcp_cc_rcv_ack (tc, b); - /* If everything has been acked, stop retransmit timer - * otherwise update */ - if (tc->snd_una == tc->snd_una_max) - tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT); - else - tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, tc->rto); + /* If everything has been acked, stop retransmit timer + * otherwise update */ + if (tc->snd_una == tc->snd_una_max) + tcp_retransmit_timer_reset (tc); + else + tcp_retransmit_timer_update (tc); + } return 0; } @@ -757,9 +894,7 @@ static void tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end) { sack_block_t *new_list = 0, block; - u32 n_elts; int i; - u8 new_head = 0; /* If the first segment is ooo add it to the list. Last write might've moved * rcv_nxt over the first segment. */ @@ -768,7 +903,6 @@ tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end) block.start = start; block.end = end; vec_add1 (new_list, block); - new_head = 1; } /* Find the blocks still worth keeping. */ @@ -782,20 +916,19 @@ tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end) || seq_leq (tc->snd_sacks[i].start, end)) continue; - /* Save subsequent segments to new SACK list. */ - n_elts = clib_min (vec_len (tc->snd_sacks) - i, - TCP_MAX_SACK_BLOCKS - new_head); - vec_insert_elts (new_list, &tc->snd_sacks[i], n_elts, new_head); - break; + /* Save to new SACK list. 
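One subtle case in the tcp_rcv_sacks walk a little further up: a SACK block that lands strictly inside a hole splits it, shrinking the existing hole to the left piece, inserting a new hole for the right piece, and counting the gap as newly sacked bytes. A minimal sketch of just that split, leaving out the pool and tail bookkeeping the real code has to do; hole_t and split_hole are illustrative names.

#include <stdio.h>

typedef struct
{
  unsigned int start;		/* first un-sacked byte */
  unsigned int end;		/* byte after the last un-sacked byte */
} hole_t;

/* Split 'hole' around the sacked block [blk_start, blk_end), assumed to
 * lie strictly inside it.  Returns the newly sacked byte count and
 * fills 'right' with the new hole to the right of the block. */
static unsigned int
split_hole (hole_t * hole, unsigned int blk_start, unsigned int blk_end,
	    hole_t * right)
{
  right->start = blk_end;	/* new hole after the sacked block */
  right->end = hole->end;
  hole->end = blk_start;	/* existing hole keeps only the left part */
  return blk_end - blk_start;	/* bytes added to sacked_bytes */
}

int
main (void)
{
  hole_t left = { 1000, 2000 }, right;
  unsigned int sacked = split_hole (&left, 1300, 1600, &right);
  printf ("sacked %u, holes [%u,%u) and [%u,%u)\n",
	  sacked, left.start, left.end, right.start, right.end);
  /* -> sacked 300, holes [1000,1300) and [1600,2000) */
  return 0;
}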
*/ + vec_add1 (new_list, tc->snd_sacks[i]); } + ASSERT (vec_len (new_list) < TCP_MAX_SACK_BLOCKS); + /* Replace old vector with new one */ vec_free (tc->snd_sacks); tc->snd_sacks = new_list; } /** Enqueue data for delivery to application */ -always_inline u32 +always_inline int tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, u16 data_len) { @@ -812,6 +945,8 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, vlib_buffer_get_current (b), data_len, 1 /* queue event */ ); + TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written); + /* Update rcv_nxt */ if (PREDICT_TRUE (written == data_len)) { @@ -824,38 +959,61 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, /* Send ACK confirming the update */ tc->flags |= TCP_CONN_SNDACK; + } + else if (written > 0) + { + /* We've written something but FIFO is probably full now */ + tc->rcv_nxt += written; - /* Update SACK list if need be */ - if (tcp_opts_sack_permitted (&tc->opt)) - { - /* Remove SACK blocks that have been delivered */ - tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt); - } + /* Depending on how fast the app is, all remaining buffers in burst will + * not be enqueued. Should we inform peer of the damage? XXX */ + return TCP_ERROR_PARTIALLY_ENQUEUED; } else { - ASSERT (0); return TCP_ERROR_FIFO_FULL; } + /* Update SACK list if need be */ + if (tcp_opts_sack_permitted (&tc->opt)) + { + /* Remove SACK blocks that have been delivered */ + tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt); + } + return TCP_ERROR_ENQUEUED; } /** Enqueue out-of-order data */ -always_inline u32 +always_inline int tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, u16 data_len) { stream_session_t *s0; u32 offset, seq; + int rv; + + /* Pure ACK. Do nothing */ + if (PREDICT_FALSE (data_len == 0)) + { + return TCP_ERROR_PURE_ACK; + } s0 = stream_session_get (tc->c_s_index, tc->c_thread_index); seq = vnet_buffer (b)->tcp.seq_number; offset = seq - tc->rcv_nxt; - if (svm_fifo_enqueue_with_offset (s0->server_rx_fifo, s0->pid, offset, - data_len, vlib_buffer_get_current (b))) - return TCP_ERROR_FIFO_FULL; + rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, s0->pid, offset, + data_len, vlib_buffer_get_current (b)); + + /* Nothing written */ + if (rv) + { + TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0); + return TCP_ERROR_FIFO_FULL; + } + + TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len); /* Update SACK list if in use */ if (tcp_opts_sack_permitted (&tc->opt)) @@ -875,20 +1033,23 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, } /** - * Check if ACK could be delayed. DELACK timer is set only after frame is - * processed so this can return true for a full bursts of packets. + * Check if ACK could be delayed. If ack can be delayed, it should return + * true for a full frame. If we're always acking return 0. */ always_inline int tcp_can_delack (tcp_connection_t * tc) { - /* If there's no DELACK timer set and the last window sent wasn't 0 we - * can safely delay. */ - if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK) - && (tc->flags & TCP_CONN_SENT_RCV_WND0) == 0 - && (tc->flags & TCP_CONN_SNDACK) == 0) - return 1; + /* Send ack if ... 
*/ + if (TCP_ALWAYS_ACK + /* just sent a rcv wnd 0 */ + || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0 + /* constrained to send ack */ + || (tc->flags & TCP_CONN_SNDACK) != 0 + /* we're almost out of tx wnd */ + || tcp_available_snd_space (tc) < 2 * tc->snd_mss) + return 0; - return 0; + return 1; } static int @@ -900,23 +1061,33 @@ tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, /* Handle out-of-order data */ if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt)) { + /* Old sequence numbers allowed through because they overlapped + * the rx window */ + if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt)) + { + error = TCP_ERROR_SEGMENT_OLD; + *next0 = TCP_NEXT_DROP; + goto done; + } + error = tcp_session_enqueue_ooo (tc, b, n_data_bytes); - /* Don't send more than 3 dupacks per burst - * XXX decide if this is good */ - if (tc->snt_dupacks < 3) - { - /* RFC2581: Send DUPACK for fast retransmit */ - tcp_make_ack (tc, b); - *next0 = tcp_next_output (tc->c_is_ip4); + /* N.B. Should not filter burst of dupacks. Two issues 1) dupacks open + * cwnd on remote peer when congested 2) acks leaving should have the + * latest rcv_wnd since the burst may eaten up all of it, so only the + * old ones could be filtered. + */ - /* Mark as DUPACK. We may filter these in output if - * the burst fills the holes. */ - vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK; + /* RFC2581: Send DUPACK for fast retransmit */ + tcp_make_ack (tc, b); + *next0 = tcp_next_output (tc->c_is_ip4); - tc->snt_dupacks++; - } + /* Mark as DUPACK. We may filter these in output if + * the burst fills the holes. */ + if (n_data_bytes) + vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK; + TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc); goto done; } @@ -924,63 +1095,45 @@ tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, * segments can be enqueued after fifo tail offset changes. */ error = tcp_session_enqueue_data (tc, b, n_data_bytes); - TCP_EVT_DBG (TCP_EVT_INPUT, tc, n_data_bytes); + if (n_data_bytes == 0) + { + *next0 = TCP_NEXT_DROP; + goto done; + } + + if (PREDICT_FALSE (error == TCP_ERROR_FIFO_FULL)) + *next0 = TCP_NEXT_DROP; /* Check if ACK can be delayed */ - if (tcp_can_delack (tc)) + if (!tcp_can_delack (tc)) { - /* Nothing to do for pure ACKs */ + /* Nothing to do for pure ACKs XXX */ if (n_data_bytes == 0) goto done; - /* If connection has not been previously marked for delay ack - * add it to the list and flag it */ - if (!tc->flags & TCP_CONN_DELACK) - { - vec_add1 (tm->delack_connections[tc->c_thread_index], - tc->c_c_index); - tc->flags |= TCP_CONN_DELACK; - } + *next0 = tcp_next_output (tc->c_is_ip4); + tcp_make_ack (tc, b); } else { - /* Check if a packet has already been enqueued to output for burst. 
- * If yes, then drop this one, otherwise, let it pass through to - * output */ - if ((tc->flags & TCP_CONN_BURSTACK) == 0) - { - *next0 = tcp_next_output (tc->c_is_ip4); - tcp_make_ack (tc, b); - error = TCP_ERROR_ENQUEUED; - - /* TODO: maybe add counter to ensure N acks will be sent/burst */ - tc->flags |= TCP_CONN_BURSTACK; - } + if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK)) + tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME); } done: return error; } -void -delack_timers_init (tcp_main_t * tm, u32 thread_index) +always_inline void +tcp_established_inc_counter (vlib_main_t * vm, u8 is_ip4, u8 evt, u8 val) { - tcp_connection_t *tc; - u32 i, *conns; - tw_timer_wheel_16t_2w_512sl_t *tw; - - tw = &tm->timer_wheels[thread_index]; - conns = tm->delack_connections[thread_index]; - for (i = 0; i < vec_len (conns); i++) - { - tc = pool_elt_at_index (tm->connections[thread_index], conns[i]); - ASSERT (0 != tc); + if (PREDICT_TRUE (!val)) + return; - tc->timers[TCP_TIMER_DELACK] - = tw_timer_start_16t_2w_512sl (tw, conns[i], - TCP_TIMER_DELACK, TCP_DELACK_TIME); - } - vec_reset_length (tm->delack_connections[thread_index]); + if (is_ip4) + vlib_node_increment_counter (vm, tcp4_established_node.index, evt, val); + else + vlib_node_increment_counter (vm, tcp6_established_node.index, evt, val); } always_inline uword @@ -1027,7 +1180,7 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (tc0 == 0)) { error0 = TCP_ERROR_INVALID_CONNECTION; - goto drop; + goto done; } /* Checksum computed by ipx_local no need to compute again */ @@ -1061,18 +1214,22 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, th0, &next0))) { error0 = TCP_ERROR_SEGMENT_INVALID; - goto drop; + TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0, + vnet_buffer (b0)->tcp.seq_number, + vnet_buffer (b0)->tcp.seq_end); + goto done; } /* 5: check the ACK field */ if (tcp_rcv_ack (tc0, b0, th0, &next0, &error0)) { - goto drop; + goto done; } /* 6: check the URG bit TODO */ /* 7: process the segment text */ + vlib_buffer_advance (b0, n_advance_bytes0); error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0); @@ -1088,7 +1245,7 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); } - drop: + done: b0->error = node->errors[error0]; if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -1103,17 +1260,7 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } errors = session_manager_flush_enqueue_events (my_thread_index); - if (errors) - { - if (is_ip4) - vlib_node_increment_counter (vm, tcp4_established_node.index, - TCP_ERROR_EVENT_FIFO_FULL, errors); - else - vlib_node_increment_counter (vm, tcp6_established_node.index, - TCP_ERROR_EVENT_FIFO_FULL, errors); - } - - delack_timers_init (tm, my_thread_index); + tcp_established_inc_counter (vm, is_ip4, TCP_ERROR_EVENT_FIFO_FULL, errors); return from_frame->n_vectors; } @@ -1602,7 +1749,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, stream_session_accept_notify (&tc0->connection); /* Reset SYN-ACK retransmit timer */ - tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT); + tcp_retransmit_timer_reset (tc0); break; case TCP_STATE_ESTABLISHED: /* We can get packets in established state here because they @@ -1668,7 +1815,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, 
TCP_CLEANUP_TIME); /* Stop retransmit */ - tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT); + tcp_retransmit_timer_reset (tc0); goto drop; diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 114a5b9e..a671f728 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -125,15 +125,33 @@ tcp_initial_window_to_advertise (tcp_connection_t * tc) u32 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state) { - u32 available_space, max_fifo, observed_wnd; - if (state < TCP_STATE_ESTABLISHED) return tcp_initial_window_to_advertise (tc); + tcp_update_rcv_wnd (tc); + + if (tc->rcv_wnd == 0) + { + tc->flags |= TCP_CONN_SENT_RCV_WND0; + } + else + { + tc->flags &= ~TCP_CONN_SENT_RCV_WND0; + } + + return tc->rcv_wnd >> tc->rcv_wscale; +} + +void +tcp_update_rcv_wnd (tcp_connection_t * tc) +{ + i32 observed_wnd; + u32 available_space, max_fifo, wnd; + /* * Figure out how much space we have available */ - available_space = stream_session_max_enqueue (&tc->connection); + available_space = stream_session_max_rx_enqueue (&tc->connection); max_fifo = stream_session_fifo_size (&tc->connection); ASSERT (tc->opt.mss < max_fifo); @@ -145,23 +163,25 @@ tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state) * Use the above and what we know about what we've previously advertised * to compute the new window */ - observed_wnd = tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las); + observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las); + if (observed_wnd < 0) + observed_wnd = 0; /* Bad. Thou shalt not shrink */ if (available_space < observed_wnd) { - if (available_space == 0) - clib_warning ("Didn't shrink rcv window despite not having space"); + /* Does happen! */ + wnd = observed_wnd; } - - tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); - - if (tc->rcv_wnd == 0) + else { - tc->flags |= TCP_CONN_SENT_RCV_WND0; + wnd = available_space; } - return tc->rcv_wnd >> tc->rcv_wscale; + if (wnd && ((wnd << tc->rcv_wscale) >> tc->rcv_wscale != wnd)) + wnd += 1 << tc->rcv_wscale; + + tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale); } /** @@ -363,7 +383,7 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts, #define tcp_get_free_buffer_index(tm, bidx) \ do { \ u32 *my_tx_buffers, n_free_buffers; \ - u32 cpu_index = tm->vlib_main->cpu_index; \ + u32 cpu_index = os_get_cpu_number(); \ my_tx_buffers = tm->tx_buffers[cpu_index]; \ if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0)) \ { \ @@ -381,6 +401,14 @@ do { \ _vec_len (my_tx_buffers) -= 1; \ } while (0) +#define tcp_return_buffer(tm) \ +do { \ + u32 *my_tx_buffers; \ + u32 cpu_index = os_get_cpu_number(); \ + my_tx_buffers = tm->tx_buffers[cpu_index]; \ + _vec_len (my_tx_buffers) +=1; \ +} while (0) + always_inline void tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b) { @@ -421,8 +449,6 @@ tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state, tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd); tcp_options_write ((u8 *) (th + 1), snd_opts); - - /* Mark as ACK */ vnet_buffer (b)->tcp.connection_index = tc->c_c_index; } @@ -432,12 +458,12 @@ tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state, void tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b) { - tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; + vlib_main_t *vm = vlib_get_main (); tcp_reuse_buffer (vm, b); tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK); vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK; + TCP_EVT_DBG (TCP_EVT_ACK_SENT, tc); 
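For reference, the window-advertisement arithmetic introduced in the hunk above is easier to see in isolation. Below is a minimal, self-contained sketch; the function name, signature and types are illustrative stand-ins and are not code from this patch.

#include <stdint.h>

/* Sketch: derive the advertised receive window. Clamp to the portion of the
 * previously advertised window the peer may still use (never shrink), then
 * return the value carried in the TCP header, i.e. the window right-shifted
 * by the receive window scale. */
static uint32_t
sketch_window_to_advertise (uint32_t available_space, uint32_t rcv_wnd,
                            uint32_t rcv_nxt, uint32_t rcv_las,
                            uint8_t rcv_wscale)
{
  /* Bytes of the last advertised window not yet consumed by the peer */
  int32_t observed_wnd = (int32_t) rcv_wnd - (int32_t) (rcv_nxt - rcv_las);
  uint32_t wnd;

  if (observed_wnd < 0)
    observed_wnd = 0;

  /* Thou shalt not shrink: never offer less than the peer already has */
  wnd = available_space > (uint32_t) observed_wnd ?
    available_space : (uint32_t) observed_wnd;

  /* The header field is scaled; the peer effectively sees
   * (wnd >> rcv_wscale) << rcv_wscale bytes */
  return wnd >> rcv_wscale;
}

In the patch itself the unscaled value is additionally adjusted with respect to rcv_wscale and capped at TCP_WND_MAX << rcv_wscale before being stored in tc->rcv_wnd.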
} /** @@ -446,8 +472,7 @@ tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b) void tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b) { - tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; + vlib_main_t *vm = vlib_get_main (); u8 flags = 0; tcp_reuse_buffer (vm, b); @@ -467,8 +492,7 @@ tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b) void tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b) { - tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; + vlib_main_t *vm = vlib_get_main (); tcp_options_t _snd_opts, *snd_opts = &_snd_opts; u8 tcp_opts_len, tcp_hdr_opts_len; tcp_header_t *th; @@ -631,7 +655,7 @@ tcp_send_reset (vlib_buffer_t * pkt, u8 is_ip4) vlib_buffer_t *b; u32 bi; tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; + vlib_main_t *vm = vlib_get_main (); u8 tcp_hdr_len, flags = 0; tcp_header_t *th, *pkt_th; u32 seq, ack; @@ -736,7 +760,7 @@ tcp_send_syn (tcp_connection_t * tc) vlib_buffer_t *b; u32 bi; tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; + vlib_main_t *vm = vlib_get_main (); u8 tcp_hdr_opts_len, tcp_opts_len; tcp_header_t *th; u32 time_now; @@ -795,9 +819,9 @@ tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4) /* Decide where to send the packet */ next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index; - f = vlib_get_frame_to_node (vm, next_index); /* Enqueue the packet */ + f = vlib_get_frame_to_node (vm, next_index); to_next = vlib_frame_vector_args (f); to_next[0] = bi; f->n_vectors = 1; @@ -813,7 +837,7 @@ tcp_send_fin (tcp_connection_t * tc) vlib_buffer_t *b; u32 bi; tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; + vlib_main_t *vm = vlib_get_main (); tcp_get_free_buffer_index (tm, &bi); b = vlib_get_buffer (vm, bi); @@ -884,22 +908,21 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, vnet_buffer (b)->tcp.connection_index = tc->c_c_index; tc->snd_nxt += data_len; + /* TODO this is updated in output as well ... 
*/ + if (tc->snd_nxt > tc->snd_una_max) + tc->snd_una_max = tc->snd_nxt; TCP_EVT_DBG (TCP_EVT_PKTIZE, tc); } -/* Send delayed ACK when timer expires */ void -tcp_timer_delack_handler (u32 index) +tcp_send_ack (tcp_connection_t * tc) { tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; - u32 thread_index = os_get_cpu_number (); - tcp_connection_t *tc; + vlib_main_t *vm = vlib_get_main (); + vlib_buffer_t *b; u32 bi; - tc = tcp_connection_get (index, thread_index); - /* Get buffer */ tcp_get_free_buffer_index (tm, &bi); b = vlib_get_buffer (vm, bi); @@ -907,12 +930,22 @@ tcp_timer_delack_handler (u32 index) /* Fill in the ACK */ tcp_make_ack (tc, b); - tc->timers[TCP_TIMER_DELACK] = TCP_TIMER_HANDLE_INVALID; - tc->flags &= ~TCP_CONN_DELACK; - tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); } +/* Send delayed ACK when timer expires */ +void +tcp_timer_delack_handler (u32 index) +{ + u32 thread_index = os_get_cpu_number (); + tcp_connection_t *tc; + + tc = tcp_connection_get (index, thread_index); + tc->timers[TCP_TIMER_DELACK] = TCP_TIMER_HANDLE_INVALID; +// tc->flags &= ~TCP_CONN_DELACK; + tcp_send_ack (tc); +} + /** Build a retransmit segment * * @return the number of bytes in the segment or 0 if there's nothing to @@ -920,59 +953,74 @@ tcp_timer_delack_handler (u32 index) * */ u32 tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, - u32 max_bytes) + u32 offset, u32 max_bytes) { - tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; - u32 n_bytes, offset = 0; - sack_scoreboard_hole_t *hole; - u32 hole_size; + vlib_main_t *vm = vlib_get_main (); + u32 n_bytes = 0; tcp_reuse_buffer (vm, b); ASSERT (tc->state >= TCP_STATE_ESTABLISHED); ASSERT (max_bytes != 0); - if (tcp_opts_sack_permitted (&tc->opt)) - { - /* XXX get first hole not retransmitted yet */ - hole = scoreboard_first_hole (&tc->sack_sb); - if (!hole) - return 0; - - offset = hole->start - tc->snd_una; - hole_size = hole->end - hole->start; + max_bytes = clib_min (tc->snd_mss, max_bytes); - ASSERT (hole_size); + /* Start is beyond snd_congestion */ + if (seq_geq (tc->snd_una + offset, tc->snd_congestion)) + goto done; - if (hole_size < max_bytes) - max_bytes = hole_size; - } - else + /* Don't overshoot snd_congestion */ + if (seq_gt (tc->snd_nxt + max_bytes, tc->snd_congestion)) { - if (seq_geq (tc->snd_nxt, tc->snd_una_max)) - return 0; + max_bytes = tc->snd_congestion - tc->snd_nxt; + if (max_bytes == 0) + goto done; } + ASSERT (max_bytes <= tc->snd_mss); + n_bytes = stream_session_peek_bytes (&tc->connection, vlib_buffer_get_current (b), offset, max_bytes); ASSERT (n_bytes != 0); - + b->current_length = n_bytes; tcp_push_hdr_i (tc, b, tc->state); +done: + TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes); return n_bytes; } +/** + * Reset congestion control, switch cwnd to loss window and try again. 
+ */ +static void +tcp_rtx_timeout_cc_recover (tcp_connection_t * tc) +{ + /* Cleanly recover cc (also clears up fast retransmit) */ + if (tcp_in_fastrecovery (tc)) + { + tcp_cc_recover (tc); + } + else + { + tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss); + } + + /* Start again from the beginning */ + tc->cwnd = tcp_loss_wnd (tc); + tc->snd_congestion = tc->snd_una_max; +} + static void tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) { tcp_main_t *tm = vnet_get_tcp_main (); - vlib_main_t *vm = tm->vlib_main; + vlib_main_t *vm = vlib_get_main (); u32 thread_index = os_get_cpu_number (); tcp_connection_t *tc; vlib_buffer_t *b; - u32 bi, max_bytes, snd_space; + u32 bi, snd_space, n_bytes; if (is_syn) { @@ -998,26 +1046,43 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) if (tc->state >= TCP_STATE_ESTABLISHED) { - tcp_fastrecovery_off (tc); + /* First retransmit timeout */ + if (tc->rto_boff == 1) + tcp_rtx_timeout_cc_recover (tc); /* Exponential backoff */ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); /* Figure out what and how many bytes we can send */ snd_space = tcp_available_snd_space (tc); - max_bytes = clib_min (tc->snd_mss, snd_space); - if (max_bytes == 0) + TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1); + + if (snd_space == 0) { clib_warning ("no wnd to retransmit"); + tcp_return_buffer (tm); + + /* Force one segment */ + tcp_retransmit_first_unacked (tc); + + /* Re-enable retransmit timer. Output may be unwilling + * to do it for us */ + tcp_retransmit_timer_set (tc); + return; } - tcp_prepare_retransmit_segment (tc, b, max_bytes); + else + { + /* No fancy recovery for now! */ + n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, snd_space); + scoreboard_clear (&tc->sack_sb); - tc->rtx_bytes += max_bytes; + if (n_bytes == 0) + return; - /* No fancy recovery for now! */ - scoreboard_clear (&tc->sack_sb); + tc->rtx_bytes += n_bytes; + } } else { @@ -1072,63 +1137,110 @@ tcp_timer_retransmit_syn_handler (u32 index) } /** - * Retansmit first unacked segment */ + * Retransmit first unacked segment + */ void tcp_retransmit_first_unacked (tcp_connection_t * tc) { tcp_main_t *tm = vnet_get_tcp_main (); - u32 snd_nxt = tc->snd_nxt; + vlib_main_t *vm = vlib_get_main (); vlib_buffer_t *b; - u32 bi; + u32 bi, n_bytes; tc->snd_nxt = tc->snd_una; /* Get buffer */ tcp_get_free_buffer_index (tm, &bi); - b = vlib_get_buffer (tm->vlib_main, bi); + b = vlib_get_buffer (vm, bi); + + TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2); - tcp_prepare_retransmit_segment (tc, b, tc->snd_mss); - tcp_enqueue_to_output (tm->vlib_main, b, bi, tc->c_is_ip4); + n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, tc->snd_mss); + if (n_bytes == 0) + return; - tc->snd_nxt = snd_nxt; - tc->rtx_bytes += tc->snd_mss; + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + tc->rtx_bytes += n_bytes; +} + +sack_scoreboard_hole_t * +scoreboard_first_rtx_hole (sack_scoreboard_t * sb) +{ + sack_scoreboard_hole_t *hole = 0; + +// hole = scoreboard_first_hole (&tc->sack_sb); +// if (hole) +// { +// +// offset = hole->start - tc->snd_una; +// hole_size = hole->end - hole->start; +// +// ASSERT(hole_size); +// +// if (hole_size < max_bytes) +// max_bytes = hole_size; +// } + return hole; } +/** + * Do fast retransmit. 
+ */ void tcp_fast_retransmit (tcp_connection_t * tc) { tcp_main_t *tm = vnet_get_tcp_main (); - u32 snd_space, max_bytes, n_bytes, bi; + vlib_main_t *vm = vlib_get_main (); + u32 bi; + int snd_space; + u32 n_written = 0, offset = 0; vlib_buffer_t *b; + u8 use_sacks = 0; ASSERT (tcp_in_fastrecovery (tc)); - clib_warning ("fast retransmit!"); - /* Start resending from first un-acked segment */ tc->snd_nxt = tc->snd_una; snd_space = tcp_available_snd_space (tc); + TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0); + + /* If we have SACKs use them */ + if (tcp_opts_sack_permitted (&tc->opt) + && scoreboard_first_hole (&tc->sack_sb)) + use_sacks = 0; - while (snd_space) + while (snd_space > 0) { tcp_get_free_buffer_index (tm, &bi); - b = vlib_get_buffer (tm->vlib_main, bi); + b = vlib_get_buffer (vm, bi); + + if (use_sacks) + { + scoreboard_first_rtx_hole (&tc->sack_sb); + } + else + { + offset += n_written; + } - max_bytes = clib_min (tc->snd_mss, snd_space); - n_bytes = tcp_prepare_retransmit_segment (tc, b, max_bytes); + n_written = tcp_prepare_retransmit_segment (tc, b, offset, snd_space); /* Nothing left to retransmit */ - if (n_bytes == 0) - return; - - tcp_enqueue_to_output (tm->vlib_main, b, bi, tc->c_is_ip4); + if (n_written == 0) + { + tcp_return_buffer (tm); + break; + } - snd_space -= n_bytes; + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + tc->rtx_bytes += n_written; + snd_space -= n_written; } - /* If window allows, send new data */ - tc->snd_nxt = tc->snd_una_max; + /* If window allows, send 1 SMSS of new data */ + if (seq_lt (tc->snd_nxt, tc->snd_congestion)) + tc->snd_nxt = tc->snd_congestion; } always_inline u32 @@ -1209,8 +1321,6 @@ tcp46_output_inline (vlib_main_t * vm, if (PREDICT_FALSE (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK)) { - ASSERT (tc0->snt_dupacks > 0); - tc0->snt_dupacks--; if (!tcp_session_has_ooo_data (tc0)) { error0 = TCP_ERROR_FILTERED_DUPACKS; @@ -1223,8 +1333,7 @@ tcp46_output_inline (vlib_main_t * vm, tc0->rcv_las = tc0->rcv_nxt; /* Stop DELACK timer and fix flags */ - tc0->flags &= - ~(TCP_CONN_SNDACK | TCP_CONN_DELACK | TCP_CONN_BURSTACK); + tc0->flags &= ~(TCP_CONN_SNDACK); if (tcp_timer_is_active (tc0, TCP_TIMER_DELACK)) { tcp_timer_reset (tc0, TCP_TIMER_DELACK); diff --git a/src/vnet/tcp/tcp_packet.h b/src/vnet/tcp/tcp_packet.h index 866c5fd6..4f28cf32 100644 --- a/src/vnet/tcp/tcp_packet.h +++ b/src/vnet/tcp/tcp_packet.h @@ -137,7 +137,7 @@ enum typedef struct _sack_block { u32 start; /**< Start sequence number */ - u32 end; /**< End sequence number */ + u32 end; /**< End sequence number (first outside) */ } sack_block_t; typedef struct diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c new file mode 100644 index 00000000..0725bb04 --- /dev/null +++ b/src/vnet/tcp/tcp_test.c @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#define TCP_TEST_I(_cond, _comment, _args...) 
\ +({ \ + int _evald = (_cond); \ + if (!(_evald)) { \ + fformat(stderr, "FAIL:%d: " _comment "\n", \ + __LINE__, ##_args); \ + } else { \ + fformat(stderr, "PASS:%d: " _comment "\n", \ + __LINE__, ##_args); \ + } \ + _evald; \ +}) + +#define TCP_TEST(_cond, _comment, _args...) \ +{ \ + if (!TCP_TEST_I(_cond, _comment, ##_args)) { \ + return 1; \ + } \ +} + +static int +tcp_test_sack () +{ + tcp_connection_t _tc, *tc = &_tc; + sack_scoreboard_t *sb = &tc->sack_sb; + sack_block_t *sacks = 0, block; + sack_scoreboard_hole_t *hole; + int i; + + memset (tc, 0, sizeof (*tc)); + + tc->snd_una = 0; + tc->snd_una_max = 1000; + tc->snd_nxt = 1000; + tc->opt.flags |= TCP_OPTS_FLAG_SACK; + scoreboard_init (&tc->sack_sb); + + for (i = 0; i < 1000 / 100; i++) + { + block.start = i * 100; + block.end = (i + 1) * 100; + vec_add1 (sacks, block); + } + + /* + * Inject even blocks + */ + + for (i = 0; i < 1000 / 200; i++) + { + vec_add1 (tc->opt.sacks, sacks[i * 2]); + } + tc->opt.n_sack_blocks = vec_len (tc->opt.sacks); + tcp_rcv_sacks (tc, 0); + + TCP_TEST ((pool_elts (sb->holes) == 5), + "scoreboard has %d elements", pool_elts (sb->holes)); + + /* First SACK block should be rejected */ + hole = scoreboard_first_hole (sb); + TCP_TEST ((hole->start == 0 && hole->end == 200), + "first hole start %u end %u", hole->start, hole->end); + hole = scoreboard_last_hole (sb); + TCP_TEST ((hole->start == 900 && hole->end == 1000), + "last hole start %u end %u", hole->start, hole->end); + TCP_TEST ((sb->sacked_bytes == 400), "sacked bytes %d", sb->sacked_bytes); + TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv %u", sb->snd_una_adv); + TCP_TEST ((sb->last_sacked_bytes == 400), + "last sacked bytes %d", sb->last_sacked_bytes); + + /* + * Inject odd blocks + */ + + vec_reset_length (tc->opt.sacks); + for (i = 0; i < 1000 / 200; i++) + { + vec_add1 (tc->opt.sacks, sacks[i * 2 + 1]); + } + tc->opt.n_sack_blocks = vec_len (tc->opt.sacks); + tcp_rcv_sacks (tc, 0); + + hole = scoreboard_first_hole (sb); + TCP_TEST ((pool_elts (sb->holes) == 1), + "scoreboard has %d holes", pool_elts (sb->holes)); + TCP_TEST ((hole->start == 0 && hole->end == 100), + "first hole start %u end %u", hole->start, hole->end); + TCP_TEST ((sb->sacked_bytes == 900), "sacked bytes %d", sb->sacked_bytes); + TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv %u", sb->snd_una_adv); + TCP_TEST ((sb->max_byte_sacked == 1000), + "max sacked byte %u", sb->max_byte_sacked); + TCP_TEST ((sb->last_sacked_bytes == 500), + "last sacked bytes %d", sb->last_sacked_bytes); + + /* + * Ack until byte 100, all bytes are now acked + sacked + */ + tcp_rcv_sacks (tc, 100); + + TCP_TEST ((pool_elts (sb->holes) == 0), + "scoreboard has %d elements", pool_elts (sb->holes)); + TCP_TEST ((sb->snd_una_adv == 900), + "snd_una_adv after ack %u", sb->snd_una_adv); + TCP_TEST ((sb->max_byte_sacked == 1000), + "max sacked byte %u", sb->max_byte_sacked); + TCP_TEST ((sb->sacked_bytes == 0), "sacked bytes %d", sb->sacked_bytes); + TCP_TEST ((sb->last_sacked_bytes == 0), + "last sacked bytes %d", sb->last_sacked_bytes); + + /* + * Add new block + */ + + vec_reset_length (tc->opt.sacks); + + block.start = 1200; + block.end = 1300; + vec_add1 (tc->opt.sacks, block); + + tc->snd_una_max = 1500; + tc->snd_una = 1000; + tc->snd_nxt = 1500; + tcp_rcv_sacks (tc, 1000); + + TCP_TEST ((sb->snd_una_adv == 0), + "snd_una_adv after ack %u", sb->snd_una_adv); + TCP_TEST ((pool_elts (sb->holes) == 2), + "scoreboard has %d holes", pool_elts (sb->holes)); + hole = scoreboard_first_hole (sb); + TCP_TEST 
((hole->start == 1000 && hole->end == 1200), + "first hole start %u end %u", hole->start, hole->end); + hole = scoreboard_last_hole (sb); + TCP_TEST ((hole->start == 1300 && hole->end == 1500), + "last hole start %u end %u", hole->start, hole->end); + TCP_TEST ((sb->sacked_bytes == 100), "sacked bytes %d", sb->sacked_bytes); + + /* + * Ack first hole + */ + + vec_reset_length (tc->opt.sacks); + tcp_rcv_sacks (tc, 1200); + + TCP_TEST ((sb->snd_una_adv == 100), + "snd_una_adv after ack %u", sb->snd_una_adv); + TCP_TEST ((sb->sacked_bytes == 0), "sacked bytes %d", sb->sacked_bytes); + TCP_TEST ((pool_elts (sb->holes) == 1), + "scoreboard has %d elements", pool_elts (sb->holes)); + + /* + * Remove all + */ + + scoreboard_clear (sb); + TCP_TEST ((pool_elts (sb->holes) == 0), + "number of holes %d", pool_elts (sb->holes)); + return 0; +} + +static clib_error_t * +tcp_test (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd_arg) +{ + int res = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "sack")) + { + res = tcp_test_sack (); + } + else + { + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + } + + if (res) + { + return clib_error_return (0, "TCP unit test failed"); + } + else + { + return 0; + } +} + +VLIB_CLI_COMMAND (tcp_test_command, static) = +{ +.path = "test tcp",.short_help = "internal tcp unit tests",.function = + tcp_test,}; +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index 46c8e734..57f774c5 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -39,10 +39,10 @@ builtin_session_disconnect_callback (stream_session_t * s) } static int -builtin_server_rx_callback (stream_session_t * s, session_fifo_event_t * ep) +builtin_server_rx_callback (stream_session_t * s) { svm_fifo_t *rx_fifo, *tx_fifo; - u32 this_transfer; + u32 this_transfer, max_deq, max_enq; int actual_transfer; u8 *my_copy_buffer; session_fifo_event_t evt; @@ -52,9 +52,9 @@ builtin_server_rx_callback (stream_session_t * s, session_fifo_event_t * ep) rx_fifo = s->server_rx_fifo; tx_fifo = s->server_tx_fifo; - this_transfer = svm_fifo_max_enqueue (tx_fifo) - < svm_fifo_max_dequeue (rx_fifo) ? - svm_fifo_max_enqueue (tx_fifo) : svm_fifo_max_dequeue (rx_fifo); + max_deq = svm_fifo_max_dequeue (rx_fifo); + max_enq = svm_fifo_max_enqueue (tx_fifo); + this_transfer = max_enq < max_deq ? 
max_enq : max_deq; vec_validate (my_copy_buffer, this_transfer - 1); _vec_len (my_copy_buffer) = this_transfer; @@ -64,17 +64,20 @@ builtin_server_rx_callback (stream_session_t * s, session_fifo_event_t * ep) ASSERT (actual_transfer == this_transfer); actual_transfer = svm_fifo_enqueue_nowait (tx_fifo, 0, this_transfer, my_copy_buffer); + ASSERT (actual_transfer == this_transfer); copy_buffers[s->thread_index] = my_copy_buffer; - /* Fabricate TX event, send to ourselves */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = actual_transfer; - evt.event_id = 0; - q = session_manager_get_vpp_event_queue (s->thread_index); - unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); + if (svm_fifo_set_event (tx_fifo)) + { + /* Fabricate TX event, send to ourselves */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_id = 0; + q = session_manager_get_vpp_event_queue (s->thread_index); + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* do wait for mutex */ ); + } return 0; } diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index 88278735..4b22109b 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -244,44 +244,53 @@ udp4_uri_input_node_fn (vlib_main_t * vm, /* Get session's server */ server0 = application_get (s0->app_index); - /* Fabricate event */ - evt.fifo = s0->server_rx_fifo; - evt.event_type = FIFO_EVENT_SERVER_RX; - evt.event_id = serial_number++; - evt.enqueue_length = svm_fifo_max_dequeue (s0->server_rx_fifo); - /* Built-in server? Deliver the goods... */ if (server0->cb_fns.builtin_server_rx_callback) { - server0->cb_fns.builtin_server_rx_callback (s0, &evt); + server0->cb_fns.builtin_server_rx_callback (s0); continue; } - /* Add event to server's event queue */ - q = server0->event_queue; - - /* Don't block for lack of space */ - if (PREDICT_TRUE (q->cursize < q->maxsize)) - unix_shared_memory_queue_add (server0->event_queue, (u8 *) & evt, - 0 /* do wait for mutex */ ); - else + if (svm_fifo_set_event (s0->server_rx_fifo)) { - vlib_node_increment_counter (vm, udp4_uri_input_node.index, - SESSION_ERROR_FIFO_FULL, 1); + /* Fabricate event */ + evt.fifo = s0->server_rx_fifo; + evt.event_type = FIFO_EVENT_SERVER_RX; + evt.event_id = serial_number++; + + /* Add event to server's event queue */ + q = server0->event_queue; + + /* Don't block for lack of space */ + if (PREDICT_TRUE (q->cursize < q->maxsize)) + { + unix_shared_memory_queue_add (server0->event_queue, + (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + else + { + vlib_node_increment_counter (vm, udp4_uri_input_node.index, + SESSION_ERROR_FIFO_FULL, 1); + } } + /* *INDENT-OFF* */ if (1) { ELOG_TYPE_DECLARE (e) = { - .format = "evt-enqueue: id %d length %d",.format_args = "i4i4",}; + .format = "evt-enqueue: id %d length %d", + .format_args = "i4i4",}; struct { u32 data[2]; } *ed; ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->data[0] = evt.event_id; - ed->data[1] = evt.enqueue_length; + ed->data[1] = svm_fifo_max_dequeue (s0->server_rx_fifo); } + /* *INDENT-ON* */ + } vec_reset_length (session_indices_to_enqueue); -- cgit 1.2.3-korg From 3e350af5d3e9744a4529a28dd293b2d4601442f7 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Thu, 30 Mar 2017 02:54:28 -0700 Subject: TCP cc/window management fixes and debugging - added persist timer - update rcv_las whenever sending an ack - moved fifo size to its own cache line - improved session and builtin client debugging Change-Id: 
Ia649cf942cf0c061a713e8b67f0eb6974a6cd55b Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/svm/svm_fifo.c | 37 +++++----- src/svm/svm_fifo.h | 8 ++- src/vnet/session/node.c | 24 ++++--- src/vnet/session/session.c | 6 +- src/vnet/session/session.h | 12 +++- src/vnet/session/session_debug.h | 32 ++++++++- src/vnet/tcp/builtin_client.c | 146 +++++++++++++++++++++++++++------------ src/vnet/tcp/builtin_server.c | 12 +++- src/vnet/tcp/tcp.c | 6 +- src/vnet/tcp/tcp.h | 36 +++++++++- src/vnet/tcp/tcp_debug.h | 66 +++++++++++++----- src/vnet/tcp/tcp_input.c | 68 +++++++++--------- src/vnet/tcp/tcp_output.c | 79 +++++++++++++++++---- 13 files changed, 386 insertions(+), 146 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 07b0d2df..cc84feb9 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -254,6 +254,10 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) { ooo_segment_t *s; u32 index, bytes = 0, diff; + u32 cursize; + + /* read cursize, which can only increase while we're working */ + cursize = svm_fifo_max_dequeue (f); s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); @@ -286,8 +290,8 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) /* If tail is adjacent to an ooo segment, 'consume' it */ if (diff == 0) { - bytes = ((f->nitems - f->cursize) >= s->length) ? s->length : - f->nitems - f->cursize; + bytes = ((f->nitems - cursize) >= s->length) ? s->length : + f->nitems - cursize; f->tail += bytes; f->tail %= f->nitems; @@ -305,11 +309,12 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; - if (PREDICT_FALSE (f->cursize == f->nitems)) + /* read cursize, which can only increase while we're working */ + cursize = svm_fifo_max_dequeue (f); + + if (PREDICT_FALSE (cursize == f->nitems)) return -2; /* fifo stuffed */ - /* read cursize, which can only decrease while we're working */ - cursize = f->cursize; nitems = f->nitems; /* Number of bytes we're going to copy */ @@ -382,8 +387,8 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, ASSERT (offset > 0); - /* read cursize, which can only decrease while we're working */ - cursize = f->cursize; + /* read cursize, which can only increase while we're working */ + cursize = svm_fifo_max_dequeue (f); nitems = f->nitems; /* Will this request fit? */ @@ -437,11 +442,11 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; - if (PREDICT_FALSE (f->cursize == 0)) + /* read cursize, which can only increase while we're working */ + cursize = svm_fifo_max_dequeue (f); + if (PREDICT_FALSE (cursize == 0)) return -2; /* nothing in the fifo */ - /* read cursize, which can only increase while we're working */ - cursize = f->cursize; nitems = f->nitems; /* Number of bytes we're going to copy */ @@ -495,11 +500,11 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems, real_head; - if (PREDICT_FALSE (f->cursize == 0)) + /* read cursize, which can only increase while we're working */ + cursize = svm_fifo_max_dequeue (f); + if (PREDICT_FALSE (cursize == 0)) return -2; /* nothing in the fifo */ - /* read cursize, which can only increase while we're working */ - cursize = f->cursize; nitems = f->nitems; real_head = f->head + offset; real_head = real_head >= nitems ? 
real_head - nitems : real_head; @@ -532,11 +537,11 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes) u32 total_drop_bytes, first_drop_bytes, second_drop_bytes; u32 cursize, nitems; - if (PREDICT_FALSE (f->cursize == 0)) + /* read cursize, which can only increase while we're working */ + cursize = svm_fifo_max_dequeue (f); + if (PREDICT_FALSE (cursize == 0)) return -2; /* nothing in the fifo */ - /* read cursize, which can only increase while we're working */ - cursize = f->cursize; nitems = f->nitems; /* Number of bytes we're going to drop */ diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 39556173..80e5b0f2 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -44,14 +44,16 @@ typedef struct typedef struct { + volatile u32 cursize; /**< current fifo size */ + u32 nitems; + CLIB_CACHE_LINE_ALIGN_MARK (end_cursize); + pthread_mutex_t mutex; /* 8 bytes */ pthread_cond_t condvar; /* 8 bytes */ svm_lock_tag_t tag; - volatile u32 cursize; /**< current fifo size */ volatile u8 has_event; /**< non-zero if deq event exists */ u32 owner_pid; - u32 nitems; /* Backpointers */ u32 server_session_index; @@ -105,7 +107,7 @@ svm_fifo_max_dequeue (svm_fifo_t * f) static inline u32 svm_fifo_max_enqueue (svm_fifo_t * f) { - return f->nitems - f->cursize; + return f->nitems - svm_fifo_max_dequeue (f); } static inline u8 diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index 8681105c..b86e87d9 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -119,15 +119,20 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* Nothing to read return */ if (max_dequeue0 == 0) - { - return 0; - } + return 0; /* Ensure we're not writing more than transport window allows */ - max_len_to_snd0 = clib_min (max_dequeue0, snd_space0); - - /* TODO check if transport is willing to send len_to_snd0 - * bytes (Nagle) */ + if (max_dequeue0 < snd_space0) + { + /* Constrained by tx queue. Try to send only fully formed segments */ + max_len_to_snd0 = (max_dequeue0 > snd_mss0) ? + max_dequeue0 - max_dequeue0 % snd_mss0 : max_dequeue0; + /* TODO Nagle ? 
*/ + } + else + { + max_len_to_snd0 = snd_space0; + } n_frame_bytes = snd_mss0 * VLIB_FRAME_SIZE; n_frames_per_evt = ceil ((double) max_len_to_snd0 / n_frame_bytes); @@ -308,11 +313,14 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, int n_tx_packets = 0; u32 my_thread_index = vm->cpu_index; int i, rv; + f64 now = vlib_time_now (vm); + + SESSION_EVT_DBG (SESSION_EVT_POLL_GAP_TRACK, smm, my_thread_index); /* * Update TCP time */ - tcp_update_time (vlib_time_now (vm), my_thread_index); + tcp_update_time (now, my_thread_index); /* * Get vpp queue events diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index f10918aa..8e2b2616 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -556,7 +556,7 @@ session_manager_allocate_session_fifos (session_manager_main_t * smm, u8 * added_a_segment) { svm_fifo_segment_private_t *fifo_segment; - u32 fifo_size, default_fifo_size = 128 << 10; /* TODO config */ + u32 fifo_size, default_fifo_size = 1 << 16; /* TODO config */ int i; *added_a_segment = 0; @@ -1293,6 +1293,10 @@ session_manager_main_enable (vlib_main_t * vm) vec_validate (smm->current_enqueue_epoch, num_threads - 1); vec_validate (smm->vpp_event_queues, num_threads - 1); +#if SESSION_DBG + vec_validate (smm->last_event_poll_by_thread, num_threads - 1); +#endif + /* $$$$ preallocate hack config parameter */ for (i = 0; i < 200000; i++) { diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index a39bc06f..6878b4d2 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -20,6 +20,7 @@ #include #include #include +#include #define HALF_OPEN_LOOKUP_INVALID_VALUE ((u64)~0) #define INVALID_INDEX ((u32)~0) @@ -36,7 +37,7 @@ typedef enum FIFO_EVENT_BUILTIN_RX } fifo_event_type_t; -#define foreach_session_input_error \ +#define foreach_session_input_error \ _(NO_SESSION, "No session drops") \ _(NO_LISTENER, "No listener for dst port drops") \ _(ENQUEUED, "Packets pushed into rx fifo") \ @@ -218,6 +219,15 @@ struct _session_manager_main /* Convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; + +#if SESSION_DBG + /** + * last event poll time by thread + * Debug only. 
Will cause false cache-line sharing as-is + */ + f64 *last_event_poll_by_thread; +#endif + }; extern session_manager_main_t session_manager_main; diff --git a/src/vnet/session/session_debug.h b/src/vnet/session/session_debug.h index 80a97cd5..eb11f1a0 100644 --- a/src/vnet/session/session_debug.h +++ b/src/vnet/session/session_debug.h @@ -16,13 +16,13 @@ #define SRC_VNET_SESSION_SESSION_DEBUG_H_ #include -#include #include #define foreach_session_dbg_evt \ _(ENQ, "enqueue") \ _(DEQ, "dequeue") \ - _(DEQ_NODE, "dequeue") + _(DEQ_NODE, "dequeue") \ + _(POLL_GAP_TRACK, "poll gap track") \ typedef enum _session_evt_dbg { @@ -33,6 +33,7 @@ typedef enum _session_evt_dbg #define SESSION_DBG (0) #define SESSION_DEQ_NODE_EVTS (0) +#define SESSION_EVT_POLL_DBG (1) #if TRANSPORT_DEBUG && SESSION_DBG @@ -97,9 +98,34 @@ typedef enum _session_evt_dbg #define SESSION_EVT_DEQ_NODE_HANDLER(_node_evt) #endif +#if SESSION_DBG && SESSION_EVT_POLL_DBG +#define SESSION_EVT_POLL_GAP(_smm, _my_thread_index) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "nixon-gap: %d MS", \ + .format_args = "i4", \ + }; \ + DEC_SESSION_ED(_e, 1); \ + ed->data[0] = (u32) ((now - \ + _smm->last_event_poll_by_thread[my_thread_index])*1000.0); \ +} +#define SESSION_EVT_POLL_GAP_TRACK_HANDLER(_smm, _my_thread_index) \ +{ \ + if (PREDICT_TRUE( \ + smm->last_event_poll_by_thread[my_thread_index] != 0.0)) \ + if (now > smm->last_event_poll_by_thread[_my_thread_index] + 500e-6)\ + SESSION_EVT_POLL_GAP(smm, my_thread_index); \ + _smm->last_event_poll_by_thread[my_thread_index] = now; \ +} + +#else +#define SESSION_EVT_POLL_GAP(_smm, _my_thread_index) +#define SESSION_EVT_POLL_GAP_TRACK_HANDLER(_smm, _my_thread_index) +#endif + #define CONCAT_HELPER(_a, _b) _a##_b #define CC(_a, _b) CONCAT_HELPER(_a, _b) - #define SESSION_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args) #else diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 83cdbc1b..e3705060 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -43,6 +43,10 @@ #include #undef vl_printfun +#define TCP_BUILTIN_CLIENT_DBG (1) +#define TCP_BUILTIN_CLIENT_VPP_THREAD (0) +#define TCP_BUILTIN_CLIENT_PTHREAD (!TCP_BUILTIN_CLIENT_VPP_THREAD) + static void send_test_chunk (tclient_main_t * tm, session_t * s) { @@ -52,35 +56,50 @@ send_test_chunk (tclient_main_t * tm, session_t * s) session_fifo_event_t evt; static int serial_number = 0; int rv; + test_buf_offset = s->bytes_sent % vec_len (test_data); + bytes_this_chunk = vec_len (test_data) - test_buf_offset; - while (s->bytes_to_send > 0) - { + bytes_this_chunk = bytes_this_chunk < s->bytes_to_send + ? bytes_this_chunk : s->bytes_to_send; - test_buf_offset = s->bytes_sent % vec_len (test_data); - bytes_this_chunk = vec_len (test_data) - test_buf_offset; + rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, 0 /*pid */ , + bytes_this_chunk, + test_data + test_buf_offset); - bytes_this_chunk = bytes_this_chunk < s->bytes_to_send - ? bytes_this_chunk : s->bytes_to_send; + /* If we managed to enqueue data... */ + if (rv > 0) + { + if (TCP_BUILTIN_CLIENT_DBG) + { + /* *INDENT-OFF* */ + ELOG_TYPE_DECLARE (e) = + { + .format = "tx-enq: %d bytes", + .format_args = "i4", + }; + /* *INDENT-ON* */ + struct + { + u32 data[1]; + } *ed; + ed = ELOG_DATA (&vlib_global_main.elog_main, e); + ed->data[0] = rv; + } - rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, 0 /*pid */ , - bytes_this_chunk, - test_data + test_buf_offset); + /* Account for it... 
*/ + s->bytes_to_send -= rv; + s->bytes_sent += rv; - if (rv > 0) + /* Poke the TCP state machine */ + if (svm_fifo_set_event (s->server_tx_fifo)) { - s->bytes_to_send -= rv; - s->bytes_sent += rv; + /* Fabricate TX event, send to vpp */ + evt.fifo = s->server_tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_id = serial_number++; - if (svm_fifo_set_event (s->server_tx_fifo)) - { - /* Fabricate TX event, send to vpp */ - evt.fifo = s->server_tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - evt.event_id = serial_number++; - - unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, - 0 /* do wait for mutex */ ); - } + unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, + 0 /* do wait for mutex */ ); } } } @@ -89,39 +108,55 @@ static void receive_test_chunk (tclient_main_t * tm, session_t * s) { svm_fifo_t *rx_fifo = s->server_rx_fifo; - int n_read, bytes, i; + int n_read, test_bytes = 0; - bytes = svm_fifo_max_dequeue (rx_fifo); /* Allow enqueuing of new event */ - svm_fifo_unset_event (rx_fifo); + // svm_fifo_unset_event (rx_fifo); - /* Read the bytes */ - do + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (tm->rx_buf), + tm->rx_buf); + if (n_read > 0) { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (tm->rx_buf), - tm->rx_buf); - if (n_read > 0) + if (TCP_BUILTIN_CLIENT_DBG) { - bytes -= n_read; + /* *INDENT-OFF* */ + ELOG_TYPE_DECLARE (e) = + { + .format = "rx-deq: %d bytes", + .format_args = "i4", + }; + /* *INDENT-ON* */ + struct + { + u32 data[1]; + } *ed; + ed = ELOG_DATA (&vlib_global_main.elog_main, e); + ed->data[0] = n_read; + } + + if (test_bytes) + { + int i; for (i = 0; i < n_read; i++) { if (tm->rx_buf[i] != ((s->bytes_received + i) & 0xff)) { clib_warning ("read %d error at byte %lld, 0x%x not 0x%x", - n_read, s->bytes_received + i, - tm->rx_buf[i], + n_read, s->bytes_received + i, tm->rx_buf[i], ((s->bytes_received + i) & 0xff)); } } - s->bytes_to_receive -= n_read; - s->bytes_received += n_read; } - + s->bytes_to_receive -= n_read; + s->bytes_received += n_read; } - while (n_read < 0 || bytes > 0); } +#if TCP_BUILTIN_CLIENT_VPP_THREAD +static void +#else static void * +#endif tclient_thread_fn (void *arg) { tclient_main_t *tm = &tclient_main; @@ -139,6 +174,8 @@ tclient_thread_fn (void *arg) pthread_sigmask (SIG_SETMASK, &s, 0); } + clib_per_cpu_mheaps[os_get_cpu_number ()] = clib_per_cpu_mheaps[0]; + while (1) { /* Wait until we're told to get busy */ @@ -186,12 +223,12 @@ tclient_thread_fn (void *arg) /* Disconnect sessions... */ vec_reset_length (session_indices); - pool_foreach (sp, tm->sessions, ( - { - vec_add1 (session_indices, - sp - tm->sessions); - } - )); + + /* *INDENT-OFF* */ + pool_foreach (sp, tm->sessions, ({ + vec_add1 (session_indices, sp - tm->sessions); + })); + /* *INDENT-ON* */ for (i = 0; i < vec_len (session_indices); i++) { @@ -207,7 +244,9 @@ tclient_thread_fn (void *arg) } } /* NOTREACHED */ +#if TCP_BUILTIN_CLIENT_PTHREAD return 0; +#endif } /* So we don't get "no handler for... 
" msgs */ @@ -333,7 +372,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { - u8 *connect_uri = (u8 *) "tcp://6.0.1.2/1234"; + u8 *connect_uri = (u8 *) "tcp://6.0.1.1/1234"; u8 *uri; tclient_main_t *tm = &tclient_main; int i; @@ -349,7 +388,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, ; else if (unformat (input, "iterations %d", &tm->n_iterations)) ; - else if (unformat (input, "bytes %d", &tm->bytes_to_send)) + else if (unformat (input, "bytes %lld", &tm->bytes_to_send)) ; else if (unformat (input, "uri %s", &tm->connect_uri)) ; @@ -366,17 +405,20 @@ test_tcp_clients_command_fn (vlib_main_t * vm, create_api_loopback (tm); +#if TCP_BUILTIN_CLIENT_PTHREAD /* Start a transmit thread */ if (tm->client_thread_handle == 0) { int rv = pthread_create (&tm->client_thread_handle, - NULL /*attr */ , tclient_thread_fn, 0); + NULL /*attr */ , + tclient_thread_fn, 0); if (rv) { tm->client_thread_handle = 0; return clib_error_return (0, "pthread_create returned %d", rv); } } +#endif /* Fire off connect requests, in something approaching a normal manner */ for (i = 0; i < n_clients; i++) @@ -397,6 +439,18 @@ test_tcp_clients_command_fn (vlib_main_t * vm, return 0; } +#if TCP_BUILTIN_CLIENT_VPP_THREAD +/* *INDENT-OFF* */ +VLIB_REGISTER_THREAD (builtin_client_reg, static) = { + .name = "tcp-builtin-client", + .function = tclient_thread_fn, + .fixed_count = 1, + .count = 1, + .no_data_structure_clone = 1, +}; +/* *INDENT-ON* */ +#endif + /* *INDENT-OFF* */ VLIB_CLI_COMMAND (test_clients_command, static) = { diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index efd26e91..917d4bd3 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -127,6 +127,7 @@ builtin_server_rx_callback (stream_session_t * s) { /* XXX timeout for session that are stuck */ + rx_event: /* Program self-tap to retry */ if (svm_fifo_set_event (rx_fifo)) { @@ -158,7 +159,9 @@ builtin_server_rx_callback (stream_session_t * s) n_written = svm_fifo_enqueue_nowait (tx_fifo, 0, actual_transfer, bsm->rx_buf); - ASSERT (n_written == max_transfer); + + if (n_written != max_transfer) + clib_warning ("short trout!"); if (svm_fifo_set_event (tx_fifo)) { @@ -171,6 +174,9 @@ builtin_server_rx_callback (stream_session_t * s) (u8 *) & evt, 0 /* do wait for mutex */ ); } + if (PREDICT_FALSE (max_enqueue < max_dequeue)) + goto rx_event; + return 0; } @@ -204,8 +210,8 @@ server_create (vlib_main_t * vm) a->session_cb_vft = &builtin_session_cb_vft; a->options = options; a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; - a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10; - a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10; + a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 1 << 16; + a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 1 << 16; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index c3df5bc1..b2a371e2 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -578,7 +578,9 @@ tcp_session_send_space (transport_connection_t * trans_conn) /* If we can't write at least a segment, don't try at all */ if (snd_space < tc->snd_mss) return 0; - return snd_space; + + /* round down to mss multiple */ + return snd_space - (snd_space % tc->snd_mss); } /* If in fast recovery, send 1 SMSS if wnd allows */ @@ -706,7 +708,7 @@ static timer_expiration_handler *timer_expiration_handlers[TCP_N_TIMERS] = { tcp_timer_retransmit_handler, tcp_timer_delack_handler, - 0, + 
tcp_timer_persist_handler, tcp_timer_keep_handler, tcp_timer_waitclose_handler, tcp_timer_retransmit_syn_handler, diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index b4286bc4..2f5da108 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -81,6 +81,7 @@ typedef void (timer_expiration_handler) (u32 index); extern timer_expiration_handler tcp_timer_delack_handler; extern timer_expiration_handler tcp_timer_retransmit_handler; +extern timer_expiration_handler tcp_timer_persist_handler; extern timer_expiration_handler tcp_timer_retransmit_syn_handler; #define TCP_TIMER_HANDLE_INVALID ((u32) ~0) @@ -253,13 +254,25 @@ struct _tcp_cc_algorithm #define tcp_fastrecovery_on(tc) (tc)->flags |= TCP_CONN_FAST_RECOVERY #define tcp_fastrecovery_off(tc) (tc)->flags &= ~TCP_CONN_FAST_RECOVERY +#define tcp_recovery_on(tc) (tc)->flags |= TCP_CONN_RECOVERY +#define tcp_recovery_off(tc) (tc)->flags &= ~TCP_CONN_RECOVERY #define tcp_in_fastrecovery(tc) ((tc)->flags & TCP_CONN_FAST_RECOVERY) -#define tcp_in_recovery(tc) ((tc)->flags & (TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY)) +#define tcp_in_recovery(tc) ((tc)->flags & (TCP_CONN_RECOVERY)) #define tcp_in_slowstart(tc) (tc->cwnd < tc->ssthresh) #define tcp_fastrecovery_sent_1_smss(tc) ((tc)->flags & TCP_CONN_FR_1_SMSS) #define tcp_fastrecovery_1_smss_on(tc) ((tc)->flags |= TCP_CONN_FR_1_SMSS) #define tcp_fastrecovery_1_smss_off(tc) ((tc)->flags &= ~TCP_CONN_FR_1_SMSS) +#define tcp_in_cong_recovery(tc) ((tc)->flags & \ + (TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY)) + +always_inline void +tcp_cong_recovery_off (tcp_connection_t * tc) +{ + tc->flags &= ~(TCP_CONN_FAST_RECOVERY | TCP_CONN_RECOVERY); + tcp_fastrecovery_1_smss_off (tc); +} + typedef enum { TCP_IP4, @@ -538,6 +551,27 @@ tcp_retransmit_timer_reset (tcp_connection_t * tc) tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT); } +always_inline void +tcp_persist_timer_set (tcp_connection_t * tc) +{ + /* Reuse RTO. 
It's backed off in handler */ + tcp_timer_set (tc, TCP_TIMER_PERSIST, + clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); +} + +always_inline void +tcp_persist_timer_update (tcp_connection_t * tc) +{ + tcp_timer_update (tc, TCP_TIMER_PERSIST, + clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); +} + +always_inline void +tcp_persist_timer_reset (tcp_connection_t * tc) +{ + tcp_timer_reset (tc, TCP_TIMER_PERSIST); +} + always_inline u8 tcp_timer_is_active (tcp_connection_t * tc, tcp_timers_e timer) { diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index 5a71694e..0090e15e 100644 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -31,6 +31,7 @@ _(UNBIND, "unbind") \ _(DELETE, "delete") \ _(SYN_SENT, "SYN sent") \ + _(SYN_RTX, "SYN retransmit") \ _(FIN_SENT, "FIN sent") \ _(ACK_SENT, "ACK sent") \ _(DUPACK_SENT, "DUPACK sent") \ @@ -50,6 +51,7 @@ _(CC_PACK, "cc partial ack") \ _(SEG_INVALID, "invalid segment") \ _(ACK_RCV_ERR, "invalid ack") \ + _(RCV_WND_SHRUNK, "shrunk rcv_wnd") \ typedef enum _tcp_dbg { @@ -159,35 +161,48 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "ack_prep: acked %u rcv_nxt %u rcv_wnd %u snd_nxt %u", \ - .format_args = "i4i4i4i4", \ + .format = "ack_tx: acked %u rcv_nxt %u rcv_wnd %u snd_nxt %u snd_wnd %u",\ + .format_args = "i4i4i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 4); \ + DECLARE_ETD(_tc, _e, 5); \ ed->data[0] = _tc->rcv_nxt - _tc->rcv_las; \ ed->data[1] = _tc->rcv_nxt - _tc->irs; \ ed->data[2] = _tc->rcv_wnd; \ ed->data[3] = _tc->snd_nxt - _tc->iss; \ + ed->data[4] = _tc->snd_wnd; \ } #define TCP_EVT_DUPACK_SENT_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "dack_tx: rcv_nxt %u rcv_wnd %u snd_nxt %u av-wnd %u", \ - .format_args = "i4i4i4i4", \ + .format = "dack_tx: rcv_nxt %u rcv_wnd %u snd_nxt %u av_wnd %u snd_wnd %u",\ + .format_args = "i4i4i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 4); \ + DECLARE_ETD(_tc, _e, 5); \ ed->data[0] = _tc->rcv_nxt - _tc->irs; \ ed->data[1] = _tc->rcv_wnd; \ ed->data[2] = _tc->snd_nxt - _tc->iss; \ ed->data[3] = tcp_available_wnd(_tc); \ + ed->data[4] = _tc->snd_wnd; \ } #define TCP_EVT_SYN_SENT_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "SYNtx: iss %u", \ + .format = "SYNtx: iss %u", \ + .format_args = "i4", \ + }; \ + DECLARE_ETD(_tc, _e, 1); \ + ed->data[0] = _tc->iss; \ +} + +#define TCP_EVT_SYN_RTX_HANDLER(_tc, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "SYNrtx: iss %u", \ .format_args = "i4", \ }; \ DECLARE_ETD(_tc, _e, 1); \ @@ -254,17 +269,17 @@ typedef enum _tcp_dbg_evt ed->data[1] = _tc->rcv_nxt - _tc->irs; \ } -#define TCP_EVT_ACK_RCVD_HANDLER(_tc, _ack, ...) \ +#define TCP_EVT_ACK_RCVD_HANDLER(_tc, ...) 
\ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "acked: %u snd_una %u ack %u cwnd %u inflight %u", \ + .format = "acked: %u snd_una %u snd_wnd %u cwnd %u inflight %u", \ .format_args = "i4i4i4i4i4", \ }; \ DECLARE_ETD(_tc, _e, 5); \ ed->data[0] = _tc->bytes_acked; \ ed->data[1] = _tc->snd_una - _tc->iss; \ - ed->data[2] = _ack - _tc->iss; \ + ed->data[2] = _tc->snd_wnd; \ ed->data[3] = _tc->cwnd; \ ed->data[4] = tcp_flight_size(_tc); \ } @@ -273,14 +288,15 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "dack_rx: snd_una %u cwnd %u snd_wnd %u inflight %u", \ - .format_args = "i4i4i4i4", \ + .format = "dack_rx: snd_una %u cwnd %u snd_wnd %u flight %u rcv_wnd %u",\ + .format_args = "i4i4i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 4); \ + DECLARE_ETD(_tc, _e, 5); \ ed->data[0] = _tc->snd_una - _tc->iss; \ ed->data[1] = _tc->cwnd; \ ed->data[2] = _tc->snd_wnd; \ ed->data[3] = tcp_flight_size(_tc); \ + ed->data[4] = _tc->rcv_wnd; \ } #define TCP_EVT_PKTIZE_HANDLER(_tc, ...) \ @@ -302,7 +318,7 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "in: %s len %u written %d rcv_nxt %u free wnd %d", \ + .format = "in: %s len %u written %d rcv_nxt %u rcv_wnd(o) %d", \ .format_args = "t4i4i4i4i4", \ .n_enum_strings = 2, \ .enum_strings = { \ @@ -338,7 +354,7 @@ typedef enum _tcp_dbg_evt .enum_strings = { \ "retransmit", \ "delack", \ - "BUG", \ + "persist", \ "keep", \ "waitclose", \ "retransmit syn", \ @@ -354,7 +370,7 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "seg-inv: seq %u end %u rcv_las %u rcv_nxt %u wnd %u", \ + .format = "seg-inv: seq %u end %u rcv_las %u rcv_nxt %u rcv_wnd %u",\ .format_args = "i4i4i4i4i4", \ }; \ DECLARE_ETD(_tc, _e, 5); \ @@ -445,6 +461,24 @@ typedef enum _tcp_dbg_evt #define TCP_EVT_CC_PACK_HANDLER(_tc, ...) #endif +#define TCP_EVT_RCV_WND_SHRUNK_HANDLER(_tc, _obs, _av, ...) \ +{ \ +if (_av > 0) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "huh?: rcv_wnd %u obsd %u av %u rcv_nxt %u rcv_las %u", \ + .format_args = "i4i4i4i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 5); \ + ed->data[0] = _tc->rcv_wnd; \ + ed->data[1] = _obs; \ + ed->data[2] = _av; \ + ed->data[3] = _tc->rcv_nxt - _tc->irs; \ + ed->data[4] = _tc->rcv_las - _tc->irs; \ +} \ +} + #if TCP_DBG_VERBOSE #define TCP_EVT_SND_WND_HANDLER(_tc, ...) \ { \ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 5d11985f..a8224dc2 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -276,8 +276,7 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, if (tc0->rcv_wnd == 0 && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number) { - /* Make it look as if there's nothing to dequeue */ - vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number; + /* TODO Should segment be tagged? 
*/ } else { @@ -375,7 +374,6 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) if (tc->rtt_seq && seq_gt (ack, tc->rtt_seq) && !tc->rto_boff) { mrtt = tcp_time_now () - tc->rtt_ts; - tc->rtt_seq = 0; } /* As per RFC7323 TSecr can be used for RTTM only if the segment advances @@ -395,6 +393,10 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX); + /* Allow measuring of RTT and make sure boff is 0 */ + tc->rtt_seq = 0; + tc->rto_boff = 0; + return 1; } @@ -408,11 +410,7 @@ tcp_dequeue_acked (tcp_connection_t * tc, u32 ack) stream_session_dequeue_drop (&tc->connection, tc->bytes_acked); /* Update rtt and rto */ - if (tcp_update_rtt (tc, ack)) - { - /* Good ACK received and valid RTT, make sure retransmit backoff is 0 */ - tc->rto_boff = 0; - } + tcp_update_rtt (tc, ack); } /** @@ -672,6 +670,13 @@ tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd) tc->snd_wl1 = seq; tc->snd_wl2 = ack; TCP_EVT_DBG (TCP_EVT_SND_WND, tc); + + /* Set probe timer if we just got 0 wnd */ + if (tc->snd_wnd < tc->snd_mss + && !tcp_timer_is_active (tc, TCP_TIMER_PERSIST)) + tcp_persist_timer_set (tc); + else + tcp_persist_timer_reset (tc); } } @@ -686,6 +691,10 @@ tcp_cc_congestion (tcp_connection_t * tc) void tcp_cc_recover (tcp_connection_t * tc) { + /* TODO: check if time to recover was small. It might be that RTO popped + * too soon. + */ + tc->cc_algo->recovered (tc); tc->rtx_bytes = 0; @@ -695,8 +704,7 @@ tcp_cc_recover (tcp_connection_t * tc) tc->cc_algo->rcv_ack (tc); tc->tsecr_last_ack = tc->opt.tsecr; - tcp_fastrecovery_1_smss_off (tc); - tcp_fastrecovery_off (tc); + tcp_cong_recovery_off (tc); TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3); } @@ -706,7 +714,7 @@ tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b) { u8 partial_ack; - if (tcp_in_recovery (tc)) + if (tcp_in_cong_recovery (tc)) { partial_ack = seq_lt (tc->snd_una, tc->snd_congestion); if (!partial_ack) @@ -724,10 +732,10 @@ tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b) /* In case snd_nxt is still in the past and output tries to * shove some new bytes */ - tc->snd_nxt = tc->snd_una; + tc->snd_nxt = tc->snd_una_max; /* XXX need proper RFC6675 support */ - if (tc->sack_sb.last_sacked_bytes) + if (tc->sack_sb.last_sacked_bytes && !tcp_in_recovery (tc)) { tcp_fast_retransmit (tc); } @@ -735,9 +743,6 @@ tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b) { /* Retransmit first unacked segment */ tcp_retransmit_first_unacked (tc); - /* If window allows, send 1 SMSS of new data */ - if (seq_lt (tc->snd_nxt, tc->snd_congestion)) - tc->snd_nxt = tc->snd_congestion; } } } @@ -814,10 +819,11 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, return -1; } - tc->snd_nxt = vnet_buffer (b)->tcp.ack_number; - *error = TCP_ERROR_ACK_FUTURE; TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2, vnet_buffer (b)->tcp.ack_number); + + tc->snd_nxt = vnet_buffer (b)->tcp.ack_number; + *error = TCP_ERROR_ACK_FUTURE; } /* If old ACK, probably it's an old dupack */ @@ -863,7 +869,7 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, * timer. */ if (tc->bytes_acked) { - TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc, vnet_buffer (b)->tcp.ack_number); + TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc); /* Updates congestion control (slow start/congestion avoidance) */ tcp_cc_rcv_ack (tc, b); @@ -966,11 +972,14 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, tc->rcv_nxt += written; /* Depending on how fast the app is, all remaining buffers in burst will - * not be enqueued. 
Should we inform peer of the damage? XXX */ + * not be enqueued. Inform peer */ + tc->flags |= TCP_CONN_SNDACK; + return TCP_ERROR_PARTIALLY_ENQUEUED; } else { + tc->flags |= TCP_CONN_SNDACK; return TCP_ERROR_FIFO_FULL; } @@ -1101,25 +1110,17 @@ tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, goto done; } - if (PREDICT_FALSE (error == TCP_ERROR_FIFO_FULL)) - *next0 = TCP_NEXT_DROP; - /* Check if ACK can be delayed */ - if (!tcp_can_delack (tc)) - { - /* Nothing to do for pure ACKs XXX */ - if (n_data_bytes == 0) - goto done; - - *next0 = tcp_next_output (tc->c_is_ip4); - tcp_make_ack (tc, b); - } - else + if (tcp_can_delack (tc)) { if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK)) tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME); + goto done; } + *next0 = tcp_next_output (tc->c_is_ip4); + tcp_make_ack (tc, b); + done: return error; } @@ -2084,6 +2085,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, child0->irs = vnet_buffer (b0)->tcp.seq_number; child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1; + child0->rcv_las = child0->rcv_nxt; child0->state = TCP_STATE_SYN_RCVD; /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK} diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index a671f728..ea157bd7 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -155,8 +155,7 @@ tcp_update_rcv_wnd (tcp_connection_t * tc) max_fifo = stream_session_fifo_size (&tc->connection); ASSERT (tc->opt.mss < max_fifo); - - if (available_space < tc->opt.mss && available_space < max_fifo / 8) + if (available_space < tc->opt.mss && available_space < max_fifo >> 3) available_space = 0; /* @@ -170,16 +169,21 @@ tcp_update_rcv_wnd (tcp_connection_t * tc) /* Bad. Thou shalt not shrink */ if (available_space < observed_wnd) { - /* Does happen! */ wnd = observed_wnd; + TCP_EVT_DBG (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space); } else { wnd = available_space; } - if (wnd && ((wnd << tc->rcv_wscale) >> tc->rcv_wscale != wnd)) - wnd += 1 << tc->rcv_wscale; + /* Make sure we have a multiple of rcv_wscale */ + if (wnd && tc->rcv_wscale) + { + wnd &= ~(1 << tc->rcv_wscale); + if (wnd == 0) + wnd = 1 << tc->rcv_wscale; + } tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale); } @@ -462,8 +466,9 @@ tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b) tcp_reuse_buffer (vm, b); tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK); - vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK; TCP_EVT_DBG (TCP_EVT_ACK_SENT, tc); + vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK; + tc->rcv_las = tc->rcv_nxt; } /** @@ -908,6 +913,7 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, vnet_buffer (b)->tcp.connection_index = tc->c_c_index; tc->snd_nxt += data_len; + /* TODO this is updated in output as well ... */ if (tc->snd_nxt > tc->snd_una_max) tc->snd_una_max = tc->snd_nxt; @@ -929,7 +935,6 @@ tcp_send_ack (tcp_connection_t * tc) /* Fill in the ACK */ tcp_make_ack (tc, b); - tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); } @@ -942,7 +947,6 @@ tcp_timer_delack_handler (u32 index) tc = tcp_connection_get (index, thread_index); tc->timers[TCP_TIMER_DELACK] = TCP_TIMER_HANDLE_INVALID; -// tc->flags &= ~TCP_CONN_DELACK; tcp_send_ack (tc); } @@ -995,7 +999,7 @@ done: * Reset congestion control, switch cwnd to loss window and try again. 
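A side note on the tcp_update_rcv_wnd hunk above: the new comment says the advertised window should be kept a multiple of the window-scale granularity, but the mask wnd &= ~(1 << tc->rcv_wscale) clears only that single bit. If rounding down to a multiple of 1 << rcv_wscale is indeed the intent, the conventional form would look like the sketch below; it mirrors the hunk's guard conditions but is an illustration, not code from the patch (u32/u8 as in clib):

  static inline u32
  wnd_round_to_wscale (u32 wnd, u8 rcv_wscale)
  {
    u32 granule = 1 << rcv_wscale;
    if (wnd == 0 || rcv_wscale == 0)
      return wnd;
    wnd &= ~(granule - 1);       /* clear all low-order bits */
    return wnd ? wnd : granule;  /* never shrink a non-zero window to 0 */
  }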
*/ static void -tcp_rtx_timeout_cc_recover (tcp_connection_t * tc) +tcp_rtx_timeout_cc (tcp_connection_t * tc) { /* Cleanly recover cc (also clears up fast retransmit) */ if (tcp_in_fastrecovery (tc)) @@ -1008,6 +1012,7 @@ tcp_rtx_timeout_cc_recover (tcp_connection_t * tc) } /* Start again from the beginning */ + tcp_recovery_on (tc); tc->cwnd = tcp_loss_wnd (tc); tc->snd_congestion = tc->snd_una_max; } @@ -1048,7 +1053,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) { /* First retransmit timeout */ if (tc->rto_boff == 1) - tcp_rtx_timeout_cc_recover (tc); + tcp_rtx_timeout_cc (tc); /* Exponential backoff */ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); @@ -1114,6 +1119,8 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) { ASSERT (tc->state == TCP_STATE_SYN_SENT); + TCP_EVT_DBG (TCP_EVT_SYN_RTX, tc); + /* This goes straight to ipx_lookup */ tcp_push_ip_hdr (tm, tc, b); tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4); @@ -1136,6 +1143,55 @@ tcp_timer_retransmit_syn_handler (u32 index) tcp_timer_retransmit_handler_i (index, 1); } +/** + * Got 0 snd_wnd from peer, try to do something about it. + * + */ +void +tcp_timer_persist_handler (u32 index) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = vlib_get_main (); + u32 thread_index = os_get_cpu_number (); + tcp_connection_t *tc; + vlib_buffer_t *b; + u32 bi, n_bytes; + + tc = tcp_connection_get (index, thread_index); + + /* Make sure timer handle is set to invalid */ + tc->timers[TCP_TIMER_PERSIST] = TCP_TIMER_HANDLE_INVALID; + + /* Problem already solved or worse */ + if (tc->snd_wnd > tc->snd_mss || tcp_in_recovery (tc)) + return; + + /* Increment RTO backoff */ + tc->rto_boff += 1; + tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); + + /* Try to force the first unsent segment */ + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (vm, bi); + n_bytes = stream_session_peek_bytes (&tc->connection, + vlib_buffer_get_current (b), + tc->snd_una_max - tc->snd_una, + tc->snd_mss); + /* Nothing to send */ + if (n_bytes == 0) + { + tcp_return_buffer (tm); + return; + } + + b->current_length = n_bytes; + tcp_push_hdr_i (tc, b, tc->state); + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + + /* Re-enable persist timer */ + tcp_persist_timer_set (tc); +} + /** * Retransmit first unacked segment */ @@ -1329,9 +1385,6 @@ tcp46_output_inline (vlib_main_t * vm, } } - /* Retransmitted SYNs do reach this but it should be harmless */ - tc0->rcv_las = tc0->rcv_nxt; - /* Stop DELACK timer and fix flags */ tc0->flags &= ~(TCP_CONN_SNDACK); if (tcp_timer_is_active (tc0, TCP_TIMER_DELACK)) -- cgit 1.2.3-korg From 6cf30adc2cd3aa818e5d97cf71ea8b2fc2aaefa7 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Tue, 4 Apr 2017 23:08:23 -0700 Subject: Session layer refactoring Major refactoring of the session layer api - Add attatch api for application binding to the the session layer - Simplify listen/connect calls - Update application CLI - Add transport endpoint to accept callback - Associate segment manager to application and allow for multiple binds/connects per app Additional: - svm fifo cleanup - add fifo free, format fns - add fifo offset enqueue unit test Change-Id: Id93a65047de61afc2bf3d58c9b544339c02065af Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/scripts/vnet/uri/udp | 3 +- src/svm/svm_fifo.c | 66 ++- src/svm/svm_fifo.h | 32 +- src/svm/svm_fifo_segment.h | 14 +- src/uri/uri_tcp_test.c | 315 +++++++++----- src/uri/uri_udp_test.c | 326 +++++++++------ src/vnet.am | 2 + src/vnet/api_errno.h | 4 +- 
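The refactoring summarized in the commit message above is easiest to follow through the two test clients changed later in this patch. As a reading aid, the message-level flow they use after the change is roughly the following (a condensed sketch, not an exhaustive list of the session api):

  /*
   * APPLICATION_ATTACH      options[] carry fifo/segment sizing and flags;
   *                         the reply names the first segment and the app
   *                         event queue
   * BIND_URI / CONNECT_URI  no per-bind segment options any more
   * ACCEPT_SESSION /        sessions are identified by an opaque u64 handle
   * CONNECT_URI_REPLY       instead of (thread index, session index) pairs
   * DISCONNECT / UNBIND
   * APPLICATION_DETACH
   */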
src/vnet/session/application.c | 458 +++++++++++++++------ src/vnet/session/application.h | 77 ++-- src/vnet/session/application_interface.c | 278 ++++++------- src/vnet/session/application_interface.h | 45 +- src/vnet/session/segment_manager.c | 342 ++++++++++++++++ src/vnet/session/segment_manager.h | 106 +++++ src/vnet/session/session.api | 237 +++++------ src/vnet/session/session.c | 564 +++++++------------------ src/vnet/session/session.h | 175 +++++--- src/vnet/session/session_api.c | 678 ++++++++++++------------------- src/vnet/session/transport.h | 23 +- src/vnet/tcp/builtin_client.c | 161 ++++++-- src/vnet/tcp/builtin_client.h | 7 +- src/vnet/tcp/builtin_server.c | 206 +++++++++- src/vnet/tcp/tcp.c | 20 +- src/vnet/tcp/tcp.h | 9 +- src/vnet/tcp/tcp_input.c | 7 +- src/vnet/tcp/tcp_test.c | 127 +++++- src/vnet/udp/builtin_server.c | 34 +- 27 files changed, 2601 insertions(+), 1715 deletions(-) create mode 100644 src/vnet/session/segment_manager.c create mode 100644 src/vnet/session/segment_manager.h (limited to 'src/svm') diff --git a/src/scripts/vnet/uri/udp b/src/scripts/vnet/uri/udp index ca13b83c..c7628f49 100644 --- a/src/scripts/vnet/uri/udp +++ b/src/scripts/vnet/uri/udp @@ -1,5 +1,5 @@ loop create -set int ip address loop0 10.0.0.1/32 +set int ip address loop0 6.0.0.1/32 set int state loop0 up packet-generator new { @@ -17,3 +17,4 @@ packet-generator new { incrementing 100 } } +session enable diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index cc84feb9..097bab77 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -20,8 +20,6 @@ svm_fifo_t * svm_fifo_create (u32 data_size_in_bytes) { svm_fifo_t *f; - pthread_mutexattr_t attr; - pthread_condattr_t cattr; f = clib_mem_alloc_aligned_or_null (sizeof (*f) + data_size_in_bytes, CLIB_CACHE_LINE_BYTES); @@ -32,29 +30,16 @@ svm_fifo_create (u32 data_size_in_bytes) f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; - memset (&attr, 0, sizeof (attr)); - memset (&cattr, 0, sizeof (cattr)); - - if (pthread_mutexattr_init (&attr)) - clib_unix_warning ("mutexattr_init"); - if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED)) - clib_unix_warning ("pthread_mutexattr_setpshared"); - if (pthread_mutex_init (&f->mutex, &attr)) - clib_unix_warning ("mutex_init"); - if (pthread_mutexattr_destroy (&attr)) - clib_unix_warning ("mutexattr_destroy"); - if (pthread_condattr_init (&cattr)) - clib_unix_warning ("condattr_init"); - if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED)) - clib_unix_warning ("condattr_setpshared"); - if (pthread_cond_init (&f->condvar, &cattr)) - clib_unix_warning ("cond_init1"); - if (pthread_condattr_destroy (&cattr)) - clib_unix_warning ("cond_init2"); - return (f); } +void +svm_fifo_free (svm_fifo_t * f) +{ + pool_free (f->ooo_segments); + clib_mem_free (f); +} + always_inline ooo_segment_t * ooo_segment_new (svm_fifo_t * f, u32 start, u32 length) { @@ -567,6 +552,43 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes) return total_drop_bytes; } +u8 * +format_svm_fifo (u8 * s, va_list * args) +{ + svm_fifo_t *f = va_arg (*args, svm_fifo_t *); + int verbose = va_arg (*args, int); + + s = format (s, "cursize %u nitems %u has_event %d\n", + f->cursize, f->nitems, f->has_event); + s = format (s, "head %d tail %d\n", f->head, f->tail); + + if (verbose > 1) + s = format + (s, "server session %d thread %d client session %d thread %d\n", + f->server_session_index, f->server_thread_index, + f->client_session_index, f->client_thread_index); + + if 
(verbose) + { + ooo_segment_t *seg; + u32 seg_index; + + s = + format (s, "ooo pool %d active elts\n", pool_elts (f->ooo_segments)); + + seg_index = f->ooos_list_head; + + while (seg_index != OOO_SEGMENT_INVALID_INDEX) + { + seg = pool_elt_at_index (f->ooo_segments, seg_index); + s = format (s, " pos %u, len %u next %d\n", + seg->fifo_position, seg->length, seg->next); + seg_index = seg->next; + } + } + return s; +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 80e5b0f2..9beb63f5 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -48,10 +48,6 @@ typedef struct u32 nitems; CLIB_CACHE_LINE_ALIGN_MARK (end_cursize); - pthread_mutex_t mutex; /* 8 bytes */ - pthread_cond_t condvar; /* 8 bytes */ - svm_lock_tag_t tag; - volatile u8 has_event; /**< non-zero if deq event exists */ u32 owner_pid; @@ -60,6 +56,7 @@ typedef struct u32 client_session_index; u8 server_thread_index; u8 client_thread_index; + u32 segment_manager; CLIB_CACHE_LINE_ALIGN_MARK (end_shared); u32 head; CLIB_CACHE_LINE_ALIGN_MARK (end_consumer); @@ -74,30 +71,6 @@ typedef struct CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; -static inline int -svm_fifo_lock (svm_fifo_t * f, u32 pid, u32 tag, int nowait) -{ - if (PREDICT_TRUE (nowait == 0)) - pthread_mutex_lock (&f->mutex); - else - { - if (pthread_mutex_trylock (&f->mutex)) - return -1; - } - f->owner_pid = pid; - f->tag = tag; - return 0; -} - -static inline void -svm_fifo_unlock (svm_fifo_t * f) -{ - f->owner_pid = 0; - f->tag = 0; - CLIB_MEMORY_BARRIER (); - pthread_mutex_unlock (&f->mutex); -} - static inline u32 svm_fifo_max_dequeue (svm_fifo_t * f) { @@ -139,6 +112,7 @@ svm_fifo_unset_event (svm_fifo_t * f) } svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes); +void svm_fifo_free (svm_fifo_t * f); int svm_fifo_enqueue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, u8 * copy_from_here); @@ -154,6 +128,8 @@ int svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, u8 * copy_here); int svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes); +format_function_t format_svm_fifo; + always_inline ooo_segment_t * svm_fifo_newest_ooo_segment (svm_fifo_t * f) { diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index ecb5653a..9ab47a4c 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -55,6 +55,18 @@ svm_fifo_get_segment (u32 segment_index) return vec_elt_at_index (ssm->segments, segment_index); } +static inline u8 +svm_fifo_segment_has_fifos (svm_fifo_segment_private_t * fifo_segment) +{ + return vec_len ((svm_fifo_t **) fifo_segment->h->fifos) != 0; +} + +static inline svm_fifo_t ** +svm_fifo_segment_get_fifos (svm_fifo_segment_private_t * fifo_segment) +{ + return (svm_fifo_t **) fifo_segment->h->fifos; +} + #define foreach_ssvm_fifo_segment_api_error \ _(OUT_OF_SPACE, "Out of space in segment", -200) @@ -73,9 +85,7 @@ svm_fifo_t *svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, u32 data_size_in_bytes); void svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f); - void svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds); - u32 svm_fifo_segment_index (svm_fifo_segment_private_t * s); #endif /* __included_ssvm_fifo_segment_h__ */ diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index e2834817..c057e06e 100644 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -15,8 +15,6 @@ #include #include -#include -#include #include #include #include @@ -47,8 +45,7 @@ typedef struct 
svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; - u32 vpp_session_index; - u32 vpp_session_thread; + u32 vpp_session_handle; } session_t; typedef enum @@ -116,7 +113,7 @@ typedef struct pthread_t client_rx_thread_handle; u32 client_bytes_received; u8 test_return_packets; - u32 bytes_to_send; + u64 bytes_to_send; /* convenience */ svm_fifo_segment_main_t *segment_main; @@ -152,6 +149,88 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) return -1; } +void +application_attach (uri_tcp_test_main_t * utm) +{ + vl_api_application_attach_t *bmp; + u32 fifo_size = 3 << 20; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; + bmp->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 20; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +application_detach (uri_tcp_test_main_t * utm) +{ + vl_api_application_detach_t *bmp; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_DETACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +static void +vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * + mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + if (mp->retval) + { + uword *errp = hash_get (utm->error_string_by_error_number, mp->retval); + clib_warning ("attach failed: %s", *errp); + utm->state = STATE_FAILED; + return; + } + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->app_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + utm->our_event_queue = + (unix_shared_memory_queue_t *) mp->app_event_queue_address; + +} + +static void +vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t * + mp) +{ + if (mp->retval) + clib_warning ("detach returned with err: %d", mp->retval); +} + static void init_error_string_table (uri_tcp_test_main_t * utm) { @@ -239,21 +318,18 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) vl_api_disconnect_session_reply_t *rmp; uword *p; int rv = 0; - u64 key; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - p = hash_get (utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, mp->handle); if (p) { session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset (utm->session_index_by_vpp_handles, key); + hash_unset (utm->session_index_by_vpp_handles, mp->handle); pool_put (utm->sessions, session); } else { - clib_warning ("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", mp->handle); rv = -11; } @@ -264,8 +340,7 @@ vl_api_disconnect_session_t_handler 
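The application_attach helper above is where all fifo and segment sizing now lives; the bind call further down no longer carries any of these options. A minimal sketch of filling the attach options, using the keys visible in this patch and the same example sizes the test client picks (the array length here is arbitrary, chosen only for the sketch):

  u64 options[16] = { 0 };

  options[SESSION_OPTIONS_FLAGS] =
    SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT;
  options[SESSION_OPTIONS_RX_FIFO_SIZE] = 3 << 20;        /* 3 MB rx fifo */
  options[SESSION_OPTIONS_TX_FIFO_SIZE] = 3 << 20;        /* 3 MB tx fifo */
  options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20;  /* later segments */
  options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 20;      /* first segment */

On the vpp side, application_init later in the patch copies the fifo and add-segment sizes into the application's segment_manager_properties_t, so every segment manager the app ever gets is configured the same way.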
(vl_api_disconnect_session_t * mp) rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -277,22 +352,19 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) vl_api_reset_session_reply_t *rmp; uword *p; int rv = 0; - u64 key; - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - p = hash_get (utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, mp->handle); if (p) { session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset (utm->session_index_by_vpp_handles, key); + hash_unset (utm->session_index_by_vpp_handles, mp->handle); pool_put (utm->sessions, session); utm->time_to_stop = 1; } else { - clib_warning ("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", mp->handle); rv = -11; } @@ -300,8 +372,7 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_RESET_SESSION_REPLY); rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -343,7 +414,7 @@ client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { if (n_read == -2) { - clib_warning ("weird!"); +// clib_warning ("weird!"); break; } } @@ -409,52 +480,19 @@ static void vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; session_t *session; u32 session_index; svm_fifo_t *rx_fifo, *tx_fifo; int rv; - u64 key; if (mp->retval) { - clib_warning ("connection failed with code: %d", mp->retval); - utm->state = STATE_FAILED; - return; - } - - /* - * Attatch to segment - */ - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); + uword *errp = hash_get (utm->error_string_by_error_number, -mp->retval); + clib_warning ("connection failed with code: %s", *errp); utm->state = STATE_FAILED; return; } - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT (mp->client_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - - /* - * Save the queues - */ - - utm->our_event_queue = (unix_shared_memory_queue_t *) - mp->client_event_queue_address; - utm->vpp_event_queue = (unix_shared_memory_queue_t *) mp->vpp_event_queue_address; @@ -472,16 +510,14 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session->server_rx_fifo = rx_fifo; session->server_tx_fifo = tx_fifo; - session->vpp_session_index = mp->session_index; - session->vpp_session_thread = mp->session_thread_index; + session->vpp_session_handle = mp->handle; /* Save handle */ utm->connected_session_index = session_index; utm->state = STATE_READY; /* Add it to lookup table */ - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - hash_set (utm->session_index_by_vpp_handles, key, session_index); + hash_set (utm->session_index_by_vpp_handles, mp->handle, session_index); /* Start RX thread */ rv = pthread_create (&utm->client_rx_thread_handle, @@ -606,8 +642,7 @@ client_disconnect 
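A pattern worth calling out across the handlers above: the test apps no longer assemble the lookup key themselves, because the api now reports sessions by an opaque 64-bit handle. Restating the change (the first two lines are the deleted idiom, the last is what the patch uses instead):

  /* before: app packs thread index and session index into a key */
  u64 key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index;
  hash_set (utm->session_index_by_vpp_handles, key, session_index);

  /* after: vpp hands back one opaque handle */
  hash_set (utm->session_index_by_vpp_handles, mp->handle, session_index);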
(uri_tcp_test_main_t * utm) memset (dmp, 0, sizeof (*dmp)); dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); dmp->client_index = utm->my_client_index; - dmp->session_index = connected_session->vpp_session_index; - dmp->session_thread_index = connected_session->vpp_session_thread; + dmp->handle = connected_session->vpp_session_handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & dmp); } @@ -616,6 +651,7 @@ client_test (uri_tcp_test_main_t * utm) { int i; + application_attach (utm); client_connect (utm); if (wait_for_state_change (utm, STATE_READY)) @@ -636,47 +672,26 @@ client_test (uri_tcp_test_main_t * utm) if (wait_for_state_change (utm, STATE_START)) { + clib_warning ("Disconnect failed"); return; } + application_detach (utm); } static void vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; if (mp->retval) { - clib_warning ("bind failed: %d", mp->retval); + uword *errp = hash_get (utm->error_string_by_error_number, + -clib_net_to_host_u32 (mp->retval)); + clib_warning ("bind failed: %s", (char *) *errp); utm->state = STATE_FAILED; return; } - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT (mp->server_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - - utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->server_event_queue_address; - utm->state = STATE_READY; } @@ -691,6 +706,89 @@ vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) utm->state = STATE_START; } +u8 * +format_ip4_address (u8 * s, va_list * args) +{ + u8 *a = va_arg (*args, u8 *); + return format (s, "%d.%d.%d.%d", a[0], a[1], a[2], a[3]); +} + +u8 * +format_ip6_address (u8 * s, va_list * args) +{ + ip6_address_t *a = va_arg (*args, ip6_address_t *); + u32 i, i_max_n_zero, max_n_zeros, i_first_zero, n_zeros, last_double_colon; + + i_max_n_zero = ARRAY_LEN (a->as_u16); + max_n_zeros = 0; + i_first_zero = i_max_n_zero; + n_zeros = 0; + for (i = 0; i < ARRAY_LEN (a->as_u16); i++) + { + u32 is_zero = a->as_u16[i] == 0; + if (is_zero && i_first_zero >= ARRAY_LEN (a->as_u16)) + { + i_first_zero = i; + n_zeros = 0; + } + n_zeros += is_zero; + if ((!is_zero && n_zeros > max_n_zeros) + || (i + 1 >= ARRAY_LEN (a->as_u16) && n_zeros > max_n_zeros)) + { + i_max_n_zero = i_first_zero; + max_n_zeros = n_zeros; + i_first_zero = ARRAY_LEN (a->as_u16); + n_zeros = 0; + } + } + + last_double_colon = 0; + for (i = 0; i < ARRAY_LEN (a->as_u16); i++) + { + if (i == i_max_n_zero && max_n_zeros > 1) + { + s = format (s, "::"); + i += max_n_zeros - 1; + last_double_colon = 1; + } + else + { + s = format (s, "%s%x", + (last_double_colon || i == 0) ? "" : ":", + clib_net_to_host_u16 (a->as_u16[i])); + last_double_colon = 0; + } + } + + return s; +} + +/* Format an IP46 address. */ +u8 * +format_ip46_address (u8 * s, va_list * args) +{ + ip46_address_t *ip46 = va_arg (*args, ip46_address_t *); + ip46_type_t type = va_arg (*args, ip46_type_t); + int is_ip4 = 1; + + switch (type) + { + case IP46_TYPE_ANY: + is_ip4 = ip46_address_is_ip4 (ip46); + break; + case IP46_TYPE_IP4: + is_ip4 = 1; + break; + case IP46_TYPE_IP6: + is_ip4 = 0; + break; + } + + return is_ip4 ? 
+ format (s, "%U", format_ip4_address, &ip46->ip4) : + format (s, "%U", format_ip6_address, &ip46->ip6); +} + static void vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) { @@ -699,12 +797,15 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) svm_fifo_t *rx_fifo, *tx_fifo; session_t *session; static f64 start_time; - u64 key; u32 session_index; + u8 *ip_str; if (start_time == 0.0) start_time = clib_time_now (&utm->clib_time); + ip_str = format (0, "%U", format_ip46_address, &mp->ip, mp->is_ip4); + clib_warning ("Accepted session from: %s:%d", ip_str, + clib_net_to_host_u16 (mp->port)); utm->vpp_event_queue = (unix_shared_memory_queue_t *) mp->vpp_event_queue_address; @@ -721,8 +822,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) session->server_tx_fifo = tx_fifo; /* Add it to lookup table */ - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - hash_set (utm->session_index_by_vpp_handles, key, session_index); + hash_set (utm->session_index_by_vpp_handles, mp->handle, session_index); utm->state = STATE_READY; @@ -741,9 +841,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); - rmp->session_type = mp->session_type; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -837,22 +935,15 @@ server_handle_event_queue (uri_tcp_test_main_t * utm) } void -server_bind (uri_tcp_test_main_t * utm) +server_listen (uri_tcp_test_main_t * utm) { vl_api_bind_uri_t *bmp; - u32 fifo_size = 3 << 20; bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->initial_segment_size = 256 << 20; /* size of initial segment */ - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); } @@ -874,8 +965,10 @@ server_unbind (uri_tcp_test_main_t * utm) void server_test (uri_tcp_test_main_t * utm) { + application_attach (utm); + /* Bind to uri */ - server_bind (utm); + server_listen (utm); if (wait_for_state_change (utm, STATE_READY)) { @@ -895,6 +988,8 @@ server_test (uri_tcp_test_main_t * utm) return; } + application_detach (utm); + fformat (stdout, "Test complete...\n"); } @@ -916,7 +1011,9 @@ _(CONNECT_URI_REPLY, connect_uri_reply) \ _(DISCONNECT_SESSION, disconnect_session) \ _(DISCONNECT_SESSION_REPLY, disconnect_session_reply) \ _(RESET_SESSION, reset_session) \ -_(MAP_ANOTHER_SEGMENT, map_another_segment) +_(APPLICATION_ATTACH_REPLY, application_attach_reply) \ +_(APPLICATION_DETACH_REPLY, application_detach_reply) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) \ void uri_api_hookup (uri_tcp_test_main_t * utm) @@ -941,7 +1038,7 @@ main (int argc, char **argv) u8 *heap, *uri = 0; u8 *bind_uri = (u8 *) "tcp://0.0.0.0/1234"; u8 *connect_uri = (u8 *) "tcp://6.0.1.2/1234"; - u32 bytes_to_send = 64 << 10, mbytes; + u64 bytes_to_send = 64 << 10, mbytes; u32 tmp; mheap_t *h; session_t *session; @@ -988,10 
+1085,14 @@ main (int argc, char **argv) drop_packets = 1; else if (unformat (a, "test")) test_return_packets = 1; - else if (unformat (a, "mbytes %d", &mbytes)) + else if (unformat (a, "mbytes %lld", &mbytes)) { bytes_to_send = mbytes << 20; } + else if (unformat (a, "gbytes %lld", &mbytes)) + { + bytes_to_send = mbytes << 30; + } else { fformat (stderr, "%s: usage [master|slave]\n"); diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index e6c239c1..598052bc 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -55,6 +55,7 @@ typedef enum { STATE_START, STATE_READY, + STATE_FAILED, STATE_DISCONNECTING, } connection_state_t; @@ -162,6 +163,86 @@ setup_signal_handlers (void) return 0; } +void +application_attach (uri_udp_test_main_t * utm) +{ + vl_api_application_attach_t *bmp; + u32 fifo_size = 3 << 20; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; + bmp->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 20; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +application_detach (uri_udp_test_main_t * utm) +{ + vl_api_application_detach_t *bmp; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_DETACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +static void +vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * + mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + if (mp->retval) + { + clib_warning ("attach failed: %d", mp->retval); + utm->state = STATE_FAILED; + return; + } + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->app_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + utm->our_event_queue = + (unix_shared_memory_queue_t *) mp->app_event_queue_address; +} + +static void +vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t * + mp) +{ + if (mp->retval) + clib_warning ("detach returned with err: %d", mp->retval); +} + u8 * format_api_error (u8 * s, va_list * args) { @@ -255,9 +336,22 @@ cut_through_thread_fn (void *arg) } static void -uri_udp_slave_test (uri_udp_test_main_t * utm) +udp_client_connect (uri_udp_test_main_t * utm) { vl_api_connect_uri_t *cmp; + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); +} + +static void +client_send (uri_udp_test_main_t * utm, session_t * session) +{ int i; u8 *test_data = 0; u64 
bytes_received = 0, bytes_sent = 0; @@ -265,30 +359,16 @@ uri_udp_slave_test (uri_udp_test_main_t * utm) int rv; int mypid = getpid (); f64 before, after, delta, bytes_per_second; - session_t *session; svm_fifo_t *rx_fifo, *tx_fifo; int buffer_offset, bytes_to_send = 0; + /* + * Prepare test data + */ vec_validate (test_data, 64 * 1024 - 1); for (i = 0; i < vec_len (test_data); i++) test_data[i] = i & 0xff; - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl (0xfeedface); - memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); rx_fifo = session->server_rx_fifo; tx_fifo = session->server_tx_fifo; @@ -375,35 +455,38 @@ uri_udp_slave_test (uri_udp_test_main_t * utm) } static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +uri_udp_client_test (uri_udp_test_main_t * utm) { - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; + session_t *session; - if (mp->segment_name_length == 0) + application_attach (utm); + udp_client_connect (utm); + + if (wait_for_state_change (utm, STATE_READY)) { - clib_warning ("segment_name_length zero"); + clib_warning ("timeout waiting for STATE_READY"); return; } - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; + /* Only works with cut through sessions */ + session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); - ASSERT (mp->server_event_queue_address); + client_send (utm, session); + application_detach (utm); +} - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) +static void +vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + + if (mp->retval) { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); + clib_warning ("bind failed: %d", mp->retval); + utm->state = STATE_FAILED; return; } - utm->our_event_queue = (unix_shared_memory_queue_t *) - mp->server_event_queue_address; - utm->state = STATE_READY; } @@ -427,6 +510,9 @@ vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) mp->segment_size); } +/** + * Acting as server for redirected connect requests + */ static void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { @@ -456,7 +542,6 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) vec_add2 (utm->seg, seg, 1); segment_index = vec_len (sm->segments) - 1; - memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0])); pool_get (utm->sessions, session); @@ -521,7 +606,6 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) svm_fifo_t *rx_fifo, *tx_fifo; session_t *session; static f64 start_time; - u64 key; if (start_time == 0.0) start_time = clib_time_now (&utm->clib_time); @@ -539,9 +623,8 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) session->server_rx_fifo = rx_fifo; session->server_tx_fifo = tx_fifo; - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - hash_set (utm->session_index_by_vpp_handles, key, session - utm->sessions); + hash_set (utm->session_index_by_vpp_handles, mp->handle, + session - 
utm->sessions); utm->state = STATE_READY; @@ -556,9 +639,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); - rmp->session_type = mp->session_type; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -570,21 +651,18 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) vl_api_disconnect_session_reply_t *rmp; uword *p; int rv = 0; - u64 key; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - p = hash_get (utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, mp->handle); if (p) { session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset (utm->session_index_by_vpp_handles, key); + hash_unset (utm->session_index_by_vpp_handles, mp->handle); pool_put (utm->sessions, session); } else { - clib_warning ("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", mp->handle); rv = -11; } @@ -592,77 +670,76 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } static void vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) { - svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - ssvm_shared_header_t *sh; - svm_fifo_segment_private_t *seg; - svm_fifo_segment_header_t *fsh; - session_t *session; - u32 segment_index; - int rv; ASSERT (utm->i_am_master == 0); - if (mp->segment_name_length == 0) + /* We've been redirected */ + if (mp->segment_name_length > 0) { - clib_warning ("segment_name_length zero"); - return; - } - - memset (a, 0, sizeof (*a)); - - a->segment_name = (char *) mp->segment_name; - - sleep (1); - - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name); - return; - } - - segment_index = vec_len (sm->segments) - 1; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + u32 segment_index; + session_t *session; + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *seg; + svm_fifo_segment_header_t *fsh; + int rv; + + memset (a, 0, sizeof (*a)); + a->segment_name = (char *) mp->segment_name; + + sleep (1); + + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%v') failed", + mp->segment_name); + return; + } - vec_add2 (utm->seg, seg, 1); + segment_index = vec_len (sm->segments) - 1; + vec_add2 (utm->seg, seg, 1); - memcpy (seg, sm->segments + segment_index, sizeof (*seg)); - sh = seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + memcpy (seg, sm->segments + segment_index, sizeof (*seg)); + sh = seg->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - while (vec_len (fsh->fifos) < 2) - sleep (1); + while (vec_len (fsh->fifos) < 2) + sleep (1); - pool_get (utm->sessions, session); - utm->cut_through_session_index = session - utm->sessions; + pool_get (utm->sessions, session); + 
utm->cut_through_session_index = session - utm->sessions; - session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; - ASSERT (session->server_rx_fifo); - session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; - ASSERT (session->server_tx_fifo); + session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; + ASSERT (session->server_rx_fifo); + session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; + ASSERT (session->server_tx_fifo); + } /* security: could unlink /dev/shm/segment_name> here, maybe */ utm->state = STATE_READY; } -#define foreach_uri_msg \ -_(BIND_URI_REPLY, bind_uri_reply) \ -_(CONNECT_URI, connect_uri) \ -_(CONNECT_URI_REPLY, connect_uri_reply) \ -_(UNBIND_URI_REPLY, unbind_uri_reply) \ -_(ACCEPT_SESSION, accept_session) \ -_(DISCONNECT_SESSION, disconnect_session) \ -_(MAP_ANOTHER_SEGMENT, map_another_segment) +#define foreach_uri_msg \ +_(BIND_URI_REPLY, bind_uri_reply) \ +_(CONNECT_URI, connect_uri) \ +_(CONNECT_URI_REPLY, connect_uri_reply) \ +_(UNBIND_URI_REPLY, unbind_uri_reply) \ +_(ACCEPT_SESSION, accept_session) \ +_(DISCONNECT_SESSION, disconnect_session) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) \ +_(APPLICATION_ATTACH_REPLY, application_attach_reply) \ +_(APPLICATION_DETACH_REPLY, application_detach_reply) \ void uri_api_hookup (uri_udp_test_main_t * utm) @@ -679,7 +756,6 @@ uri_api_hookup (uri_udp_test_main_t * utm) } - int connect_to_vpp (char *name) { @@ -784,26 +860,43 @@ server_handle_event_queue (uri_udp_test_main_t * utm) } } -void -uri_udp_test (uri_udp_test_main_t * utm) +static void +server_unbind (uri_udp_test_main_t * utm) { - vl_api_bind_uri_t *bmp; vl_api_unbind_uri_t *ump; + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); +} + +static void +server_listen (uri_udp_test_main_t * utm) +{ + vl_api_bind_uri_t *bmp; + bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->initial_segment_size = 256 << 20; /* size of initial segment */ - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +udp_server_test (uri_udp_test_main_t * utm) +{ + + application_attach (utm); + + /* Bind to uri */ + server_listen (utm); if (wait_for_state_change (utm, STATE_READY)) { @@ -813,13 +906,8 @@ uri_udp_test (uri_udp_test_main_t * utm) server_handle_event_queue (utm); - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); - - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); + /* Cleanup */ + server_unbind (utm); if (wait_for_state_change (utm, STATE_START)) { @@ -827,6 +915,8 @@ uri_udp_test (uri_udp_test_main_t * utm) return; } + application_detach (utm); + fformat (stdout, "Test complete...\n"); } @@ -892,7 +982,7 @@ main (int argc, char **argv) utm->i_am_master = i_am_master; 
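The udp test keeps its cut-through path: when a local connect is redirected back to a listener in another process, the connect reply names that listener's fifo segment and the client attaches to it directly. Condensing what the reply handler above does in that case (control flow only):

  /*
   * svm_fifo_segment_attach (segment named in the reply)
   * wait until the listener has created two fifos in that segment
   * use fifos[0] as the rx fifo and fifos[1] as the tx fifo
   * mark STATE_READY and move data app-to-app through those shared fifos
   */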
utm->segment_main = &svm_fifo_segment_main; - utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0); + utm->connect_uri = format (0, "udp://6.0.0.1/1234%c", 0); setup_signal_handlers (); @@ -907,7 +997,7 @@ main (int argc, char **argv) if (i_am_master == 0) { - uri_udp_slave_test (utm); + uri_udp_client_test (utm); exit (0); } @@ -920,7 +1010,7 @@ main (int argc, char **argv) for (i = 0; i < 200000; i++) pool_put_index (utm->sessions, i); - uri_udp_test (utm); + udp_server_test (utm); vl_client_disconnect_from_vlib (); exit (0); diff --git a/src/vnet.am b/src/vnet.am index bed4902b..25b84616 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -827,6 +827,7 @@ libvnet_la_SOURCES += \ vnet/session/session_cli.c \ vnet/session/hashes.c \ vnet/session/application_interface.c \ + vnet/session/segment_manager.c \ vnet/session/session_api.c nobase_include_HEADERS += \ @@ -835,6 +836,7 @@ nobase_include_HEADERS += \ vnet/session/transport.h \ vnet/session/application_interface.h \ vnet/session/session_debug.h \ + vnet/session/segment_manager.h \ vnet/session/session.api.h API_FILES += vnet/session/session.api diff --git a/src/vnet/api_errno.h b/src/vnet/api_errno.h index f3ffd2a6..e939404b 100644 --- a/src/vnet/api_errno.h +++ b/src/vnet/api_errno.h @@ -105,7 +105,9 @@ _(INVALID_GPE_MODE, -112, "Invalid GPE mode") \ _(LISP_GPE_ENTRIES_PRESENT, -113, "LISP GPE entries are present") \ _(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface") \ _(SESSION_CONNECT_FAIL, -115, "Session failed to connect") \ -_(ENTRY_ALREADY_EXISTS, -116, "Entry already exists") +_(ENTRY_ALREADY_EXISTS, -116, "Entry already exists") \ +_(SVM_SEGMENT_CREATE_FAIL, -117, "svm segment create fail") \ +_(APPLICATION_NOT_ATTACHED, -118, "application not attached") typedef enum { diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 513e5fac..5a45537b 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -14,18 +14,24 @@ */ #include +#include #include -/* +/** * Pool from which we allocate all applications */ static application_t *app_pool; -/* +/** * Hash table of apps by api client index */ static uword *app_by_api_client_index; +/** + * Default application event queue size + */ +static u32 default_app_evt_queue_size = 128; + int application_api_queue_is_full (application_t * app) { @@ -67,37 +73,71 @@ application_lookup (u32 api_client_index) return 0; } +application_t * +application_new () +{ + application_t *app; + pool_get (app_pool, app); + memset (app, 0, sizeof (*app)); + app->index = application_get_index (app); + app->connects_seg_manager = ~0; + return app; +} + void application_del (application_t * app) { - session_manager_main_t *smm = vnet_get_session_manager_main (); api_main_t *am = &api_main; void *oldheap; - session_manager_t *sm; + segment_manager_t *sm; + u64 handle; + u32 index, *handles = 0; + int i; + vnet_unbind_args_t _a, *a = &_a; + + /* + * Cleanup segment managers + */ + if (app->connects_seg_manager != (u32) ~ 0) + { + sm = segment_manager_get (app->connects_seg_manager); + segment_manager_del (sm); + } - if (app->mode == APP_SERVER) + /* *INDENT-OFF* */ + hash_foreach (handle, index, app->listeners_table, + ({ + vec_add1 (handles, handle); + })); + /* *INDENT-ON* */ + + /* Actual listener cleanup */ + for (i = 0; i < vec_len (handles); i++) { - sm = session_manager_get (app->session_manager_index); - session_manager_del (smm, sm); + a->app_index = app->api_client_index; + a->handle = handles[i]; + /* seg manager is removed when 
unbind completes */ + vnet_unbind (a); } - /* Free the event fifo in the /vpe-api shared-memory segment */ + /* + * Free the event fifo in the /vpe-api shared-memory segment + */ oldheap = svm_push_data_heap (am->vlib_rp); if (app->event_queue) unix_shared_memory_queue_free (app->event_queue); svm_pop_heap (oldheap); application_table_del (app); - pool_put (app_pool, app); } static void -application_verify_cb_fns (application_type_t type, session_cb_vft_t * cb_fns) +application_verify_cb_fns (session_cb_vft_t * cb_fns) { - if (type == APP_SERVER && cb_fns->session_accept_callback == 0) + if (cb_fns->session_accept_callback == 0) clib_warning ("No accept callback function provided"); - if (type == APP_CLIENT && cb_fns->session_connected_callback == 0) + if (cb_fns->session_connected_callback == 0) clib_warning ("No session connected callback function provided"); if (cb_fns->session_disconnect_callback == 0) clib_warning ("No session disconnect callback function provided"); @@ -105,25 +145,26 @@ application_verify_cb_fns (application_type_t type, session_cb_vft_t * cb_fns) clib_warning ("No session reset callback function provided"); } -application_t * -application_new (application_type_t type, session_type_t sst, - u32 api_client_index, u32 flags, session_cb_vft_t * cb_fns) +int +application_init (application_t * app, u32 api_client_index, u64 * options, + session_cb_vft_t * cb_fns) { - session_manager_main_t *smm = vnet_get_session_manager_main (); api_main_t *am = &api_main; - application_t *app; + segment_manager_t *sm; + segment_manager_properties_t *props; void *oldheap; - session_manager_t *sm; + u32 app_evt_queue_size; + int rv; - pool_get (app_pool, app); - memset (app, 0, sizeof (*app)); + app_evt_queue_size = options[APP_EVT_QUEUE_SIZE] > 0 ? 
+ options[APP_EVT_QUEUE_SIZE] : default_app_evt_queue_size; /* Allocate event fifo in the /vpe-api shared-memory segment */ oldheap = svm_push_data_heap (am->vlib_rp); /* Allocate server event queue */ app->event_queue = - unix_shared_memory_queue_init (128 /* nels $$$$ config */ , + unix_shared_memory_queue_init (app_evt_queue_size, sizeof (session_fifo_event_t), 0 /* consumer pid */ , 0 @@ -132,36 +173,31 @@ application_new (application_type_t type, session_type_t sst, svm_pop_heap (oldheap); - /* If a server, allocate session manager */ - if (type == APP_SERVER) - { - pool_get (smm->session_managers, sm); - memset (sm, 0, sizeof (*sm)); + /* Setup segment manager */ + sm = segment_manager_new (); + sm->app_index = app->index; + props = &app->sm_properties; + props->add_segment_size = options[SESSION_OPTIONS_ADD_SEGMENT_SIZE]; + props->rx_fifo_size = options[SESSION_OPTIONS_RX_FIFO_SIZE]; + props->tx_fifo_size = options[SESSION_OPTIONS_TX_FIFO_SIZE]; + props->add_segment = props->add_segment_size != 0; - app->session_manager_index = sm - smm->session_managers; - } - else if (type == APP_CLIENT) - { - /* Allocate connect session manager if needed */ - if (smm->connect_manager_index[sst] == INVALID_INDEX) - connects_session_manager_init (smm, sst); - app->session_manager_index = smm->connect_manager_index[sst]; - } + if ((rv = segment_manager_init (sm, props, + options[SESSION_OPTIONS_SEGMENT_SIZE]))) + return rv; - app->mode = type; - app->index = application_get_index (app); - app->session_type = sst; + app->first_segment_manager = segment_manager_index (sm); app->api_client_index = api_client_index; - app->flags = flags; + app->flags = options[SESSION_OPTIONS_FLAGS]; app->cb_fns = *cb_fns; /* Check that the obvious things are properly set up */ - application_verify_cb_fns (type, cb_fns); + application_verify_cb_fns (cb_fns); /* Add app to lookup by api_client_index table */ application_table_add (app); - return app; + return 0; } application_t * @@ -185,108 +221,286 @@ application_get_index (application_t * app) return app - app_pool; } +static segment_manager_t * +application_alloc_segment_manager (application_t * app) +{ + segment_manager_t *sm = 0; + + if (app->first_segment_manager != (u32) ~ 0) + { + sm = segment_manager_get (app->first_segment_manager); + app->first_segment_manager = ~0; + return sm; + } + + sm = segment_manager_new (); + if (segment_manager_init (sm, &app->sm_properties, 0)) + return 0; + return sm; +} + +/** + * Start listening local transport endpoint for requested transport. + * + * Creates a 'dummy' stream session with state LISTENING to be used in session + * lookups, prior to establishing connection. Requests transport to build + * it's own specific listening connection. + */ int -application_server_init (application_t * server, u32 segment_size, - u32 add_segment_size, u32 rx_fifo_size, - u32 tx_fifo_size, u8 ** segment_name) +application_start_listen (application_t * srv, session_type_t session_type, + transport_endpoint_t * tep, u64 * res) { - session_manager_main_t *smm = vnet_get_session_manager_main (); - session_manager_t *sm; - int rv; + segment_manager_t *sm; + stream_session_t *s; + u64 handle; + + s = listen_session_new (session_type); + s->app_index = srv->index; + + if (stream_session_listen (s, tep)) + goto err; + + /* Allocate segment manager. All sessions derived out of a listen session + * have fifos allocated by the same segment manager. 
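One detail that is easy to miss in application_alloc_segment_manager above: the segment manager created at attach time (first_segment_manager) is handed out exactly once, to the application's first listen or connect, and the field is then invalidated. Every later bind or connect gets a fresh segment manager built from the same sm_properties, initialized with a zero first-segment size, which presumably defers to the add-segment sizing carried in those properties.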
*/ + sm = application_alloc_segment_manager (srv); + if (sm == 0) + goto err; + + /* Add to app's listener table. Useful to find all child listeners + * when app goes down, although, just for unbinding this is not needed */ + handle = listen_session_get_handle (s); + hash_set (srv->listeners_table, handle, segment_manager_index (sm)); - sm = session_manager_get (server->session_manager_index); + *res = handle; + return 0; + +err: + listen_session_del (s); + return -1; +} + +/** + * Stop listening on session associated to handle + */ +int +application_stop_listen (application_t * srv, u64 handle) +{ + stream_session_t *listener; + uword *indexp; + segment_manager_t *sm; - /* Add first segment */ - if ((rv = session_manager_add_first_segment (smm, sm, segment_size, - segment_name))) + if (srv && hash_get (srv->listeners_table, handle) == 0) { - return rv; + clib_warning ("app doesn't own handle %llu!", handle); + return -1; } - /* Setup session manager */ - sm->add_segment_size = add_segment_size; - sm->rx_fifo_size = rx_fifo_size; - sm->tx_fifo_size = tx_fifo_size; - sm->add_segment = sm->add_segment_size != 0; + listener = listen_session_get_from_handle (handle); + stream_session_stop_listen (listener); + + indexp = hash_get (srv->listeners_table, handle); + ASSERT (indexp); + + sm = segment_manager_get (*indexp); + segment_manager_del (sm); + hash_unset (srv->listeners_table, handle); + listen_session_del (listener); + return 0; } +int +application_open_session (application_t * app, session_type_t sst, + transport_endpoint_t * tep, u32 api_context) +{ + segment_manager_t *sm; + transport_connection_t *tc = 0; + int rv; + + /* Make sure we have a segment manager for connects */ + if (app->connects_seg_manager == (u32) ~ 0) + { + sm = application_alloc_segment_manager (app); + if (sm == 0) + return -1; + app->connects_seg_manager = segment_manager_index (sm); + } + + if ((rv = stream_session_open (app->index, sst, tep, &tc))) + return rv; + + /* Store api_context for when the reply comes. Not the nicest thing + * but better allocating a separate half-open pool. 
*/ + tc->s_index = api_context; + + return 0; +} + +segment_manager_t * +application_get_connect_segment_manager (application_t * app) +{ + ASSERT (app->connects_seg_manager != (u32) ~ 0); + return segment_manager_get (app->connects_seg_manager); +} + +segment_manager_t * +application_get_listen_segment_manager (application_t * app, + stream_session_t * s) +{ + uword *smp; + smp = hash_get (app->listeners_table, listen_session_get_handle (s)); + ASSERT (smp != 0); + return segment_manager_get (*smp); +} + +static u8 * +app_get_name_from_reg_index (application_t * app) +{ + u8 *app_name; + + vl_api_registration_t *regp; + regp = vl_api_client_index_to_registration (app->api_client_index); + if (!regp) + app_name = format (0, "builtin-%d%c", app->index, 0); + else + app_name = format (0, "%s%c", regp->name, 0); + + return app_name; +} + u8 * -format_application_server (u8 * s, va_list * args) +format_application_listener (u8 * s, va_list * args) { - application_t *srv = va_arg (*args, application_t *); + application_t *app = va_arg (*args, application_t *); + u64 handle = va_arg (*args, u64); + u32 index = va_arg (*args, u32); int verbose = va_arg (*args, int); - vl_api_registration_t *regp; stream_session_t *listener; - u8 *server_name, *str, *seg_name; - u32 segment_size; + u8 *app_name, *str; - if (srv == 0) + if (app == 0) { if (verbose) - s = format (s, "%-40s%-20s%-15s%-15s%-10s", "Connection", "Server", - "Segment", "API Client", "Cookie"); + s = format (s, "%-40s%-20s%-15s%-15s%-10s", "Connection", "App", + "API Client", "ListenerID", "SegManager"); else - s = format (s, "%-40s%-20s", "Connection", "Server"); + s = format (s, "%-40s%-20s", "Connection", "App"); return s; } - regp = vl_api_client_index_to_registration (srv->api_client_index); - if (!regp) - server_name = format (0, "builtin-%d%c", srv->index, 0); - else - server_name = regp->name; - - listener = stream_session_listener_get (srv->session_type, - srv->session_index); + app_name = app_get_name_from_reg_index (app); + listener = listen_session_get_from_handle (handle); str = format (0, "%U", format_stream_session, listener, verbose); - session_manager_get_segment_info (listener->server_segment_index, &seg_name, - &segment_size); if (verbose) { - s = format (s, "%-40s%-20s%-20s%-10d%-10d", str, server_name, - seg_name, srv->api_client_index, srv->accept_cookie); + s = format (s, "%-40s%-20s%-15u%-15u%-10u", str, app_name, + app->api_client_index, handle, index); } else - s = format (s, "%-40s%-20s", str, server_name); + s = format (s, "%-40s%-20s", str, app_name); + + vec_free (app_name); return s; } -u8 * -format_application_client (u8 * s, va_list * args) +void +application_format_connects (application_t * app, int verbose) { - application_t *client = va_arg (*args, application_t *); - int verbose = va_arg (*args, int); - stream_session_t *session; - u8 *str, *seg_name; - u32 segment_size; + vlib_main_t *vm = vlib_get_main (); + segment_manager_t *sm; + u8 *app_name, *s = 0; + int i, j; - if (client == 0) + /* Header */ + if (app == 0) { if (verbose) - s = - format (s, "%-40s%-20s%-10s", "Connection", "Segment", - "API Client"); + vlib_cli_output (vm, "%-40s%-20s%-15s%-10s", "Connection", "App", + "API Client", "SegManager"); else - s = format (s, "%-40s", "Connection"); + vlib_cli_output (vm, "%-40s%-20s", "Connection", "App"); + return; + } - return s; + /* make sure */ + if (app->connects_seg_manager == (u32) ~ 0) + return; + + app_name = app_get_name_from_reg_index (app); + + /* Across all fifo segments */ + sm = 
segment_manager_get (app->connects_seg_manager); + for (j = 0; j < vec_len (sm->segment_indices); j++) + { + svm_fifo_segment_private_t *fifo_segment; + svm_fifo_t **fifos; + u8 *str; + + fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); + fifos = svm_fifo_segment_get_fifos (fifo_segment); + for (i = 0; i < vec_len (fifos); i++) + { + svm_fifo_t *fifo; + u32 session_index, thread_index; + stream_session_t *session; + + /* There are 2 fifos/session. Avoid printing twice. */ + if (i % 2) + continue; + + fifo = fifos[i]; + session_index = fifo->server_session_index; + thread_index = fifo->server_thread_index; + + session = stream_session_get (session_index, thread_index); + str = format (0, "%U", format_stream_session, session, verbose); + + if (verbose) + s = format (s, "%-40s%-20s%-15u%-10u", str, app_name, + app->api_client_index, app->connects_seg_manager); + else + s = format (s, "%-40s%-20s", str, app_name); + + vlib_cli_output (vm, "%v", s); + + vec_reset_length (s); + vec_free (str); + } + vec_free (s); } - session = stream_session_get (client->session_index, client->thread_index); - str = format (0, "%U", format_stream_session, session, verbose); + vec_free (app_name); +} - session_manager_get_segment_info (session->server_segment_index, &seg_name, - &segment_size); - if (verbose) +u8 * +format_application (u8 * s, va_list * args) +{ + application_t *app = va_arg (*args, application_t *); + CLIB_UNUSED (int verbose) = va_arg (*args, int); + u8 *app_name; + + if (app == 0) { - s = format (s, "%-40s%-20s%-10d%", str, seg_name, - client->api_client_index); + if (verbose) + s = format (s, "%-10s%-20s%-15s%-15s%-15s%-15s", "Index", "Name", + "API Client", "Add seg size", "Rx fifo size", + "Tx fifo size"); + else + s = format (s, "%-10s%-20s%-20s", "Index", "Name", "API Client"); + return s; } + + app_name = app_get_name_from_reg_index (app); + if (verbose) + s = format (s, "%-10d%-20s%-15d%-15d%-15d%-15d", app->index, app_name, + app->api_client_index, app->sm_properties.add_segment_size, + app->sm_properties.rx_fifo_size, + app->sm_properties.tx_fifo_size); else - s = format (s, "%-40s", str); + s = format (s, "%-10d%-20s%-20d", app->index, app_name, + app->api_client_index); return s; } @@ -294,13 +508,12 @@ static clib_error_t * show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { - session_manager_main_t *smm = &session_manager_main; application_t *app; int do_server = 0; int do_client = 0; int verbose = 0; - if (!smm->is_enabled) + if (!session_manager_is_enabled ()) { clib_error_return (0, "session layer is not enabled"); } @@ -319,17 +532,24 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, if (do_server) { + u64 handle; + u32 index; if (pool_elts (app_pool)) { - vlib_cli_output (vm, "%U", format_application_server, - 0 /* header */ , + vlib_cli_output (vm, "%U", format_application_listener, + 0 /* header */ , 0, 0, verbose); /* *INDENT-OFF* */ pool_foreach (app, app_pool, ({ - if (app->mode == APP_SERVER) - vlib_cli_output (vm, "%U", format_application_server, app, - verbose); + /* App's listener sessions */ + if (hash_elts (app->listeners_table) == 0) + continue; + hash_foreach (handle, index, app->listeners_table, + ({ + vlib_cli_output (vm, "%U", format_application_listener, app, + handle, index, verbose); + })); })); /* *INDENT-ON* */ } @@ -341,15 +561,14 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, { if (pool_elts (app_pool)) { - vlib_cli_output (vm, "%U", format_application_client, - 
0 /* header */ , - verbose); + application_format_connects (0, verbose); + /* *INDENT-OFF* */ pool_foreach (app, app_pool, ({ - if (app->mode == APP_CLIENT) - vlib_cli_output (vm, "%U", format_application_client, app, - verbose); + if (app->connects_seg_manager == (u32)~0) + continue; + application_format_connects (app, verbose); })); /* *INDENT-ON* */ } @@ -357,6 +576,19 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_output (vm, "No active client bindings"); } + /* Print app related info */ + if (!do_server && !do_client) + { + vlib_cli_output (vm, "%U", format_application, 0, verbose); + pool_foreach (app, app_pool, ( + { + vlib_cli_output (vm, "%U", + format_application, app, + verbose); + } + )); + } + return 0; } diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 480828f7..6bcee9d3 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -18,11 +18,13 @@ #include #include +#include typedef enum { APP_SERVER, - APP_CLIENT + APP_CLIENT, + APP_N_TYPES } application_type_t; typedef struct _stream_session_cb_vft @@ -35,7 +37,7 @@ typedef struct _stream_session_cb_vft int (*session_accept_callback) (stream_session_t * new_session); /* Connection request callback */ - int (*session_connected_callback) (u32 api_client_index, + int (*session_connected_callback) (u32 app_index, u32 api_context, stream_session_t * s, u8 code); /** Notify app that session is closing */ @@ -59,45 +61,52 @@ typedef struct _application /** Flags */ u32 flags; + /* Stream server mode: accept or connect + * TODO REMOVE*/ + u8 mode; + + /** Index of the listen session or connect session + * TODO REMOVE*/ + u32 session_index; + + /** Session thread index for client connect sessions + * TODO REMOVE */ + u32 thread_index; + + /* + * Binary API interface to external app + */ + /** Binary API connection index, ~0 if internal */ u32 api_client_index; - /* */ - u32 api_context; - /** Application listens for events on this svm queue */ unix_shared_memory_queue_t *event_queue; - /** Stream session type */ - u8 session_type; - - /* Stream server mode: accept or connect */ - u8 mode; + /* + * Callbacks: shoulder-taps for the server/client + */ - u32 session_manager_index; + session_cb_vft_t cb_fns; /* - * Bind/Listen specific + * svm segment management */ + u32 connects_seg_manager; - /** Accept cookie, for multiple session flavors ($$$ maybe) */ - u32 accept_cookie; + /* Lookup tables for listeners. Value is segment manager index */ + uword *listeners_table; - /** Index of the listen session or connect session */ - u32 session_index; + u32 first_segment_manager; - /** Session thread index for client connect sessions */ - u32 thread_index; - - /* - * Callbacks: shoulder-taps for the server/client - */ - session_cb_vft_t cb_fns; + /** Segment manager properties. 
Shared by all segment managers */ + segment_manager_properties_t sm_properties; } application_t; -application_t *application_new (application_type_t type, session_type_t sst, - u32 api_client_index, u32 flags, - session_cb_vft_t * cb_fns); +application_t *application_new (); +int +application_init (application_t * app, u32 api_client_index, u64 * options, + session_cb_vft_t * cb_fns); void application_del (application_t * app); application_t *application_get (u32 index); application_t *application_get_if_valid (u32 index); @@ -105,11 +114,21 @@ application_t *application_lookup (u32 api_client_index); u32 application_get_index (application_t * app); int -application_server_init (application_t * server, u32 segment_size, - u32 add_segment_size, u32 rx_fifo_size, - u32 tx_fifo_size, u8 ** segment_name); +application_start_listen (application_t * app, session_type_t session_type, + transport_endpoint_t * tep, u64 * handle); +int application_stop_listen (application_t * srv, u64 handle); +int +application_open_session (application_t * app, session_type_t sst, + transport_endpoint_t * tep, u32 api_context); int application_api_queue_is_full (application_t * app); +segment_manager_t *application_get_listen_segment_manager (application_t * + app, + stream_session_t * + s); +segment_manager_t *application_get_connect_segment_manager (application_t * + app); + #endif /* SRC_VNET_SESSION_APPLICATION_H_ */ /* diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 4b30bd87..96d2c621 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -79,81 +79,51 @@ api_parse_session_handle (u64 handle, u32 * session_index, u32 * thread_index) } int -vnet_bind_i (u32 api_client_index, ip46_address_t * ip46, u16 port_host_order, - session_type_t sst, u64 * options, session_cb_vft_t * cb_fns, - application_t ** app, u32 * len_seg_name, char *seg_name) +vnet_bind_i (u32 app_index, session_type_t sst, + transport_endpoint_t * tep, u64 * handle) { - u8 *segment_name = 0; - application_t *server = 0; + application_t *app; stream_session_t *listener; - u8 is_ip4; - - listener = - stream_session_lookup_listener (ip46, - clib_host_to_net_u16 (port_host_order), - sst); - - if (listener) - return VNET_API_ERROR_ADDRESS_IN_USE; - if (application_lookup (api_client_index)) + app = application_get_if_valid (app_index); + if (!app) { - clib_warning ("Only one connection supported for now"); - return VNET_API_ERROR_ADDRESS_IN_USE; + clib_warning ("app not attached"); + return VNET_API_ERROR_APPLICATION_NOT_ATTACHED; } - is_ip4 = SESSION_TYPE_IP4_UDP == sst || SESSION_TYPE_IP4_TCP == sst; - if (!ip_is_zero (ip46, is_ip4) && !ip_is_local (ip46, is_ip4)) - return VNET_API_ERROR_INVALID_VALUE; - - /* Allocate and initialize stream server */ - server = application_new (APP_SERVER, sst, api_client_index, - options[SESSION_OPTIONS_FLAGS], cb_fns); + listener = stream_session_lookup_listener (&tep->ip, + clib_host_to_net_u16 (tep->port), + sst); + if (listener) + return VNET_API_ERROR_ADDRESS_IN_USE; - application_server_init (server, options[SESSION_OPTIONS_SEGMENT_SIZE], - options[SESSION_OPTIONS_ADD_SEGMENT_SIZE], - options[SESSION_OPTIONS_RX_FIFO_SIZE], - options[SESSION_OPTIONS_TX_FIFO_SIZE], - &segment_name); + if (!ip_is_zero (&tep->ip, tep->is_ip4) + && !ip_is_local (&tep->ip, tep->is_ip4)) + return VNET_API_ERROR_INVALID_VALUE_2; /* Setup listen path down to transport */ - stream_session_start_listen (server->index, ip46, 
port_host_order); - - /* - * Return values - */ - - ASSERT (vec_len (segment_name) <= 128); - *len_seg_name = vec_len (segment_name); - memcpy (seg_name, segment_name, *len_seg_name); - *app = server; - - return 0; + return application_start_listen (app, sst, tep, handle); } int -vnet_unbind_i (u32 api_client_index) +vnet_unbind_i (u32 app_index, u64 handle) { - application_t *server; + application_t *app = application_get_if_valid (app_index); - /* - * Find the stream_server_t corresponding to the api client - */ - server = application_lookup (api_client_index); - if (!server) - return VNET_API_ERROR_INVALID_VALUE_2; + if (!app) + { + clib_warning ("app not attached"); + return VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } /* Clear the listener */ - stream_session_stop_listen (server->index); - application_del (server); - - return 0; + return application_stop_listen (app, handle); } int -vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, - ip46_address_t * ip46, u16 port, u64 * options, void *mp, - session_cb_vft_t * cb_fns) +vnet_connect_i (u32 app_index, u32 api_context, session_type_t sst, + transport_endpoint_t * tep, void *mp) { stream_session_t *listener; application_t *server, *app; @@ -161,8 +131,8 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, /* * Figure out if connecting to a local server */ - listener = stream_session_lookup_listener (ip46, - clib_host_to_net_u16 (port), + listener = stream_session_lookup_listener (&tep->ip, + clib_host_to_net_u16 (tep->port), sst); if (listener) { @@ -177,16 +147,11 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, redirect_connect_callback (server->api_client_index, mp); } - /* Create client app */ - app = application_new (APP_CLIENT, sst, api_client_index, - options[SESSION_OPTIONS_FLAGS], cb_fns); - - app->api_context = api_context; - /* * Not connecting to a local server. 
Create regular session */ - return stream_session_open (sst, ip46, port, app->index); + app = application_get (app_index); + return application_open_session (app, sst, tep, api_context); } /** @@ -209,30 +174,31 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, uword unformat_vnet_uri (unformat_input_t * input, va_list * args) { - ip46_address_t *address = va_arg (*args, ip46_address_t *); session_type_t *sst = va_arg (*args, session_type_t *); - u16 *port = va_arg (*args, u16 *); + transport_endpoint_t *tep = va_arg (*args, transport_endpoint_t *); - if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &address->ip4, - port)) + if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &tep->ip.ip4, + &tep->port)) { *sst = SESSION_TYPE_IP4_TCP; + tep->is_ip4 = 1; return 1; } - if (unformat (input, "udp://%U/%d", unformat_ip4_address, &address->ip4, - port)) + if (unformat (input, "udp://%U/%d", unformat_ip4_address, &tep->ip.ip4, + &tep->port)) { *sst = SESSION_TYPE_IP4_UDP; + tep->is_ip4 = 1; return 1; } - if (unformat (input, "udp://%U/%d", unformat_ip6_address, &address->ip6, - port)) + if (unformat (input, "udp://%U/%d", unformat_ip6_address, &tep->ip.ip6, + &tep->port)) { *sst = SESSION_TYPE_IP6_UDP; return 1; } - if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &address->ip6, - port)) + if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &tep->ip.ip6, + &tep->port)) { *sst = SESSION_TYPE_IP6_TCP; return 1; @@ -242,8 +208,7 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args) } int -parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, - u16 * port_number_host_byte_order) +parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep) { unformat_input_t _input, *input = &_input; @@ -252,8 +217,7 @@ parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, /* Parse uri */ unformat_init_string (input, uri, strlen (uri)); - if (!unformat (input, "%U", unformat_vnet_uri, addr, sst, - port_number_host_byte_order)) + if (!unformat (input, "%U", unformat_vnet_uri, sst, tep)) { unformat_free (input); return VNET_API_ERROR_INVALID_VALUE; @@ -263,26 +227,51 @@ parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, return 0; } +/** + * Attaches application. + * + * Allocates a vpp app, i.e., a structure that keeps back pointers + * to external app and a segment manager for shared memory fifo based + * communication with the external app. 
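+ *
+ * A rough caller-side sketch (illustrative only, not part of this change;
+ * my_client_index and my_session_cb_vft are assumed to exist):
+ *
+ *   u64 options[16] = { 0 };
+ *   vnet_app_attach_args_t a = { 0 };
+ *   options[SESSION_OPTIONS_SEGMENT_SIZE] = 1 << 20;
+ *   options[SESSION_OPTIONS_RX_FIFO_SIZE] = 1 << 16;
+ *   options[SESSION_OPTIONS_TX_FIFO_SIZE] = 1 << 16;
+ *   a.api_client_index = my_client_index;
+ *   a.options = options;
+ *   a.session_cb_vft = &my_session_cb_vft;
+ *   if (vnet_application_attach (&a) == 0)
+ *     use a.app_index, a.segment_name, a.segment_size and
+ *     a.app_event_queue_address to finish attaching to the shm segment.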
+ */ int -vnet_bind_uri (vnet_bind_args_t * a) +vnet_application_attach (vnet_app_attach_args_t * a) { - application_t *server = 0; - u16 port_host_order; - session_type_t sst = SESSION_N_TYPES; - ip46_address_t ip46; + application_t *app = 0; + segment_manager_t *sm; + u8 *seg_name; int rv; - memset (&ip46, 0, sizeof (ip46)); - rv = parse_uri (a->uri, &sst, &ip46, &port_host_order); - if (rv) + app = application_new (); + if ((rv = application_init (app, a->api_client_index, a->options, + a->session_cb_vft))) return rv; - if ((rv = vnet_bind_i (a->api_client_index, &ip46, port_host_order, sst, - a->options, a->session_cb_vft, &server, - &a->segment_name_length, a->segment_name))) - return rv; + a->app_event_queue_address = (u64) app->event_queue; + sm = segment_manager_get (app->first_segment_manager); + segment_manager_get_segment_info (sm->segment_indices[0], + &seg_name, &a->segment_size); - a->server_event_queue_address = (u64) server->event_queue; + a->segment_name_length = vec_len (seg_name); + a->segment_name = seg_name; + ASSERT (vec_len (a->segment_name) <= 128); + a->app_index = app->index; + return 0; +} + +int +vnet_application_detach (vnet_app_detach_args_t * a) +{ + application_t *app; + app = application_get_if_valid (a->app_index); + + if (!app) + { + clib_warning ("app not attached"); + return VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } + + application_del (app); return 0; } @@ -308,125 +297,102 @@ session_type_from_proto_and_ip (session_api_proto_t proto, u8 is_ip4) } int -vnet_unbind_uri (char *uri, u32 api_client_index) +vnet_bind_uri (vnet_bind_args_t * a) { - u16 port_number_host_byte_order; session_type_t sst = SESSION_N_TYPES; - ip46_address_t ip46_address; - stream_session_t *listener; + transport_endpoint_t tep; int rv; - rv = parse_uri (uri, &sst, &ip46_address, &port_number_host_byte_order); + memset (&tep, 0, sizeof (tep)); + rv = parse_uri (a->uri, &sst, &tep); if (rv) return rv; - listener = - stream_session_lookup_listener (&ip46_address, - clib_host_to_net_u16 - (port_number_host_byte_order), sst); + if ((rv = vnet_bind_i (a->app_index, sst, &tep, &a->handle))) + return rv; + + return 0; +} + +int +vnet_unbind_uri (vnet_unbind_args_t * a) +{ + session_type_t sst = SESSION_N_TYPES; + stream_session_t *listener; + transport_endpoint_t tep; + int rv; + + rv = parse_uri (a->uri, &sst, &tep); + if (rv) + return rv; + listener = stream_session_lookup_listener (&tep.ip, + clib_host_to_net_u16 (tep.port), + sst); if (!listener) return VNET_API_ERROR_ADDRESS_NOT_IN_USE; - /* External client? 
*/ - if (api_client_index != ~0) - { - ASSERT (vl_api_client_index_to_registration (api_client_index)); - } - - return vnet_unbind_i (api_client_index); + return vnet_unbind_i (a->app_index, listen_session_get_handle (listener)); } int vnet_connect_uri (vnet_connect_args_t * a) { - ip46_address_t ip46_address; - u16 port; + transport_endpoint_t tep; session_type_t sst; - application_t *app; int rv; - app = application_lookup (a->api_client_index); - if (app) - { - clib_warning ("Already have a connect from this app"); - return VNET_API_ERROR_INVALID_VALUE_2; - } - /* Parse uri */ - rv = parse_uri (a->uri, &sst, &ip46_address, &port); + memset (&tep, 0, sizeof (tep)); + rv = parse_uri (a->uri, &sst, &tep); if (rv) return rv; - return vnet_connect_i (a->api_client_index, a->api_context, sst, - &ip46_address, port, a->options, a->mp, - a->session_cb_vft); + return vnet_connect_i (a->app_index, a->api_context, sst, &tep, a->mp); } int -vnet_disconnect_session (u32 session_index, u32 thread_index) +vnet_disconnect_session (vnet_disconnect_args_t * a) { - stream_session_t *session; + u32 index, thread_index; + stream_session_t *s; - session = stream_session_get (session_index, thread_index); - stream_session_disconnect (session); + stream_session_parse_handle (a->handle, &index, &thread_index); + s = stream_session_get_if_valid (index, thread_index); + + if (!s || s->app_index != a->app_index) + return VNET_API_ERROR_INVALID_VALUE; + stream_session_disconnect (s); return 0; } - int vnet_bind (vnet_bind_args_t * a) { - application_t *server = 0; session_type_t sst = SESSION_N_TYPES; int rv; sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); - if ((rv = vnet_bind_i (a->api_client_index, &a->tep.ip, a->tep.port, sst, - a->options, a->session_cb_vft, &server, - &a->segment_name_length, a->segment_name))) + if ((rv = vnet_bind_i (a->app_index, sst, &a->tep, &a->handle))) return rv; - a->server_event_queue_address = (u64) server->event_queue; - a->handle = (u64) a->tep.vrf << 32 | (u64) server->session_index; return 0; } int vnet_unbind (vnet_unbind_args_t * a) { - application_t *server; - - if (a->api_client_index != ~0) - { - ASSERT (vl_api_client_index_to_registration (a->api_client_index)); - } - - /* Make sure this is the right one */ - server = application_lookup (a->api_client_index); - ASSERT (server->session_index == (0xFFFFFFFF & a->handle)); - - /* TODO use handle to disambiguate namespaces/vrfs */ - return vnet_unbind_i (a->api_client_index); + return vnet_unbind_i (a->app_index, a->handle); } int vnet_connect (vnet_connect_args_t * a) { session_type_t sst; - application_t *app; - - app = application_lookup (a->api_client_index); - if (app) - { - clib_warning ("Already have a connect from this app"); - return VNET_API_ERROR_INVALID_VALUE_2; - } sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); - return vnet_connect_i (a->api_client_index, a->api_context, sst, &a->tep.ip, - a->tep.port, a->options, a->mp, a->session_cb_vft); + return vnet_connect_i (a->app_index, a->api_context, sst, &a->tep, a->mp); } int diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index a5f2b9a6..2c497531 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -28,6 +28,27 @@ typedef enum _session_api_proto SESSION_PROTO_UDP } session_api_proto_t; +typedef struct _vnet_app_attach_args_t +{ + u32 api_client_index; + u64 *options; + session_cb_vft_t *session_cb_vft; + + /* + * Results + */ + u8 
*segment_name; + u32 segment_name_length; + u32 segment_size; + u64 app_event_queue_address; + u32 app_index; +} vnet_app_attach_args_t; + +typedef struct _vnet_app_detach_args_t +{ + u32 app_index; +} vnet_app_detach_args_t; + typedef struct _vnet_bind_args_t { union @@ -40,9 +61,7 @@ typedef struct _vnet_bind_args_t }; }; - u32 api_client_index; - u64 *options; - session_cb_vft_t *session_cb_vft; + u32 app_index; /* * Results @@ -60,7 +79,7 @@ typedef struct _vnet_unbind_args_t char *uri; u64 handle; }; - u32 api_client_index; + u32 app_index; } vnet_unbind_args_t; typedef struct _vnet_connect_args @@ -74,10 +93,8 @@ typedef struct _vnet_connect_args session_api_proto_t proto; }; }; - u32 api_client_index; + u32 app_index; u32 api_context; - u64 *options; - session_cb_vft_t *session_cb_vft; /* Used for redirects */ void *mp; @@ -86,12 +103,13 @@ typedef struct _vnet_connect_args typedef struct _vnet_disconnect_args_t { u64 handle; - u32 api_client_index; + u32 app_index; } vnet_disconnect_args_t; -/* Bind / connect options */ +/* Application attach options */ typedef enum { + APP_EVT_QUEUE_SIZE, SESSION_OPTIONS_FLAGS, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, @@ -99,7 +117,7 @@ typedef enum SESSION_OPTIONS_TX_FIFO_SIZE, SESSION_OPTIONS_ACCEPT_COOKIE, SESSION_OPTIONS_N_OPTIONS -} session_options_index_t; +} app_attach_options_index_t; /** Server can handle delegated connect requests from local clients */ #define SESSION_OPTIONS_FLAGS_USE_FIFO (1<<0) @@ -109,10 +127,13 @@ typedef enum #define VNET_CONNECT_REDIRECTED 123 +int vnet_application_attach (vnet_app_attach_args_t * a); +int vnet_application_detach (vnet_app_detach_args_t * a); + int vnet_bind_uri (vnet_bind_args_t *); -int vnet_unbind_uri (char *uri, u32 api_client_index); +int vnet_unbind_uri (vnet_unbind_args_t * a); int vnet_connect_uri (vnet_connect_args_t * a); -int vnet_disconnect_session (u32 session_index, u32 thread_index); +int vnet_disconnect_session (vnet_disconnect_args_t * a); int vnet_bind (vnet_bind_args_t * a); int vnet_connect (vnet_connect_args_t * a); diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c new file mode 100644 index 00000000..16e5bc56 --- /dev/null +++ b/src/vnet/session/segment_manager.c @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +/** + * Counter used to build segment names + */ +u32 segment_name_counter = 0; + +/** + * Pool of segment managers + */ +segment_manager_t *segment_managers = 0; + +/** + * Default fifo and segment size. TODO config. 
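+ * (1 << 16 is 64 kB per fifo and 1 << 20 is 1 MB per segment below; they are
+ * only used when the corresponding segment manager property is left unset.)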
+ */ +u32 default_fifo_size = 1 << 16; +u32 default_segment_size = 1 << 20; + +void +segment_manager_get_segment_info (u32 index, u8 ** name, u32 * size) +{ + svm_fifo_segment_private_t *s; + s = svm_fifo_get_segment (index); + *name = s->h->segment_name; + *size = s->ssvm.ssvm_size; +} + +always_inline int +session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, + u8 * segment_name) +{ + svm_fifo_segment_create_args_t _ca, *ca = &_ca; + int rv; + + memset (ca, 0, sizeof (*ca)); + + ca->segment_name = (char *) segment_name; + ca->segment_size = segment_size; + + rv = svm_fifo_segment_create (ca); + if (rv) + { + clib_warning ("svm_fifo_segment_create ('%s', %d) failed", + ca->segment_name, ca->segment_size); + vec_free (segment_name); + return VNET_API_ERROR_SVM_SEGMENT_CREATE_FAIL; + } + + vec_add1 (sm->segment_indices, ca->new_segment_index); + + return 0; +} + +int +session_manager_add_segment (segment_manager_t * sm) +{ + u8 *segment_name; + svm_fifo_segment_create_args_t _ca, *ca = &_ca; + u32 add_segment_size; + int rv; + + memset (ca, 0, sizeof (*ca)); + segment_name = format (0, "%d-%d%c", getpid (), segment_name_counter++, 0); + add_segment_size = sm->properties->add_segment_size ? + sm->properties->add_segment_size : default_segment_size; + + rv = session_manager_add_segment_i (sm, add_segment_size, segment_name); + vec_free (segment_name); + return rv; +} + +int +session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size) +{ + svm_fifo_segment_create_args_t _ca, *ca = &_ca; + u8 *segment_name; + int rv; + + memset (ca, 0, sizeof (*ca)); + segment_name = format (0, "%d-%d%c", getpid (), segment_name_counter++, 0); + rv = session_manager_add_segment_i (sm, segment_size, segment_name); + vec_free (segment_name); + return rv; +} + +/** + * Initializes segment manager based on options provided. + * Returns error if svm segment allocation fails. + */ +int +segment_manager_init (segment_manager_t * sm, + segment_manager_properties_t * properties, + u32 first_seg_size) +{ + int rv; + + /* app allocates these */ + sm->properties = properties; + + if (first_seg_size > 0) + { + rv = session_manager_add_first_segment (sm, first_seg_size); + if (rv) + { + clib_warning ("Failed to allocate segment"); + return rv; + } + } + + return 0; +} + +/** + * Removes segment manager. + * + * Since the fifos allocated in the segment keep backpointers to the sessions + * prior to removing the segment, we call session disconnect. This + * subsequently propages into transport. + */ +void +segment_manager_del (segment_manager_t * sm) +{ + u32 *deleted_sessions = 0; + u32 *deleted_thread_indices = 0; + int i, j; + + /* Across all fifo segments used by the server */ + for (j = 0; j < vec_len (sm->segment_indices); j++) + { + svm_fifo_segment_private_t *fifo_segment; + svm_fifo_t **fifos; + /* Vector of fifos allocated in the segment */ + fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); + fifos = svm_fifo_segment_get_fifos (fifo_segment); + + /* + * Remove any residual sessions from the session lookup table + * Don't bother deleting the individual fifos, we're going to + * throw away the fifo segment in a minute. + */ + for (i = 0; i < vec_len (fifos); i++) + { + svm_fifo_t *fifo; + u32 session_index, thread_index; + stream_session_t *session; + + fifo = fifos[i]; + session_index = fifo->server_session_index; + thread_index = fifo->server_thread_index; + + session = stream_session_get (session_index, thread_index); + + /* Add to the deleted_sessions vector (once!) 
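(a session's rx and tx fifos both point back to it, so the same session can
show up twice in this walk; the is_deleted flag below prevents double-adds)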
*/ + if (!session->is_deleted) + { + session->is_deleted = 1; + vec_add1 (deleted_sessions, session_index); + vec_add1 (deleted_thread_indices, thread_index); + } + } + + for (i = 0; i < vec_len (deleted_sessions); i++) + { + stream_session_t *session; + session = stream_session_get (deleted_sessions[i], + deleted_thread_indices[i]); + + /* Instead of directly removing the session call disconnect */ + stream_session_disconnect (session); + + /* + stream_session_table_del (smm, session); + pool_put(smm->sessions[deleted_thread_indices[i]], session); + */ + } + + vec_reset_length (deleted_sessions); + vec_reset_length (deleted_thread_indices); + + /* Instead of removing the segment, test when removing the session if + * the segment can be removed + */ + /* svm_fifo_segment_delete (fifo_segment); */ + } + + vec_free (deleted_sessions); + vec_free (deleted_thread_indices); + pool_put (segment_managers, sm); +} + +static int +segment_manager_notify_app_seg_add (segment_manager_t * sm, + u32 fifo_segment_index) +{ + application_t *app = application_get (sm->app_index); + u32 seg_size = 0; + u8 *seg_name; + + /* Send an API message to the external app, to map new segment */ + ASSERT (app->cb_fns.add_segment_callback); + + segment_manager_get_segment_info (fifo_segment_index, &seg_name, &seg_size); + return app->cb_fns.add_segment_callback (app->api_client_index, seg_name, + seg_size); +} + +int +segment_manager_alloc_session_fifos (segment_manager_t * sm, + svm_fifo_t ** server_rx_fifo, + svm_fifo_t ** server_tx_fifo, + u32 * fifo_segment_index) +{ + svm_fifo_segment_private_t *fifo_segment; + u32 fifo_size, sm_index; + u8 added_a_segment = 0; + int i; + + /* Allocate svm fifos */ + ASSERT (vec_len (sm->segment_indices)); + +again: + for (i = 0; i < vec_len (sm->segment_indices); i++) + { + *fifo_segment_index = sm->segment_indices[i]; + fifo_segment = svm_fifo_get_segment (*fifo_segment_index); + + fifo_size = sm->properties->rx_fifo_size; + fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; + *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + + fifo_size = sm->properties->tx_fifo_size; + fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; + *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + + if (*server_rx_fifo == 0) + { + /* This would be very odd, but handle it... 
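(the rx fifo failed to allocate; free whatever tx fifo we did get and fall
through to the next segment)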
*/ + if (*server_tx_fifo != 0) + { + svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo); + *server_tx_fifo = 0; + } + continue; + } + if (*server_tx_fifo == 0) + { + if (*server_rx_fifo != 0) + { + svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo); + *server_rx_fifo = 0; + } + continue; + } + break; + } + + /* See if we're supposed to create another segment */ + if (*server_rx_fifo == 0) + { + if (sm->properties->add_segment) + { + if (added_a_segment) + { + clib_warning ("added a segment, still cant allocate a fifo"); + return SESSION_ERROR_NEW_SEG_NO_SPACE; + } + + if (session_manager_add_segment (sm)) + return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + + added_a_segment = 1; + goto again; + } + else + { + clib_warning ("No space to allocate fifos!"); + return SESSION_ERROR_NO_SPACE; + } + } + + if (added_a_segment) + return segment_manager_notify_app_seg_add (sm, *fifo_segment_index); + + /* Backpointers to segment manager */ + sm_index = segment_manager_index (sm); + (*server_tx_fifo)->segment_manager = sm_index; + (*server_rx_fifo)->segment_manager = sm_index; + + return 0; +} + +void +segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, + svm_fifo_t * tx_fifo) +{ + segment_manager_t *sm; + svm_fifo_segment_private_t *fifo_segment; + + fifo_segment = svm_fifo_get_segment (svm_segment_index); + svm_fifo_segment_free_fifo (fifo_segment, rx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, tx_fifo); + + /* If we have segment manager, try doing some cleanup. + * It's possible to have no segment manager if the session was removed + * as result of a detach */ + sm = segment_manager_get_if_valid (rx_fifo->segment_manager); + if (sm) + { + /* Remove segment only if it holds no fifos and not the first */ + if (sm->segment_indices[0] != svm_segment_index + && !svm_fifo_segment_has_fifos (fifo_segment)) + { + svm_fifo_segment_delete (fifo_segment); + vec_del1 (sm->segment_indices, svm_segment_index); + } + } +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h new file mode 100644 index 00000000..778d6040 --- /dev/null +++ b/src/vnet/session/segment_manager.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef SRC_VNET_SESSION_SEGMENT_MANAGER_H_ +#define SRC_VNET_SESSION_SEGMENT_MANAGER_H_ + +#include +#include + +typedef struct _segment_manager_properties +{ + /** Session fifo sizes. */ + u32 rx_fifo_size; + u32 tx_fifo_size; + + /** Configured additional segment size */ + u32 add_segment_size; + + /** Flag that indicates if additional segments should be created */ + u8 add_segment; + +} segment_manager_properties_t; + +typedef struct _segment_manager +{ + /** segments mapped by this manager */ + u32 *segment_indices; + + /** Owner app index */ + u32 app_index; + + /** Pointer to manager properties. 
Could be shared among all of + * an app's segment managers s*/ + segment_manager_properties_t *properties; +} segment_manager_t; + +/** Pool of segment managers */ +extern segment_manager_t *segment_managers; + +always_inline segment_manager_t * +segment_manager_new () +{ + segment_manager_t *sm; + pool_get (segment_managers, sm); + memset (sm, 0, sizeof (*sm)); + return sm; +} + +always_inline segment_manager_t * +segment_manager_get (u32 index) +{ + return pool_elt_at_index (segment_managers, index); +} + +always_inline segment_manager_t * +segment_manager_get_if_valid (u32 index) +{ + if (pool_is_free_index (segment_managers, index)) + return 0; + return pool_elt_at_index (segment_managers, index); +} + +always_inline u32 +segment_manager_index (segment_manager_t * sm) +{ + return sm - segment_managers; +} + +int +segment_manager_init (segment_manager_t * sm, + segment_manager_properties_t * properties, + u32 seg_size); + +void segment_manager_get_segment_info (u32 index, u8 ** name, u32 * size); +int +session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size); +int session_manager_add_segment (segment_manager_t * sm); +void segment_manager_del (segment_manager_t * sm); +int +segment_manager_alloc_session_fifos (segment_manager_t * sm, + svm_fifo_t ** server_rx_fifo, + svm_fifo_t ** server_tx_fifo, + u32 * fifo_segment_index); +void +segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, + svm_fifo_t * tx_fifo); + +#endif /* SRC_VNET_SESSION_SEGMENT_MANAGER_H_ */ +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api index 582765b5..e207e46f 100644 --- a/src/vnet/session/session.api +++ b/src/vnet/session/session.api @@ -13,6 +13,68 @@ * limitations under the License. */ +/** \brief client->vpp, attach application to session layer + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param initial_segment_size - size of the initial shm segment to be + allocated + @param options - segment size, fifo sizes, etc. 
+*/ + define application_attach { + u32 client_index; + u32 context; + u32 initial_segment_size; + u64 options[16]; + }; + + /** \brief Application attach reply + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param app_event_queue_address - vpp event queue address or 0 if this + connection shouldn't send events + @param segment_size - size of first shm segment + @param segment_name_length - length of segment name + @param segment_name - name of segment client needs to attach to +*/ +define application_attach_reply { + u32 context; + i32 retval; + u64 app_event_queue_address; + u32 segment_size; + u8 segment_name_length; + u8 segment_name[128]; +}; + + /** \brief client->vpp, attach application to session layer + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ + define application_detach { + u32 client_index; + u32 context; + }; + + /** \brief detach reply + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define application_detach_reply { + u32 context; + i32 retval; +}; + +/** \brief vpp->client, please map an additional shared memory segment + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param segment_name - +*/ +define map_another_segment { + u32 client_index; + u32 context; + u32 segment_size; + u8 segment_name[128]; +}; + /** \brief Bind to a given URI @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -25,9 +87,7 @@ define bind_uri { u32 client_index; u32 context; u32 accept_cookie; - u32 initial_segment_size; u8 uri[128]; - u64 options[16]; }; /** \brief Unbind a given URI @@ -49,7 +109,10 @@ define unbind_uri { @param accept_cookie - sender accept cookie, to identify this bind flavor @param uri - a URI, e.g. "tcp4://0.0.0.0/0/80" "tcp6://::/0/80" [ipv6], etc. - @param options - socket options, fifo sizes, etc. + @param options - socket options, fifo sizes, etc. passed by vpp to the + server when redirecting connects + @param client_queue_address - binary API client queue address. Used by + local server when connect was redirected. 
*/ define connect_uri { u32 client_index; @@ -62,18 +125,10 @@ define connect_uri { /** \brief Bind reply @param context - sender context, to match reply w/ request @param retval - return code for the request - @param event_queue_address - vpp event queue address or 0 if this - connection shouldn't send events - @param segment_name_length - length of segment name - @param segment_name - name of segment client needs to attach to */ define bind_uri_reply { u32 context; i32 retval; - u64 server_event_queue_address; - u8 segment_name_length; - u32 segment_size; - u8 segment_name[128]; }; /** \brief unbind reply @@ -88,43 +143,28 @@ define unbind_uri_reply { /** \brief vpp->client, connect reply @param context - sender context, to match reply w/ request @param retval - return code for the request + @param handle - session handle @param server_rx_fifo - rx (vpp -> vpp-client) fifo address @param server_tx_fifo - tx (vpp-client -> vpp) fifo address - @param session_index - session index; - @param session_thread_index - session thread index - @param session_type - session thread type @param vpp_event_queue_address - vpp's event queue address - @param client_event_queue_address - client's event queue address + @param segment_size - size of segment to be attached. Only for redirects. @param segment_name_length - non-zero if the client needs to attach to - the fifo segment + the fifo segment. This should only happen + if session was redirected. @param segment_name - set if the client needs to attach to the segment */ define connect_uri_reply { u32 context; i32 retval; + u64 handle; u64 server_rx_fifo; u64 server_tx_fifo; - u32 session_index; - u32 session_thread_index; - u8 session_type; - u64 client_event_queue_address; u64 vpp_event_queue_address; u32 segment_size; u8 segment_name_length; u8 segment_name[128]; }; -/** \brief vpp->client, please map an additional shared memory segment - @param context - sender context, to match reply w/ request - @param segment_name - -*/ -define map_another_segment { - u32 client_index; - u32 context; - u32 segment_size; - u8 segment_name[128]; -}; - /** \brief client->vpp @param context - sender context, to match reply w/ request @param retval - return code for the request @@ -136,25 +176,27 @@ define map_another_segment_reply { /** \brief vpp->client, accept this session @param context - sender context, to match reply w/ request - @param accept_cookie - tells client which bind flavor just occurred + @param listener_handle - tells client which listener this pertains to + @param handle - unique session identifier + @param session_thread_index - thread index of new session @param rx_fifo_address - rx (vpp -> vpp-client) fifo address @param tx_fifo_address - tx (vpp-client -> vpp) fifo address - @param session_index - index of new session - @param session_thread_index - thread index of new session @param vpp_event_queue_address - vpp's event queue address - @param session_type - type of session - + @param port - remote port + @param is_ip4 - 1 if the ip is ip4 + @param ip - remote ip */ define accept_session { u32 client_index; u32 context; - u32 accept_cookie; + u64 listener_handle; + u64 handle; u64 server_rx_fifo; u64 server_tx_fifo; - u32 session_index; - u32 session_thread_index; u64 vpp_event_queue_address; - u8 session_type; + u16 port; + u8 is_ip4; + u8 ip[16]; }; /** \brief client->vpp, reply to an accept message @@ -167,23 +209,19 @@ define accept_session { define accept_session_reply { u32 context; i32 retval; - u8 session_type; - u8 session_thread_index; 
- u32 session_index; + u64 handle; }; /** \brief bidirectional disconnect API @param client_index - opaque cookie to identify the sender client to vpp direction only @param context - sender context, to match reply w/ request - @param session_index - cookie #1 from accept_session / connect_reply - @param session_thread_index - cookie #2 + @param handle - session handle obtained from accept/connect */ define disconnect_session { u32 client_index; u32 context; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief bidirectional disconnect reply API @@ -191,31 +229,25 @@ define disconnect_session { client to vpp direction only @param context - sender context, to match reply w/ request @param retval - return code for the request - @param session_index - session index from accept_session / connect_reply - @param session_thread_index - thread index from accept_session / - connect_reply + @param handle - session handle */ define disconnect_session_reply { u32 client_index; u32 context; i32 retval; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief vpp->client reset session API @param client_index - opaque cookie to identify the sender client to vpp direction only @param context - sender context, to match reply w/ request - @param session_index - session index from accept_session / connect_reply - @param session_thread_index - thread index from accept_session / - connect_reply + @param handle - session handle obtained via accept/connects */ define reset_session { u32 client_index; u32 context; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief client->vpp reset session reply @@ -223,16 +255,13 @@ define reset_session { client to vpp direction only @param context - sender context, to match reply w/ request @param retval - return code for the request - @param session_index - session index from accept_session / connect_reply - @param session_thread_index - thread index from accept_session / - connect_reply + @param handle - session handle obtained via accept/connect */ define reset_session_reply { u32 client_index; u32 context; i32 retval; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief Bind to an ip:port pair for a given transport protocol @@ -277,7 +306,7 @@ define unbind_sock { @param proto - protocol 0 - TCP 1 - UDP @param client_queue_address - client's API queue address. Non-zero when used to perform redirects - @param options - socket options, fifo sizes, etc. + @param options - socket options, fifo sizes, etc. when doing redirects */ define connect_sock { u32 client_index; @@ -326,7 +355,7 @@ define unbind_sock_reply { @param server_rx_fifo - rx (vpp -> vpp-client) fifo address @param server_tx_fifo - tx (vpp-client -> vpp) fifo address @param vpp_event_queue_address - vpp's event queue address - @param client_event_queue_address - client's event queue address + @param segment_size - size of segment to be attached. Only for redirects. 
@param segment_name_length - non-zero if the client needs to attach to the fifo segment @param segment_name - set if the client needs to attach to the segment @@ -337,92 +366,12 @@ define connect_sock_reply { u64 handle; u64 server_rx_fifo; u64 server_tx_fifo; - u64 client_event_queue_address; u64 vpp_event_queue_address; u32 segment_size; u8 segment_name_length; u8 segment_name[128]; }; -/** \brief bidirectional disconnect API - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define disconnect_sock { - u32 client_index; - u32 context; - u64 handle; -}; - -/** \brief bidirectional disconnect reply API - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param client_context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define disconnect_sock_reply { - u32 client_index; - u32 context; - i32 retval; - u64 handle; -}; - -/** \brief vpp->client, accept this session - @param context - sender context, to match reply w/ request - @param accept_cookie - tells client which bind flavor just occurred - @param handle - session handle obtained through accept/connect - @param rx_fifo_address - rx (vpp -> vpp-client) fifo address - @param tx_fifo_address - tx (vpp-client -> vpp) fifo address - @param vpp_event_queue_address - vpp's event queue address -*/ -define accept_sock { - u32 client_index; - u32 context; - u32 accept_cookie; - u64 handle; - u64 server_rx_fifo; - u64 server_tx_fifo; - u64 vpp_event_queue_address; -}; - -/** \brief client->vpp, reply to an accept message - @param context - sender context, to match reply w/ request - @param retval - return code for the request - @param handle - session handle obtained through accept/connect -*/ -define accept_sock_reply { - u32 context; - i32 retval; - u64 handle; -}; - -/** \brief vpp->client reset session API - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define reset_sock { - u32 client_index; - u32 context; - u64 handle; -}; - -/** \brief client->vpp reset session reply - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define reset_sock_reply { - u32 client_index; - u32 context; - i32 retval; - u64 handle; -}; - /** \brief enable/disable session layer @param client_index - opaque cookie to identify the sender client to vpp direction only diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 8e2b2616..e6cfe7da 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -36,15 +36,14 @@ session_manager_main_t session_manager_main; * Session lookup key; (src-ip, dst-ip, src-port, dst-port, session-type) * Value: (owner thread index << 32 | session_index); */ -static void -stream_session_table_add_for_tc (u8 sst, transport_connection_t * tc, - u64 value) +void +stream_session_table_add_for_tc (transport_connection_t * tc, u64 value) { session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; session_kv6_t kv6; - switch (sst) + switch (tc->proto) { case SESSION_TYPE_IP4_UDP: case 
SESSION_TYPE_IP4_TCP: @@ -72,12 +71,12 @@ stream_session_table_add (session_manager_main_t * smm, stream_session_t * s, tc = tp_vfts[s->session_type].get_connection (s->connection_index, s->thread_index); - stream_session_table_add_for_tc (s->session_type, tc, value); + stream_session_table_add_for_tc (tc, value); } static void -stream_session_half_open_table_add (u8 sst, transport_connection_t * tc, - u64 value) +stream_session_half_open_table_add (session_type_t sst, + transport_connection_t * tc, u64 value) { session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; @@ -105,14 +104,13 @@ stream_session_half_open_table_add (u8 sst, transport_connection_t * tc, } } -static int -stream_session_table_del_for_tc (session_manager_main_t * smm, u8 sst, - transport_connection_t * tc) +int +stream_session_table_del_for_tc (transport_connection_t * tc) { + session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; session_kv6_t kv6; - - switch (sst) + switch (tc->proto) { case SESSION_TYPE_IP4_UDP: case SESSION_TYPE_IP4_TCP: @@ -141,7 +139,7 @@ stream_session_table_del (session_manager_main_t * smm, stream_session_t * s) ts = tp_vfts[s->session_type].get_connection (s->connection_index, s->thread_index); - return stream_session_table_del_for_tc (smm, s->session_type, ts); + return stream_session_table_del_for_tc (ts); } static void @@ -383,7 +381,7 @@ stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, * Allocate vpp event queue (once) per worker thread */ void -vpp_session_event_queue_allocate (session_manager_main_t * smm, +session_vpp_event_queue_allocate (session_manager_main_t * smm, u32 thread_index) { api_main_t *am = &api_main; @@ -406,266 +404,24 @@ vpp_session_event_queue_allocate (session_manager_main_t * smm, } } -void -session_manager_get_segment_info (u32 index, u8 ** name, u32 * size) -{ - svm_fifo_segment_private_t *s; - s = svm_fifo_get_segment (index); - *name = s->h->segment_name; - *size = s->ssvm.ssvm_size; -} - -always_inline int -session_manager_add_segment_i (session_manager_main_t * smm, - session_manager_t * sm, - u32 segment_size, u8 * segment_name) -{ - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - int rv; - - memset (ca, 0, sizeof (*ca)); - - ca->segment_name = (char *) segment_name; - ca->segment_size = segment_size; - - rv = svm_fifo_segment_create (ca); - if (rv) - { - clib_warning ("svm_fifo_segment_create ('%s', %d) failed", - ca->segment_name, ca->segment_size); - vec_free (segment_name); - return -1; - } - - vec_add1 (sm->segment_indices, ca->new_segment_index); - - return 0; -} - -static int -session_manager_add_segment (session_manager_main_t * smm, - session_manager_t * sm) -{ - u8 *segment_name; - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - u32 add_segment_size; - u32 default_segment_size = 128 << 10; - - memset (ca, 0, sizeof (*ca)); - segment_name = format (0, "%d-%d%c", getpid (), - smm->unique_segment_name_counter++, 0); - add_segment_size = - sm->add_segment_size ? 
sm->add_segment_size : default_segment_size; - - return session_manager_add_segment_i (smm, sm, add_segment_size, - segment_name); -} - -int -session_manager_add_first_segment (session_manager_main_t * smm, - session_manager_t * sm, u32 segment_size, - u8 ** segment_name) -{ - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - memset (ca, 0, sizeof (*ca)); - *segment_name = format (0, "%d-%d%c", getpid (), - smm->unique_segment_name_counter++, 0); - return session_manager_add_segment_i (smm, sm, segment_size, *segment_name); -} - -void -session_manager_del (session_manager_main_t * smm, session_manager_t * sm) -{ - u32 *deleted_sessions = 0; - u32 *deleted_thread_indices = 0; - int i, j; - - /* Across all fifo segments used by the server */ - for (j = 0; j < vec_len (sm->segment_indices); j++) - { - svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; - /* Vector of fifos allocated in the segment */ - fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); - fifos = (svm_fifo_t **) fifo_segment->h->fifos; - - /* - * Remove any residual sessions from the session lookup table - * Don't bother deleting the individual fifos, we're going to - * throw away the fifo segment in a minute. - */ - for (i = 0; i < vec_len (fifos); i++) - { - svm_fifo_t *fifo; - u32 session_index, thread_index; - stream_session_t *session; - - fifo = fifos[i]; - session_index = fifo->server_session_index; - thread_index = fifo->server_thread_index; - - session = pool_elt_at_index (smm->sessions[thread_index], - session_index); - - /* Add to the deleted_sessions vector (once!) */ - if (!session->is_deleted) - { - session->is_deleted = 1; - vec_add1 (deleted_sessions, - session - smm->sessions[thread_index]); - vec_add1 (deleted_thread_indices, thread_index); - } - } - - for (i = 0; i < vec_len (deleted_sessions); i++) - { - stream_session_t *session; - - session = - pool_elt_at_index (smm->sessions[deleted_thread_indices[i]], - deleted_sessions[i]); - - /* Instead of directly removing the session call disconnect */ - stream_session_disconnect (session); - - /* - stream_session_table_del (smm, session); - pool_put(smm->sessions[deleted_thread_indices[i]], session); - */ - } - - vec_reset_length (deleted_sessions); - vec_reset_length (deleted_thread_indices); - - /* Instead of removing the segment, test when removing the session if - * the segment can be removed - */ - /* svm_fifo_segment_delete (fifo_segment); */ - } - - vec_free (deleted_sessions); - vec_free (deleted_thread_indices); -} - -int -session_manager_allocate_session_fifos (session_manager_main_t * smm, - session_manager_t * sm, - svm_fifo_t ** server_rx_fifo, - svm_fifo_t ** server_tx_fifo, - u32 * fifo_segment_index, - u8 * added_a_segment) -{ - svm_fifo_segment_private_t *fifo_segment; - u32 fifo_size, default_fifo_size = 1 << 16; /* TODO config */ - int i; - - *added_a_segment = 0; - - /* Allocate svm fifos */ - ASSERT (vec_len (sm->segment_indices)); - -again: - for (i = 0; i < vec_len (sm->segment_indices); i++) - { - *fifo_segment_index = sm->segment_indices[i]; - fifo_segment = svm_fifo_get_segment (*fifo_segment_index); - - fifo_size = sm->rx_fifo_size; - fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); - - fifo_size = sm->tx_fifo_size; - fifo_size = (fifo_size == 0) ? 
default_fifo_size : fifo_size; - *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); - - if (*server_rx_fifo == 0) - { - /* This would be very odd, but handle it... */ - if (*server_tx_fifo != 0) - { - svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo); - *server_tx_fifo = 0; - } - continue; - } - if (*server_tx_fifo == 0) - { - if (*server_rx_fifo != 0) - { - svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo); - *server_rx_fifo = 0; - } - continue; - } - break; - } - - /* See if we're supposed to create another segment */ - if (*server_rx_fifo == 0) - { - if (sm->add_segment) - { - if (*added_a_segment) - { - clib_warning ("added a segment, still cant allocate a fifo"); - return SESSION_ERROR_NEW_SEG_NO_SPACE; - } - - if (session_manager_add_segment (smm, sm)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; - - *added_a_segment = 1; - goto again; - } - else - { - clib_warning ("No space to allocate fifos!"); - return SESSION_ERROR_NO_SPACE; - } - } - return 0; -} - int -stream_session_create_i (session_manager_main_t * smm, application_t * app, - transport_connection_t * tc, +stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, stream_session_t ** ret_s) { - int rv; + session_manager_main_t *smm = &session_manager_main; svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0; u32 fifo_segment_index; - u32 pool_index, seg_size; + u32 pool_index; stream_session_t *s; u64 value; u32 thread_index = tc->thread_index; - session_manager_t *sm; - u8 segment_added; - u8 *seg_name; - - sm = session_manager_get (app->session_manager_index); - - /* Check the API queue */ - if (app->mode == APP_SERVER && application_api_queue_is_full (app)) - return SESSION_ERROR_API_QUEUE_FULL; + int rv; - if ((rv = session_manager_allocate_session_fifos (smm, sm, &server_rx_fifo, - &server_tx_fifo, - &fifo_segment_index, - &segment_added))) + if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo, + &server_tx_fifo, + &fifo_segment_index))) return rv; - if (segment_added && app->mode == APP_SERVER) - { - /* Send an API message to the external server, to map new segment */ - ASSERT (app->cb_fns.add_segment_callback); - - session_manager_get_segment_info (fifo_segment_index, &seg_name, - &seg_size); - if (app->cb_fns.add_segment_callback (app->api_client_index, seg_name, - seg_size)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; - } - /* Create the session */ pool_get (smm->sessions[thread_index], s); memset (s, 0, sizeof (*s)); @@ -682,10 +438,9 @@ stream_session_create_i (session_manager_main_t * smm, application_t * app, s->server_tx_fifo = server_tx_fifo; /* Initialize state machine, such as it is... */ - s->session_type = app->session_type; + s->session_type = tc->proto; s->session_state = SESSION_STATE_CONNECTING; - s->app_index = application_get_index (app); - s->server_segment_index = fifo_segment_index; + s->svm_segment_index = fifo_segment_index; s->thread_index = thread_index; s->session_index = pool_index; @@ -697,7 +452,7 @@ stream_session_create_i (session_manager_main_t * smm, application_t * app, /* Add to the main lookup table */ value = (((u64) thread_index) << 32) | (u64) s->session_index; - stream_session_table_add_for_tc (app->session_type, tc, value); + stream_session_table_add_for_tc (tc, value); *ret_s = s; @@ -881,94 +636,6 @@ session_manager_flush_enqueue_events (u32 thread_index) return errors; } -/* - * Start listening on server's ip/port pair for requested transport. 
- * - * Creates a 'dummy' stream session with state LISTENING to be used in session - * lookups, prior to establishing connection. Requests transport to build - * it's own specific listening connection. - */ -int -stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port) -{ - session_manager_main_t *smm = &session_manager_main; - stream_session_t *s; - transport_connection_t *tc; - application_t *srv; - u32 tci; - - srv = application_get (server_index); - - pool_get (smm->listen_sessions[srv->session_type], s); - memset (s, 0, sizeof (*s)); - - s->session_type = srv->session_type; - s->session_state = SESSION_STATE_LISTENING; - s->session_index = s - smm->listen_sessions[srv->session_type]; - s->app_index = srv->index; - - /* Transport bind/listen */ - tci = tp_vfts[srv->session_type].bind (s->session_index, ip, port); - - /* Attach transport to session */ - s->connection_index = tci; - tc = tp_vfts[srv->session_type].get_listener (tci); - - srv->session_index = s->session_index; - - /* Add to the main lookup table */ - stream_session_table_add_for_tc (s->session_type, tc, s->session_index); - - return 0; -} - -void -stream_session_stop_listen (u32 server_index) -{ - session_manager_main_t *smm = &session_manager_main; - stream_session_t *listener; - transport_connection_t *tc; - application_t *srv; - - srv = application_get (server_index); - listener = pool_elt_at_index (smm->listen_sessions[srv->session_type], - srv->session_index); - - tc = tp_vfts[srv->session_type].get_listener (listener->connection_index); - stream_session_table_del_for_tc (smm, listener->session_type, tc); - - tp_vfts[srv->session_type].unbind (listener->connection_index); - pool_put (smm->listen_sessions[srv->session_type], listener); -} - -int -connect_server_add_segment_cb (application_t * ss, char *segment_name, - u32 segment_size) -{ - /* Does exactly nothing, but die */ - ASSERT (0); - return 0; -} - -void -connects_session_manager_init (session_manager_main_t * smm, u8 session_type) -{ - session_manager_t *sm; - u32 connect_fifo_size = 256 << 10; /* Config? 
*/ - u32 default_segment_size = 1 << 20; - - pool_get (smm->session_managers, sm); - memset (sm, 0, sizeof (*sm)); - - sm->add_segment_size = default_segment_size; - sm->rx_fifo_size = connect_fifo_size; - sm->tx_fifo_size = connect_fifo_size; - sm->add_segment = 1; - - session_manager_add_segment (smm, sm); - smm->connect_manager_index[session_type] = sm - smm->session_managers; -} - void stream_session_connect_notify (transport_connection_t * tc, u8 sst, u8 is_fail) @@ -976,34 +643,36 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, session_manager_main_t *smm = &session_manager_main; application_t *app; stream_session_t *new_s = 0; - u64 value; + u64 handle; + u32 api_context = 0; - value = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip, - tc->lcl_port, tc->rmt_port, - tc->proto); - if (value == HALF_OPEN_LOOKUP_INVALID_VALUE) + handle = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip, + tc->lcl_port, tc->rmt_port, + tc->proto); + if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE) { clib_warning ("This can't be good!"); return; } - app = application_get (value >> 32); + /* Get the app's index from the handle we stored when opening connection */ + app = application_get (handle >> 32); + api_context = tc->s_index; if (!is_fail) { - /* Create new session (server segments are allocated if needed) */ - if (stream_session_create_i (smm, app, tc, &new_s)) - return; + segment_manager_t *sm; + sm = application_get_connect_segment_manager (app); - app->session_index = stream_session_get_index (new_s); - app->thread_index = new_s->thread_index; + /* Create new session (svm segments are allocated if needed) */ + if (stream_session_create_i (sm, tc, &new_s)) + return; - /* Allocate vpp event queue for this thread if needed */ - vpp_session_event_queue_allocate (smm, tc->thread_index); + new_s->app_index = app->index; } /* Notify client */ - app->cb_fns.session_connected_callback (app->api_client_index, new_s, + app->cb_fns.session_connected_callback (app->index, api_context, new_s, is_fail); /* Cleanup session lookup */ @@ -1046,48 +715,13 @@ void stream_session_delete (stream_session_t * s) { session_manager_main_t *smm = vnet_get_session_manager_main (); - svm_fifo_segment_private_t *fifo_segment; - application_t *app; /* Delete from the main lookup table. */ stream_session_table_del (smm, s); /* Cleanup fifo segments */ - fifo_segment = svm_fifo_get_segment (s->server_segment_index); - svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo); - svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo); - - app = application_get_if_valid (s->app_index); - - /* No app. 
A possibility: after disconnect application called unbind */ - if (!app) - return; - - if (app->mode == APP_CLIENT) - { - /* Cleanup app if client */ - application_del (app); - } - else if (app->mode == APP_SERVER) - { - session_manager_t *sm; - svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; - u32 fifo_index; - - /* For server, see if any segments can be removed */ - sm = session_manager_get (app->session_manager_index); - - /* Delete fifo */ - fifo_segment = svm_fifo_get_segment (s->server_segment_index); - fifos = (svm_fifo_t **) fifo_segment->h->fifos; - - fifo_index = svm_fifo_segment_index (fifo_segment); - - /* Remove segment only if it holds no fifos and not the first */ - if (sm->segment_indices[0] != fifo_index && vec_len (fifos) == 0) - svm_fifo_segment_delete (fifo_segment); - } + segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo, + s->server_tx_fifo); pool_put (smm->sessions[s->thread_index], s); } @@ -1134,21 +768,22 @@ int stream_session_accept (transport_connection_t * tc, u32 listener_index, u8 sst, u8 notify) { - session_manager_main_t *smm = &session_manager_main; application_t *server; stream_session_t *s, *listener; + segment_manager_t *sm; int rv; /* Find the server */ - listener = pool_elt_at_index (smm->listen_sessions[sst], listener_index); + listener = listen_session_get (sst, listener_index); server = application_get (listener->app_index); - if ((rv = stream_session_create_i (smm, server, tc, &s))) + sm = application_get_listen_segment_manager (server, listener); + if ((rv = stream_session_create_i (sm, tc, &s))) return rv; - /* Allocate vpp event queue for this thread if needed */ - vpp_session_event_queue_allocate (smm, tc->thread_index); + s->app_index = server->index; + s->listener_index = listener_index; /* Shoulder-tap the server */ if (notify) @@ -1159,37 +794,111 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, return 0; } +/** + * Ask transport to open connection to remote transport endpoint. + * + * Stores handle for matching request with reply since the call can be + * asynchronous. For instance, for TCP the 3-way handshake must complete + * before reply comes. Session is only created once connection is established. + * + * @param app_index Index of the application requesting the connect + * @param st Session type requested. + * @param tep Remote transport endpoint + * @param res Resulting transport connection . + */ int -stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, - u32 app_index) +stream_session_open (u32 app_index, session_type_t st, + transport_endpoint_t * tep, + transport_connection_t ** res) { transport_connection_t *tc; - u32 tci; - u64 value; int rv; + u64 handle; - /* Ask transport to open connection */ - rv = tp_vfts[sst].open (addr, port_host_byte_order); + rv = tp_vfts[st].open (&tep->ip, tep->port); if (rv < 0) { clib_warning ("Transport failed to open connection."); return VNET_API_ERROR_SESSION_CONNECT_FAIL; } - tci = rv; + tc = tp_vfts[st].get_half_open ((u32) rv); - /* Get transport connection */ - tc = tp_vfts[sst].get_half_open (tci); - - /* Store api_client_index and transport connection index */ - value = (((u64) app_index) << 32) | (u64) tc->c_index; + /* Save app and tc index. 
The latter is needed to help establish the + * connection while the former is needed when the connect notify comes + * and we have to notify the external app */ + handle = (((u64) app_index) << 32) | (u64) tc->c_index; /* Add to the half-open lookup table */ - stream_session_half_open_table_add (sst, tc, value); + stream_session_half_open_table_add (st, tc, handle); + + *res = tc; + + return 0; +} + +/** + * Ask transport to listen on local transport endpoint. + * + * @param s Session for which listen will be called. Note that unlike + * established sessions, listen sessions are not associated to a + * thread. + * @param tep Local endpoint to be listened on. + */ +int +stream_session_listen (stream_session_t * s, transport_endpoint_t * tep) +{ + transport_connection_t *tc; + u32 tci; + + /* Transport bind/listen */ + tci = tp_vfts[s->session_type].bind (s->session_index, &tep->ip, tep->port); + + if (tci == (u32) ~ 0) + return -1; + + /* Attach transport to session */ + s->connection_index = tci; + tc = tp_vfts[s->session_type].get_listener (tci); + + /* Weird but handle it ... */ + if (tc == 0) + return -1; + + /* Add to the main lookup table */ + stream_session_table_add_for_tc (tc, s->session_index); return 0; } +/** + * Ask transport to stop listening on local transport endpoint. + * + * @param s Session to stop listening on. It must be in state LISTENING. + */ +int +stream_session_stop_listen (stream_session_t * s) +{ + transport_connection_t *tc; + + if (s->session_state != SESSION_STATE_LISTENING) + { + clib_warning ("not a listening session"); + return -1; + } + + tc = tp_vfts[s->session_type].get_listener (s->connection_index); + if (!tc) + { + clib_warning ("no transport"); + return VNET_API_ERROR_ADDRESS_NOT_IN_USE; + } + + stream_session_table_del_for_tc (tc); + tp_vfts[s->session_type].unbind (s->connection_index); + return 0; +} + /** * Disconnect session and propagate to transport. This should eventually * result in a delete notification that allows us to cleanup session state. 
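The half-open lookup value used above packs the connecting application's index into the upper 32 bits and the transport connection index into the lower 32 bits, so that stream_session_connect_notify () can recover both once the (possibly asynchronous) open completes. The following is a minimal, self-contained sketch of that bit layout only; the helper names pack_half_open_handle () and unpack_half_open_handle () are illustrative and do not exist in the tree.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack app index (upper 32 bits) and transport connection index
 * (lower 32 bits) into one u64, mirroring the handle computed in
 * stream_session_open (). */
static uint64_t
pack_half_open_handle (uint32_t app_index, uint32_t tc_index)
{
  return ((uint64_t) app_index << 32) | (uint64_t) tc_index;
}

/* Recover the two indices, mirroring stream_session_connect_notify (),
 * which looks up the application with handle >> 32. */
static void
unpack_half_open_handle (uint64_t handle, uint32_t * app_index,
			 uint32_t * tc_index)
{
  *app_index = handle >> 32;
  *tc_index = handle & 0xFFFFFFFF;
}

int
main (void)
{
  uint32_t app_index, tc_index;
  uint64_t handle = pack_half_open_handle (7, 42);

  unpack_half_open_handle (handle, &app_index, &tc_index);
  assert (app_index == 7 && tc_index == 42);
  printf ("handle 0x%016llx -> app %u tc %u\n",
	  (unsigned long long) handle, app_index, tc_index);
  return 0;
}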
@@ -1297,6 +1006,10 @@ session_manager_main_enable (vlib_main_t * vm) vec_validate (smm->last_event_poll_by_thread, num_threads - 1); #endif + /* Allocate vpp event queues */ + for (i = 0; i < vec_len (smm->vpp_event_queues); i++) + session_vpp_event_queue_allocate (smm, i); + /* $$$$ preallocate hack config parameter */ for (i = 0; i < 200000; i++) { @@ -1322,9 +1035,6 @@ session_manager_main_enable (vlib_main_t * vm) 200000 /* $$$$ config parameter nbuckets */ , (64 << 20) /*$$$ config parameter table size */ ); - for (i = 0; i < SESSION_N_TYPES; i++) - smm->connect_manager_index[i] = INVALID_INDEX; - smm->is_enabled = 1; /* Enable TCP transport */ diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 6878b4d2..6e4ea96d 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -21,6 +21,7 @@ #include #include #include +#include #define HALF_OPEN_LOOKUP_INVALID_VALUE ((u64)~0) #define INVALID_INDEX ((u32)~0) @@ -107,6 +108,9 @@ typedef struct _stream_session_t svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; + /** svm segment index where fifos were allocated */ + u32 svm_segment_index; + /** Type */ u8 session_type; @@ -133,27 +137,10 @@ typedef struct _stream_session_t /** stream server pool index */ u32 app_index; - /** svm segment index */ - u32 server_segment_index; + /** Parent listener session if the result of an accept */ + u32 listener_index; } stream_session_t; -typedef struct _session_manager -{ - /** segments mapped by this server */ - u32 *segment_indices; - - /** Session fifo sizes. They are provided for binds and take default - * values for connects */ - u32 rx_fifo_size; - u32 tx_fifo_size; - - /** Configured additional segment size */ - u32 add_segment_size; - - /** Flag that indicates if additional segments should be created */ - u8 add_segment; -} session_manager_t; - /* Forward definition */ typedef struct _session_manager_main session_manager_main_t; @@ -206,11 +193,6 @@ struct _session_manager_main /** Unique segment name counter */ u32 unique_segment_name_counter; - /* Connection manager used by incoming connects */ - u32 connect_manager_index[SESSION_N_TYPES]; - - session_manager_t *session_managers; - /** Per transport rx function that can either dequeue or peek */ session_fifo_rx_fn *session_tx_fns[SESSION_N_TYPES]; @@ -242,37 +224,6 @@ vnet_get_session_manager_main () return &session_manager_main; } -always_inline session_manager_t * -session_manager_get (u32 index) -{ - return pool_elt_at_index (session_manager_main.session_managers, index); -} - -always_inline unix_shared_memory_queue_t * -session_manager_get_vpp_event_queue (u32 thread_index) -{ - return session_manager_main.vpp_event_queues[thread_index]; -} - -always_inline session_manager_t * -connects_session_manager_get (session_manager_main_t * smm, - session_type_t session_type) -{ - return pool_elt_at_index (smm->session_managers, - smm->connect_manager_index[session_type]); -} - -void session_manager_get_segment_info (u32 index, u8 ** name, u32 * size); -int session_manager_flush_enqueue_events (u32 thread_index); -int -session_manager_add_first_segment (session_manager_main_t * smm, - session_manager_t * sm, u32 segment_size, - u8 ** segment_name); -void -session_manager_del (session_manager_main_t * smm, session_manager_t * sm); -void -connects_session_manager_init (session_manager_main_t * smm, u8 session_type); - /* * Stream session functions */ @@ -300,6 +251,8 @@ transport_connection_t u32 thread_index); stream_session_t 
*stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto); +void stream_session_table_add_for_tc (transport_connection_t * tc, u64 value); +int stream_session_table_del_for_tc (transport_connection_t * tc); always_inline stream_session_t * stream_session_get_tsi (u64 ti_and_si, u32 thread_index) @@ -310,7 +263,7 @@ stream_session_get_tsi (u64 ti_and_si, u32 thread_index) } always_inline stream_session_t * -stream_session_get (u64 si, u32 thread_index) +stream_session_get (u32 si, u32 thread_index) { return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } @@ -327,6 +280,40 @@ stream_session_get_if_valid (u64 si, u32 thread_index) return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } +always_inline u64 +stream_session_handle (stream_session_t * s) +{ + return ((u64) s->thread_index << 32) | (u64) s->session_index; +} + +always_inline u32 +stream_session_index_from_handle (u64 handle) +{ + return handle & 0xFFFFFFFF; +} + +always_inline u32 +stream_session_thread_from_handle (u64 handle) +{ + return handle >> 32; +} + +always_inline void +stream_session_parse_handle (u64 handle, u32 * index, u32 * thread_index) +{ + *index = stream_session_index_from_handle (handle); + *thread_index = stream_session_thread_from_handle (handle); +} + +always_inline stream_session_t * +stream_session_get_from_handle (u64 handle) +{ + session_manager_main_t *smm = &session_manager_main; + return pool_elt_at_index (smm->sessions[stream_session_thread_from_handle + (handle)], + stream_session_index_from_handle (handle)); +} + always_inline stream_session_t * stream_session_listener_get (u8 sst, u64 si) { @@ -375,13 +362,14 @@ void stream_session_reset_notify (transport_connection_t * tc); int stream_session_accept (transport_connection_t * tc, u32 listener_index, u8 sst, u8 notify); -int stream_session_open (u8 sst, ip46_address_t * addr, - u16 port_host_byte_order, u32 api_client_index); +int +stream_session_open (u32 app_index, session_type_t st, + transport_endpoint_t * tep, + transport_connection_t ** tc); +int stream_session_listen (stream_session_t * s, transport_endpoint_t * tep); +int stream_session_stop_listen (stream_session_t * s); void stream_session_disconnect (stream_session_t * s); void stream_session_cleanup (stream_session_t * s); -int -stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port); -void stream_session_stop_listen (u32 server_index); u8 *format_stream_session (u8 * s, va_list * args); @@ -390,6 +378,71 @@ transport_proto_vft_t *session_get_transport_vft (u8 type); clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en); +always_inline unix_shared_memory_queue_t * +session_manager_get_vpp_event_queue (u32 thread_index) +{ + return session_manager_main.vpp_event_queues[thread_index]; +} + +int session_manager_flush_enqueue_events (u32 thread_index); + +always_inline u64 +listen_session_get_handle (stream_session_t * s) +{ + ASSERT (s->session_state == SESSION_STATE_LISTENING); + return ((u64) s->session_type << 32) | s->session_index; +} + +always_inline stream_session_t * +listen_session_get_from_handle (u64 handle) +{ + session_manager_main_t *smm = &session_manager_main; + stream_session_t *s; + u32 type, index; + type = handle >> 32; + index = handle & 0xFFFFFFFF; + + if (pool_is_free_index (smm->listen_sessions[type], index)) + return 0; + + s = pool_elt_at_index (smm->listen_sessions[type], index); + ASSERT (s->session_state == SESSION_STATE_LISTENING); + return s; +} + 
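Two distinct 64-bit handle encodings appear in the helpers above: an established session handle carries the owning thread index in the upper word and the per-thread pool index in the lower word (stream_session_handle), while a listener handle carries the session type in the upper word (listen_session_get_handle). The sketch below illustrates both layouts with hypothetical helper names; only the bit arithmetic is taken from the code above, and the type values are placeholders.

#include <assert.h>
#include <stdint.h>

/* Established session: thread index in the upper word, per-thread
 * pool index in the lower word, as in stream_session_handle (). */
static uint64_t
session_handle_pack (uint32_t thread_index, uint32_t session_index)
{
  return ((uint64_t) thread_index << 32) | (uint64_t) session_index;
}

/* Listener: session type in the upper word, listener pool index in
 * the lower word, as in listen_session_get_handle (). */
static uint64_t
listener_handle_pack (uint32_t session_type, uint32_t session_index)
{
  return ((uint64_t) session_type << 32) | (uint64_t) session_index;
}

/* The split is identical in both cases; only the meaning of the
 * upper word differs (thread index vs. session type). */
static void
handle_parse (uint64_t handle, uint32_t * hi, uint32_t * lo)
{
  *hi = handle >> 32;
  *lo = handle & 0xFFFFFFFF;
}

int
main (void)
{
  uint32_t hi, lo;

  handle_parse (session_handle_pack (2, 100), &hi, &lo);
  assert (hi == 2 && lo == 100);

  handle_parse (listener_handle_pack (1 /* placeholder type */ , 5), &hi, &lo);
  assert (hi == 1 && lo == 5);
  return 0;
}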
+always_inline stream_session_t * +listen_session_new (session_type_t type) +{ + stream_session_t *s; + pool_get (session_manager_main.listen_sessions[type], s); + memset (s, 0, sizeof (*s)); + + s->session_type = type; + s->session_state = SESSION_STATE_LISTENING; + s->session_index = s - session_manager_main.listen_sessions[type]; + + return s; +} + +always_inline stream_session_t * +listen_session_get (session_type_t type, u32 index) +{ + return pool_elt_at_index (session_manager_main.listen_sessions[type], + index); +} + +always_inline void +listen_session_del (stream_session_t * s) +{ + pool_put (session_manager_main.listen_sessions[s->session_type], s); +} + +always_inline u8 +session_manager_is_enabled () +{ + return session_manager_main.is_enabled == 1; +} + #endif /* __included_session_h__ */ /* diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 9c38428a..a82dfe0b 100644 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -38,6 +38,8 @@ #define foreach_session_api_msg \ _(MAP_ANOTHER_SEGMENT_REPLY, map_another_segment_reply) \ +_(APPLICATION_ATTACH, application_attach) \ +_(APPLICATION_DETACH, application_detach) \ _(BIND_URI, bind_uri) \ _(UNBIND_URI, unbind_uri) \ _(CONNECT_URI, connect_uri) \ @@ -48,13 +50,8 @@ _(RESET_SESSION_REPLY, reset_session_reply) \ _(BIND_SOCK, bind_sock) \ _(UNBIND_SOCK, unbind_sock) \ _(CONNECT_SOCK, connect_sock) \ -_(DISCONNECT_SOCK, disconnect_sock) \ -_(DISCONNECT_SOCK_REPLY, disconnect_sock_reply) \ -_(ACCEPT_SOCK_REPLY, accept_sock_reply) \ -_(RESET_SOCK_REPLY, reset_sock_reply) \ _(SESSION_ENABLE_DISABLE, session_enable_disable) \ - static int send_add_segment_callback (u32 api_client_index, const u8 * segment_name, u32 segment_size) @@ -80,11 +77,14 @@ send_add_segment_callback (u32 api_client_index, const u8 * segment_name, } static int -send_session_accept_uri_callback (stream_session_t * s) +send_session_accept_callback (stream_session_t * s) { vl_api_accept_session_t *mp; unix_shared_memory_queue_t *q, *vpp_queue; application_t *server = application_get (s->app_index); + transport_connection_t *tc; + transport_proto_vft_t *tp_vft; + stream_session_t *listener; q = vl_api_client_index_to_input_queue (server->api_client_index); vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); @@ -93,24 +93,28 @@ send_session_accept_uri_callback (stream_session_t * s) return -1; mp = vl_msg_api_alloc (sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SESSION); + memset (mp, 0, sizeof (*mp)); - /* Note: session_type is the first octet in all types of sessions */ + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SESSION); - mp->accept_cookie = server->accept_cookie; + listener = listen_session_get (s->session_type, s->listener_index); + tp_vft = session_get_transport_vft (s->session_type); + tc = tp_vft->get_connection (s->connection_index, s->thread_index); + mp->listener_handle = listen_session_get_handle (listener); + mp->handle = stream_session_handle (s); mp->server_rx_fifo = (u64) s->server_rx_fifo; mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; - mp->session_type = s->session_type; mp->vpp_event_queue_address = (u64) vpp_queue; + mp->port = tc->rmt_port; + mp->is_ip4 = tc->is_ip4; + clib_memcpy (&mp->ip, &tc->rmt_ip, sizeof (tc->rmt_ip)); vl_msg_api_send_shmem (q, (u8 *) & mp); return 0; } static void -send_session_disconnect_uri_callback (stream_session_t * s) 
+send_session_disconnect_callback (stream_session_t * s) { vl_api_disconnect_session_t *mp; unix_shared_memory_queue_t *q; @@ -124,14 +128,12 @@ send_session_disconnect_uri_callback (stream_session_t * s) mp = vl_msg_api_alloc (sizeof (*mp)); memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_DISCONNECT_SESSION); - - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; + mp->handle = stream_session_handle (s); vl_msg_api_send_shmem (q, (u8 *) & mp); } static void -send_session_reset_uri_callback (stream_session_t * s) +send_session_reset_callback (stream_session_t * s) { vl_api_reset_session_t *mp; unix_shared_memory_queue_t *q; @@ -145,22 +147,20 @@ send_session_reset_uri_callback (stream_session_t * s) mp = vl_msg_api_alloc (sizeof (*mp)); memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_RESET_SESSION); - - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; + mp->handle = stream_session_handle (s); vl_msg_api_send_shmem (q, (u8 *) & mp); } static int -send_session_connected_uri_callback (u32 api_client_index, - stream_session_t * s, u8 is_fail) +send_session_connected_callback (u32 app_index, u32 api_context, + stream_session_t * s, u8 is_fail) { vl_api_connect_uri_reply_t *mp; unix_shared_memory_queue_t *q; - application_t *app = application_lookup (api_client_index); - u8 *seg_name; + application_t *app; unix_shared_memory_queue_t *vpp_queue; + app = application_get (app_index); q = vl_api_client_index_to_input_queue (app->api_client_index); if (!q) @@ -168,24 +168,15 @@ send_session_connected_uri_callback (u32 api_client_index, mp = vl_msg_api_alloc (sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); - mp->context = app->api_context; + mp->context = api_context; if (!is_fail) { vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); mp->server_rx_fifo = (u64) s->server_rx_fifo; mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; - mp->session_type = s->session_type; + mp->handle = stream_session_handle (s); mp->vpp_event_queue_address = (u64) vpp_queue; - mp->client_event_queue_address = (u64) app->event_queue; mp->retval = 0; - - session_manager_get_segment_info (s->server_segment_index, &seg_name, - &mp->segment_size); - mp->segment_name_length = vec_len (seg_name); - if (mp->segment_name_length) - clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); } else { @@ -195,199 +186,14 @@ send_session_connected_uri_callback (u32 api_client_index, vl_msg_api_send_shmem (q, (u8 *) & mp); /* Remove client if connect failed */ - if (is_fail) - { - application_del (app); - } - else - { - s->session_state = SESSION_STATE_READY; - } - - return 0; -} - -/** - * Redirect a connect_uri message to the indicated server. 
- * Only sent if the server has bound the related port with - * URI_OPTIONS_FLAGS_USE_FIFO - */ -static int -redirect_connect_uri_callback (u32 server_api_client_index, void *mp_arg) -{ - vl_api_connect_uri_t *mp = mp_arg; - unix_shared_memory_queue_t *server_q, *client_q; - vlib_main_t *vm = vlib_get_main (); - f64 timeout = vlib_time_now (vm) + 0.5; - int rv = 0; - - server_q = vl_api_client_index_to_input_queue (server_api_client_index); - - if (!server_q) - { - rv = VNET_API_ERROR_INVALID_VALUE; - goto out; - } - - client_q = vl_api_client_index_to_input_queue (mp->client_index); - if (!client_q) - { - rv = VNET_API_ERROR_INVALID_VALUE_2; - goto out; - } - - /* Tell the server the client's API queue address, so it can reply */ - mp->client_queue_address = (u64) client_q; - - /* - * Bounce message handlers MUST NOT block the data-plane. - * Spin waiting for the queue lock, but - */ - - while (vlib_time_now (vm) < timeout) - { - rv = - unix_shared_memory_queue_add (server_q, (u8 *) & mp, 1 /*nowait */ ); - switch (rv) - { - /* correctly enqueued */ - case 0: - return VNET_CONNECT_REDIRECTED; - - /* continue spinning, wait for pthread_mutex_trylock to work */ - case -1: - continue; - - /* queue stuffed, drop the msg */ - case -2: - rv = VNET_API_ERROR_QUEUE_FULL; - goto out; - } - } -out: - /* Dispose of the message */ - vl_msg_api_free (mp); - return rv; -} - -static u64 -make_session_handle (stream_session_t * s) -{ - return (u64) s->session_index << 32 | (u64) s->thread_index; -} - -static int -send_session_accept_callback (stream_session_t * s) -{ - vl_api_accept_sock_t *mp; - unix_shared_memory_queue_t *q, *vpp_queue; - application_t *server = application_get (s->app_index); - - q = vl_api_client_index_to_input_queue (server->api_client_index); - vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); - - if (!q) - return -1; - - mp = vl_msg_api_alloc (sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SOCK); - - /* Note: session_type is the first octet in all types of sessions */ - - mp->accept_cookie = server->accept_cookie; - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->handle = make_session_handle (s); - mp->vpp_event_queue_address = (u64) vpp_queue; - vl_msg_api_send_shmem (q, (u8 *) & mp); - - return 0; -} - -static int -send_session_connected_callback (u32 api_client_index, stream_session_t * s, - u8 is_fail) -{ - vl_api_connect_sock_reply_t *mp; - unix_shared_memory_queue_t *q; - application_t *app = application_lookup (api_client_index); - u8 *seg_name; - unix_shared_memory_queue_t *vpp_queue; - - q = vl_api_client_index_to_input_queue (app->api_client_index); - - if (!q) - return -1; - - mp = vl_msg_api_alloc (sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_SOCK_REPLY); - mp->context = app->api_context; - mp->retval = is_fail; if (!is_fail) { - vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->handle = make_session_handle (s); - mp->vpp_event_queue_address = (u64) vpp_queue; - mp->client_event_queue_address = (u64) app->event_queue; - - session_manager_get_segment_info (s->server_segment_index, &seg_name, - &mp->segment_size); - mp->segment_name_length = vec_len (seg_name); - if (mp->segment_name_length) - clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); + s->session_state = SESSION_STATE_READY; } - vl_msg_api_send_shmem (q, (u8 *) & 
mp); - - /* Remove client if connect failed */ - if (is_fail) - application_del (app); - return 0; } -static void -send_session_disconnect_callback (stream_session_t * s) -{ - vl_api_disconnect_sock_t *mp; - unix_shared_memory_queue_t *q; - application_t *app = application_get (s->app_index); - - q = vl_api_client_index_to_input_queue (app->api_client_index); - - if (!q) - return; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_DISCONNECT_SOCK); - - mp->handle = make_session_handle (s); - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -send_session_reset_callback (stream_session_t * s) -{ - vl_api_reset_sock_t *mp; - unix_shared_memory_queue_t *q; - application_t *app = application_get (s->app_index); - - q = vl_api_client_index_to_input_queue (app->api_client_index); - - if (!q) - return; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_RESET_SOCK); - - mp->handle = make_session_handle (s); - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - /** * Redirect a connect_uri message to the indicated server. * Only sent if the server has bound the related port with @@ -396,10 +202,11 @@ send_session_reset_callback (stream_session_t * s) static int redirect_connect_callback (u32 server_api_client_index, void *mp_arg) { - vl_api_connect_sock_t *mp = mp_arg; + vl_api_connect_uri_t *mp = mp_arg; unix_shared_memory_queue_t *server_q, *client_q; vlib_main_t *vm = vlib_get_main (); f64 timeout = vlib_time_now (vm) + 0.5; + application_t *app; int rv = 0; server_q = vl_api_client_index_to_input_queue (server_api_client_index); @@ -419,6 +226,9 @@ redirect_connect_callback (u32 server_api_client_index, void *mp_arg) /* Tell the server the client's API queue address, so it can reply */ mp->client_queue_address = (u64) client_q; + app = application_lookup (mp->client_index); + mp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = app->sm_properties.rx_fifo_size; + mp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = app->sm_properties.tx_fifo_size; /* * Bounce message handlers MUST NOT block the data-plane. 
@@ -452,15 +262,6 @@ out: } static session_cb_vft_t uri_session_cb_vft = { - .session_accept_callback = send_session_accept_uri_callback, - .session_disconnect_callback = send_session_disconnect_uri_callback, - .session_connected_callback = send_session_connected_uri_callback, - .session_reset_callback = send_session_reset_uri_callback, - .add_segment_callback = send_add_segment_callback, - .redirect_connect_callback = redirect_connect_uri_callback -}; - -static session_cb_vft_t session_cb_vft = { .session_accept_callback = send_session_accept_callback, .session_disconnect_callback = send_session_disconnect_callback, .session_connected_callback = send_session_connected_callback, @@ -498,60 +299,134 @@ vl_api_session_enable_disable_t_handler (vl_api_session_enable_disable_t * mp) } static void -vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) +vl_api_application_attach_t_handler (vl_api_application_attach_t * mp) { - vl_api_bind_uri_reply_t *rmp; - vnet_bind_args_t _a, *a = &_a; - char segment_name[128]; - u32 segment_name_length; + vl_api_application_attach_reply_t *rmp; + vnet_app_attach_args_t _a, *a = &_a; int rv; - _Static_assert (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= - sizeof (mp->options), - "Out of options, fix api message definition"); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - segment_name_length = ARRAY_LEN (segment_name); + STATIC_ASSERT (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= + sizeof (mp->options), + "Out of options, fix api message definition"); memset (a, 0, sizeof (*a)); - a->uri = (char *) mp->uri; a->api_client_index = mp->client_index; a->options = mp->options; - a->segment_name = segment_name; - a->segment_name_length = segment_name_length; a->session_cb_vft = &uri_session_cb_vft; - a->options[SESSION_OPTIONS_SEGMENT_SIZE] = mp->initial_segment_size; - a->options[SESSION_OPTIONS_ACCEPT_COOKIE] = mp->accept_cookie; - rv = vnet_bind_uri (a); + rv = vnet_application_attach (a); +done: /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_BIND_URI_REPLY, ({ + REPLY_MACRO2 (VL_API_APPLICATION_ATTACH_REPLY, ({ rmp->retval = rv; if (!rv) { rmp->segment_name_length = 0; /* $$$$ policy? 
*/ - rmp->segment_size = mp->initial_segment_size; - if (segment_name_length) + rmp->segment_size = a->segment_size; + if (a->segment_name_length) { - memcpy (rmp->segment_name, segment_name, segment_name_length); - rmp->segment_name_length = segment_name_length; + memcpy (rmp->segment_name, a->segment_name, + a->segment_name_length); + rmp->segment_name_length = a->segment_name_length; } - rmp->server_event_queue_address = a->server_event_queue_address; + rmp->app_event_queue_address = a->app_event_queue_address; } })); /* *INDENT-ON* */ } +static void +vl_api_application_detach_t_handler (vl_api_application_detach_t * mp) +{ + vl_api_application_detach_reply_t *rmp; + int rv = VNET_API_ERROR_INVALID_VALUE_2; + vnet_app_detach_args_t _a, *a = &_a; + application_t *app; + + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + a->app_index = app->index; + rv = vnet_application_detach (a); + } + +done: + REPLY_MACRO (VL_API_APPLICATION_DETACH_REPLY); +} + +static void +vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) +{ + vl_api_bind_uri_reply_t *rmp; + vnet_bind_args_t _a, *a = &_a; + application_t *app; + int rv; + + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + memset (a, 0, sizeof (*a)); + a->uri = (char *) mp->uri; + a->app_index = app->index; + rv = vnet_bind_uri (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } + +done: + REPLY_MACRO (VL_API_BIND_URI_REPLY); +} + static void vl_api_unbind_uri_t_handler (vl_api_unbind_uri_t * mp) { vl_api_unbind_uri_reply_t *rmp; + application_t *app; + vnet_unbind_args_t _a, *a = &_a; int rv; - rv = vnet_unbind_uri ((char *) mp->uri, mp->client_index); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + a->uri = (char *) mp->uri; + a->app_index = app->index; + rv = vnet_unbind_uri (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } +done: REPLY_MACRO (VL_API_UNBIND_URI_REPLY); } @@ -560,26 +435,37 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { vl_api_connect_uri_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + application_t *app; int rv; - a->uri = (char *) mp->uri; - a->api_client_index = mp->client_index; - a->api_context = mp->context; - a->options = mp->options; - a->session_cb_vft = &uri_session_cb_vft; - a->mp = mp; + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - rv = vnet_connect_uri (a); + app = application_lookup (mp->client_index); + if (app) + { + a->uri = (char *) mp->uri; + a->api_context = mp->context; + a->app_index = app->index; + a->mp = mp; + rv = vnet_connect_uri (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) return; /* Got some error, relay it */ +done: /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({ - rmp->retval = rv; - })); + REPLY_MACRO (VL_API_CONNECT_URI_REPLY); /* *INDENT-ON* */ } @@ -587,13 +473,29 @@ static void vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) { vl_api_disconnect_session_reply_t *rmp; - int rv; + vnet_disconnect_args_t _a, *a = &_a; + application_t *app; + int rv = 0; - rv = api_session_not_valid (mp->session_index, 
mp->session_thread_index); - if (!rv) - rv = - vnet_disconnect_session (mp->session_index, mp->session_thread_index); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + a->handle = mp->handle; + a->app_index = app->index; + rv = vnet_disconnect_session (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } +done: REPLY_MACRO (VL_API_DISCONNECT_SESSION_REPLY); } @@ -601,11 +503,8 @@ static void vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * mp) { - if (api_session_not_valid (mp->session_index, mp->session_thread_index)) - { - clib_warning ("Invalid session!"); - return; - } + vnet_disconnect_args_t _a, *a = &_a; + application_t *app; /* Client objected to disconnecting the session, log and continue */ if (mp->retval) @@ -615,15 +514,29 @@ vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * } /* Disconnect has been confirmed. Confirm close to transport */ - vnet_disconnect_session (mp->session_index, mp->session_thread_index); + app = application_lookup (mp->client_index); + if (app) + { + a->handle = mp->handle; + a->app_index = app->index; + vnet_disconnect_session (a); + } } static void vl_api_reset_session_reply_t_handler (vl_api_reset_session_reply_t * mp) { + application_t *app; stream_session_t *s; + u32 index, thread_index; + + app = application_lookup (mp->client_index); + if (!app) + return; - if (api_session_not_valid (mp->session_index, mp->session_thread_index)) + stream_session_parse_handle (mp->handle, &index, &thread_index); + s = stream_session_get_if_valid (index, thread_index); + if (s == 0 || app->index != s->app_index) { clib_warning ("Invalid session!"); return; @@ -636,8 +549,6 @@ vl_api_reset_session_reply_t_handler (vl_api_reset_session_reply_t * mp) return; } - s = stream_session_get (mp->session_index, mp->session_thread_index); - /* This comes as a response to a reset, transport only waiting for * confirmation to remove connection state, no need to disconnect */ stream_session_cleanup (s); @@ -648,11 +559,13 @@ vl_api_accept_session_reply_t_handler (vl_api_accept_session_reply_t * mp) { stream_session_t *s; int rv; - - if (api_session_not_valid (mp->session_index, mp->session_thread_index)) + u32 session_index, thread_index; + session_index = stream_session_index_from_handle (mp->handle); + thread_index = stream_session_thread_from_handle (mp->handle); + if (api_session_not_valid (session_index, thread_index)) return; - s = stream_session_get (mp->session_index, mp->session_thread_index); + s = stream_session_get (session_index, thread_index); rv = mp->retval; if (rv) @@ -677,49 +590,31 @@ vl_api_bind_sock_t_handler (vl_api_bind_sock_t * mp) { vl_api_bind_sock_reply_t *rmp; vnet_bind_args_t _a, *a = &_a; - char segment_name[128]; - u32 segment_name_length; - int rv; - - STATIC_ASSERT (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= - sizeof (mp->options), - "Out of options, fix api message definition"); - - segment_name_length = ARRAY_LEN (segment_name); - - memset (a, 0, sizeof (*a)); - - clib_memcpy (&a->tep.ip, mp->ip, - (mp->is_ip4 ? 
sizeof (ip4_address_t) : - sizeof (ip6_address_t))); - a->tep.is_ip4 = mp->is_ip4; - a->tep.port = mp->port; - a->tep.vrf = mp->vrf; - - a->api_client_index = mp->client_index; - a->options = mp->options; - a->segment_name = segment_name; - a->segment_name_length = segment_name_length; - a->session_cb_vft = &session_cb_vft; + int rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + application_t *app; - rv = vnet_bind_uri (a); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_BIND_SOCK_REPLY, ({ - rmp->retval = rv; - if (!rv) - { - rmp->segment_name_length = 0; - rmp->segment_size = mp->options[SESSION_OPTIONS_SEGMENT_SIZE]; - if (segment_name_length) - { - memcpy(rmp->segment_name, segment_name, segment_name_length); - rmp->segment_name_length = segment_name_length; - } - rmp->server_event_queue_address = a->server_event_queue_address; - } - })); - /* *INDENT-ON* */ + app = application_lookup (mp->client_index); + if (app) + { + memset (a, 0, sizeof (*a)); + clib_memcpy (&a->tep.ip, mp->ip, (mp->is_ip4 ? + sizeof (ip4_address_t) : + sizeof (ip6_address_t))); + a->tep.is_ip4 = mp->is_ip4; + a->tep.port = mp->port; + a->tep.vrf = mp->vrf; + a->app_index = app->index; + + rv = vnet_bind (a); + } +done: + REPLY_MACRO (VL_API_BIND_SOCK_REPLY); } static void @@ -727,13 +622,24 @@ vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp) { vl_api_unbind_sock_reply_t *rmp; vnet_unbind_args_t _a, *a = &_a; - int rv; + application_t *app; + int rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; - a->api_client_index = mp->client_index; - a->handle = mp->handle; + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - rv = vnet_unbind (a); + app = application_lookup (mp->client_index); + if (app) + { + a->app_index = mp->client_index; + a->handle = mp->handle; + rv = vnet_unbind (a); + } +done: REPLY_MACRO (VL_API_UNBIND_SOCK_REPLY); } @@ -742,114 +648,55 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) { vl_api_connect_sock_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + application_t *app; int rv; - clib_memcpy (&a->tep.ip, mp->ip, - (mp->is_ip4 ? 
sizeof (ip4_address_t) : - sizeof (ip6_address_t))); - a->tep.is_ip4 = mp->is_ip4; - a->tep.port = mp->port; - a->tep.vrf = mp->vrf; - a->options = mp->options; - a->session_cb_vft = &session_cb_vft; - a->api_context = mp->context; - a->mp = mp; - - rv = vnet_connect (a); - - if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) - return; - - /* Got some error, relay it */ - - /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({ - rmp->retval = rv; - })); - /* *INDENT-ON* */ -} - -static void -vl_api_disconnect_sock_t_handler (vl_api_disconnect_sock_t * mp) -{ - vnet_disconnect_args_t _a, *a = &_a; - vl_api_disconnect_sock_reply_t *rmp; - int rv; - - a->api_client_index = mp->client_index; - a->handle = mp->handle; - rv = vnet_disconnect (a); - - REPLY_MACRO (VL_API_DISCONNECT_SOCK_REPLY); -} - -static void -vl_api_disconnect_sock_reply_t_handler (vl_api_disconnect_sock_reply_t * mp) -{ - vnet_disconnect_args_t _a, *a = &_a; - - /* Client objected to disconnecting the session, log and continue */ - if (mp->retval) + if (session_manager_is_enabled () == 0) { - clib_warning ("client retval %d", mp->retval); - return; + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; } - a->api_client_index = mp->client_index; - a->handle = mp->handle; - - vnet_disconnect (a); -} - -static void -vl_api_reset_sock_reply_t_handler (vl_api_reset_sock_reply_t * mp) -{ - stream_session_t *s; - u32 session_index, thread_index; - - /* Client objected to resetting the session, log and continue */ - if (mp->retval) + app = application_lookup (mp->client_index); + if (app) { - clib_warning ("client retval %d", mp->retval); - return; + clib_memcpy (&a->tep.ip, mp->ip, + (mp->is_ip4 ? sizeof (ip4_address_t) : + sizeof (ip6_address_t))); + a->api_context = mp->context; + a->app_index = app->index; + a->mp = mp; + rv = vnet_connect (a); } - - if (api_parse_session_handle (mp->handle, &session_index, &thread_index)) + else { - clib_warning ("Invalid handle"); - return; + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; } - s = stream_session_get (session_index, thread_index); + if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) + return; - /* This comes as a response to a reset, transport only waiting for - * confirmation to remove connection state, no need to disconnect */ - stream_session_cleanup (s); + /* Got some error, relay it */ + +done: + REPLY_MACRO (VL_API_CONNECT_URI_REPLY); } -static void -vl_api_accept_sock_reply_t_handler (vl_api_accept_sock_reply_t * mp) +static clib_error_t * +application_reaper_cb (u32 client_index) { - stream_session_t *s; - u32 session_index, thread_index; - - if (api_parse_session_handle (mp->handle, &session_index, &thread_index)) - { - clib_warning ("Invalid handle"); - return; - } - s = stream_session_get (session_index, thread_index); - - if (mp->retval) + application_t *app = application_lookup (client_index); + vnet_app_detach_args_t _a, *a = &_a; + if (app) { - /* Server isn't interested, kill the session */ - stream_session_disconnect (s); - return; + a->app_index = app->index; + vnet_application_detach (a); } - - s->session_state = SESSION_STATE_READY; + return 0; } +VL_MSG_API_REAPER_FUNCTION (application_reaper_cb); + #define vl_msg_name_crc_list #include #undef vl_msg_name_crc_list @@ -903,6 +750,7 @@ session_api_hookup (vlib_main_t * vm) } VLIB_API_INIT_FUNCTION (session_api_hookup); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index 2f912cbc..7ea7af15 100644 --- a/src/vnet/session/transport.h +++ 
b/src/vnet/session/transport.h @@ -30,7 +30,7 @@ typedef struct _transport_connection ip46_address_t lcl_ip; /**< Local IP */ u16 lcl_port; /**< Local port */ u16 rmt_port; /**< Remote port */ - u8 proto; /**< Transport protocol id */ + u8 proto; /**< Transport protocol id (also session type) */ u32 s_index; /**< Parent session index */ u32 c_index; /**< Connection index in transport pool */ @@ -103,7 +103,8 @@ typedef CLIB_PACKED (struct { { struct { - ip4_address_t src; ip4_address_t dst; + ip4_address_t src; + ip4_address_t dst; u16 src_port; u16 dst_port; /* align by making this 4 octets even though its a 1-bit field @@ -122,10 +123,14 @@ typedef CLIB_PACKED (struct { struct { /* 48 octets */ - ip6_address_t src; ip6_address_t dst; + ip6_address_t src; + ip6_address_t dst; u16 src_port; - u16 dst_port; u32 proto; u8 unused_for_now[8]; - }; u64 as_u64[6]; + u16 dst_port; + u32 proto; + u8 unused_for_now[8]; + }; + u64 as_u64[6]; }; }) v6_connection_key_t; /* *INDENT-ON* */ @@ -233,10 +238,10 @@ make_v6_ss_kv_from_tc (session_kv6_t * kv, transport_connection_t * t) typedef struct _transport_endpoint { - ip46_address_t ip; - u16 port; - u8 is_ip4; - u32 vrf; + ip46_address_t ip; /** ip address */ + u16 port; /** port in host order */ + u8 is_ip4; /** 1 if ip4 */ + u32 vrf; /** fib table the endpoint is associated with */ } transport_endpoint_t; typedef clib_bihash_24_8_t transport_endpoint_table_t; diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 9e8e1561..f8fbf28c 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -237,8 +237,7 @@ tclient_thread_fn (void *arg) memset (dmp, 0, sizeof (*dmp)); dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); dmp->client_index = tm->my_client_index; - dmp->session_index = sp->vpp_session_index; - dmp->session_thread_index = sp->vpp_session_thread; + dmp->handle = sp->vpp_session_handle; vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); pool_put (tm->sessions, sp); } @@ -253,9 +252,10 @@ tclient_thread_fn (void *arg) static void vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) { + vlib_main_t *vm = vlib_get_main (); tclient_main_t *tm = &tclient_main; - tm->my_client_index = mp->index; + vlib_process_signal_event (vm, tm->node_index, 1 /* evt */ , 0 /* data */ ); } static void @@ -264,7 +264,6 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) tclient_main_t *tm = &tclient_main; session_t *session; u32 session_index; - u64 key; i32 retval = /* clib_net_to_host_u32 ( */ mp->retval /*) */ ; if (retval < 0) @@ -291,24 +290,24 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session->server_rx_fifo->client_session_index = session_index; session->server_tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; session->server_tx_fifo->client_session_index = session_index; - - session->vpp_session_index = mp->session_index; - session->vpp_session_thread = mp->session_thread_index; + session->vpp_session_handle = mp->handle; /* Add it to the session lookup table */ - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - hash_set (tm->session_index_by_vpp_handles, key, session_index); + hash_set (tm->session_index_by_vpp_handles, mp->handle, session_index); tm->ready_connections++; } -static void +static int create_api_loopback (tclient_main_t * tm) { + vlib_main_t *vm = vlib_get_main (); vl_api_memclnt_create_t _m, *mp = &_m; extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *); api_main_t *am = 
&api_main; vl_shmem_hdr_t *shmem_hdr; + uword *event_data = 0, event_type; + int resolved = 0; /* * Create a "loopback" API client connection @@ -324,6 +323,25 @@ create_api_loopback (tclient_main_t * tm) strncpy ((char *) mp->name, "tcp_tester", sizeof (mp->name) - 1); vl_api_memclnt_create_t_handler (mp); + + /* Wait for reply */ + tm->node_index = vlib_get_current_process (vm)->node_runtime.node_index; + vlib_process_wait_for_event_or_clock (vm, 1.0); + event_type = vlib_process_get_events (vm, &event_data); + switch (event_type) + { + case 1: + resolved = 1; + break; + case ~0: + /* timed out */ + break; + default: + clib_warning ("unknown event_type %d", event_type); + } + if (!resolved) + return -1; + return 0; } #define foreach_tclient_static_api_msg \ @@ -333,17 +351,7 @@ _(CONNECT_URI_REPLY, connect_uri_reply) static clib_error_t * tclient_api_hookup (vlib_main_t * vm) { - tclient_main_t *tm = &tclient_main; vl_msg_api_msg_config_t _c, *c = &_c; - int i; - - /* Init test data */ - vec_validate (tm->connect_test_data, 64 * 1024 - 1); - for (i = 0; i < vec_len (tm->connect_test_data); i++) - tm->connect_test_data[i] = i & 0xff; - - tm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); /* Hook up client-side static APIs to our handlers */ #define _(N,n) do { \ @@ -365,18 +373,105 @@ tclient_api_hookup (vlib_main_t * vm) return 0; } -VLIB_API_INIT_FUNCTION (tclient_api_hookup); +static int +tcp_test_clients_init (vlib_main_t * vm) +{ + tclient_main_t *tm = &tclient_main; + int i; + + tclient_api_hookup (vm); + if (create_api_loopback (tm)) + return -1; + + /* Init test data */ + vec_validate (tm->connect_test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (tm->connect_test_data); i++) + tm->connect_test_data[i] = i & 0xff; + + tm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); + vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); + + tm->is_init = 1; + + return 0; +} + +static void +builtin_session_reset_callback (stream_session_t * s) +{ + return; +} + +static int +builtin_session_connected_callback (u32 app_index, u32 api_context, + stream_session_t * s, u8 code) +{ + return 0; +} + +static int +builtin_session_create_callback (stream_session_t * s) +{ + return 0; +} + +static void +builtin_session_disconnect_callback (stream_session_t * s) +{ + return; +} + +static int +builtin_server_rx_callback (stream_session_t * s) +{ + return 0; +} + +/* *INDENT-OFF* */ +static session_cb_vft_t builtin_clients = { + .session_reset_callback = builtin_session_reset_callback, + .session_connected_callback = builtin_session_connected_callback, + .session_accept_callback = builtin_session_create_callback, + .session_disconnect_callback = builtin_session_disconnect_callback, + .builtin_server_rx_callback = builtin_server_rx_callback +}; +/* *INDENT-ON* */ + +static int +attach_builtin_test_clients () +{ + vnet_app_attach_args_t _a, *a = &_a; + u8 segment_name[128]; + u32 segment_name_length; + u64 options[16]; + + segment_name_length = ARRAY_LEN (segment_name); + + memset (a, 0, sizeof (*a)); + memset (options, 0, sizeof (options)); + + a->api_client_index = ~0; + a->segment_name = segment_name; + a->segment_name_length = segment_name_length; + a->session_cb_vft = &builtin_clients; + + options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; + options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ + a->options = options; + + return vnet_application_attach (a); +} static 
clib_error_t * test_tcp_clients_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { + tclient_main_t *tm = &tclient_main; u8 *connect_uri = (u8 *) "tcp://6.0.1.1/1234"; u8 *uri; - tclient_main_t *tm = &tclient_main; - int i; u32 n_clients = 1; + int i; tm->bytes_to_send = 8192; tm->n_iterations = 1; @@ -397,14 +492,19 @@ test_tcp_clients_command_fn (vlib_main_t * vm, format_unformat_error, input); } + if (tm->is_init == 0) + { + if (tcp_test_clients_init (vm)) + return clib_error_return (0, "failed init"); + } + tm->ready_connections = 0; tm->expected_connections = n_clients; + uri = connect_uri; if (tm->connect_uri) uri = tm->connect_uri; - create_api_loopback (tm); - #if TCP_BUILTIN_CLIENT_PTHREAD /* Start a transmit thread */ if (tm->client_thread_handle == 0) @@ -420,6 +520,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, } #endif vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); + attach_builtin_test_clients (); /* Fire off connect requests, in something approaching a normal manner */ for (i = 0; i < n_clients; i++) @@ -461,6 +562,16 @@ VLIB_CLI_COMMAND (test_clients_command, static) = }; /* *INDENT-ON* */ +clib_error_t * +tcp_test_clients_main_init (vlib_main_t * vm) +{ + tclient_main_t *tm = &tclient_main; + tm->is_init = 0; + return 0; +} + +VLIB_INIT_FUNCTION (tcp_test_clients_main_init); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 64030302..2bd87c07 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -39,8 +39,7 @@ typedef struct svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; - u32 vpp_session_index; - u32 vpp_session_thread; + u64 vpp_session_handle; } session_t; typedef struct @@ -110,6 +109,10 @@ typedef struct u32 client_bytes_received; u8 test_return_packets; + u8 is_init; + + u32 node_index; + /* convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 917d4bd3..8308e3d9 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -18,17 +18,46 @@ #include #include +/* define message IDs */ +#include + +/* define message structures */ +#define vl_typedefs +#include +#undef vl_typedefs + +/* define generated endian-swappers */ +#define vl_endianfun +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + typedef struct { u8 *rx_buf; unix_shared_memory_queue_t **vpp_queue; - u32 byte_index; + u64 byte_index; + + /* Sever's event queue */ + unix_shared_memory_queue_t *vl_input_queue; + + /* API client handle */ + u32 my_client_index; + + u32 app_index; + + /* process node index for evnt scheduling */ + u32 node_index; vlib_main_t *vlib_main; } builtin_server_main_t; builtin_server_main_t builtin_server_main; - int builtin_session_accept_callback (stream_session_t * s) { @@ -45,9 +74,13 @@ builtin_session_accept_callback (stream_session_t * s) void builtin_session_disconnect_callback (stream_session_t * s) { + builtin_server_main_t *bsm = &builtin_server_main; + vnet_disconnect_args_t _a, *a = &_a; clib_warning ("called..."); - vnet_disconnect_session (s->session_index, s->thread_index); + a->handle = stream_session_handle (s); + a->app_index = bsm->app_index; + vnet_disconnect_session (a); } void @@ -60,7 +93,7 @@ builtin_session_reset_callback (stream_session_t * s) int -builtin_session_connected_callback (u32 client_index, +builtin_session_connected_callback (u32 app_index, u32 api_context, stream_session_t * s, u8 is_fail) { clib_warning ("called..."); @@ -91,7 +124,7 @@ test_bytes (builtin_server_main_t * bsm, int actual_transfer) { if (bsm->rx_buf[i] != ((bsm->byte_index + i) & 0xff)) { - clib_warning ("at %d expected %d got %d", bsm->byte_index + i, + clib_warning ("at %lld expected %d got %d", bsm->byte_index + i, (bsm->byte_index + i) & 0xff, bsm->rx_buf[i]); } } @@ -190,23 +223,66 @@ static session_cb_vft_t builtin_session_cb_vft = { .session_reset_callback = builtin_session_reset_callback }; +/* Abuse VPP's input queue */ static int -server_create (vlib_main_t * vm) +create_api_loopback (vlib_main_t * vm) { - vnet_bind_args_t _a, *a = &_a; - u64 options[SESSION_OPTIONS_N_OPTIONS]; - char segment_name[128]; - u32 num_threads; - vlib_thread_main_t *vtm = vlib_get_thread_main (); + builtin_server_main_t *bsm = &builtin_server_main; + vl_api_memclnt_create_t _m, *mp = &_m; + extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *); + api_main_t *am = &api_main; + vl_shmem_hdr_t *shmem_hdr; + uword *event_data = 0, event_type; + int resolved = 0; - num_threads = 1 /* main thread */ + vtm->n_threads; - vec_validate (builtin_server_main.vpp_queue, num_threads - 1); + /* + * Create a "loopback" API client connection + * Don't do things like this unless you know what you're doing... 
+ */ + + shmem_hdr = am->shmem_hdr; + bsm->vl_input_queue = shmem_hdr->vl_input_queue; + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; + mp->context = 0xFEEDFACE; + mp->input_queue = (u64) bsm->vl_input_queue; + strncpy ((char *) mp->name, "tcp_test_server", sizeof (mp->name) - 1); + + vl_api_memclnt_create_t_handler (mp); + + /* Wait for reply */ + bsm->node_index = vlib_get_current_process (vm)->node_runtime.node_index; + vlib_process_wait_for_event_or_clock (vm, 1.0); + event_type = vlib_process_get_events (vm, &event_data); + switch (event_type) + { + case 1: + resolved = 1; + break; + case ~0: + /* timed out */ + break; + default: + clib_warning ("unknown event_type %d", event_type); + } + if (!resolved) + return -1; + + return 0; +} + +static int +server_attach () +{ + builtin_server_main_t *bsm = &builtin_server_main; + u8 segment_name[128]; + u64 options[SESSION_OPTIONS_N_OPTIONS]; + vnet_app_attach_args_t _a, *a = &_a; memset (a, 0, sizeof (*a)); memset (options, 0, sizeof (options)); - a->uri = "tcp://0.0.0.0/1234"; - a->api_client_index = ~0; + a->api_client_index = bsm->my_client_index; a->session_cb_vft = &builtin_session_cb_vft; a->options = options; a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; @@ -215,9 +291,94 @@ server_create (vlib_main_t * vm) a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); + if (vnet_application_attach (a)) + { + clib_warning ("failed to attach server"); + return -1; + } + bsm->app_index = a->app_index; + return 0; +} + +static int +server_listen () +{ + builtin_server_main_t *bsm = &builtin_server_main; + vnet_bind_args_t _a, *a = &_a; + memset (a, 0, sizeof (*a)); + a->app_index = bsm->app_index; + a->uri = "tcp://0.0.0.0/1234"; return vnet_bind_uri (a); } +static int +server_create (vlib_main_t * vm) +{ + builtin_server_main_t *bsm = &builtin_server_main; + u32 num_threads; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + + if (bsm->my_client_index == (u32) ~ 0) + { + if (create_api_loopback (vm)) + return -1; + } + + num_threads = 1 /* main thread */ + vtm->n_threads; + vec_validate (builtin_server_main.vpp_queue, num_threads - 1); + + if (server_attach ()) + { + clib_warning ("failed to attach server"); + return -1; + } + if (server_listen ()) + { + clib_warning ("failed to start listening"); + return -1; + } + return 0; +} + +/* Get our api client index */ +static void +vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + builtin_server_main_t *bsm = &builtin_server_main; + bsm->my_client_index = mp->index; + vlib_process_signal_event (vm, bsm->node_index, 1 /* evt */ , + 0 /* data */ ); +} + +#define foreach_tcp_builtin_server_api_msg \ +_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ + +static clib_error_t * +tcp_builtin_server_api_hookup (vlib_main_t * vm) +{ + vl_msg_api_msg_config_t _c, *c = &_c; + + /* Hook up client-side static APIs to our handlers */ +#define _(N,n) do { \ + c->id = VL_API_##N; \ + c->name = #n; \ + c->handler = vl_api_##n##_t_handler; \ + c->cleanup = vl_noop_handler; \ + c->endian = vl_api_##n##_t_endian; \ + c->print = vl_api_##n##_t_print; \ + c->size = sizeof(vl_api_##n##_t); \ + c->traced = 1; /* trace, so these msgs print */ \ + c->replay = 0; /* don't replay client create/delete msgs */ \ + c->message_bounce = 0; /* don't bounce this message */ \ + vl_msg_api_config(c);} while (0); + + foreach_tcp_builtin_server_api_msg; +#undef _ + + return 0; +} + static clib_error_t * 
server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) @@ -234,6 +395,7 @@ server_create_command_fn (vlib_main_t * vm, } #endif + tcp_builtin_server_api_hookup (vm); vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); rv = server_create (vm); switch (rv) @@ -249,12 +411,22 @@ server_create_command_fn (vlib_main_t * vm, /* *INDENT-OFF* */ VLIB_CLI_COMMAND (server_create_command, static) = { - .path = "test server", - .short_help = "test server", + .path = "test tcp server", + .short_help = "test tcp server", .function = server_create_command_fn, }; /* *INDENT-ON* */ +clib_error_t * +builtin_tcp_server_main_init (vlib_main_t * vm) +{ + builtin_server_main_t *bsm = &builtin_server_main; + bsm->my_client_index = ~0; + return 0; +} + +VLIB_INIT_FUNCTION (builtin_tcp_server_main_init); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index b6c34828..a0c66b9f 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -34,14 +34,19 @@ tcp_connection_bind (u32 session_index, ip46_address_t * ip, listener->c_lcl_port = clib_host_to_net_u16 (port_host_byte_order); if (is_ip4) - listener->c_lcl_ip4.as_u32 = ip->ip4.as_u32; + { + listener->c_lcl_ip4.as_u32 = ip->ip4.as_u32; + listener->c_is_ip4 = 1; + listener->c_proto = SESSION_TYPE_IP4_TCP; + } else - clib_memcpy (&listener->c_lcl_ip6, &ip->ip6, sizeof (ip6_address_t)); + { + clib_memcpy (&listener->c_lcl_ip6, &ip->ip6, sizeof (ip6_address_t)); + listener->c_proto = SESSION_TYPE_IP6_TCP; + } listener->c_s_index = session_index; - listener->c_proto = SESSION_TYPE_IP4_TCP; listener->state = TCP_STATE_LISTEN; - listener->c_is_ip4 = 1; tcp_connection_timers_init (listener); @@ -62,7 +67,6 @@ tcp_session_bind_ip6 (u32 session_index, ip46_address_t * ip, u16 port_host_byte_order) { return tcp_connection_bind (session_index, ip, port_host_byte_order, 0); - } static void @@ -397,6 +401,7 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) tc->c_lcl_port = clib_host_to_net_u16 (lcl_port); tc->c_c_index = tc - tm->half_open_connections; tc->c_is_ip4 = is_ip4; + tc->c_proto = is_ip4 ? 
SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; /* The other connection vars will be initialized after SYN ACK */ tcp_connection_timers_init (tc); @@ -518,7 +523,10 @@ format_tcp_session (u8 * s, va_list * args) tcp_connection_t *tc; tc = tcp_connection_get (tci, thread_index); - return format (s, "%U", format_tcp_connection, tc); + if (tc) + return format (s, "%U", format_tcp_connection, tc); + else + return format (s, "empty"); } u8 * diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 2f5da108..93f3245d 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -100,8 +100,6 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler; #define TCP_RTO_SYN_RETRIES 3 /* SYN retries without doubling RTO */ #define TCP_RTO_INIT 1 * THZ /* Initial retransmit timer */ -void tcp_update_time (f64 now, u32 thread_index); - /** TCP connection flags */ #define foreach_tcp_connection_flag \ _(SNDACK, "Send ACK") \ @@ -481,6 +479,13 @@ tcp_time_now (void) return clib_cpu_time_now () * tcp_main.tstamp_ticks_per_clock; } +always_inline void +tcp_update_time (f64 now, u32 thread_index) +{ + tw_timer_expire_timers_16t_2w_512sl (&tcp_main.timer_wheels[thread_index], + now); +} + u32 tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b); u32 diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 7e9fa47b..ae1f92d5 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1841,6 +1841,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, case TCP_STATE_ESTABLISHED: case TCP_STATE_FIN_WAIT_1: case TCP_STATE_FIN_WAIT_2: + vlib_buffer_advance (b0, n_advance_bytes0); error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0); break; case TCP_STATE_CLOSE_WAIT: @@ -2410,12 +2411,6 @@ VLIB_REGISTER_NODE (tcp6_input_node) = /* *INDENT-ON* */ VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input); -void -tcp_update_time (f64 now, u32 thread_index) -{ - tcp_main_t *tm = vnet_get_tcp_main (); - tw_timer_expire_timers_16t_2w_512sl (&tm->timer_wheels[thread_index], now); -} static void tcp_dispatch_table_init (tcp_main_t * tm) diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 0725bb04..3dbbdf6f 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -12,7 +12,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - #include #define TCP_TEST_I(_cond, _comment, _args...) \ @@ -174,6 +173,118 @@ tcp_test_sack () return 0; } +static int +tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) +{ + svm_fifo_t *f; + u32 fifo_size = 1 << 20; + u32 *test_data = 0; + u32 offset; + int i, rv; + u32 data_word, test_data_len; + + /* $$$ parse args */ + test_data_len = fifo_size / sizeof (u32); + vec_validate (test_data, test_data_len - 1); + + for (i = 0; i < vec_len (test_data); i++) + test_data[i] = i; + + f = svm_fifo_create (fifo_size); + + /* Paint fifo data vector with -1's */ + memset (f->data, 0xFF, test_data_len); + + /* Enqueue an initial (un-dequeued) chunk */ + rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , + sizeof (u32), (u8 *) test_data); + + if (rv != sizeof (u32)) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + + /* + * Create 3 chunks in the future. 
The offsets are relative + * to the current fifo tail + */ + for (i = 0; i < 3; i++) + { + offset = (2 * i + 1) * sizeof (u32); + vlib_cli_output (vm, "add offset %d", offset); + + rv = svm_fifo_enqueue_with_offset + (f, 0 /* pid */ , offset, sizeof (u32), + (u8 *) (test_data + ((offset + sizeof (u32)) / sizeof (u32)))); + + if (rv) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + } + + /* Paint missing data backwards */ + for (i = 3; i > 0; i--) + { + offset = (2 * i + 0) * sizeof (u32); + + vlib_cli_output (vm, "add offset %d", offset); + + rv = svm_fifo_enqueue_with_offset + (f, 0 /* pid */ , offset, sizeof (u32), + (u8 *) (test_data + ((offset + sizeof (u32)) / sizeof (u32)))); + + if (rv) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + } + + vlib_cli_output (vm, "fifo before missing link: %U", + format_svm_fifo, f, 1 /* verbose */ ); + + /* Enqueue the missing u32 */ + rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , + sizeof (u32), (u8 *) (test_data + 1)); + if (rv != 7 * sizeof (u32)) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + + vlib_cli_output (vm, "fifo after missing link: %U", + format_svm_fifo, f, 1 /* verbose */ ); + + /* Collect results */ + for (i = 0; i < 7; i++) + { + rv = svm_fifo_dequeue_nowait (f, 0 /* pid */ , sizeof (u32), + (u8 *) & data_word); + if (rv != sizeof (u32)) + { + clib_warning ("dequeue returned %d", rv); + goto out; + } + if (data_word != test_data[i]) + { + clib_warning ("recovered data %d not %d", data_word, test_data[i]); + goto out; + } + } + + clib_warning ("test complete..."); + +out: + svm_fifo_free (f); + vec_free (test_data); + return 0; +} + + + static clib_error_t * tcp_test (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd_arg) @@ -186,6 +297,10 @@ tcp_test (vlib_main_t * vm, { res = tcp_test_sack (); } + else if (unformat (input, "fifo")) + { + res = tcp_test_fifo (vm, input); + } else { return clib_error_return (0, "unknown input `%U'", @@ -203,10 +318,16 @@ tcp_test (vlib_main_t * vm, } } +/* *INDENT-OFF* */ VLIB_CLI_COMMAND (tcp_test_command, static) = { -.path = "test tcp",.short_help = "internal tcp unit tests",.function = - tcp_test,}; + .path = "test tcp", + .short_help = "internal tcp unit tests", + .function = tcp_test, +}; +/* *INDENT-ON* */ + + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index 57f774c5..8565f04c 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -91,12 +91,11 @@ static session_cb_vft_t builtin_server = { /* *INDENT-ON* */ static int -bind_builtin_uri_server (u8 * uri) +attach_builtin_uri_server () { - vnet_bind_args_t _a, *a = &_a; - char segment_name[128]; + vnet_app_attach_args_t _a, *a = &_a; + u8 segment_name[128]; u32 segment_name_length; - int rv; u64 options[16]; segment_name_length = ARRAY_LEN (segment_name); @@ -104,8 +103,7 @@ bind_builtin_uri_server (u8 * uri) memset (a, 0, sizeof (*a)); memset (options, 0, sizeof (options)); - a->uri = (char *) uri; - a->api_client_index = ~0; /* built-in server */ + a->api_client_index = ~0; a->segment_name = segment_name; a->segment_name_length = segment_name_length; a->session_cb_vft = &builtin_server; @@ -114,6 +112,23 @@ bind_builtin_uri_server (u8 * uri) options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ a->options = options; + return vnet_application_attach (a); +} + +static int +bind_builtin_uri_server (u8 * uri) +{ + vnet_bind_args_t _a, *a = &_a; + int 
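/*
 * Editor's sketch (not VPP code): the fifo test above enqueues chunks
 * "in the future" with svm_fifo_enqueue_with_offset() and then plugs the
 * hole with a normal enqueue, which is expected to report more bytes than
 * it was handed because the out-of-order data becomes contiguous. The toy
 * model below shows that idea with a flat buffer and a per-byte "present"
 * map instead of the real ring arithmetic and ooo segment pool; all names
 * (toy_fifo_t, toy_enqueue*) are invented for the illustration.
 */
#include <assert.h>
#include <string.h>

#define TOY_FIFO_SIZE 64

typedef struct
{
  unsigned char data[TOY_FIFO_SIZE];
  unsigned char present[TOY_FIFO_SIZE];
  unsigned tail;		/* next in-order byte expected */
} toy_fifo_t;

/* enqueue len bytes at tail + offset; does not advance the tail */
static void
toy_enqueue_with_offset (toy_fifo_t * f, unsigned offset,
			 const unsigned char *src, unsigned len)
{
  memcpy (f->data + f->tail + offset, src, len);
  memset (f->present + f->tail + offset, 1, len);
}

/* in-order enqueue: returns the number of bytes now contiguous at the tail */
static unsigned
toy_enqueue (toy_fifo_t * f, const unsigned char *src, unsigned len)
{
  unsigned advanced = 0;
  memcpy (f->data + f->tail, src, len);
  memset (f->present + f->tail, 1, len);
  while (f->tail < TOY_FIFO_SIZE && f->present[f->tail])
    {
      f->tail++;
      advanced++;
    }
  return advanced;
}

int
main (void)
{
  toy_fifo_t f = { {0}, {0}, 0 };
  unsigned char bytes[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

  toy_enqueue_with_offset (&f, 4, bytes + 4, 4);	/* future chunk */
  assert (f.tail == 0);		/* nothing contiguous yet */
  /* plugging the hole also collects the out-of-order chunk */
  assert (toy_enqueue (&f, bytes, 4) == 8);
  return 0;
}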
rv; + + rv = attach_builtin_uri_server (); + if (rv) + return rv; + + memset (a, 0, sizeof (*a)); + a->uri = (char *) uri; + a->app_index = ~0; /* built-in server */ + rv = vnet_bind_uri (a); return rv; @@ -122,11 +137,12 @@ bind_builtin_uri_server (u8 * uri) static int unbind_builtin_uri_server (u8 * uri) { - int rv; + vnet_unbind_args_t _a, *a = &_a; - rv = vnet_unbind_uri ((char *) uri, ~0 /* client_index */ ); + a->app_index = ~0; + a->uri = (char *) uri; - return rv; + return vnet_unbind_uri (a); } static clib_error_t * -- cgit 1.2.3-korg From 1f75cfd73320476a8f821064391fe368dd4bf75b Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Fri, 14 Apr 2017 16:46:44 -0400 Subject: Fix fifo ooo bugs and improve testing Change-Id: If3c01e318bcb740ca5b240c63f712e2167082a80 Signed-off-by: Dave Barach Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 126 +++++++---- src/svm/svm_fifo.h | 21 +- src/vnet/tcp/tcp.c | 2 +- src/vnet/tcp/tcp.h | 1 + src/vnet/tcp/tcp_format.c | 2 +- src/vnet/tcp/tcp_input.c | 28 ++- src/vnet/tcp/tcp_test.c | 516 ++++++++++++++++++++++++++++++++++++++++++---- 7 files changed, 602 insertions(+), 94 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 097bab77..bd968aea 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -15,6 +15,36 @@ #include +#define offset_lt(_a, _b) ((i32)((_a)-(_b)) < 0) +#define offset_leq(_a, _b) ((i32)((_a)-(_b)) <= 0) + +u8 * +format_ooo_segment (u8 * s, va_list * args) +{ + ooo_segment_t *seg = va_arg (*args, ooo_segment_t *); + + s = format (s, "pos %u, len %u, next %d, prev %d", + seg->start, seg->length, seg->next, seg->prev); + return s; +} + +u8 * +format_ooo_list (u8 * s, va_list * args) +{ + svm_fifo_t *f = va_arg (*args, svm_fifo_t *); + u32 ooo_segment_index = f->ooos_list_head; + ooo_segment_t *seg; + + while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX) + { + seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index); + s = format (s, "\n %U", format_ooo_segment, seg); + + ooo_segment_index = seg->next; + } + return s; +} + /** create an svm fifo, in the current heap. 
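/*
 * Editor's illustration (not part of the patch): the offset_lt()/
 * offset_leq() macros introduced at the top of svm_fifo.c above use the
 * signed-difference trick, the same idea as TCP sequence-number arithmetic,
 * so comparisons stay correct when 32-bit offsets wrap around. A minimal,
 * standalone demonstration using only the standard library:
 */
#include <assert.h>
#include <stdint.h>

#define offset_lt(_a, _b) ((int32_t)((_a) - (_b)) < 0)

int
main (void)
{
  /* plain case */
  assert (offset_lt (10u, 20u));
  /* wrapped case: 0xFFFFFFF0 is "before" 0x10 once the counter wraps */
  assert (offset_lt (0xFFFFFFF0u, 0x10u));
  /* whereas a naive unsigned comparison would say the opposite */
  assert (!(0xFFFFFFF0u < 0x10u));
  return 0;
}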
Fails vs blow up the process */ svm_fifo_t * svm_fifo_create (u32 data_size_in_bytes) @@ -47,7 +77,7 @@ ooo_segment_new (svm_fifo_t * f, u32 start, u32 length) pool_get (f->ooo_segments, s); - s->fifo_position = start; + s->start = start; s->length = length; s->prev = s->next = OOO_SEGMENT_INVALID_INDEX; @@ -88,14 +118,13 @@ static void ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) { ooo_segment_t *s, *new_s, *prev, *next, *it; - u32 new_index, position, end_offset, s_sof, s_eof, s_index; + u32 new_index, end_offset, s_sof, s_eof, s_index; - position = (f->tail + offset) % f->nitems; end_offset = offset + length; if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX) { - s = ooo_segment_new (f, position, length); + s = ooo_segment_new (f, offset, length); f->ooos_list_head = s - f->ooo_segments; f->ooos_newest = f->ooos_list_head; return; @@ -104,26 +133,26 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) /* Find first segment that starts after new segment */ s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); while (s->next != OOO_SEGMENT_INVALID_INDEX - && ooo_segment_offset (f, s) <= offset) + && offset_leq (ooo_segment_offset (f, s), offset)) s = pool_elt_at_index (f->ooo_segments, s->next); s_index = s - f->ooo_segments; s_sof = ooo_segment_offset (f, s); s_eof = ooo_segment_end_offset (f, s); + prev = ooo_segment_get_prev (f, s); /* No overlap, add before current segment */ - if (end_offset < s_sof) + if (offset_lt (end_offset, s_sof) + && (!prev || offset_lt (prev->start + prev->length, offset))) { - new_s = ooo_segment_new (f, position, length); + new_s = ooo_segment_new (f, offset, length); new_index = new_s - f->ooo_segments; /* Pool might've moved, get segment again */ s = pool_elt_at_index (f->ooo_segments, s_index); - if (s->prev != OOO_SEGMENT_INVALID_INDEX) { new_s->prev = s->prev; - prev = pool_elt_at_index (f->ooo_segments, new_s->prev); prev->next = new_index; } @@ -139,9 +168,9 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) return; } /* No overlap, add after current segment */ - else if (s_eof < offset) + else if (offset_lt (s_eof, offset)) { - new_s = ooo_segment_new (f, position, length); + new_s = ooo_segment_new (f, offset, length); new_index = new_s - f->ooo_segments; /* Pool might've moved, get segment again */ @@ -150,7 +179,6 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) if (s->next != OOO_SEGMENT_INVALID_INDEX) { new_s->next = s->next; - next = pool_elt_at_index (f->ooo_segments, new_s->next); next->prev = new_index; } @@ -167,7 +195,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) */ /* Merge at head */ - if (offset <= s_sof) + if (offset_leq (offset, s_sof)) { /* If we have a previous, check if we overlap */ if (s->prev != OOO_SEGMENT_INVALID_INDEX) @@ -176,26 +204,31 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) /* New segment merges prev and current. Remove previous and * update position of current. 
*/ - if (ooo_segment_end_offset (f, prev) >= offset) + if (offset_leq (offset, ooo_segment_end_offset (f, prev))) { - s->fifo_position = prev->fifo_position; + s->start = prev->start; s->length = s_eof - ooo_segment_offset (f, prev); ooo_segment_del (f, s->prev); } + else + { + s->start = offset; + s->length = s_eof - ooo_segment_offset (f, s); + } } else { - s->fifo_position = position; + s->start = offset; s->length = s_eof - ooo_segment_offset (f, s); } /* The new segment's tail may cover multiple smaller ones */ - if (s_eof < end_offset) + if (offset_lt (s_eof, end_offset)) { /* Remove segments completely covered */ it = (s->next != OOO_SEGMENT_INVALID_INDEX) ? pool_elt_at_index (f->ooo_segments, s->next) : 0; - while (it && ooo_segment_end_offset (f, it) < end_offset) + while (it && offset_lt (ooo_segment_end_offset (f, it), end_offset)) { next = (it->next != OOO_SEGMENT_INVALID_INDEX) ? pool_elt_at_index (f->ooo_segments, it->next) : 0; @@ -207,7 +240,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) s->length = end_offset - ooo_segment_offset (f, s); /* If partial overlap with last, merge */ - if (it && ooo_segment_offset (f, it) < end_offset) + if (it && offset_lt (ooo_segment_offset (f, it), end_offset)) { s->length += it->length - (ooo_segment_offset (f, it) - end_offset); @@ -216,7 +249,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) } } /* Last but overlapping previous */ - else if (s_eof <= end_offset) + else if (offset_leq (s_eof, end_offset)) { s->length = end_offset - ooo_segment_offset (f, s); } @@ -247,7 +280,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); /* If last tail update overlaps one/multiple ooo segments, remove them */ - diff = (f->nitems + f->tail - s->fifo_position) % f->nitems; + diff = (f->nitems + ((int) s->start - f->tail)) % f->nitems; while (0 < diff && diff < n_bytes_enqueued) { /* Segment end is beyond the tail. Advance tail and be done */ @@ -262,7 +295,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) { index = s - f->ooo_segments; s = pool_elt_at_index (f->ooo_segments, s->next); - diff = (f->nitems + f->tail - s->fifo_position) % f->nitems; + diff = (f->nitems + ((int) s->start - f->tail)) % f->nitems; ooo_segment_del (f, index); } /* End of search */ @@ -368,9 +401,20 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; - u32 tail_plus_offset; + u32 normalized_offset; + int rv; - ASSERT (offset > 0); + /* Safety: don't wrap more than nitems/2 */ + ASSERT ((f->nitems + offset - f->tail) % f->nitems < f->nitems / 2); + + /* Users would do do well to avoid this */ + if (PREDICT_FALSE (f->tail == (offset % f->nitems))) + { + rv = svm_fifo_enqueue_internal (f, pid, required_bytes, copy_from_here); + if (rv > 0) + return 0; + return -1; + } /* read cursize, which can only increase while we're working */ cursize = svm_fifo_max_dequeue (f); @@ -384,24 +428,24 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, /* Number of bytes we're going to copy */ total_copy_bytes = required_bytes; - tail_plus_offset = (f->tail + offset) % nitems; + normalized_offset = offset % nitems; /* Number of bytes in first copy segment */ - first_copy_bytes = ((nitems - tail_plus_offset) < total_copy_bytes) - ? (nitems - tail_plus_offset) : total_copy_bytes; + first_copy_bytes = ((nitems - normalized_offset) < total_copy_bytes) + ? 
(nitems - normalized_offset) : total_copy_bytes; - clib_memcpy (&f->data[tail_plus_offset], copy_from_here, first_copy_bytes); + clib_memcpy (&f->data[normalized_offset], copy_from_here, first_copy_bytes); /* Number of bytes in second copy segment, if any */ second_copy_bytes = total_copy_bytes - first_copy_bytes; if (second_copy_bytes) { - tail_plus_offset += first_copy_bytes; - tail_plus_offset %= nitems; + normalized_offset += first_copy_bytes; + normalized_offset %= nitems; - ASSERT (tail_plus_offset == 0); + ASSERT (normalized_offset == 0); - clib_memcpy (&f->data[tail_plus_offset], + clib_memcpy (&f->data[normalized_offset], copy_from_here + first_copy_bytes, second_copy_bytes); } @@ -573,8 +617,8 @@ format_svm_fifo (u8 * s, va_list * args) ooo_segment_t *seg; u32 seg_index; - s = - format (s, "ooo pool %d active elts\n", pool_elts (f->ooo_segments)); + s = format (s, "ooo pool %d active elts\n", + pool_elts (f->ooo_segments)); seg_index = f->ooos_list_head; @@ -582,13 +626,25 @@ format_svm_fifo (u8 * s, va_list * args) { seg = pool_elt_at_index (f->ooo_segments, seg_index); s = format (s, " pos %u, len %u next %d\n", - seg->fifo_position, seg->length, seg->next); + seg->start, seg->length, seg->next); seg_index = seg->next; } } return s; } +u32 +svm_fifo_number_ooo_segments (svm_fifo_t * f) +{ + return pool_elts (f->ooo_segments); +} + +ooo_segment_t * +svm_fifo_first_ooo_segment (svm_fifo_t * f) +{ + return pool_elt_at_index (f->ooo_segments, f->ooos_list_head); +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 9beb63f5..0fff2577 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -36,10 +36,13 @@ typedef struct u32 next; /**< Next linked-list element pool index */ u32 prev; /**< Previous linked-list element pool index */ - u32 fifo_position; /**< Start of segment, normalized*/ + u32 start; /**< Start of segment, normalized*/ u32 length; /**< Length of segment */ } ooo_segment_t; +format_function_t format_ooo_segment; +format_function_t format_ooo_list; + #define OOO_SEGMENT_INVALID_INDEX ((u32)~0) typedef struct @@ -127,6 +130,8 @@ int svm_fifo_dequeue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, int svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, u8 * copy_here); int svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes); +u32 svm_fifo_number_ooo_segments (svm_fifo_t * f); +ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f); format_function_t format_svm_fifo; @@ -139,13 +144,23 @@ svm_fifo_newest_ooo_segment (svm_fifo_t * f) always_inline u32 ooo_segment_offset (svm_fifo_t * f, ooo_segment_t * s) { - return ((f->nitems + s->fifo_position - f->tail) % f->nitems); +// return ((f->nitems + s->fifo_position - f->tail) % f->nitems); + return s->start; } always_inline u32 ooo_segment_end_offset (svm_fifo_t * f, ooo_segment_t * s) { - return ((f->nitems + s->fifo_position + s->length - f->tail) % f->nitems); +// return ((f->nitems + s->fifo_position + s->length - f->tail) % f->nitems); + return s->start + s->length; +} + +always_inline ooo_segment_t * +ooo_segment_get_prev (svm_fifo_t * f, ooo_segment_t * s) +{ + if (s->prev == OOO_SEGMENT_INVALID_INDEX) + return 0; + return pool_elt_at_index (f->ooo_segments, s->prev); } #endif /* __included_ssvm_fifo_h__ */ diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index a0c66b9f..12982589 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -447,7 +447,7 @@ format_tcp_state (u8 * s, va_list * args) if (*state < TCP_N_STATES) 
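/*
 * Editor's sketch: the first_copy_bytes / second_copy_bytes split above
 * (and in the dequeue/peek paths) is the standard two-memcpy pattern for a
 * region of a circular buffer that wraps past the end of the backing
 * array. A standalone toy version follows; ring_write() is an invented
 * name, not part of the svm fifo API.
 */
#include <assert.h>
#include <string.h>

static void
ring_write (unsigned char *ring, unsigned nitems, unsigned pos,
	    const unsigned char *src, unsigned len)
{
  unsigned first = (nitems - pos) < len ? (nitems - pos) : len;
  memcpy (ring + pos, src, first);
  memcpy (ring, src + first, len - first);	/* wrapped remainder, may be 0 */
}

int
main (void)
{
  unsigned char ring[8] = { 0 };
  const unsigned char msg[4] = { 1, 2, 3, 4 };

  /* writing 4 bytes at position 6 wraps: 2 bytes at [6..7], 2 at [0..1] */
  ring_write (ring, 8, 6, msg, 4);
  assert (ring[6] == 1 && ring[7] == 2 && ring[0] == 3 && ring[1] == 4);
  return 0;
}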
s = format (s, "%s", tcp_fsm_states[*state]); else - s = format (s, "UNKNOWN"); + s = format (s, "UNKNOWN (%d (0x%x))", *state, *state); return s; } diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 225b26da..2ac6a9b8 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -58,6 +58,7 @@ typedef enum _tcp_state } tcp_state_t; format_function_t format_tcp_state; +format_function_t format_tcp_flags; /** TCP timers */ #define foreach_tcp_timer \ diff --git a/src/vnet/tcp/tcp_format.c b/src/vnet/tcp/tcp_format.c index 7136741d..994ccfd6 100644 --- a/src/vnet/tcp/tcp_format.c +++ b/src/vnet/tcp/tcp_format.c @@ -40,7 +40,7 @@ #include #include -static u8 * +u8 * format_tcp_flags (u8 * s, va_list * args) { int flags = va_arg (*args, int); diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index a12ad8c0..97679aaf 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -211,8 +211,6 @@ tcp_options_parse (tcp_header_t * th, tcp_options_t * to) always_inline int tcp_segment_check_paws (tcp_connection_t * tc) { - /* XXX normally test for timestamp should be lt instead of leq, but for - * local testing this is not enough */ return tcp_opts_tstamp (&tc->opt) && tc->tsval_recent && timestamp_lt (tc->opt.tsval, tc->tsval_recent); } @@ -999,7 +997,7 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, u16 data_len) { stream_session_t *s0; - u32 offset, seq; + u32 offset; int rv; /* Pure ACK. Do nothing */ @@ -1009,8 +1007,9 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, } s0 = stream_session_get (tc->c_s_index, tc->c_thread_index); - seq = vnet_buffer (b)->tcp.seq_number; - offset = seq - tc->rcv_nxt; + offset = vnet_buffer (b)->tcp.seq_number - tc->irs; + + clib_warning ("ooo: offset %d len %d", offset, data_len); rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, s0->pid, offset, data_len, vlib_buffer_get_current (b)); @@ -1032,8 +1031,8 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, /* Get the newest segment from the fifo */ newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo); - start = tc->rcv_nxt + ooo_segment_offset (s0->server_rx_fifo, newest); - end = tc->rcv_nxt + ooo_segment_end_offset (s0->server_rx_fifo, newest); + start = ooo_segment_offset (s0->server_rx_fifo, newest); + end = ooo_segment_end_offset (s0->server_rx_fifo, newest); tcp_update_sack_list (tc, start, end); } @@ -1072,6 +1071,7 @@ tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, { /* Old sequence numbers allowed through because they overlapped * the rx window */ + if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt)) { error = TCP_ERROR_SEGMENT_OLD; @@ -1181,6 +1181,7 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_left_from, next_index, *from, *to_next; u32 my_thread_index = vm->thread_index, errors = 0; tcp_main_t *tm = vnet_get_tcp_main (); + u8 is_fin = 0; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -1243,9 +1244,11 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, n_advance_bytes0 += sizeof (ip60[0]); } + is_fin = (th0->flags & TCP_FLAG_FIN) != 0; + /* SYNs, FINs and data consume sequence numbers */ vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number - + tcp_is_syn (th0) + tcp_is_fin (th0) + n_data_bytes0; + + tcp_is_syn (th0) + is_fin + n_data_bytes0; /* TODO header prediction fast path */ @@ -1272,8 +1275,11 @@ tcp46_established_inline (vlib_main_t * vm, 
vlib_node_runtime_t * node, vlib_buffer_advance (b0, n_advance_bytes0); error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0); + /* N.B. buffer is rewritten if segment is ooo. Thus, th0 becomes a + * dangling reference. */ + /* 8: check the FIN bit */ - if (tcp_fin (th0)) + if (is_fin) { /* Enter CLOSE-WAIT and notify session. Don't send ACK, instead * wait for session to call close. To avoid lingering @@ -2365,8 +2371,12 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (error0 == TCP_ERROR_DISPATCH)) { + tcp_state_t state0 = tc0->state; /* Overload tcp flags to store state */ vnet_buffer (b0)->tcp.flags = tc0->state; + clib_warning ("disp error state %U flags %U", + format_tcp_state, &state0, + format_tcp_flags, flags0); } } else diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 3dbbdf6f..12579632 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -173,17 +173,145 @@ tcp_test_sack () return 0; } -static int -tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) +typedef struct +{ + u32 offset; + u32 len; +} test_pattern_t; + +/* *INDENT-OFF* */ +test_pattern_t test_pattern[] = { + {380, 8}, {768, 8}, {1156, 8}, {1544, 8}, {1932, 8}, {2320, 8}, {2708, 8}, + {2992, 8}, {372, 8}, {760, 8}, {1148, 8}, {1536, 8}, {1924, 8}, {2312, 8}, + {2700, 8}, {2984, 8}, {364, 8}, {752, 8}, {1140, 8}, {1528, 8}, {1916, 8}, + {2304, 8}, {2692, 8}, {2976, 8}, {356, 8}, {744, 8}, {1132, 8}, {1520, 8}, + {1908, 8}, {2296, 8}, {2684, 8}, {2968, 8}, {348, 8}, {736, 8}, {1124, 8}, + {1512, 8}, {1900, 8}, {2288, 8}, {2676, 8}, {2960, 8}, {340, 8}, {728, 8}, + {1116, 8}, {1504, 8}, {1892, 8}, {2280, 8}, {2668, 8}, {2952, 8}, {332, 8}, + {720, 8}, {1108, 8}, {1496, 8}, {1884, 8}, {2272, 8}, {2660, 8}, {2944, 8}, + {324, 8}, {712, 8}, {1100, 8}, {1488, 8}, {1876, 8}, {2264, 8}, {2652, 8}, + {2936, 8}, {316, 8}, {704, 8}, {1092, 8}, {1480, 8}, {1868, 8}, {2256, 8}, + {2644, 8}, {2928, 8}, {308, 8}, {696, 8}, {1084, 8}, {1472, 8}, {1860, 8}, + {2248, 8}, {2636, 8}, {2920, 8}, {300, 8}, {688, 8}, {1076, 8}, {1464, 8}, + {1852, 8}, {2240, 8}, {2628, 8}, {2912, 8}, {292, 8}, {680, 8}, {1068, 8}, + {1456, 8}, {1844, 8}, {2232, 8}, {2620, 8}, {2904, 8}, {284, 8}, {672, 8}, + {1060, 8}, {1448, 8}, {1836, 8}, {2224, 8}, {2612, 8}, {2896, 8}, {276, 8}, + {664, 8}, {1052, 8}, {1440, 8}, {1828, 8}, {2216, 8}, {2604, 8}, {2888, 8}, + {268, 8}, {656, 8}, {1044, 8}, {1432, 8}, {1820, 8}, {2208, 8}, {2596, 8}, + {2880, 8}, {260, 8}, {648, 8}, {1036, 8}, {1424, 8}, {1812, 8}, {2200, 8}, + {2588, 8}, {2872, 8}, {252, 8}, {640, 8}, {1028, 8}, {1416, 8}, {1804, 8}, + {2192, 8}, {2580, 8}, {2864, 8}, {244, 8}, {632, 8}, {1020, 8}, {1408, 8}, + {1796, 8}, {2184, 8}, {2572, 8}, {2856, 8}, {236, 8}, {624, 8}, {1012, 8}, + {1400, 8}, {1788, 8}, {2176, 8}, {2564, 8}, {2848, 8}, {228, 8}, {616, 8}, + {1004, 8}, {1392, 8}, {1780, 8}, {2168, 8}, {2556, 8}, {2840, 8}, {220, 8}, + {608, 8}, {996, 8}, {1384, 8}, {1772, 8}, {2160, 8}, {2548, 8}, {2832, 8}, + {212, 8}, {600, 8}, {988, 8}, {1376, 8}, {1764, 8}, {2152, 8}, {2540, 8}, + {2824, 8}, {204, 8}, {592, 8}, {980, 8}, {1368, 8}, {1756, 8}, {2144, 8}, + {2532, 8}, {2816, 8}, {196, 8}, {584, 8}, {972, 8}, {1360, 8}, {1748, 8}, + {2136, 8}, {2524, 8}, {2808, 8}, {188, 8}, {576, 8}, {964, 8}, {1352, 8}, + {1740, 8}, {2128, 8}, {2516, 8}, {2800, 8}, {180, 8}, {568, 8}, {956, 8}, + {1344, 8}, {1732, 8}, {2120, 8}, {2508, 8}, {2792, 8}, {172, 8}, {560, 8}, + {948, 8}, {1336, 8}, {1724, 8}, {2112, 8}, {2500, 8}, 
{2784, 8}, {164, 8}, + {552, 8}, {940, 8}, {1328, 8}, {1716, 8}, {2104, 8}, {2492, 8}, {2776, 8}, + {156, 8}, {544, 8}, {932, 8}, {1320, 8}, {1708, 8}, {2096, 8}, {2484, 8}, + {2768, 8}, {148, 8}, {536, 8}, {924, 8}, {1312, 8}, {1700, 8}, {2088, 8}, + {2476, 8}, {2760, 8}, {140, 8}, {528, 8}, {916, 8}, {1304, 8}, {1692, 8}, + {2080, 8}, {2468, 8}, {2752, 8}, {132, 8}, {520, 8}, {908, 8}, {1296, 8}, + {1684, 8}, {2072, 8}, {2460, 8}, {2744, 8}, {124, 8}, {512, 8}, {900, 8}, + {1288, 8}, {1676, 8}, {2064, 8}, {2452, 8}, {2736, 8}, {116, 8}, {504, 8}, + {892, 8}, {1280, 8}, {1668, 8}, {2056, 8}, {2444, 8}, {2728, 8}, {108, 8}, + {496, 8}, {884, 8}, {1272, 8}, {1660, 8}, {2048, 8}, {2436, 8}, {2720, 8}, + {100, 8}, {488, 8}, {876, 8}, {1264, 8}, {1652, 8}, {2040, 8}, {2428, 8}, + {2716, 4}, {92, 8}, {480, 8}, {868, 8}, {1256, 8}, {1644, 8}, {2032, 8}, + {2420, 8}, {84, 8}, {472, 8}, {860, 8}, {1248, 8}, {1636, 8}, {2024, 8}, + {2412, 8}, {76, 8}, {464, 8}, {852, 8}, {1240, 8}, {1628, 8}, {2016, 8}, + {2404, 8}, {68, 8}, {456, 8}, {844, 8}, {1232, 8}, {1620, 8}, {2008, 8}, + {2396, 8}, {60, 8}, {448, 8}, {836, 8}, {1224, 8}, {1612, 8}, {2000, 8}, + {2388, 8}, {52, 8}, {440, 8}, {828, 8}, {1216, 8}, {1604, 8}, {1992, 8}, + {2380, 8}, {44, 8}, {432, 8}, {820, 8}, {1208, 8}, {1596, 8}, {1984, 8}, + {2372, 8}, {36, 8}, {424, 8}, {812, 8}, {1200, 8}, {1588, 8}, {1976, 8}, + {2364, 8}, {28, 8}, {416, 8}, {804, 8}, {1192, 8}, {1580, 8}, {1968, 8}, + {2356, 8}, {20, 8}, {408, 8}, {796, 8}, {1184, 8}, {1572, 8}, {1960, 8}, + {2348, 8}, {12, 8}, {400, 8}, {788, 8}, {1176, 8}, {1564, 8}, {1952, 8}, + {2340, 8}, {4, 8}, {392, 8}, {780, 8}, {1168, 8}, {1556, 8}, {1944, 8}, + {2332, 8}, + /* missing from original data set */ + {388, 4}, {776, 4}, {1164, 4}, {1552, 4}, {1940, 4}, {2328, 4}, +}; +/* *INDENT-ON* */ + +int +pattern_cmp (const void *arg1, const void *arg2) +{ + test_pattern_t *a1 = (test_pattern_t *) arg1; + test_pattern_t *a2 = (test_pattern_t *) arg2; + + if (a1->offset < a2->offset) + return -1; + else if (a1->offset > a2->offset) + return 1; + return 0; +} + +static u8 +fifo_validate_pattern (vlib_main_t * vm, test_pattern_t * pattern, + u32 pattern_length) +{ + test_pattern_t *tp = pattern; + int i; + + /* Go through the pattern and make 100% sure it's sane */ + for (i = 0; i < pattern_length - 1; i++) + { + if (tp->offset + tp->len != (tp + 1)->offset) + { + vlib_cli_output (vm, "[%d] missing {%d, %d}", i, + (tp->offset + tp->len), + (tp + 1)->offset - (tp->offset + tp->len)); + return 0; + } + tp++; + } + return 1; +} + +static test_pattern_t * +fifo_get_validate_pattern (vlib_main_t * vm, test_pattern_t * test_data, + u32 test_data_len) +{ + test_pattern_t *validate_pattern = 0; + + /* Validate, and try segments in order... 
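/*
 * Editor's sketch: pattern_cmp() and fifo_validate_pattern() above sort
 * the test segments by offset and verify that they tile the byte range
 * with no gaps before driving the fifo. A standalone toy version of that
 * check using qsort(); seg_t and segs_are_contiguous() are invented names
 * for the illustration.
 */
#include <assert.h>
#include <stdlib.h>

typedef struct
{
  unsigned offset, len;
} seg_t;

static int
seg_cmp (const void *a, const void *b)
{
  const seg_t *s1 = a, *s2 = b;
  return s1->offset < s2->offset ? -1 : s1->offset > s2->offset;
}

static int
segs_are_contiguous (seg_t * segs, unsigned n)
{
  unsigned i;
  qsort (segs, n, sizeof (segs[0]), seg_cmp);
  for (i = 0; i + 1 < n; i++)
    if (segs[i].offset + segs[i].len != segs[i + 1].offset)
      return 0;
  return 1;
}

int
main (void)
{
  seg_t ok[] = { {8, 4}, {0, 8}, {12, 8} };
  seg_t gap[] = { {0, 8}, {12, 8} };

  assert (segs_are_contiguous (ok, 3));
  assert (!segs_are_contiguous (gap, 2));
  return 0;
}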
*/ + vec_validate (validate_pattern, test_data_len - 1); + memcpy (validate_pattern, test_data, + test_data_len * sizeof (test_pattern_t)); + qsort ((u8 *) validate_pattern, test_data_len, sizeof (test_pattern_t), + pattern_cmp); + + if (fifo_validate_pattern (vm, validate_pattern, test_data_len) == 0) + return 0; + + return validate_pattern; +} + +int +tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { svm_fifo_t *f; u32 fifo_size = 1 << 20; u32 *test_data = 0; u32 offset; - int i, rv; + int i, rv, verbose = 0; u32 data_word, test_data_len; + ooo_segment_t *ooo_seg; + u8 *data; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "verbose")) + verbose = 1; + } - /* $$$ parse args */ test_data_len = fifo_size / sizeof (u32); vec_validate (test_data, test_data_len - 1); @@ -198,12 +326,8 @@ tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) /* Enqueue an initial (un-dequeued) chunk */ rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32), (u8 *) test_data); - - if (rv != sizeof (u32)) - { - clib_warning ("enqueue returned %d", rv); - goto out; - } + TCP_TEST ((rv == sizeof (u32)), "enqueued %d", rv); + TCP_TEST ((f->tail == 4), "fifo tail %u", f->tail); /* * Create 3 chunks in the future. The offsets are relative @@ -212,51 +336,62 @@ tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < 3; i++) { offset = (2 * i + 1) * sizeof (u32); - vlib_cli_output (vm, "add offset %d", offset); - - rv = svm_fifo_enqueue_with_offset - (f, 0 /* pid */ , offset, sizeof (u32), - (u8 *) (test_data + ((offset + sizeof (u32)) / sizeof (u32)))); - + data = (u8 *) (test_data + (2 * i + 1)); + rv = + svm_fifo_enqueue_with_offset (f, 0 /* pid */ , offset, sizeof (u32), + data); + if (verbose) + vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, + offset + sizeof (u32)); if (rv) { clib_warning ("enqueue returned %d", rv); - goto out; + goto err; } } - /* Paint missing data backwards */ - for (i = 3; i > 0; i--) + if (verbose) + vlib_cli_output (vm, "fifo after odd segs: %U", format_svm_fifo, f, 1); + TCP_TEST ((f->tail == 8), "fifo tail %u", f->tail); + + /* Paint some of missing data backwards */ + for (i = 3; i > 1; i--) { offset = (2 * i + 0) * sizeof (u32); - - vlib_cli_output (vm, "add offset %d", offset); - - rv = svm_fifo_enqueue_with_offset - (f, 0 /* pid */ , offset, sizeof (u32), - (u8 *) (test_data + ((offset + sizeof (u32)) / sizeof (u32)))); - + data = (u8 *) (test_data + (2 * i + 0)); + rv = + svm_fifo_enqueue_with_offset (f, 0 /* pid */ , offset, sizeof (u32), + data); + if (verbose) + vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i, offset, + offset + sizeof (u32)); if (rv) { clib_warning ("enqueue returned %d", rv); - goto out; + goto err; } } - vlib_cli_output (vm, "fifo before missing link: %U", - format_svm_fifo, f, 1 /* verbose */ ); + if (verbose) + vlib_cli_output (vm, "fifo before missing link: %U", format_svm_fifo, f, + 1); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + ooo_seg = svm_fifo_first_ooo_segment (f); + TCP_TEST ((ooo_seg->start == 12), + "first ooo seg position %u", ooo_seg->start); + TCP_TEST ((ooo_seg->length == 16), + "first ooo seg length %u", ooo_seg->length); /* Enqueue the missing u32 */ - rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , - sizeof (u32), (u8 *) (test_data + 1)); - if (rv != 7 * sizeof (u32)) - { - clib_warning ("enqueue returned %d", rv); - goto out; - } - - vlib_cli_output (vm, "fifo after 
missing link: %U", - format_svm_fifo, f, 1 /* verbose */ ); + rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32), + (u8 *) (test_data + 2)); + if (verbose) + vlib_cli_output (vm, "fifo after missing link: %U", format_svm_fifo, f, + 1); + TCP_TEST ((rv == 20), "bytes to be enqueued %u", rv); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); /* Collect results */ for (i = 0; i < 7; i++) @@ -265,25 +400,316 @@ tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) (u8 *) & data_word); if (rv != sizeof (u32)) { - clib_warning ("dequeue returned %d", rv); - goto out; + clib_warning ("bytes dequeues %u", rv); + goto err; } if (data_word != test_data[i]) { - clib_warning ("recovered data %d not %d", data_word, test_data[i]); - goto out; + clib_warning ("recovered [%d] %d not %d", i, data_word, + test_data[i]); + goto err; } } - clib_warning ("test complete..."); + svm_fifo_free (f); + vec_free (test_data); + return 0; -out: +err: svm_fifo_free (f); vec_free (test_data); + return -1; +} + +static int +tcp_test_fifo2 (vlib_main_t * vm) +{ + svm_fifo_t *f; + u32 fifo_size = 1 << 20; + int i, rv, test_data_len; + u64 data64; + test_pattern_t *tp, *vp, *test_data; + ooo_segment_t *ooo_seg; + + test_data = test_pattern; + test_data_len = ARRAY_LEN (test_pattern); + + vp = fifo_get_validate_pattern (vm, test_data, test_data_len); + + /* Create a fifo */ + f = svm_fifo_create (fifo_size); + + /* Paint the fifo data vector with -1's */ + memset (f->data, 0xFF, 1 << 20); + + /* + * Try with sorted data + */ + for (i = 0; i < test_data_len; i++) + { + tp = vp + i; + data64 = tp->offset; + rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + (u8 *) & data64); + } + + /* Expected result: one big fat chunk at offset 4 */ + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + ooo_seg = svm_fifo_first_ooo_segment (f); + TCP_TEST ((ooo_seg->start == 4), + "first ooo seg position %u", ooo_seg->start); + TCP_TEST ((ooo_seg->length == 2996), + "first ooo seg length %u", ooo_seg->length); + + data64 = 0; + rv = svm_fifo_enqueue_nowait (f, 0, sizeof (u32), (u8 *) & data64); + TCP_TEST ((rv == 3000), "bytes to be enqueued %u", rv); + + svm_fifo_free (f); + vec_free (vp); + + /* + * Now try it again w/ unsorted data... 
+ */ + + f = svm_fifo_create (fifo_size); + + /* Paint fifo data vector with -1's */ + memset (f->data, 0xFF, 1 << 20); + + for (i = 0; i < test_data_len; i++) + { + tp = &test_data[i]; + data64 = tp->offset; + rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + (u8 *) & data64); + if (rv) + { + clib_warning ("enqueue returned %d", rv); + } + } + + /* Expecting the same result: one big fat chunk at offset 4 */ + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + ooo_seg = svm_fifo_first_ooo_segment (f); + TCP_TEST ((ooo_seg->start == 4), + "first ooo seg position %u", ooo_seg->start); + TCP_TEST ((ooo_seg->length == 2996), + "first ooo seg length %u", ooo_seg->length); + + data64 = 0; + rv = svm_fifo_enqueue_nowait (f, 0, sizeof (u32), (u8 *) & data64); + + TCP_TEST ((rv == 3000), "bytes to be enqueued %u", rv); + + svm_fifo_free (f); + return 0; } +static int +tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) +{ + svm_fifo_t *f; + u32 fifo_size = 4 << 10; + u32 fifo_initial_offset = 0; + u32 total_size = 2 << 10; + int overlap = 0; + int i, rv; + u8 *data_pattern = 0; + test_pattern_t *tp, *generate = 0; + u32 nsegs = 2; + u32 seg_size, length_so_far; + u32 current_offset, offset_increment, len_this_chunk; + u32 seed = 0xdeaddabe; + int verbose = 0; + int randomize = 1; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "fifo-size %d", &fifo_size)) + ; + else if (unformat (input, "total-size %d", &total_size)) + ; + else if (unformat (input, "verbose")) + verbose = 1; + else if (unformat (input, "overlap")) + overlap = 1; + else if (unformat (input, "initial-offset %d", &fifo_initial_offset)) + ; + else if (unformat (input, "seed %d", &seed)) + ; + else if (unformat (input, "nsegs %d", &nsegs)) + ; + else if (unformat (input, "no-randomize")) + randomize = 0; + else + { + clib_error_t *e = clib_error_return + (0, "unknown input `%U'", format_unformat_error, input); + clib_error_report (e); + return -1; + } + } + /* + * Generate data + */ + vec_validate (data_pattern, total_size - 1); + for (i = 0; i < vec_len (data_pattern); i++) + data_pattern[i] = i & 0xff; + + seg_size = total_size / nsegs; + length_so_far = 0; + current_offset = 1; + while (length_so_far < total_size) + { + vec_add2 (generate, tp, 1); + len_this_chunk = clib_min (seg_size, total_size - length_so_far); + tp->offset = current_offset; + tp->len = len_this_chunk; + + if (overlap && (len_this_chunk == seg_size)) + do + { + offset_increment = len_this_chunk + % (1 + (random_u32 (&seed) % len_this_chunk)); + } + while (offset_increment == 0); + else + offset_increment = len_this_chunk; + + current_offset += offset_increment; + length_so_far = tp->offset + tp->len; + } + + /* + * Validate segment list. Only valid for non-overlap cases. 
+ */ + if (overlap == 0) + fifo_validate_pattern (vm, generate, vec_len (generate)); + + if (verbose) + { + vlib_cli_output (vm, "raw data pattern:"); + for (i = 0; i < vec_len (generate); i++) + { + vlib_cli_output (vm, "[%d] offset %u len %u", i, + generate[i].offset, generate[i].len); + } + } + + /* Randomize data pattern */ + if (randomize) + { + for (i = 0; i < vec_len (generate) / 2; i++) + { + u32 src_index, dst_index; + test_pattern_t _tmp, *tmp = &_tmp; + + src_index = random_u32 (&seed) % vec_len (generate); + dst_index = random_u32 (&seed) % vec_len (generate); + + tmp[0] = generate[dst_index]; + generate[dst_index] = generate[src_index]; + generate[src_index] = tmp[0]; + } + } + + if (verbose) + { + vlib_cli_output (vm, "randomized data pattern:"); + for (i = 0; i < vec_len (generate); i++) + { + vlib_cli_output (vm, "[%d] offset %u len %u", i, + generate[i].offset, generate[i].len); + } + } + + /* Create a fifo */ + f = svm_fifo_create (fifo_size); + + /* Paint the fifo data vector with -1's */ + memset (f->data, 0xFF, fifo_size); + + /* manually set head and tail pointers to validate modular arithmetic */ + f->head = fifo_initial_offset % fifo_size; + f->tail = fifo_initial_offset % fifo_size; + + for (i = 0; i < vec_len (generate); i++) + { + tp = generate + i; + rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + (u8 *) data_pattern + tp->offset); + } + + /* Expected result: one big fat chunk at offset 1 */ + + if (verbose) + vlib_cli_output (vm, "fifo before missing link: %U", + format_svm_fifo, f, 1 /* verbose */ ); + + rv = svm_fifo_enqueue_nowait (f, 0, 1 /* count */ , data_pattern + 0); + + if (verbose) + vlib_cli_output (vm, "in-order enqueue returned %d", rv); + + TCP_TEST ((rv == total_size), "retrieved %u expected %u", rv, total_size); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + svm_fifo_free (f); + vec_free (data_pattern); + + return 0; +} + +static int +tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) +{ + int res = 0; + + /* Run all tests */ + if (unformat_check_input (input) == UNFORMAT_END_OF_INPUT) + { + res = tcp_test_fifo1 (vm, input); + if (res) + return res; + + res = tcp_test_fifo2 (vm); + if (res) + return res; + + /* Run a number of fifo3 configs */ + unformat_init_cstring (input, "nsegs 3 overlap seed 123"); + if (tcp_test_fifo3 (vm, input)) + return -1; + unformat_free (input); + + unformat_init_cstring (input, "nsegs 10"); + if (tcp_test_fifo3 (vm, input)) + return -1; + unformat_free (input); + } + else + { + if (unformat (input, "fifo3")) + { + res = tcp_test_fifo3 (vm, input); + } + else if (unformat (input, "fifo2")) + { + res = tcp_test_fifo2 (vm); + } + else if (unformat (input, "fifo1")) + { + res = tcp_test_fifo1 (vm, input); + } + } + + return res; +} static clib_error_t * tcp_test (vlib_main_t * vm, -- cgit 1.2.3-korg From b59a7055524bae29a328958c493d0b600776dc27 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Tue, 18 Apr 2017 22:07:29 -0700 Subject: Add more svm fifo unit tests Change-Id: Ifc07b3f90ac155c26c3a216e073b474b499ebd44 Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 100 ++++++++-------- src/vnet/tcp/tcp_test.c | 305 ++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 292 insertions(+), 113 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index bd968aea..f428d3ec 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -38,13 +38,37 @@ format_ooo_list (u8 * s, va_list 
* args) while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX) { seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index); - s = format (s, "\n %U", format_ooo_segment, seg); - + s = format (s, " %U\n", format_ooo_segment, seg); ooo_segment_index = seg->next; } return s; } +u8 * +format_svm_fifo (u8 * s, va_list * args) +{ + svm_fifo_t *f = va_arg (*args, svm_fifo_t *); + int verbose = va_arg (*args, int); + + s = format (s, "cursize %u nitems %u has_event %d\n", + f->cursize, f->nitems, f->has_event); + s = format (s, "head %d tail %d\n", f->head, f->tail); + + if (verbose > 1) + s = format + (s, "server session %d thread %d client session %d thread %d\n", + f->server_session_index, f->server_thread_index, + f->client_session_index, f->client_thread_index); + + if (verbose) + { + s = format (s, "ooo pool %d active elts\n", + pool_elts (f->ooo_segments)); + s = format (s, "%U", format_ooo_list, f); + } + return s; +} + /** create an svm fifo, in the current heap. Fails vs blow up the process */ svm_fifo_t * svm_fifo_create (u32 data_size_in_bytes) @@ -274,33 +298,41 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) u32 index, bytes = 0, diff; u32 cursize; - /* read cursize, which can only increase while we're working */ - cursize = svm_fifo_max_dequeue (f); + /* current size has not yet been updated */ + cursize = svm_fifo_max_dequeue (f) + n_bytes_enqueued; s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); + diff = (f->nitems + (i32) (f->tail - s->start)) % f->nitems; + if (diff > cursize) + return 0; + /* If last tail update overlaps one/multiple ooo segments, remove them */ - diff = (f->nitems + ((int) s->start - f->tail)) % f->nitems; - while (0 < diff && diff < n_bytes_enqueued) + while (0 < diff && diff < cursize) { - /* Segment end is beyond the tail. Advance tail and be done */ + index = s - f->ooo_segments; + + /* Segment end is beyond the tail. Advance tail and remove segment */ if (diff < s->length) { f->tail += s->length - diff; f->tail %= f->nitems; + bytes = s->length - diff; + ooo_segment_del (f, index); break; } + /* If we have next go on */ - else if (s->next != OOO_SEGMENT_INVALID_INDEX) + if (s->next != OOO_SEGMENT_INVALID_INDEX) { - index = s - f->ooo_segments; s = pool_elt_at_index (f->ooo_segments, s->next); - diff = (f->nitems + ((int) s->start - f->tail)) % f->nitems; + diff = (f->nitems + (i32) (f->tail - s->start)) % f->nitems; ooo_segment_del (f, index); } /* End of search */ else { + ooo_segment_del (f, index); break; } } @@ -404,10 +436,7 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, u32 normalized_offset; int rv; - /* Safety: don't wrap more than nitems/2 */ - ASSERT ((f->nitems + offset - f->tail) % f->nitems < f->nitems / 2); - - /* Users would do do well to avoid this */ + /* Users would do well to avoid this */ if (PREDICT_FALSE (f->tail == (offset % f->nitems))) { rv = svm_fifo_enqueue_internal (f, pid, required_bytes, copy_from_here); @@ -421,7 +450,7 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, nitems = f->nitems; /* Will this request fit? 
*/ - if ((required_bytes + offset) > (nitems - cursize)) + if ((required_bytes + (offset - f->tail) % nitems) > (nitems - cursize)) return -1; ooo_segment_add (f, offset, required_bytes); @@ -523,7 +552,7 @@ svm_fifo_dequeue_nowait (svm_fifo_t * f, } int -svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, +svm_fifo_peek (svm_fifo_t * f, int pid, u32 relative_offset, u32 max_bytes, u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; @@ -535,7 +564,7 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, return -2; /* nothing in the fifo */ nitems = f->nitems; - real_head = f->head + offset; + real_head = f->head + relative_offset; real_head = real_head >= nitems ? real_head - nitems : real_head; /* Number of bytes we're going to copy */ @@ -596,43 +625,6 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes) return total_drop_bytes; } -u8 * -format_svm_fifo (u8 * s, va_list * args) -{ - svm_fifo_t *f = va_arg (*args, svm_fifo_t *); - int verbose = va_arg (*args, int); - - s = format (s, "cursize %u nitems %u has_event %d\n", - f->cursize, f->nitems, f->has_event); - s = format (s, "head %d tail %d\n", f->head, f->tail); - - if (verbose > 1) - s = format - (s, "server session %d thread %d client session %d thread %d\n", - f->server_session_index, f->server_thread_index, - f->client_session_index, f->client_thread_index); - - if (verbose) - { - ooo_segment_t *seg; - u32 seg_index; - - s = format (s, "ooo pool %d active elts\n", - pool_elts (f->ooo_segments)); - - seg_index = f->ooos_list_head; - - while (seg_index != OOO_SEGMENT_INVALID_INDEX) - { - seg = pool_elt_at_index (f->ooo_segments, seg_index); - s = format (s, " pos %u, len %u next %d\n", - seg->start, seg->length, seg->next); - seg_index = seg->next; - } - } - return s; -} - u32 svm_fifo_number_ooo_segments (svm_fifo_t * f) { diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 12579632..890e50b9 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -294,6 +294,34 @@ fifo_get_validate_pattern (vlib_main_t * vm, test_pattern_t * test_data, return validate_pattern; } +static svm_fifo_t * +fifo_prepare (u32 fifo_size) +{ + svm_fifo_t *f; + f = svm_fifo_create (fifo_size); + + /* Paint fifo data vector with -1's */ + memset (f->data, 0xFF, fifo_size); + + return f; +} + +static int +compare_data (u8 * data1, u8 * data2, u32 start, u32 len, u32 * index) +{ + int i; + + for (i = start; i < len; i++) + { + if (data1[i] != data2[i]) + { + *index = i; + return 1; + } + } + return 0; +} + int tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { @@ -302,9 +330,9 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) u32 *test_data = 0; u32 offset; int i, rv, verbose = 0; - u32 data_word, test_data_len; + u32 data_word, test_data_len, j; ooo_segment_t *ooo_seg; - u8 *data; + u8 *data, *s, *data_buf = 0; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { @@ -318,12 +346,11 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < vec_len (test_data); i++) test_data[i] = i; - f = svm_fifo_create (fifo_size); - - /* Paint fifo data vector with -1's */ - memset (f->data, 0xFF, test_data_len); + f = fifo_prepare (fifo_size); - /* Enqueue an initial (un-dequeued) chunk */ + /* + * Enqueue an initial (un-dequeued) chunk + */ rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32), (u8 *) test_data); TCP_TEST ((rv == sizeof (u32)), "enqueued %d", rv); @@ -337,9 +364,7 @@ tcp_test_fifo1 (vlib_main_t * 
vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = - svm_fifo_enqueue_with_offset (f, 0 /* pid */ , offset, sizeof (u32), - data); + rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -352,16 +377,23 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) if (verbose) vlib_cli_output (vm, "fifo after odd segs: %U", format_svm_fifo, f, 1); + TCP_TEST ((f->tail == 8), "fifo tail %u", f->tail); - /* Paint some of missing data backwards */ + /* + * Make sure format functions are not buggy + */ + s = format (0, "%U", format_svm_fifo, f, 2); + vec_free (s); + + /* + * Paint some of missing data backwards + */ for (i = 3; i > 1; i--) { offset = (2 * i + 0) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 0)); - rv = - svm_fifo_enqueue_with_offset (f, 0 /* pid */ , offset, sizeof (u32), - data); + rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i, offset, offset + sizeof (u32)); @@ -383,7 +415,9 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) TCP_TEST ((ooo_seg->length == 16), "first ooo seg length %u", ooo_seg->length); - /* Enqueue the missing u32 */ + /* + * Enqueue the missing u32 + */ rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32), (u8 *) (test_data + 2)); if (verbose) @@ -393,7 +427,9 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); - /* Collect results */ + /* + * Collect results + */ for (i = 0; i < 7; i++) { rv = svm_fifo_dequeue_nowait (f, 0 /* pid */ , sizeof (u32), @@ -411,8 +447,77 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) } } + /* + * Test segment overlaps: last ooo segment overlaps all + */ + svm_fifo_free (f); + f = fifo_prepare (fifo_size); + + for (i = 0; i < 4; i++) + { + offset = (2 * i + 1) * sizeof (u32); + data = (u8 *) (test_data + (2 * i + 1)); + rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + if (verbose) + vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, + offset + sizeof (u32)); + if (rv) + { + clib_warning ("enqueue returned %d", rv); + goto err; + } + } + + rv = svm_fifo_enqueue_with_offset (f, 0, 8, 21, data); + TCP_TEST ((rv == 0), "ooo enqueued %u", rv); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + + vec_validate (data_buf, vec_len (data)); + svm_fifo_peek (f, 0, 0, vec_len (data), data_buf); + if (compare_data (data_buf, data, 8, vec_len (data), &j)) + { + TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]); + } + vec_reset_length (data_buf); + + /* + * Test segment overlaps: enqueue and overlap ooo segments + */ + svm_fifo_free (f); + f = fifo_prepare (fifo_size); + + for (i = 0; i < 4; i++) + { + offset = (2 * i + 1) * sizeof (u32); + data = (u8 *) (test_data + (2 * i + 1)); + rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + if (verbose) + vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, + offset + sizeof (u32)); + if (rv) + { + clib_warning ("enqueue returned %d", rv); + goto err; + } + } + + rv = svm_fifo_enqueue_nowait (f, 0, 29, data); + TCP_TEST ((rv == 32), "ooo enqueued %u", rv); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), + "number of ooo 
segments %u", svm_fifo_number_ooo_segments (f)); + + vec_validate (data_buf, vec_len (data)); + svm_fifo_peek (f, 0, 0, vec_len (data), data_buf); + if (compare_data (data_buf, data, 0, vec_len (data), &j)) + { + TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]); + } + + vec_free (data_buf); svm_fifo_free (f); vec_free (test_data); + return 0; err: @@ -437,10 +542,7 @@ tcp_test_fifo2 (vlib_main_t * vm) vp = fifo_get_validate_pattern (vm, test_data, test_data_len); /* Create a fifo */ - f = svm_fifo_create (fifo_size); - - /* Paint the fifo data vector with -1's */ - memset (f->data, 0xFF, 1 << 20); + f = fifo_prepare (fifo_size); /* * Try with sorted data @@ -473,10 +575,7 @@ tcp_test_fifo2 (vlib_main_t * vm) * Now try it again w/ unsorted data... */ - f = svm_fifo_create (fifo_size); - - /* Paint fifo data vector with -1's */ - memset (f->data, 0xFF, 1 << 20); + f = fifo_prepare (fifo_size); for (i = 0; i < test_data_len; i++) { @@ -516,16 +615,13 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) u32 fifo_size = 4 << 10; u32 fifo_initial_offset = 0; u32 total_size = 2 << 10; - int overlap = 0; - int i, rv; - u8 *data_pattern = 0; + int overlap = 0, verbose = 0, randomize = 1, drop = 0, in_seq_all = 0; + u8 *data_pattern = 0, *data_buf = 0; test_pattern_t *tp, *generate = 0; - u32 nsegs = 2; - u32 seg_size, length_so_far; + u32 nsegs = 2, seg_size, length_so_far; u32 current_offset, offset_increment, len_this_chunk; - u32 seed = 0xdeaddabe; - int verbose = 0; - int randomize = 1; + u32 seed = 0xdeaddabe, j; + int i, rv; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { @@ -545,6 +641,10 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) ; else if (unformat (input, "no-randomize")) randomize = 0; + else if (unformat (input, "in-seq-all")) + in_seq_all = 1; + else if (unformat (input, "drop")) + drop = 1; else { clib_error_t *e = clib_error_return @@ -554,6 +654,18 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) } } + if (total_size > fifo_size) + { + clib_warning ("total_size %d greater than fifo size %d", total_size, + fifo_size); + return -1; + } + if (overlap && randomize == 0) + { + clib_warning ("Can't enqueue in-order with overlap"); + return -1; + } + /* * Generate data */ @@ -561,9 +673,12 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < vec_len (data_pattern); i++) data_pattern[i] = i & 0xff; + /* + * Generate segments + */ seg_size = total_size / nsegs; length_so_far = 0; - current_offset = 1; + current_offset = randomize; while (length_so_far < total_size) { vec_add2 (generate, tp, 1); @@ -616,51 +731,100 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) generate[dst_index] = generate[src_index]; generate[src_index] = tmp[0]; } - } - - if (verbose) - { - vlib_cli_output (vm, "randomized data pattern:"); - for (i = 0; i < vec_len (generate); i++) + if (verbose) { - vlib_cli_output (vm, "[%d] offset %u len %u", i, - generate[i].offset, generate[i].len); + vlib_cli_output (vm, "randomized data pattern:"); + for (i = 0; i < vec_len (generate); i++) + { + vlib_cli_output (vm, "[%d] offset %u len %u", i, + generate[i].offset, generate[i].len); + } } } - /* Create a fifo */ - f = svm_fifo_create (fifo_size); - - /* Paint the fifo data vector with -1's */ - memset (f->data, 0xFF, fifo_size); + /* + * Create a fifo and add segments + */ + f = fifo_prepare (fifo_size); /* manually set head and tail pointers to validate modular arithmetic */ - f->head = fifo_initial_offset % 
fifo_size; - f->tail = fifo_initial_offset % fifo_size; + fifo_initial_offset = fifo_initial_offset % fifo_size; + f->head = fifo_initial_offset; + f->tail = fifo_initial_offset; for (i = 0; i < vec_len (generate); i++) { tp = generate + i; - rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + rv = svm_fifo_enqueue_with_offset (f, 0, fifo_initial_offset + + tp->offset, tp->len, (u8 *) data_pattern + tp->offset); } - /* Expected result: one big fat chunk at offset 1 */ + /* + * Expected result: one big fat chunk at offset 1 if randomize == 1 + */ if (verbose) vlib_cli_output (vm, "fifo before missing link: %U", format_svm_fifo, f, 1 /* verbose */ ); - rv = svm_fifo_enqueue_nowait (f, 0, 1 /* count */ , data_pattern + 0); + /* + * Add the missing byte if segments were randomized + */ + if (randomize) + { + u32 bytes_to_enq = 1; + if (in_seq_all) + bytes_to_enq = total_size; + rv = svm_fifo_enqueue_nowait (f, 0, bytes_to_enq, data_pattern + 0); + + if (verbose) + vlib_cli_output (vm, "in-order enqueue returned %d", rv); - if (verbose) - vlib_cli_output (vm, "in-order enqueue returned %d", rv); + TCP_TEST ((rv == total_size), "enqueued %u expected %u", rv, + total_size); + + } + + TCP_TEST ((svm_fifo_has_ooo_data (f) == 0), "number of ooo segments %u", + svm_fifo_number_ooo_segments (f)); + + /* + * Test if peeked data is the same as original data + */ + vec_validate (data_buf, vec_len (data_pattern)); + svm_fifo_peek (f, 0, 0, vec_len (data_pattern), data_buf); + if (compare_data (data_buf, data_pattern, 0, vec_len (data_pattern), &j)) + { + TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], + data_pattern[j]); + } + vec_reset_length (data_buf); + + /* + * Dequeue or drop all data + */ + if (drop) + { + svm_fifo_dequeue_drop (f, 0, vec_len (data_pattern)); + } + else + { + svm_fifo_dequeue_nowait (f, 0, vec_len (data_pattern), data_buf); + if (compare_data + (data_buf, data_pattern, 0, vec_len (data_pattern), &j)) + { + TCP_TEST (0, "[%d] dequeued %u expected %u", j, data_buf[j], + data_pattern[j]); + } + } + + TCP_TEST ((svm_fifo_max_dequeue (f) == 0), "fifo has %d bytes", + svm_fifo_max_dequeue (f)); - TCP_TEST ((rv == total_size), "retrieved %u expected %u", rv, total_size); - TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), - "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); svm_fifo_free (f); vec_free (data_pattern); + vec_free (data_buf); return 0; } @@ -669,6 +833,7 @@ static int tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) { int res = 0; + char *str; /* Run all tests */ if (unformat_check_input (input) == UNFORMAT_END_OF_INPUT) @@ -681,13 +846,35 @@ tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) if (res) return res; - /* Run a number of fifo3 configs */ - unformat_init_cstring (input, "nsegs 3 overlap seed 123"); + /* + * Run a number of fifo3 configs + */ + str = "nsegs 10 overlap seed 123"; + unformat_init_cstring (input, str); + if (tcp_test_fifo3 (vm, input)) + return -1; + unformat_free (input); + + str = "nsegs 10 overlap seed 123 in-seq-all"; + unformat_init_cstring (input, str); + if (tcp_test_fifo3 (vm, input)) + return -1; + unformat_free (input); + + str = "nsegs 10 overlap seed 123 initial-offset 3917"; + unformat_init_cstring (input, str); + if (tcp_test_fifo3 (vm, input)) + return -1; + unformat_free (input); + + str = "nsegs 10 overlap seed 123 initial-offset 3917 drop"; + unformat_init_cstring (input, str); if (tcp_test_fifo3 (vm, input)) return -1; unformat_free (input); - unformat_init_cstring (input, 
"nsegs 10"); + str = "nsegs 10 seed 123 initial-offset 3917 drop no-randomize"; + unformat_init_cstring (input, str); if (tcp_test_fifo3 (vm, input)) return -1; unformat_free (input); -- cgit 1.2.3-korg From a5464817522c7a7dc760af4612f1d6a68ed0afc8 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 19 Apr 2017 13:00:05 -0700 Subject: Session layer improvements Among others: - Moved app event queue to shared memory segment - Use private memory segment for builtin apps - Remove pid from svm fifo - Protect session fifo (de)allocation - Use fifo event for session disconnects - Have session queue node poll in all wk threads Change-Id: I89dbf7fdfebef12f5ef2b34ba3ef3c2c07f49ff2 Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 30 ++--- src/svm/svm_fifo.h | 31 ++--- src/svm/svm_fifo_segment.c | 50 +++++++- src/svm/svm_fifo_segment.h | 5 + src/svm/test_svm_fifo1.c | 27 ++--- src/uri/uri_tcp_test.c | 189 ++++++++++++++++++++----------- src/uri/uri_udp_test.c | 40 ++++--- src/vnet/session/application.c | 48 +++----- src/vnet/session/application.h | 12 -- src/vnet/session/application_interface.c | 26 +---- src/vnet/session/application_interface.h | 38 ++++++- src/vnet/session/node.c | 63 ++++++----- src/vnet/session/segment_manager.c | 134 ++++++++++++++++++---- src/vnet/session/segment_manager.h | 12 ++ src/vnet/session/session.c | 138 ++++++++++++---------- src/vnet/session/session.h | 19 ++-- src/vnet/session/session_api.c | 58 ++++------ src/vnet/tcp/builtin_client.c | 9 +- src/vnet/tcp/builtin_server.c | 8 +- src/vnet/tcp/tcp.c | 13 ++- src/vnet/tcp/tcp_input.c | 8 +- src/vnet/tcp/tcp_output.c | 6 - src/vnet/tcp/tcp_test.c | 43 ++++--- src/vnet/udp/builtin_server.c | 8 +- src/vnet/udp/udp_input.c | 5 +- 25 files changed, 604 insertions(+), 416 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index f428d3ec..8f2ed0c9 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -57,7 +57,7 @@ format_svm_fifo (u8 * s, va_list * args) if (verbose > 1) s = format (s, "server session %d thread %d client session %d thread %d\n", - f->server_session_index, f->server_thread_index, + f->master_session_index, f->master_thread_index, f->client_session_index, f->client_thread_index); if (verbose) @@ -353,8 +353,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) } static int -svm_fifo_enqueue_internal (svm_fifo_t * f, - int pid, u32 max_bytes, u8 * copy_from_here) +svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; @@ -411,10 +410,9 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, } int -svm_fifo_enqueue_nowait (svm_fifo_t * f, - int pid, u32 max_bytes, u8 * copy_from_here) +svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) { - return svm_fifo_enqueue_internal (f, pid, max_bytes, copy_from_here); + return svm_fifo_enqueue_internal (f, max_bytes, copy_from_here); } /** @@ -426,7 +424,6 @@ svm_fifo_enqueue_nowait (svm_fifo_t * f, */ static int svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, - int pid, u32 offset, u32 required_bytes, u8 * copy_from_here) @@ -439,7 +436,7 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, /* Users would do well to avoid this */ if (PREDICT_FALSE (f->tail == (offset % f->nitems))) { - rv = svm_fifo_enqueue_internal (f, pid, required_bytes, copy_from_here); + rv = svm_fifo_enqueue_internal (f, required_bytes, copy_from_here); if (rv > 0) return 0; return -1; @@ -484,18 
+481,16 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, int svm_fifo_enqueue_with_offset (svm_fifo_t * f, - int pid, u32 offset, u32 required_bytes, u8 * copy_from_here) { - return svm_fifo_enqueue_with_offset_internal - (f, pid, offset, required_bytes, copy_from_here); + return svm_fifo_enqueue_with_offset_internal (f, offset, required_bytes, + copy_from_here); } static int -svm_fifo_dequeue_internal (svm_fifo_t * f, - int pid, u32 max_bytes, u8 * copy_here) +svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; @@ -545,14 +540,13 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, } int -svm_fifo_dequeue_nowait (svm_fifo_t * f, - int pid, u32 max_bytes, u8 * copy_here) +svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) { - return svm_fifo_dequeue_internal (f, pid, max_bytes, copy_here); + return svm_fifo_dequeue_internal (f, max_bytes, copy_here); } int -svm_fifo_peek (svm_fifo_t * f, int pid, u32 relative_offset, u32 max_bytes, +svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; @@ -590,7 +584,7 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 relative_offset, u32 max_bytes, } int -svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes) +svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes) { u32 total_drop_bytes, first_drop_bytes, second_drop_bytes; u32 cursize, nitems; diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 0fff2577..d67237c6 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -23,13 +23,6 @@ #include #include -typedef enum -{ - SVM_FIFO_TAG_NOT_HELD = 0, - SVM_FIFO_TAG_DEQUEUE, - SVM_FIFO_TAG_ENQUEUE, -} svm_lock_tag_t; - /** Out-of-order segment */ typedef struct { @@ -37,7 +30,7 @@ typedef struct u32 prev; /**< Previous linked-list element pool index */ u32 start; /**< Start of segment, normalized*/ - u32 length; /**< Length of segment */ + u32 length; /**< Length of segment */ } ooo_segment_t; format_function_t format_ooo_segment; @@ -52,12 +45,11 @@ typedef struct CLIB_CACHE_LINE_ALIGN_MARK (end_cursize); volatile u8 has_event; /**< non-zero if deq event exists */ - u32 owner_pid; /* Backpointers */ - u32 server_session_index; + u32 master_session_index; u32 client_session_index; - u8 server_thread_index; + u8 master_thread_index; u8 client_thread_index; u32 segment_manager; CLIB_CACHE_LINE_ALIGN_MARK (end_shared); @@ -117,19 +109,14 @@ svm_fifo_unset_event (svm_fifo_t * f) svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes); void svm_fifo_free (svm_fifo_t * f); -int svm_fifo_enqueue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, +int svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here); +int svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, + u32 required_bytes, u8 * copy_from_here); +int svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here); -int svm_fifo_enqueue_with_offset (svm_fifo_t * f, int pid, - u32 offset, u32 required_bytes, - u8 * copy_from_here); - -int svm_fifo_dequeue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, - u8 * copy_here); - -int svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, - u8 * copy_here); -int svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes); +int svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 max_bytes, u8 * copy_here); +int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes); u32 svm_fifo_number_ooo_segments 
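The prototypes above summarize the reworked svm_fifo API: every enqueue, dequeue, peek and drop call loses its pid argument, and the fifo backpointers are renamed from server_* to master_*. For illustration, a caller of the new signatures could look like the sketch below; it assumes only the svm_fifo.h declared in this hunk and creates a standalone fifo with svm_fifo_create (), whereas session-layer code normally obtains its fifos from a segment manager.

    #include <svm/svm_fifo.h>

    static int
    fifo_roundtrip_sketch (u8 * payload, u32 len)
    {
      svm_fifo_t *f;
      u8 *out = 0;
      int rv;

      if (len == 0)
        return -1;

      f = svm_fifo_create (4096 /* data size in bytes */ );
      if (f == 0)
        return -1;

      /* New signatures: fifo, byte count, buffer -- no pid argument */
      rv = svm_fifo_enqueue_nowait (f, len, payload);
      if (rv != (int) len)
        goto done;

      vec_validate (out, len - 1);
      rv = svm_fifo_dequeue_nowait (f, len, out);

    done:
      vec_free (out);
      svm_fifo_free (f);
      return (rv == (int) len) ? 0 : -1;
    }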
(svm_fifo_t * f); ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f); diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index acabb3bd..281fae27 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -70,6 +70,44 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) return (0); } +/** Create an svm fifo segment in process-private memory */ +int +svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) +{ + svm_fifo_segment_private_t *s; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + + /* Allocate a fresh segment */ + pool_get (sm->segments, s); + memset (s, 0, sizeof (*s)); + + s->ssvm.ssvm_size = ~0; + s->ssvm.i_am_master = 1; + s->ssvm.my_pid = getpid (); + s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.requested_va = ~0; + + /* Allocate a [sic] shared memory header, in process memory... */ + sh = clib_mem_alloc_aligned (sizeof (*sh), CLIB_CACHE_LINE_BYTES); + s->ssvm.sh = sh; + + memset (sh, 0, sizeof (*sh)); + sh->heap = clib_mem_get_heap (); + + /* Set up svm_fifo_segment shared header */ + fsh = clib_mem_alloc (sizeof (*fsh)); + memset (fsh, 0, sizeof (*fsh)); + sh->opaque[0] = fsh; + s->h = fsh; + fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + + sh->ready = 1; + a->new_segment_index = s - sm->segments; + return (0); +} + /** (slave) attach to an svm fifo segment */ int svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) @@ -82,7 +120,6 @@ svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) /* Allocate a fresh segment */ pool_get (sm->segments, s); - memset (s, 0, sizeof (*s)); s->ssvm.ssvm_size = a->segment_size; @@ -126,19 +163,22 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + ssvm_lock (sh, 1, 0); oldheap = ssvm_push_heap (sh); /* Note: this can fail, in which case: create another segment */ f = svm_fifo_create (data_size_in_bytes); - if (f == 0) + if (PREDICT_FALSE (f == 0)) { ssvm_pop_heap (oldheap); + ssvm_unlock (sh); return (0); } vec_add1 (fsh->fifos, f); - ssvm_pop_heap (oldheap); + ssvm_unlock (sh); return (f); } @@ -152,8 +192,9 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f) sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - oldheap = ssvm_push_heap (sh); + ssvm_lock (sh, 1, 0); + oldheap = ssvm_push_heap (sh); for (i = 0; i < vec_len (fsh->fifos); i++) { if (fsh->fifos[i] == f) @@ -167,6 +208,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f) found: clib_mem_free (f); ssvm_pop_heap (oldheap); + ssvm_unlock (sh); } void diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 9ab47a4c..4218013a 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -17,6 +17,7 @@ #include #include +#include typedef struct { @@ -32,6 +33,8 @@ typedef struct typedef struct { + volatile u32 lock; + /** pool of segments */ svm_fifo_segment_private_t *segments; /* Where to put the next one */ @@ -78,6 +81,8 @@ typedef enum } ssvm_fifo_segment_api_error_enum_t; int svm_fifo_segment_create (svm_fifo_segment_create_args_t * a); +int svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t + * a); int svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a); void svm_fifo_segment_delete (svm_fifo_segment_private_t * s); diff --git a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c index 
355653df..398dd6d7 100644 --- a/src/svm/test_svm_fifo1.c +++ b/src/svm/test_svm_fifo1.c @@ -25,7 +25,6 @@ hello_world (int verbose) u8 *test_data; u8 *retrieved_data = 0; clib_error_t *error = 0; - int pid = getpid (); memset (a, 0, sizeof (*a)); @@ -48,18 +47,16 @@ hello_world (int verbose) vec_validate (retrieved_data, vec_len (test_data) - 1); while (svm_fifo_max_enqueue (f) >= vec_len (test_data)) - svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + svm_fifo_enqueue_nowait (f, vec_len (test_data), test_data); while (svm_fifo_max_dequeue (f) >= vec_len (test_data)) - svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), - retrieved_data); + svm_fifo_dequeue_nowait (f, vec_len (retrieved_data), retrieved_data); while (svm_fifo_max_enqueue (f) >= vec_len (test_data)) - svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + svm_fifo_enqueue_nowait (f, vec_len (test_data), test_data); while (svm_fifo_max_dequeue (f) >= vec_len (test_data)) - svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), - retrieved_data); + svm_fifo_dequeue_nowait (f, vec_len (retrieved_data), retrieved_data); if (!memcmp (retrieved_data, test_data, vec_len (test_data))) error = clib_error_return (0, "data test OK, got '%s'", retrieved_data); @@ -81,7 +78,6 @@ master (int verbose) u8 *test_data; u8 *retrieved_data = 0; int i; - int pid = getpid (); memset (a, 0, sizeof (*a)); @@ -104,7 +100,7 @@ master (int verbose) vec_validate (retrieved_data, vec_len (test_data) - 1); for (i = 0; i < 1000; i++) - svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + svm_fifo_enqueue_nowait (f, vec_len (test_data), test_data); return clib_error_return (0, "master (enqueue) done"); } @@ -176,7 +172,6 @@ offset (int verbose) u32 *test_data = 0; u32 *recovered_data = 0; int i; - int pid = getpid (); memset (a, 0, sizeof (*a)); @@ -199,19 +194,19 @@ offset (int verbose) vec_add1 (test_data, i); /* Enqueue the first 1024 u32's */ - svm_fifo_enqueue_nowait (f, pid, 4096 /* bytes to enqueue */ , + svm_fifo_enqueue_nowait (f, 4096 /* bytes to enqueue */ , (u8 *) test_data); /* Enqueue the third 1024 u32's 2048 ahead of the current tail */ - svm_fifo_enqueue_with_offset (f, pid, 4096, 4096, (u8 *) & test_data[2048]); + svm_fifo_enqueue_with_offset (f, 4096, 4096, (u8 *) & test_data[2048]); /* Enqueue the second 1024 u32's at the current tail */ - svm_fifo_enqueue_nowait (f, pid, 4096 /* bytes to enqueue */ , + svm_fifo_enqueue_nowait (f, 4096 /* bytes to enqueue */ , (u8 *) & test_data[1024]); vec_validate (recovered_data, (3 * 1024) - 1); - svm_fifo_dequeue_nowait (f, pid, 3 * 4096, (u8 *) recovered_data); + svm_fifo_dequeue_nowait (f, 3 * 4096, (u8 *) recovered_data); for (i = 0; i < (3 * 1024); i++) { @@ -237,7 +232,6 @@ slave (int verbose) int rv; u8 *test_data; u8 *retrieved_data = 0; - int pid = getpid (); int i; memset (a, 0, sizeof (*a)); @@ -262,8 +256,7 @@ slave (int verbose) for (i = 0; i < 1000; i++) { - svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), - retrieved_data); + svm_fifo_dequeue_nowait (f, vec_len (retrieved_data), retrieved_data); if (memcmp (retrieved_data, test_data, vec_len (retrieved_data))) return clib_error_return (0, "retrieved data incorrect, '%s'", retrieved_data); diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index 2e15d36c..686c93f9 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -45,12 +45,13 @@ typedef struct svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; - u32 vpp_session_handle; + u64 
vpp_session_handle; } session_t; typedef enum { STATE_START, + STATE_ATTACHED, STATE_READY, STATE_DISCONNECTING, STATE_FAILED @@ -127,6 +128,34 @@ uri_tcp_test_main_t uri_tcp_test_main; #define NITER 4000000 #endif +static u8 * +format_api_error (u8 * s, va_list * args) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + i32 error = va_arg (*args, u32); + uword *p; + + p = hash_get (utm->error_string_by_error_number, -error); + + if (p) + s = format (s, "%s", p[0]); + else + s = format (s, "%d", error); + return s; +} + +static void +init_error_string_table (uri_tcp_test_main_t * utm) +{ + utm->error_string_by_error_number = hash_create (0, sizeof (uword)); + +#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); + foreach_vnet_api_error; +#undef _ + + hash_set (utm->error_string_by_error_number, 99, "Misc"); +} + int wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) { @@ -150,7 +179,7 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) } void -application_attach (uri_tcp_test_main_t * utm) +application_send_attach (uri_tcp_test_main_t * utm) { vl_api_application_attach_t *bmp; u32 fifo_size = 3 << 20; @@ -160,8 +189,8 @@ application_attach (uri_tcp_test_main_t * utm) bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[APP_OPTIONS_FLAGS] = + APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; @@ -169,6 +198,18 @@ application_attach (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); } +int +application_attach (uri_tcp_test_main_t * utm) +{ + application_send_attach (utm); + if (wait_for_state_change (utm, STATE_ATTACHED)) + { + clib_warning ("timeout waiting for STATE_ATTACHED"); + return -1; + } + return 0; +} + void application_detach (uri_tcp_test_main_t * utm) { @@ -192,8 +233,8 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * if (mp->retval) { - uword *errp = hash_get (utm->error_string_by_error_number, -mp->retval); - clib_warning ("attach failed: %s", *errp); + clib_warning ("attach failed: %U", format_api_error, + clib_net_to_host_u32 (mp->retval)); utm->state = STATE_FAILED; return; } @@ -220,7 +261,7 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * utm->our_event_queue = (unix_shared_memory_queue_t *) mp->app_event_queue_address; - + utm->state = STATE_ATTACHED; } static void @@ -231,18 +272,6 @@ vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t * clib_warning ("detach returned with err: %d", mp->retval); } -static void -init_error_string_table (uri_tcp_test_main_t * utm) -{ - utm->error_string_by_error_number = hash_create (0, sizeof (uword)); - -#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); - foreach_vnet_api_error; -#undef _ - - hash_set (utm->error_string_by_error_number, 99, "Misc"); -} - static void stop_signal (int signum) { @@ -392,7 +421,7 @@ client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, /* Read the bytes */ do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, + n_read = svm_fifo_dequeue_nowait (rx_fifo, clib_min (vec_len (utm->rx_buf), bytes), utm->rx_buf); if 
(n_read > 0) @@ -432,11 +461,11 @@ client_handle_event_queue (uri_tcp_test_main_t * utm) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: client_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return; default: @@ -458,11 +487,11 @@ client_rx_thread_fn (void *arg) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: client_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return 0; default: clib_warning ("unknown event type %d", e->event_type); @@ -487,9 +516,8 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) if (mp->retval) { - uword *errp = hash_get (utm->error_string_by_error_number, - -clib_net_to_host_u32 (mp->retval)); - clib_warning ("connection failed with code: %s", *errp); + clib_warning ("connection failed with code: %U", format_api_error, + clib_net_to_host_u32 (mp->retval)); utm->state = STATE_FAILED; return; } @@ -551,7 +579,7 @@ send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, { actual_write = bytes_to_snd > queue_max_chunk ? queue_max_chunk : bytes_to_snd; - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, actual_write, + rv = svm_fifo_enqueue_nowait (tx_fifo, actual_write, test_data + test_buf_offset); if (rv > 0) @@ -564,7 +592,7 @@ send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, { /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; unix_shared_memory_queue_add (utm->vpp_event_queue, @@ -619,7 +647,7 @@ client_send_data (uri_tcp_test_main_t * utm) } void -client_connect (uri_tcp_test_main_t * utm) +client_send_connect (uri_tcp_test_main_t * utm) { vl_api_connect_uri_t *cmp; cmp = vl_msg_api_alloc (sizeof (*cmp)); @@ -632,8 +660,20 @@ client_connect (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); } +int +client_connect (uri_tcp_test_main_t * utm) +{ + client_send_connect (utm); + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("Connect failed"); + return -1; + } + return 0; +} + void -client_disconnect (uri_tcp_test_main_t * utm) +client_send_disconnect (uri_tcp_test_main_t * utm) { session_t *connected_session; vl_api_disconnect_session_t *dmp; @@ -647,16 +687,29 @@ client_disconnect (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & dmp); } +int +client_disconnect (uri_tcp_test_main_t * utm) +{ + client_send_disconnect (utm); + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("Disconnect failed"); + return -1; + } + return 0; +} + static void client_test (uri_tcp_test_main_t * utm) { int i; - application_attach (utm); - client_connect (utm); + if (application_attach (utm)) + return; - if (wait_for_state_change (utm, STATE_READY)) + if (client_connect (utm)) { + application_detach (utm); return; } @@ -671,11 +724,6 @@ client_test (uri_tcp_test_main_t * utm) /* Disconnect */ client_disconnect (utm); - if (wait_for_state_change (utm, STATE_START)) - { - clib_warning ("Disconnect failed"); - return; - } application_detach (utm); } @@ -686,9 +734,8 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) if (mp->retval) { - uword *errp = hash_get (utm->error_string_by_error_number, - -clib_net_to_host_u32 (mp->retval)); - clib_warning ("bind failed: %s", (char *) *errp); + clib_warning 
("bind failed: %s", format_api_error, + clib_net_to_host_u32 (mp->retval)); utm->state = STATE_FAILED; return; } @@ -869,7 +916,7 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, /* Read the bytes */ do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf), + n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), utm->rx_buf); if (n_read > 0) bytes -= n_read; @@ -882,7 +929,7 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { do { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); + rv = svm_fifo_enqueue_nowait (tx_fifo, n_read, utm->rx_buf); } while (rv <= 0 && !utm->time_to_stop); @@ -891,7 +938,7 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = e->event_id; q = utm->vpp_event_queue; @@ -914,11 +961,11 @@ server_handle_event_queue (uri_tcp_test_main_t * utm) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: server_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return; default: @@ -936,7 +983,7 @@ server_handle_event_queue (uri_tcp_test_main_t * utm) } void -server_listen (uri_tcp_test_main_t * utm) +server_send_listen (uri_tcp_test_main_t * utm) { vl_api_bind_uri_t *bmp; bmp = vl_msg_api_alloc (sizeof (*bmp)); @@ -949,8 +996,20 @@ server_listen (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); } +int +server_listen (uri_tcp_test_main_t * utm) +{ + server_send_listen (utm); + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return -1; + } + return 0; +} + void -server_unbind (uri_tcp_test_main_t * utm) +server_send_unbind (uri_tcp_test_main_t * utm) { vl_api_unbind_uri_t *ump; @@ -963,31 +1022,33 @@ server_unbind (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); } +int +server_unbind (uri_tcp_test_main_t * utm) +{ + server_send_unbind (utm); + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("timeout waiting for STATE_START"); + return -1; + } + return 0; +} + void server_test (uri_tcp_test_main_t * utm) { - application_attach (utm); + if (application_attach (utm)) + return; /* Bind to uri */ - server_listen (utm); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } + if (server_listen (utm)) + return; /* Enter handle event loop */ server_handle_event_queue (utm); /* Cleanup */ - server_unbind (utm); - - if (wait_for_state_change (utm, STATE_START)) - { - clib_warning ("timeout waiting for STATE_START"); - return; - } + server_send_unbind (utm); application_detach (utm); diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 598052bc..266215c8 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -164,7 +164,7 @@ setup_signal_handlers (void) } void -application_attach (uri_udp_test_main_t * utm) +application_send_attach (uri_udp_test_main_t * utm) { vl_api_application_attach_t *bmp; u32 fifo_size = 3 << 20; @@ -174,8 +174,8 @@ application_attach (uri_udp_test_main_t * utm) bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + 
bmp->options[APP_OPTIONS_FLAGS] = + APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; @@ -307,7 +307,7 @@ cut_through_thread_fn (void *arg) /* We read from the tx fifo and write to the rx fifo */ do { - actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0, + actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, vec_len (my_copy_buffer), my_copy_buffer); } @@ -318,7 +318,7 @@ cut_through_thread_fn (void *arg) buffer_offset = 0; while (actual_transfer > 0) { - rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer, + rv = svm_fifo_enqueue_nowait (rx_fifo, actual_transfer, my_copy_buffer + buffer_offset); if (rv > 0) { @@ -357,7 +357,6 @@ client_send (uri_udp_test_main_t * utm, session_t * session) u64 bytes_received = 0, bytes_sent = 0; i32 bytes_to_read; int rv; - int mypid = getpid (); f64 before, after, delta, bytes_per_second; svm_fifo_t *rx_fifo, *tx_fifo; int buffer_offset, bytes_to_send = 0; @@ -382,8 +381,7 @@ client_send (uri_udp_test_main_t * utm, session_t * session) buffer_offset = 0; while (bytes_to_send > 0) { - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, - bytes_to_send, + rv = svm_fifo_enqueue_nowait (tx_fifo, bytes_to_send, test_data + buffer_offset); if (rv > 0) @@ -402,7 +400,7 @@ client_send (uri_udp_test_main_t * utm, session_t * session) buffer_offset = 0; while (bytes_to_read > 0) { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + rv = svm_fifo_dequeue_nowait (rx_fifo, bytes_to_read, utm->rx_buf + buffer_offset); if (rv > 0) @@ -415,8 +413,8 @@ client_send (uri_udp_test_main_t * utm, session_t * session) } while (bytes_received < bytes_sent) { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, - vec_len (utm->rx_buf), utm->rx_buf); + rv = + svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), utm->rx_buf); if (rv > 0) { #if CLIB_DEBUG > 0 @@ -459,7 +457,7 @@ uri_udp_client_test (uri_udp_test_main_t * utm) { session_t *session; - application_attach (utm); + application_send_attach (utm); udp_client_connect (utm); if (wait_for_state_change (utm, STATE_READY)) @@ -559,8 +557,8 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) 128 * 1024); ASSERT (session->server_tx_fifo); - session->server_rx_fifo->server_session_index = session - utm->sessions; - session->server_tx_fifo->server_session_index = session - utm->sessions; + session->server_rx_fifo->master_session_index = session - utm->sessions; + session->server_tx_fifo->master_session_index = session - utm->sessions; utm->cut_through_session_index = session - utm->sessions; rv = pthread_create (&utm->cut_through_thread_handle, @@ -805,19 +803,19 @@ server_handle_fifo_event_rx (uri_udp_test_main_t * utm, do { - nbytes = svm_fifo_dequeue_nowait (rx_fifo, 0, - vec_len (utm->rx_buf), utm->rx_buf); + nbytes = svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), + utm->rx_buf); } while (nbytes <= 0); do { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, nbytes, utm->rx_buf); + rv = svm_fifo_enqueue_nowait (tx_fifo, nbytes, utm->rx_buf); } while (rv == -2); /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = e->event_id; if (svm_fifo_set_event (tx_fifo)) @@ -839,11 +837,11 @@ server_handle_event_queue (uri_udp_test_main_t * utm) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: 
server_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return; default: @@ -893,7 +891,7 @@ void udp_server_test (uri_udp_test_main_t * utm) { - application_attach (utm); + application_send_attach (utm); /* Bind to uri */ server_listen (utm); diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 5a45537b..ccf9837f 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -87,14 +87,17 @@ application_new () void application_del (application_t * app) { - api_main_t *am = &api_main; - void *oldheap; segment_manager_t *sm; u64 handle; u32 index, *handles = 0; int i; vnet_unbind_args_t _a, *a = &_a; + /* + * The app event queue allocated in first segment is cleared with + * the segment manager. No need to explicitly free it. + */ + /* * Cleanup segment managers */ @@ -120,14 +123,6 @@ application_del (application_t * app) vnet_unbind (a); } - /* - * Free the event fifo in the /vpe-api shared-memory segment - */ - oldheap = svm_push_data_heap (am->vlib_rp); - if (app->event_queue) - unix_shared_memory_queue_free (app->event_queue); - svm_pop_heap (oldheap); - application_table_del (app); pool_put (app_pool, app); } @@ -149,30 +144,14 @@ int application_init (application_t * app, u32 api_client_index, u64 * options, session_cb_vft_t * cb_fns) { - api_main_t *am = &api_main; segment_manager_t *sm; segment_manager_properties_t *props; - void *oldheap; - u32 app_evt_queue_size; + u32 app_evt_queue_size, first_seg_size; int rv; app_evt_queue_size = options[APP_EVT_QUEUE_SIZE] > 0 ? options[APP_EVT_QUEUE_SIZE] : default_app_evt_queue_size; - /* Allocate event fifo in the /vpe-api shared-memory segment */ - oldheap = svm_push_data_heap (am->vlib_rp); - - /* Allocate server event queue */ - app->event_queue = - unix_shared_memory_queue_init (app_evt_queue_size, - sizeof (session_fifo_event_t), - 0 /* consumer pid */ , - 0 - /* (do not) signal when queue non-empty */ - ); - - svm_pop_heap (oldheap); - /* Setup segment manager */ sm = segment_manager_new (); sm->app_index = app->index; @@ -181,16 +160,21 @@ application_init (application_t * app, u32 api_client_index, u64 * options, props->rx_fifo_size = options[SESSION_OPTIONS_RX_FIFO_SIZE]; props->tx_fifo_size = options[SESSION_OPTIONS_TX_FIFO_SIZE]; props->add_segment = props->add_segment_size != 0; + props->use_private_segment = options[APP_OPTIONS_FLAGS] + & APP_OPTIONS_FLAGS_BUILTIN_APP; - if ((rv = segment_manager_init (sm, props, - options[SESSION_OPTIONS_SEGMENT_SIZE]))) + first_seg_size = options[SESSION_OPTIONS_SEGMENT_SIZE]; + if ((rv = segment_manager_init (sm, props, first_seg_size))) return rv; app->first_segment_manager = segment_manager_index (sm); app->api_client_index = api_client_index; - app->flags = options[SESSION_OPTIONS_FLAGS]; + app->flags = options[APP_OPTIONS_FLAGS]; app->cb_fns = *cb_fns; + /* Allocate app event queue in the first shared-memory segment */ + app->event_queue = segment_manager_alloc_queue (sm, app_evt_queue_size); + /* Check that the obvious things are properly set up */ application_verify_cb_fns (cb_fns); @@ -451,8 +435,8 @@ application_format_connects (application_t * app, int verbose) continue; fifo = fifos[i]; - session_index = fifo->server_session_index; - thread_index = fifo->server_thread_index; + session_index = fifo->master_session_index; + thread_index = fifo->master_thread_index; session = stream_session_get (session_index, thread_index); str = format (0, "%U", format_stream_session, session, 
verbose); diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 6bcee9d3..35caae85 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -61,18 +61,6 @@ typedef struct _application /** Flags */ u32 flags; - /* Stream server mode: accept or connect - * TODO REMOVE*/ - u8 mode; - - /** Index of the listen session or connect session - * TODO REMOVE*/ - u32 session_index; - - /** Session thread index for client connect sessions - * TODO REMOVE */ - u32 thread_index; - /* * Binary API interface to external app */ diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 96d2c621..ad44baa1 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -142,7 +142,7 @@ vnet_connect_i (u32 app_index, u32 api_context, session_type_t sst, * Server is willing to have a direct fifo connection created * instead of going through the state machine, etc. */ - if (server->flags & SESSION_OPTIONS_FLAGS_USE_FIFO) + if (server->flags & APP_OPTIONS_FLAGS_USE_FIFO) return server->cb_fns. redirect_connect_callback (server->api_client_index, mp); } @@ -363,7 +363,11 @@ vnet_disconnect_session (vnet_disconnect_args_t * a) if (!s || s->app_index != a->app_index) return VNET_API_ERROR_INVALID_VALUE; - stream_session_disconnect (s); + /* We're peeking into another's thread pool. Make sure */ + ASSERT (s->session_index == index); + + session_send_session_evt_to_thread (a->handle, FIFO_EVENT_DISCONNECT, + thread_index); return 0; } @@ -395,24 +399,6 @@ vnet_connect (vnet_connect_args_t * a) return vnet_connect_i (a->app_index, a->api_context, sst, &a->tep, a->mp); } -int -vnet_disconnect (vnet_disconnect_args_t * a) -{ - stream_session_t *session; - u32 session_index, thread_index; - - if (api_parse_session_handle (a->handle, &session_index, &thread_index)) - { - clib_warning ("Invalid handle"); - return -1; - } - - session = stream_session_get (session_index, thread_index); - stream_session_disconnect (session); - - return 0; -} - /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 2c497531..7d924c14 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -30,10 +30,18 @@ typedef enum _session_api_proto typedef struct _vnet_app_attach_args_t { + /** Binary API client index */ u32 api_client_index; + + /** Application and segment manager options */ u64 *options; + + /** Session to application callback functions */ session_cb_vft_t *session_cb_vft; + /** Flag that indicates if app is builtin */ + u8 builtin; + /* * Results */ @@ -110,7 +118,7 @@ typedef struct _vnet_disconnect_args_t typedef enum { APP_EVT_QUEUE_SIZE, - SESSION_OPTIONS_FLAGS, + APP_OPTIONS_FLAGS, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, SESSION_OPTIONS_RX_FIFO_SIZE, @@ -119,11 +127,30 @@ typedef enum SESSION_OPTIONS_N_OPTIONS } app_attach_options_index_t; -/** Server can handle delegated connect requests from local clients */ -#define SESSION_OPTIONS_FLAGS_USE_FIFO (1<<0) +#define foreach_app_options_flags \ + _(USE_FIFO, "Use FIFO with redirects") \ + _(ADD_SEGMENT, "Add segment and signal app if needed") \ + _(BUILTIN_APP, "Application is builtin") \ + +typedef enum _app_options +{ +#define _(sym, str) APP_OPTIONS_##sym, + foreach_app_options_flags +#undef _ +} app_options_t; + +typedef enum _app_options_flags +{ +#define _(sym, str) 
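Written out by hand, the two X-macro enums in this hunk expand to the values shown below. This is only an illustrative expansion of foreach_app_options_flags, not additional source; it keeps the bit positions the removed SESSION_OPTIONS_FLAGS_* defines used and adds the new builtin-app flag.

    typedef enum _app_options
    {
      APP_OPTIONS_USE_FIFO,           /* 0 */
      APP_OPTIONS_ADD_SEGMENT,        /* 1 */
      APP_OPTIONS_BUILTIN_APP,        /* 2 */
    } app_options_t;

    typedef enum _app_options_flags
    {
      APP_OPTIONS_FLAGS_USE_FIFO = 1 << 0,      /* server handles redirected connects */
      APP_OPTIONS_FLAGS_ADD_SEGMENT = 1 << 1,   /* vpp may add fifo segments on demand */
      APP_OPTIONS_FLAGS_BUILTIN_APP = 1 << 2,   /* application runs inside vpp */
    } app_options_flags_t;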
APP_OPTIONS_FLAGS_##sym = 1 << APP_OPTIONS_##sym, + foreach_app_options_flags +#undef _ +} app_options_flags_t; -/** Server wants vpp to add segments when out of memory for fifos */ -#define SESSION_OPTIONS_FLAGS_ADD_SEGMENT (1<<1) +///** Server can handle delegated connect requests from local clients */ +//#define APP_OPTIONS_FLAGS_USE_FIFO (1<<0) +// +///** Server wants vpp to add segments when out of memory for fifos */ +//#define APP_OPTIONS_FLAGS_ADD_SEGMENT (1<<1) #define VNET_CONNECT_REDIRECTED 123 @@ -138,7 +165,6 @@ int vnet_disconnect_session (vnet_disconnect_args_t * a); int vnet_bind (vnet_bind_args_t * a); int vnet_connect (vnet_connect_args_t * a); int vnet_unbind (vnet_unbind_args_t * a); -int vnet_disconnect (vnet_disconnect_args_t * a); int api_parse_session_handle (u64 handle, u32 * session_index, diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index dd211c51..210754fa 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -218,8 +218,8 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, * 2) buffer chains */ if (peek_data) { - n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, s0->pid, - rx_offset, len_to_deq0, data0); + n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, rx_offset, + len_to_deq0, data0); if (n_bytes_read <= 0) goto dequeue_fail; @@ -230,8 +230,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, else { n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo, - s0->pid, len_to_deq0, - data0); + len_to_deq0, data0); if (n_bytes_read <= 0) goto dequeue_fail; } @@ -301,6 +300,26 @@ session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, n_tx_pkts, 0); } +stream_session_t * +session_event_get_session (session_fifo_event_t * e0, u8 thread_index) +{ + svm_fifo_t *f0; + stream_session_t *s0; + u32 session_index0; + + f0 = e0->fifo; + session_index0 = f0->master_session_index; + + /* $$$ add multiple event queues, per vpp worker thread */ + ASSERT (f0->master_thread_index == thread_index); + + s0 = stream_session_get_if_valid (session_index0, thread_index); + + ASSERT (s0->thread_index == thread_index); + + return s0; +} + static uword session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) @@ -370,34 +389,24 @@ skip_dequeue: n_events = vec_len (my_fifo_events); for (i = 0; i < n_events; i++) { - svm_fifo_t *f0; /* $$$ prefetch 1 ahead maybe */ - stream_session_t *s0; - u32 session_index0; + stream_session_t *s0; /* $$$ prefetch 1 ahead maybe */ session_fifo_event_t *e0; e0 = &my_fifo_events[i]; - f0 = e0->fifo; - session_index0 = f0->server_session_index; - - /* $$$ add multiple event queues, per vpp worker thread */ - ASSERT (f0->server_thread_index == my_thread_index); - s0 = stream_session_get_if_valid (session_index0, my_thread_index); - - if (CLIB_DEBUG && !s0) + switch (e0->event_type) { - clib_warning ("It's dead, Jim!"); - continue; - } - - if (PREDICT_FALSE (s0->session_state == SESSION_STATE_CLOSED)) - continue; + case FIFO_EVENT_APP_TX: + s0 = session_event_get_session (e0, my_thread_index); - ASSERT (s0->thread_index == my_thread_index); + if (CLIB_DEBUG && !s0) + { + clib_warning ("It's dead, Jim!"); + continue; + } - switch (e0->event_type) - { - case FIFO_EVENT_SERVER_TX: + if (PREDICT_FALSE (s0->session_state == SESSION_STATE_CLOSED)) + continue; /* Spray packets in per session type frames, since they go to * different nodes */ rv = (smm->session_tx_fns[s0->session_type]) (vm, node, smm, e0, s0, @@ 
-408,10 +417,12 @@ skip_dequeue: goto done; break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: + s0 = stream_session_get_from_handle (e0->session_handle); stream_session_disconnect (s0); break; case FIFO_EVENT_BUILTIN_RX: + s0 = session_event_get_session (e0, my_thread_index); svm_fifo_unset_event (s0->server_rx_fifo); /* Get session's server */ app = application_get (s0->app_index); diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index 16e5bc56..e0532320 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -27,6 +27,11 @@ u32 segment_name_counter = 0; */ segment_manager_t *segment_managers = 0; +/** + * Process private segment index + */ +u32 private_segment_index = ~0; + /** * Default fifo and segment size. TODO config. */ @@ -100,6 +105,26 @@ session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size) return rv; } +static void +segment_manager_alloc_process_private_segment () +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + + if (private_segment_index != ~0) + return; + + memset (a, 0, sizeof (*a)); + a->segment_name = "process-private-segment"; + a->segment_size = ~0; + a->new_segment_index = ~0; + + if (svm_fifo_segment_create_process_private (a)) + clib_warning ("Failed to create process private segment"); + + private_segment_index = a->new_segment_index; + ASSERT (private_segment_index != ~0); +} + /** * Initializes segment manager based on options provided. * Returns error if svm segment allocation fails. @@ -114,7 +139,9 @@ segment_manager_init (segment_manager_t * sm, /* app allocates these */ sm->properties = properties; - if (first_seg_size > 0) + first_seg_size = first_seg_size > 0 ? first_seg_size : default_segment_size; + + if (sm->properties->use_private_segment == 0) { rv = session_manager_add_first_segment (sm, first_seg_size); if (rv) @@ -123,7 +150,15 @@ segment_manager_init (segment_manager_t * sm, return rv; } } + else + { + if (private_segment_index == ~0) + segment_manager_alloc_process_private_segment (); + ASSERT (private_segment_index != ~0); + vec_add1 (sm->segment_indices, private_segment_index); + } + clib_spinlock_init (&sm->lockp); return 0; } @@ -162,8 +197,8 @@ segment_manager_del (segment_manager_t * sm) stream_session_t *session; fifo = fifos[i]; - session_index = fifo->server_session_index; - thread_index = fifo->server_thread_index; + session_index = fifo->master_session_index; + thread_index = fifo->master_thread_index; session = stream_session_get (session_index, thread_index); @@ -183,7 +218,9 @@ segment_manager_del (segment_manager_t * sm) deleted_thread_indices[i]); /* Instead of directly removing the session call disconnect */ - stream_session_disconnect (session); + session_send_session_evt_to_thread (stream_session_handle (session), + FIFO_EVENT_DISCONNECT, + deleted_thread_indices[i]); /* stream_session_table_del (smm, session); @@ -200,6 +237,7 @@ segment_manager_del (segment_manager_t * sm) /* svm_fifo_segment_delete (fifo_segment); */ } + clib_spinlock_free (&sm->lockp); vec_free (deleted_sessions); vec_free (deleted_thread_indices); pool_put (segment_managers, sm); @@ -232,9 +270,13 @@ segment_manager_alloc_session_fifos (segment_manager_t * sm, u8 added_a_segment = 0; int i; - /* Allocate svm fifos */ ASSERT (vec_len (sm->segment_indices)); + /* Make sure we don't have multiple threads trying to allocate segments + * at the same time. 
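In condensed form, the allocation path this lock protects (a restatement of the surrounding segment_manager_alloc_session_fifos () hunk, not new code) is:

    clib_spinlock_lock (&sm->lockp);
    /* walk sm->segment_indices and try to carve the rx/tx fifos out of
       an existing segment; if none has room, add a segment and retry */
    ...
    /* record the segment-manager backpointers in both fifos */
    clib_spinlock_unlock (&sm->lockp);

    /* only after dropping the lock, tell the app about a fresh segment */
    if (added_a_segment)
      return segment_manager_notify_app_seg_add (sm, *fifo_segment_index);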
*/ + clib_spinlock_lock (&sm->lockp); + + /* Allocate svm fifos */ again: for (i = 0; i < vec_len (sm->segment_indices); i++) { @@ -283,7 +325,9 @@ again: } if (session_manager_add_segment (sm)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + { + return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + } added_a_segment = 1; goto again; @@ -295,14 +339,16 @@ again: } } - if (added_a_segment) - return segment_manager_notify_app_seg_add (sm, *fifo_segment_index); - /* Backpointers to segment manager */ sm_index = segment_manager_index (sm); (*server_tx_fifo)->segment_manager = sm_index; (*server_rx_fifo)->segment_manager = sm_index; + clib_spinlock_unlock (&sm->lockp); + + if (added_a_segment) + return segment_manager_notify_app_seg_add (sm, *fifo_segment_index); + return 0; } @@ -313,26 +359,72 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, segment_manager_t *sm; svm_fifo_segment_private_t *fifo_segment; + sm = segment_manager_get_if_valid (rx_fifo->segment_manager); + + /* It's possible to have no segment manager if the session was removed + * as result of a detach */ + if (!sm) + return; + fifo_segment = svm_fifo_get_segment (svm_segment_index); svm_fifo_segment_free_fifo (fifo_segment, rx_fifo); svm_fifo_segment_free_fifo (fifo_segment, tx_fifo); - /* If we have segment manager, try doing some cleanup. - * It's possible to have no segment manager if the session was removed - * as result of a detach */ - sm = segment_manager_get_if_valid (rx_fifo->segment_manager); - if (sm) + /* Remove segment only if it holds no fifos and not the first */ + if (sm->segment_indices[0] != svm_segment_index + && !svm_fifo_segment_has_fifos (fifo_segment)) { - /* Remove segment only if it holds no fifos and not the first */ - if (sm->segment_indices[0] != svm_segment_index - && !svm_fifo_segment_has_fifos (fifo_segment)) - { - svm_fifo_segment_delete (fifo_segment); - vec_del1 (sm->segment_indices, svm_segment_index); - } + svm_fifo_segment_delete (fifo_segment); + vec_del1 (sm->segment_indices, svm_segment_index); } } +/** + * Allocates shm queue in the first segment + */ +unix_shared_memory_queue_t * +segment_manager_alloc_queue (segment_manager_t * sm, u32 queue_size) +{ + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *segment; + unix_shared_memory_queue_t *q; + void *oldheap; + + ASSERT (sm->segment_indices != 0); + + segment = svm_fifo_get_segment (sm->segment_indices[0]); + sh = segment->ssvm.sh; + + oldheap = ssvm_push_heap (sh); + q = + unix_shared_memory_queue_init (queue_size, sizeof (session_fifo_event_t), + 0 /* consumer pid */ , 0 + /* signal when queue non-empty */ ); + ssvm_pop_heap (oldheap); + return q; +} + +/** + * Frees shm queue allocated in the first segment + */ +void +segment_manager_dealloc_queue (segment_manager_t * sm, + unix_shared_memory_queue_t * q) +{ + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *segment; + void *oldheap; + + ASSERT (sm->segment_indices != 0); + + segment = svm_fifo_get_segment (sm->segment_indices[0]); + sh = segment->ssvm.sh; + + oldheap = ssvm_push_heap (sh); + unix_shared_memory_queue_free (q); + ssvm_pop_heap (oldheap); +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h index 778d6040..2710bb54 100644 --- a/src/vnet/session/segment_manager.h +++ b/src/vnet/session/segment_manager.h @@ -18,6 +18,10 @@ #include #include +#include +#include +#include + typedef struct _segment_manager_properties { /** Session fifo sizes. 
*/ @@ -30,10 +34,14 @@ typedef struct _segment_manager_properties /** Flag that indicates if additional segments should be created */ u8 add_segment; + /** Use private memory segment instead of shared memory */ + u8 use_private_segment; } segment_manager_properties_t; typedef struct _segment_manager { + clib_spinlock_t lockp; + /** segments mapped by this manager */ u32 *segment_indices; @@ -95,6 +103,10 @@ segment_manager_alloc_session_fifos (segment_manager_t * sm, void segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, svm_fifo_t * tx_fifo); +unix_shared_memory_queue_t *segment_manager_alloc_queue (segment_manager_t * + sm, u32 queue_size); +void segment_manager_dealloc_queue (segment_manager_t * sm, + unix_shared_memory_queue_t * q); #endif /* SRC_VNET_SESSION_SEGMENT_MANAGER_H_ */ /* diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index e6cfe7da..d17c93f8 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -377,33 +377,6 @@ stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, return 0; } -/** - * Allocate vpp event queue (once) per worker thread - */ -void -session_vpp_event_queue_allocate (session_manager_main_t * smm, - u32 thread_index) -{ - api_main_t *am = &api_main; - void *oldheap; - - if (smm->vpp_event_queues[thread_index] == 0) - { - /* Allocate event fifo in the /vpe-api shared-memory segment */ - oldheap = svm_push_data_heap (am->vlib_rp); - - smm->vpp_event_queues[thread_index] = - unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , - sizeof (session_fifo_event_t), - 0 /* consumer pid */ , - 0 - /* (do not) send signal when queue non-empty */ - ); - - svm_pop_heap (oldheap); - } -} - int stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, stream_session_t ** ret_s) @@ -428,11 +401,11 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, /* Initialize backpointers */ pool_index = s - smm->sessions[thread_index]; - server_rx_fifo->server_session_index = pool_index; - server_rx_fifo->server_thread_index = thread_index; + server_rx_fifo->master_session_index = pool_index; + server_rx_fifo->master_thread_index = thread_index; - server_tx_fifo->server_session_index = pool_index; - server_tx_fifo->server_thread_index = thread_index; + server_tx_fifo->master_session_index = pool_index; + server_tx_fifo->master_thread_index = thread_index; s->server_rx_fifo = server_rx_fifo; s->server_tx_fifo = server_tx_fifo; @@ -485,7 +458,7 @@ stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, if (PREDICT_FALSE (len > svm_fifo_max_enqueue (s->server_rx_fifo))) return -1; - enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, s->pid, len, data); + enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, len, data); if (queue_event) { @@ -527,14 +500,14 @@ stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes) { stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); - return svm_fifo_peek (s->server_tx_fifo, s->pid, offset, max_bytes, buffer); + return svm_fifo_peek (s->server_tx_fifo, offset, max_bytes, buffer); } u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes) { stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); - return svm_fifo_dequeue_drop (s->server_tx_fifo, s->pid, max_bytes); + return svm_fifo_dequeue_drop (s->server_tx_fifo, max_bytes); } /** @@ -568,7 +541,7 @@ stream_session_enqueue_notify 
(stream_session_t * s, u8 block) { /* Fabricate event */ evt.fifo = s->server_rx_fifo; - evt.event_type = FIFO_EVENT_SERVER_RX; + evt.event_type = FIFO_EVENT_APP_RX; evt.event_id = serial_number++; /* Add event to server's event queue */ @@ -899,37 +872,45 @@ stream_session_stop_listen (stream_session_t * s) return 0; } +void +session_send_session_evt_to_thread (u64 session_handle, + fifo_event_type_t evt_type, + u32 thread_index) +{ + static u16 serial_number = 0; + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + + /* Fabricate event */ + evt.session_handle = session_handle; + evt.event_type = evt_type; + evt.event_id = serial_number++; + + q = session_manager_get_vpp_event_queue (thread_index); + + /* Based on request block (or not) for lack of space */ + if (PREDICT_TRUE (q->cursize < q->maxsize)) + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* do wait for mutex */ ); + else + { + clib_warning ("queue full"); + return; + } +} + /** * Disconnect session and propagate to transport. This should eventually * result in a delete notification that allows us to cleanup session state. * Called for both active/passive disconnects. + * + * Should be called from the session's thread. */ void stream_session_disconnect (stream_session_t * s) { -// session_fifo_event_t evt; - s->session_state = SESSION_STATE_CLOSED; - /* RPC to vpp evt queue in the right thread */ - tp_vfts[s->session_type].close (s->connection_index, s->thread_index); - -// { -// /* Fabricate event */ -// evt.fifo = s->server_rx_fifo; -// evt.event_type = FIFO_EVENT_SERVER_RX; -// evt.event_id = serial_number++; -// -// /* Based on request block (or not) for lack of space */ -// if (PREDICT_TRUE(q->cursize < q->maxsize)) -// unix_shared_memory_queue_add (app->event_queue, (u8 *) &evt, -// 0 /* do wait for mutex */); -// else -// { -// clib_warning("fifo full"); -// return -1; -// } -// } } /** @@ -976,6 +957,33 @@ session_get_transport_vft (u8 type) return &tp_vfts[type]; } +/** + * Allocate vpp event queue (once) per worker thread + */ +void +session_vpp_event_queue_allocate (session_manager_main_t * smm, + u32 thread_index) +{ + api_main_t *am = &api_main; + void *oldheap; + + if (smm->vpp_event_queues[thread_index] == 0) + { + /* Allocate event fifo in the /vpe-api shared-memory segment */ + oldheap = svm_push_data_heap (am->vlib_rp); + + smm->vpp_event_queues[thread_index] = + unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , + sizeof (session_fifo_event_t), + 0 /* consumer pid */ , + 0 + /* (do not) send signal when queue non-empty */ + ); + + svm_pop_heap (oldheap); + } +} + static clib_error_t * session_manager_main_enable (vlib_main_t * vm) { @@ -1043,6 +1051,18 @@ session_manager_main_enable (vlib_main_t * vm) return 0; } +void +session_node_enable_disable (u8 is_en) +{ + u8 state = is_en ? 
VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED; + /* *INDENT-OFF* */ + foreach_vlib_main (({ + vlib_node_set_state (this_vlib_main, session_queue_node.index, + state); + })); + /* *INDENT-ON* */ +} + clib_error_t * vnet_session_enable_disable (vlib_main_t * vm, u8 is_en) { @@ -1051,16 +1071,14 @@ vnet_session_enable_disable (vlib_main_t * vm, u8 is_en) if (session_manager_main.is_enabled) return 0; - vlib_node_set_state (vm, session_queue_node.index, - VLIB_NODE_STATE_POLLING); + session_node_enable_disable (is_en); return session_manager_main_enable (vm); } else { session_manager_main.is_enabled = 0; - vlib_node_set_state (vm, session_queue_node.index, - VLIB_NODE_STATE_DISABLED); + session_node_enable_disable (is_en); } return 0; diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 6e4ea96d..8cd72f35 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -17,9 +17,6 @@ #include #include -#include -#include -#include #include #include @@ -31,10 +28,10 @@ typedef enum { - FIFO_EVENT_SERVER_RX, - FIFO_EVENT_SERVER_TX, + FIFO_EVENT_APP_RX, + FIFO_EVENT_APP_TX, FIFO_EVENT_TIMEOUT, - FIFO_EVENT_SERVER_EXIT, + FIFO_EVENT_DISCONNECT, FIFO_EVENT_BUILTIN_RX } fifo_event_type_t; @@ -96,7 +93,11 @@ typedef enum /* *INDENT-OFF* */ typedef CLIB_PACKED (struct { - svm_fifo_t * fifo; + union + { + svm_fifo_t * fifo; + u64 session_handle; + }; u8 event_type; u16 event_id; }) session_fifo_event_t; @@ -370,7 +371,9 @@ int stream_session_listen (stream_session_t * s, transport_endpoint_t * tep); int stream_session_stop_listen (stream_session_t * s); void stream_session_disconnect (stream_session_t * s); void stream_session_cleanup (stream_session_t * s); - +void session_send_session_evt_to_thread (u64 session_handle, + fifo_event_type_t evt_type, + u32 thread_index); u8 *format_stream_session (u8 * s, va_list * args); void session_register_transport (u8 type, const transport_proto_vft_t * vft); diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 8116b673..79d67a2f 100755 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -96,7 +96,7 @@ send_session_accept_callback (stream_session_t * s) memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SESSION); - + mp->context = server->index; listener = listen_session_get (s->session_type, s->listener_index); tp_vft = session_get_transport_vft (s->session_type); tc = tp_vft->get_connection (s->connection_index, s->thread_index); @@ -270,23 +270,6 @@ static session_cb_vft_t uri_session_cb_vft = { .redirect_connect_callback = redirect_connect_callback }; -static int -api_session_not_valid (u32 session_index, u32 thread_index) -{ - session_manager_main_t *smm = vnet_get_session_manager_main (); - stream_session_t *pool; - - if (thread_index >= vec_len (smm->sessions)) - return VNET_API_ERROR_INVALID_VALUE; - - pool = smm->sessions[thread_index]; - - if (pool_is_free_index (pool, session_index)) - return VNET_API_ERROR_INVALID_VALUE_2; - - return 0; -} - static void vl_api_session_enable_disable_t_handler (vl_api_session_enable_disable_t * mp) { @@ -324,9 +307,9 @@ vl_api_application_attach_t_handler (vl_api_application_attach_t * mp) rv = vnet_application_attach (a); done: + /* *INDENT-OFF* */ REPLY_MACRO2 (VL_API_APPLICATION_ATTACH_REPLY, ({ - rmp->retval = rv; if (!rv) { rmp->segment_name_length = 0; @@ -558,24 +541,33 @@ static void vl_api_accept_session_reply_t_handler (vl_api_accept_session_reply_t * mp) { stream_session_t *s; - int 
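Together with the node.c changes earlier in this patch, the new event union means a disconnect no longer touches the session from a foreign thread: the initiator posts a FIFO_EVENT_DISCONNECT carrying the session handle, and the session-queue node on the owning thread performs the actual disconnect. Schematically (a condensed restatement using helpers this patch introduces):

    /* initiator: may run on any thread */
    session_send_session_evt_to_thread (stream_session_handle (s),
                                        FIFO_EVENT_DISCONNECT,
                                        s->thread_index);

    /* session-queue node, owning thread */
    case FIFO_EVENT_DISCONNECT:
      s0 = stream_session_get_from_handle (e0->session_handle);
      stream_session_disconnect (s0);
      break;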
rv; u32 session_index, thread_index; - session_index = stream_session_index_from_handle (mp->handle); - thread_index = stream_session_thread_from_handle (mp->handle); - if (api_session_not_valid (session_index, thread_index)) - return; - - s = stream_session_get (session_index, thread_index); - rv = mp->retval; + vnet_disconnect_args_t _a, *a = &_a; - if (rv) + /* Server isn't interested, kill the session */ + if (mp->retval) { - /* Server isn't interested, kill the session */ - stream_session_disconnect (s); - return; + a->app_index = mp->context; + a->handle = mp->handle; + vnet_disconnect_session (a); + } + else + { + stream_session_parse_handle (mp->handle, &session_index, &thread_index); + s = stream_session_get_if_valid (session_index, thread_index); + if (!s) + { + clib_warning ("session doesn't exist"); + return; + } + if (s->app_index != mp->context) + { + clib_warning ("app doesn't own session"); + return; + } + /* XXX volatile? */ + s->session_state = SESSION_STATE_READY; } - - s->session_state = SESSION_STATE_READY; } static void diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index f8fbf28c..276beb21 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -62,8 +62,7 @@ send_test_chunk (tclient_main_t * tm, session_t * s) bytes_this_chunk = bytes_this_chunk < s->bytes_to_send ? bytes_this_chunk : s->bytes_to_send; - rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, 0 /*pid */ , - bytes_this_chunk, + rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, bytes_this_chunk, test_data + test_buf_offset); /* If we managed to enqueue data... */ @@ -95,7 +94,7 @@ send_test_chunk (tclient_main_t * tm, session_t * s) { /* Fabricate TX event, send to vpp */ evt.fifo = s->server_tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, @@ -113,7 +112,7 @@ receive_test_chunk (tclient_main_t * tm, session_t * s) /* Allow enqueuing of new event */ // svm_fifo_unset_event (rx_fifo); - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (tm->rx_buf), + n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf), tm->rx_buf); if (n_read > 0) { @@ -457,6 +456,8 @@ attach_builtin_test_clients () options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ + options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options = options; return vnet_application_attach (a); diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 8308e3d9..34682699 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -180,7 +180,7 @@ builtin_server_rx_callback (stream_session_t * s) vec_validate (bsm->rx_buf, max_transfer - 1); _vec_len (bsm->rx_buf) = max_transfer; - actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, 0, max_transfer, + actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, bsm->rx_buf); ASSERT (actual_transfer == max_transfer); @@ -190,8 +190,7 @@ builtin_server_rx_callback (stream_session_t * s) * Echo back */ - n_written = - svm_fifo_enqueue_nowait (tx_fifo, 0, actual_transfer, bsm->rx_buf); + n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, bsm->rx_buf); if (n_written != max_transfer) clib_warning ("short trout!"); @@ -200,7 +199,7 @@ builtin_server_rx_callback (stream_session_t * s) { /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = 
FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], @@ -288,6 +287,7 @@ server_attach () a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 1 << 16; a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 1 << 16; + a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 12982589..245a35ab 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -487,7 +487,8 @@ u8 * format_tcp_connection (u8 * s, va_list * args) { tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); - + if (!tc) + return s; if (tc->c_is_ip4) { s = format (s, "[#%d][%s] %U:%d->%U:%d", tc->c_thread_index, "T", @@ -747,12 +748,14 @@ void tcp_initialize_timer_wheels (tcp_main_t * tm) { tw_timer_wheel_16t_2w_512sl_t *tw; - vec_foreach (tw, tm->timer_wheels) - { + /* *INDENT-OFF* */ + foreach_vlib_main (({ + tw = &tm->timer_wheels[ii]; tw_timer_wheel_init_16t_2w_512sl (tw, tcp_expired_timers_dispatch, 100e-3 /* timer period 100ms */ , ~0); - tw->last_run_time = vlib_time_now (tm->vlib_main); - } + tw->last_run_time = vlib_time_now (this_vlib_main); + })); + /* *INDENT-ON* */ } clib_error_t * diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 97679aaf..3bd53878 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1011,8 +1011,8 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, clib_warning ("ooo: offset %d len %d", offset, data_len); - rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, s0->pid, offset, - data_len, vlib_buffer_get_current (b)); + rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, offset, data_len, + vlib_buffer_get_current (b)); /* Nothing written */ if (rv) @@ -2392,8 +2392,8 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, { t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header)); - clib_memcpy (&t0->tcp_connection, tc0, - sizeof (t0->tcp_connection)); + if (tc0) + clib_memcpy (&t0->tcp_connection, tc0, sizeof (*tc0)); } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index a7be8bd5..4e1a7aa5 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -1558,7 +1558,6 @@ tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t *b0; tcp_tx_trace_t *t0; tcp_header_t *th0; - tcp_connection_t *tc0; u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP; bi0 = from[0]; @@ -1592,13 +1591,8 @@ tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node, th0 = ip4_next_header ((ip4_header_t *) th0); else th0 = ip6_next_header ((ip6_header_t *) th0); - tc0 = - tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, - my_thread_index); t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header)); - clib_memcpy (&t0->tcp_connection, tc0, - sizeof (t0->tcp_connection)); } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 890e50b9..0146154b 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -351,8 +351,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) /* * Enqueue an initial 
(un-dequeued) chunk */ - rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , - sizeof (u32), (u8 *) test_data); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) test_data); TCP_TEST ((rv == sizeof (u32)), "enqueued %d", rv); TCP_TEST ((f->tail == 4), "fifo tail %u", f->tail); @@ -364,7 +363,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -393,7 +392,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 0) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 0)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i, offset, offset + sizeof (u32)); @@ -418,8 +417,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) /* * Enqueue the missing u32 */ - rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32), - (u8 *) (test_data + 2)); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) (test_data + 2)); if (verbose) vlib_cli_output (vm, "fifo after missing link: %U", format_svm_fifo, f, 1); @@ -432,8 +430,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) */ for (i = 0; i < 7; i++) { - rv = svm_fifo_dequeue_nowait (f, 0 /* pid */ , sizeof (u32), - (u8 *) & data_word); + rv = svm_fifo_dequeue_nowait (f, sizeof (u32), (u8 *) & data_word); if (rv != sizeof (u32)) { clib_warning ("bytes dequeues %u", rv); @@ -457,7 +454,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -468,13 +465,13 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) } } - rv = svm_fifo_enqueue_with_offset (f, 0, 8, 21, data); + rv = svm_fifo_enqueue_with_offset (f, 8, 21, data); TCP_TEST ((rv == 0), "ooo enqueued %u", rv); TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); vec_validate (data_buf, vec_len (data)); - svm_fifo_peek (f, 0, 0, vec_len (data), data_buf); + svm_fifo_peek (f, 0, vec_len (data), data_buf); if (compare_data (data_buf, data, 8, vec_len (data), &j)) { TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]); @@ -491,7 +488,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -502,13 +499,13 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) } } - rv = svm_fifo_enqueue_nowait (f, 0, 29, data); + rv = svm_fifo_enqueue_nowait (f, 29, data); TCP_TEST ((rv == 32), "ooo enqueued %u", rv); TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); vec_validate 
(data_buf, vec_len (data)); - svm_fifo_peek (f, 0, 0, vec_len (data), data_buf); + svm_fifo_peek (f, 0, vec_len (data), data_buf); if (compare_data (data_buf, data, 0, vec_len (data), &j)) { TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]); @@ -551,7 +548,7 @@ tcp_test_fifo2 (vlib_main_t * vm) { tp = vp + i; data64 = tp->offset; - rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + rv = svm_fifo_enqueue_with_offset (f, tp->offset, tp->len, (u8 *) & data64); } @@ -565,7 +562,7 @@ tcp_test_fifo2 (vlib_main_t * vm) "first ooo seg length %u", ooo_seg->length); data64 = 0; - rv = svm_fifo_enqueue_nowait (f, 0, sizeof (u32), (u8 *) & data64); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) & data64); TCP_TEST ((rv == 3000), "bytes to be enqueued %u", rv); svm_fifo_free (f); @@ -581,7 +578,7 @@ tcp_test_fifo2 (vlib_main_t * vm) { tp = &test_data[i]; data64 = tp->offset; - rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + rv = svm_fifo_enqueue_with_offset (f, tp->offset, tp->len, (u8 *) & data64); if (rv) { @@ -599,7 +596,7 @@ tcp_test_fifo2 (vlib_main_t * vm) "first ooo seg length %u", ooo_seg->length); data64 = 0; - rv = svm_fifo_enqueue_nowait (f, 0, sizeof (u32), (u8 *) & data64); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) & data64); TCP_TEST ((rv == 3000), "bytes to be enqueued %u", rv); @@ -755,7 +752,7 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < vec_len (generate); i++) { tp = generate + i; - rv = svm_fifo_enqueue_with_offset (f, 0, fifo_initial_offset + rv = svm_fifo_enqueue_with_offset (f, fifo_initial_offset + tp->offset, tp->len, (u8 *) data_pattern + tp->offset); } @@ -776,7 +773,7 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) u32 bytes_to_enq = 1; if (in_seq_all) bytes_to_enq = total_size; - rv = svm_fifo_enqueue_nowait (f, 0, bytes_to_enq, data_pattern + 0); + rv = svm_fifo_enqueue_nowait (f, bytes_to_enq, data_pattern + 0); if (verbose) vlib_cli_output (vm, "in-order enqueue returned %d", rv); @@ -793,7 +790,7 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) * Test if peeked data is the same as original data */ vec_validate (data_buf, vec_len (data_pattern)); - svm_fifo_peek (f, 0, 0, vec_len (data_pattern), data_buf); + svm_fifo_peek (f, 0, vec_len (data_pattern), data_buf); if (compare_data (data_buf, data_pattern, 0, vec_len (data_pattern), &j)) { TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], @@ -806,11 +803,11 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) */ if (drop) { - svm_fifo_dequeue_drop (f, 0, vec_len (data_pattern)); + svm_fifo_dequeue_drop (f, vec_len (data_pattern)); } else { - svm_fifo_dequeue_nowait (f, 0, vec_len (data_pattern), data_buf); + svm_fifo_dequeue_nowait (f, vec_len (data_pattern), data_buf); if (compare_data (data_buf, data_pattern, 0, vec_len (data_pattern), &j)) { diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index 8565f04c..18684d54 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -59,10 +59,10 @@ builtin_server_rx_callback (stream_session_t * s) vec_validate (my_copy_buffer, this_transfer - 1); _vec_len (my_copy_buffer) = this_transfer; - actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, 0, this_transfer, + actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, this_transfer, my_copy_buffer); ASSERT (actual_transfer == this_transfer); - actual_transfer = svm_fifo_enqueue_nowait (tx_fifo, 0, this_transfer, + actual_transfer = 
svm_fifo_enqueue_nowait (tx_fifo, this_transfer, my_copy_buffer); ASSERT (actual_transfer == this_transfer); @@ -72,7 +72,7 @@ builtin_server_rx_callback (stream_session_t * s) { /* Fabricate TX event, send to ourselves */ evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = 0; q = session_manager_get_vpp_event_queue (s->thread_index); unix_shared_memory_queue_add (q, (u8 *) & evt, @@ -110,6 +110,8 @@ attach_builtin_uri_server () options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ + options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options = options; return vnet_application_attach (a); diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index 810278e6..e6b4f8fc 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -145,8 +145,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm, goto trace0; } - svm_fifo_enqueue_nowait (f0, 0 /* pid */ , - udp_len0 - sizeof (*udp0), + svm_fifo_enqueue_nowait (f0, udp_len0 - sizeof (*udp0), (u8 *) (udp0 + 1)); b0->error = node->errors[SESSION_ERROR_ENQUEUED]; @@ -255,7 +254,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm, { /* Fabricate event */ evt.fifo = s0->server_rx_fifo; - evt.event_type = FIFO_EVENT_SERVER_RX; + evt.event_type = FIFO_EVENT_APP_RX; evt.event_id = serial_number++; /* Add event to server's event queue */ -- cgit 1.2.3-korg From c28764fd356632763614ea579f678d8f55eca4c7 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 26 Apr 2017 00:08:42 -0700 Subject: TCP ooo reception fixes - Improve svm fifo handling of out-of-order segments - Ensure tsval_recent is updated only if rcv_las falls withing the segments's sequence space - Avoid directly dropping old ACKs - Improve debugging Change-Id: I88dbe2394a0ad7eb389a4cc12d013a13733953aa Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 144 ++++++++++++++++++++++----------------------- src/svm/svm_fifo.h | 1 + src/vnet/session/session.c | 15 +++++ src/vnet/session/session.h | 7 ++- src/vnet/tcp/tcp_debug.h | 15 +++++ src/vnet/tcp/tcp_error.def | 3 +- src/vnet/tcp/tcp_format.c | 6 +- src/vnet/tcp/tcp_input.c | 81 +++++++++++++++++-------- src/vnet/tcp/tcp_output.c | 4 +- src/vnet/tcp/tcp_test.c | 114 ++++++++++++++++++++++++++++++++--- 10 files changed, 275 insertions(+), 115 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 8f2ed0c9..9b09d0c2 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -17,6 +17,8 @@ #define offset_lt(_a, _b) ((i32)((_a)-(_b)) < 0) #define offset_leq(_a, _b) ((i32)((_a)-(_b)) <= 0) +#define offset_gt(_a, _b) ((i32)((_a)-(_b)) > 0) +#define offset_geq(_a, _b) ((i32)((_a)-(_b)) >= 0) u8 * format_ooo_segment (u8 * s, va_list * args) @@ -160,14 +162,23 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) && offset_leq (ooo_segment_offset (f, s), offset)) s = pool_elt_at_index (f->ooo_segments, s->next); + /* If we have a previous and we overlap it, use it as starting point */ + prev = ooo_segment_get_prev (f, s); + if (prev && offset_leq (offset, ooo_segment_end_offset (f, prev))) + { + s = prev; + prev = ooo_segment_get_prev (f, s); + s_sof = ooo_segment_offset (f, s); + s_eof = ooo_segment_end_offset (f, s); + goto merge; + } + s_index = s - f->ooo_segments; s_sof = ooo_segment_offset (f, s); s_eof = ooo_segment_end_offset (f, s); - prev = ooo_segment_get_prev (f, s); /* No overlap, add before current segment */ - if (offset_lt (end_offset, 
s_sof) - && (!prev || offset_lt (prev->start + prev->length, offset))) + if (offset_lt (end_offset, s_sof)) { new_s = ooo_segment_new (f, offset, length); new_index = new_s - f->ooo_segments; @@ -192,7 +203,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) return; } /* No overlap, add after current segment */ - else if (offset_lt (s_eof, offset)) + else if (offset_gt (offset, s_eof)) { new_s = ooo_segment_new (f, offset, length); new_index = new_s - f->ooo_segments; @@ -218,62 +229,16 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) * Merge needed */ +merge: + /* Merge at head */ - if (offset_leq (offset, s_sof)) + if (offset_lt (offset, s_sof)) { - /* If we have a previous, check if we overlap */ - if (s->prev != OOO_SEGMENT_INVALID_INDEX) - { - prev = pool_elt_at_index (f->ooo_segments, s->prev); - - /* New segment merges prev and current. Remove previous and - * update position of current. */ - if (offset_leq (offset, ooo_segment_end_offset (f, prev))) - { - s->start = prev->start; - s->length = s_eof - ooo_segment_offset (f, prev); - ooo_segment_del (f, s->prev); - } - else - { - s->start = offset; - s->length = s_eof - ooo_segment_offset (f, s); - } - } - else - { - s->start = offset; - s->length = s_eof - ooo_segment_offset (f, s); - } - - /* The new segment's tail may cover multiple smaller ones */ - if (offset_lt (s_eof, end_offset)) - { - /* Remove segments completely covered */ - it = (s->next != OOO_SEGMENT_INVALID_INDEX) ? - pool_elt_at_index (f->ooo_segments, s->next) : 0; - while (it && offset_lt (ooo_segment_end_offset (f, it), end_offset)) - { - next = (it->next != OOO_SEGMENT_INVALID_INDEX) ? - pool_elt_at_index (f->ooo_segments, it->next) : 0; - ooo_segment_del (f, it - f->ooo_segments); - it = next; - } - - /* Update length. Segment's start might have changed. */ - s->length = end_offset - ooo_segment_offset (f, s); - - /* If partial overlap with last, merge */ - if (it && offset_lt (ooo_segment_offset (f, it), end_offset)) - { - s->length += - it->length - (ooo_segment_offset (f, it) - end_offset); - ooo_segment_del (f, it - f->ooo_segments); - } - } + s->start = offset; + s->length = s_eof - ooo_segment_offset (f, s); } /* Last but overlapping previous */ - else if (offset_leq (s_eof, end_offset)) + else if (offset_gt (end_offset, s_eof)) { s->length = end_offset - ooo_segment_offset (f, s); } @@ -281,8 +246,33 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) else { /* Do Nothing */ + goto done; + } + + /* The new segment's tail may cover multiple smaller ones */ + if (offset_geq (end_offset, s_eof)) + { + /* Remove the completely overlapped segments */ + it = (s->next != OOO_SEGMENT_INVALID_INDEX) ? + pool_elt_at_index (f->ooo_segments, s->next) : 0; + while (it && offset_leq (ooo_segment_end_offset (f, it), end_offset)) + { + next = (it->next != OOO_SEGMENT_INVALID_INDEX) ? 
+ pool_elt_at_index (f->ooo_segments, it->next) : 0; + ooo_segment_del (f, it - f->ooo_segments); + it = next; + } + + /* If partial overlap with last, merge */ + if (it && offset_leq (ooo_segment_offset (f, it), end_offset)) + { + s->length = ooo_segment_end_offset (f, it) - + ooo_segment_offset (f, s); + ooo_segment_del (f, it - f->ooo_segments); + } } +done: /* Most recently updated segment */ f->ooos_newest = s - f->ooo_segments; } @@ -296,14 +286,17 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) { ooo_segment_t *s; u32 index, bytes = 0, diff; - u32 cursize; + u32 cursize, norm_start, nitems; /* current size has not yet been updated */ cursize = svm_fifo_max_dequeue (f) + n_bytes_enqueued; + nitems = f->nitems; s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); - diff = (f->nitems + (i32) (f->tail - s->start)) % f->nitems; + norm_start = s->start % nitems; + diff = (f->nitems + (i32) (f->tail - norm_start)) % nitems; + if (diff > cursize) return 0; @@ -326,7 +319,8 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) if (s->next != OOO_SEGMENT_INVALID_INDEX) { s = pool_elt_at_index (f->ooo_segments, s->next); - diff = (f->nitems + (i32) (f->tail - s->start)) % f->nitems; + norm_start = s->start % nitems; + diff = (f->nitems + (i32) (f->tail - norm_start)) % nitems; ooo_segment_del (f, index); } /* End of search */ @@ -340,11 +334,11 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) /* If tail is adjacent to an ooo segment, 'consume' it */ if (diff == 0) { - bytes = ((f->nitems - cursize) >= s->length) ? s->length : - f->nitems - cursize; + bytes = ((nitems - cursize) >= s->length) ? s->length : + nitems - cursize; f->tail += bytes; - f->tail %= f->nitems; + f->tail %= nitems; ooo_segment_del (f, s - f->ooo_segments); } @@ -430,31 +424,22 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; - u32 normalized_offset; - int rv; - - /* Users would do well to avoid this */ - if (PREDICT_FALSE (f->tail == (offset % f->nitems))) - { - rv = svm_fifo_enqueue_internal (f, required_bytes, copy_from_here); - if (rv > 0) - return 0; - return -1; - } + u32 normalized_offset, offset_from_tail; /* read cursize, which can only increase while we're working */ cursize = svm_fifo_max_dequeue (f); nitems = f->nitems; + normalized_offset = offset % nitems; /* Will this request fit? 
*/ - if ((required_bytes + (offset - f->tail) % nitems) > (nitems - cursize)) + offset_from_tail = (nitems + normalized_offset - f->tail) % nitems; + if ((required_bytes + offset_from_tail) > (nitems - cursize)) return -1; ooo_segment_add (f, offset, required_bytes); /* Number of bytes we're going to copy */ total_copy_bytes = required_bytes; - normalized_offset = offset % nitems; /* Number of bytes in first copy segment */ first_copy_bytes = ((nitems - normalized_offset) < total_copy_bytes) @@ -631,6 +616,15 @@ svm_fifo_first_ooo_segment (svm_fifo_t * f) return pool_elt_at_index (f->ooo_segments, f->ooos_list_head); } +/** + * Set fifo pointers to requested offset + */ +void +svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer) +{ + f->head = f->tail = pointer % f->nitems; +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index d67237c6..36158dc5 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -119,6 +119,7 @@ int svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 max_bytes, u8 * copy_here); int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes); u32 svm_fifo_number_ooo_segments (svm_fifo_t * f); ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f); +void svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer); format_function_t format_svm_fifo; diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index d17c93f8..e92bb440 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -609,6 +609,21 @@ session_manager_flush_enqueue_events (u32 thread_index) return errors; } +/** + * Init fifo tail and head pointers + * + * Useful if transport uses absolute offsets for tracking ooo segments. + */ +void +stream_session_init_fifos_pointers (transport_connection_t * tc, + u32 rx_pointer, u32 tx_pointer) +{ + stream_session_t *s; + s = stream_session_get (tc->s_index, tc->thread_index); + svm_fifo_init_pointers (s->server_rx_fifo, rx_pointer); + svm_fifo_init_pointers (s->server_tx_fifo, tx_pointer); +} + void stream_session_connect_notify (transport_connection_t * tc, u8 sst, u8 is_fail) diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 8cd72f35..f41a8a96 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -352,9 +352,10 @@ stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes); u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes); -void -stream_session_connect_notify (transport_connection_t * tc, u8 sst, - u8 is_fail); +void stream_session_connect_notify (transport_connection_t * tc, u8 sst, + u8 is_fail); +void stream_session_init_fifos_pointers (transport_connection_t * tc, + u32 rx_pointer, u32 tx_pointer); void stream_session_accept_notify (transport_connection_t * tc); void stream_session_disconnect_notify (transport_connection_t * tc); diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index ecbf7887..b4497a3b 100755 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -50,6 +50,7 @@ _(CC_EVT, "cc event") \ _(CC_PACK, "cc partial ack") \ _(SEG_INVALID, "invalid segment") \ + _(PAWS_FAIL, "failed paws check") \ _(ACK_RCV_ERR, "invalid ack") \ _(RCV_WND_SHRUNK, "shrunk rcv_wnd") \ @@ -382,6 +383,20 @@ typedef enum _tcp_dbg_evt ed->data[4] = _tc->rcv_wnd; \ } +#define TCP_EVT_PAWS_FAIL_HANDLER(_tc, _seq, _end, ...) 
\ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "paws fail: seq %u end %u tsval %u tsval_recent %u", \ + .format_args = "i4i4i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 4); \ + ed->data[0] = _seq - _tc->irs; \ + ed->data[1] = _end - _tc->irs; \ + ed->data[2] = _tc->opt.tsval; \ + ed->data[3] = _tc->tsval_recent; \ +} + #define TCP_EVT_ACK_RCV_ERR_HANDLER(_tc, _type, _ack, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ diff --git a/src/vnet/tcp/tcp_error.def b/src/vnet/tcp/tcp_error.def index 0d75d975..a4e46d64 100644 --- a/src/vnet/tcp/tcp_error.def +++ b/src/vnet/tcp/tcp_error.def @@ -37,4 +37,5 @@ tcp_error (PKTS_SENT, "Packets sent") tcp_error (FILTERED_DUPACKS, "Filtered duplicate ACKs") tcp_error (RST_SENT, "Resets sent") tcp_error (INVALID_CONNECTION, "Invalid connection") -tcp_error (NO_WND, "No window") \ No newline at end of file +tcp_error (NO_WND, "No window") +tcp_error (CONNECTION_CLOSED, "Connection closed") \ No newline at end of file diff --git a/src/vnet/tcp/tcp_format.c b/src/vnet/tcp/tcp_format.c index 3148fd40..4de99235 100644 --- a/src/vnet/tcp/tcp_format.c +++ b/src/vnet/tcp/tcp_format.c @@ -131,11 +131,13 @@ format_tcp_header (u8 * s, va_list * args) u8 * format_tcp_sacks (u8 * s, va_list * args) { - sack_block_t *sacks = va_arg (*args, sack_block_t *); + tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); + sack_block_t *sacks = tc->snd_sacks; sack_block_t *block; vec_foreach (block, sacks) { - s = format (s, " start %u end %u\n", block->start, block->end); + s = format (s, " start %u end %u\n", block->start - tc->irs, + block->end - tc->irs); } return s; } diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 3c65a5ea..0030cfe2 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -208,6 +208,15 @@ tcp_options_parse (tcp_header_t * th, tcp_options_t * to) } } +/** + * RFC1323: Check against wrapped sequence numbers (PAWS). If we have + * timestamp to echo and it's less than tsval_recent, drop segment + * but still send an ACK in order to retain TCP's mechanism for detecting + * and recovering from half-open connections + * + * Or at least that's what the theory says. It seems that this might not work + * very well with packet reordering and fast retransmit. XXX + */ always_inline int tcp_segment_check_paws (tcp_connection_t * tc) { @@ -215,6 +224,27 @@ tcp_segment_check_paws (tcp_connection_t * tc) && timestamp_lt (tc->opt.tsval, tc->tsval_recent); } +/** + * Update tsval recent + */ +always_inline void +tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end) +{ + /* + * RFC1323: If Last.ACK.sent falls within the range of sequence numbers + * of an incoming segment: + * SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN + * then the TSval from the segment is copied to TS.Recent; + * otherwise, the TSval is ignored. + */ + if (tcp_opts_tstamp (&tc->opt) && tc->tsval_recent + && seq_leq (seq, tc->rcv_las) && seq_leq (tc->rcv_las, seq_end)) + { + tc->tsval_recent = tc->opt.tsval; + tc->tsval_recent_age = tcp_time_now (); + } +} + /** * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19 * @@ -228,21 +258,16 @@ static int tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, vlib_buffer_t * b0, tcp_header_t * th0, u32 * next0) { - u8 paws_failed; - if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0))) return -1; tcp_options_parse (th0, &tc0->opt); - /* RFC1323: Check against wrapped sequence numbers (PAWS). 
If we have - * timestamp to echo and it's less than tsval_recent, drop segment - * but still send an ACK in order to retain TCP's mechanism for detecting - * and recovering from half-open connections */ - paws_failed = tcp_segment_check_paws (tc0); - if (paws_failed) + if (tcp_segment_check_paws (tc0)) { clib_warning ("paws failed"); + TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number, + vnet_buffer (b0)->tcp.seq_end); /* If it just so happens that a segment updates tsval_recent for a * segment over 24 days old, invalidate tsval_recent. */ @@ -251,6 +276,7 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, { /* Age isn't reset until we get a valid tsval (bsd inspired) */ tc0->tsval_recent = 0; + clib_warning ("paws failed - really old segment. REALLY?"); } else { @@ -305,12 +331,9 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, return -1; } - /* If PAWS passed and segment in window, save timestamp */ - if (!paws_failed) - { - tc0->tsval_recent = tc0->opt.tsval; - tc0->tsval_recent_age = tcp_time_now (); - } + /* If segment in window, save timestamp */ + tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number, + vnet_buffer (b0)->tcp.seq_end); return 0; } @@ -835,7 +858,8 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc); tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number); } - return -1; + /* Don't drop yet */ + return 0; } if (tcp_opts_sack_permitted (&tc->opt)) @@ -932,10 +956,6 @@ tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end) { vec_add1 (new_list, tc->snd_sacks[i]); } - else - { - clib_warning ("dropped sack blocks"); - } } ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS); @@ -1011,7 +1031,6 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, u16 data_len) { stream_session_t *s0; - u32 offset; int rv; /* Pure ACK. 
Do nothing */ @@ -1021,12 +1040,11 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, } s0 = stream_session_get (tc->c_s_index, tc->c_thread_index); - offset = vnet_buffer (b)->tcp.seq_number - tc->irs; - clib_warning ("ooo: offset %d len %d", offset, data_len); - - rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, offset, data_len, - vlib_buffer_get_current (b)); + /* Enqueue out-of-order data with absolute offset */ + rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, + vnet_buffer (b)->tcp.seq_number, + data_len, vlib_buffer_get_current (b)); /* Nothing written */ if (rv) @@ -1542,6 +1560,9 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Notify app that we have connection */ stream_session_connect_notify (&new_tc0->connection, sst, 0); + stream_session_init_fifos_pointers (&new_tc0->connection, + new_tc0->irs + 1, + new_tc0->iss + 1); /* Make sure after data segment processing ACK is sent */ new_tc0->flags |= TCP_CONN_SNDACK; } @@ -1552,7 +1573,9 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Notify app that we have connection */ stream_session_connect_notify (&new_tc0->connection, sst, 0); - + stream_session_init_fifos_pointers (&new_tc0->connection, + new_tc0->irs + 1, + new_tc0->iss + 1); tcp_make_synack (new_tc0, b0); next0 = tcp_next_output (is_ip4); @@ -2139,6 +2162,10 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_make_synack (child0, b0); next0 = tcp_next_output (is_ip4); + /* Init fifo pointers after we have iss */ + stream_session_init_fifos_pointers (&child0->connection, + child0->irs + 1, + child0->iss + 1); drop: if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -2474,6 +2501,7 @@ do { \ _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE); /* ACK for for a SYN-ACK -> tcp-rcv-process. */ _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); /* SYN-ACK for a SYN */ _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE); @@ -2499,6 +2527,7 @@ do { \ _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); #undef _ } diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 4e1a7aa5..a85d30da 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -359,7 +359,8 @@ tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts) { opts->flags |= TCP_OPTS_FLAG_SACK; opts->sacks = tc->snd_sacks; - opts->n_sack_blocks = vec_len (tc->snd_sacks); + opts->n_sack_blocks = clib_min (vec_len (tc->snd_sacks), + TCP_OPTS_MAX_SACK_BLOCKS); len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks; } } @@ -917,6 +918,7 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, vnet_buffer (b)->tcp.connection_index = tc->c_c_index; tc->snd_nxt += data_len; + tc->rcv_las = tc->rcv_nxt; /* TODO this is updated in output as well ... 
*/ if (tc->snd_nxt > tc->snd_una_max) diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index ed032206..a457ac8f 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -231,7 +231,7 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) tcp_update_sack_list (tc, 300, 300); if (verbose) vlib_cli_output (vm, "overlap first 2 segments:\n%U", - format_tcp_sacks, tc->snd_sacks); + format_tcp_sacks, tc); TCP_TEST ((vec_len (tc->snd_sacks) == 3), "sack blocks %d expected %d", vec_len (tc->snd_sacks), 3); TCP_TEST ((tc->snd_sacks[0].start == 900), @@ -244,7 +244,7 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) tcp_update_sack_list (tc, 1100, 1200); if (verbose) vlib_cli_output (vm, "add new segment [1100, 1200]\n%U", - format_tcp_sacks, tc->snd_sacks); + format_tcp_sacks, tc); TCP_TEST ((vec_len (tc->snd_sacks) == 4), "sack blocks %d expected %d", vec_len (tc->snd_sacks), 4); TCP_TEST ((tc->snd_sacks[0].start == 1100), @@ -257,7 +257,7 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) tcp_update_sack_list (tc, 800, 900); if (verbose) vlib_cli_output (vm, "join middle segments [800, 900]\n%U", - format_tcp_sacks, tc->snd_sacks); + format_tcp_sacks, tc); TCP_TEST ((vec_len (tc->snd_sacks) == 3), "sack blocks %d expected %d", vec_len (tc->snd_sacks), 3); @@ -271,8 +271,7 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) tc->rcv_nxt = 1200; tcp_update_sack_list (tc, 1200, 1200); if (verbose) - vlib_cli_output (vm, "advance rcv_nxt to 1200\n%U", - format_tcp_sacks, tc->snd_sacks); + vlib_cli_output (vm, "advance rcv_nxt to 1200\n%U", format_tcp_sacks, tc); TCP_TEST ((vec_len (tc->snd_sacks) == 0), "sack blocks %d expected %d", vec_len (tc->snd_sacks), 0); return 0; @@ -502,7 +501,13 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); + if (i == 0) + { + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), data); + rv = rv > 0 ? 
0 : rv; + } + else + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -517,6 +522,26 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) vlib_cli_output (vm, "fifo after odd segs: %U", format_svm_fifo, f, 1); TCP_TEST ((f->tail == 8), "fifo tail %u", f->tail); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 2), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + + /* + * Try adding a completely overlapped segment + */ + offset = 3 * sizeof (u32); + data = (u8 *) (test_data + 3); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); + if (rv) + { + clib_warning ("enqueue returned %d", rv); + goto err; + } + + if (verbose) + vlib_cli_output (vm, "fifo after overlap seg: %U", format_svm_fifo, f, 1); + + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 2), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); /* * Make sure format functions are not buggy @@ -887,7 +912,7 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) f->head = fifo_initial_offset; f->tail = fifo_initial_offset; - for (i = 0; i < vec_len (generate); i++) + for (i = !randomize; i < vec_len (generate); i++) { tp = generate + i; svm_fifo_enqueue_with_offset (f, fifo_initial_offset + tp->offset, @@ -895,6 +920,10 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) (u8 *) data_pattern + tp->offset); } + /* Add the first segment in order for non random data */ + if (!randomize) + svm_fifo_enqueue_nowait (f, generate[0].len, (u8 *) data_pattern); + /* * Expected result: one big fat chunk at offset 1 if randomize == 1 */ @@ -964,6 +993,73 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) return 0; } +static int +tcp_test_fifo4 (vlib_main_t * vm, unformat_input_t * input) +{ + svm_fifo_t *f; + u32 fifo_size = 6 << 10; + u32 fifo_initial_offset = 1000000000; + u32 test_n_bytes = 5000, j; + u8 *test_data = 0, *data_buf = 0; + int i, rv, verbose = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "verbose")) + verbose = 1; + else + { + clib_error_t *e = clib_error_return + (0, "unknown input `%U'", format_unformat_error, input); + clib_error_report (e); + return -1; + } + } + + /* + * Create a fifo and add segments + */ + f = fifo_prepare (fifo_size); + + /* Set head and tail pointers */ + fifo_initial_offset = fifo_initial_offset % fifo_size; + svm_fifo_init_pointers (f, fifo_initial_offset); + + vec_validate (test_data, test_n_bytes - 1); + for (i = 0; i < vec_len (test_data); i++) + test_data[i] = i; + + for (i = test_n_bytes - 1; i > 0; i--) + { + rv = svm_fifo_enqueue_with_offset (f, fifo_initial_offset + i, + sizeof (u8), &test_data[i]); + if (verbose) + vlib_cli_output (vm, "add [%d] [%d, %d]", i, i, i + sizeof (u8)); + if (rv) + { + clib_warning ("enqueue returned %d", rv); + svm_fifo_free (f); + vec_free (test_data); + return -1; + } + } + + svm_fifo_enqueue_nowait (f, sizeof (u8), &test_data[0]); + + vec_validate (data_buf, vec_len (test_data)); + + svm_fifo_dequeue_nowait (f, vec_len (test_data), data_buf); + rv = compare_data (data_buf, test_data, 0, vec_len (test_data), &j); + if (rv) + vlib_cli_output (vm, "[%d] dequeued %u expected %u", j, data_buf[j], + test_data[j]); + TCP_TEST ((rv == 0), "dequeued compared to original returned %d", rv); + + svm_fifo_free (f); + vec_free (test_data); + return 0; +} + static int tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) 
{ @@ -1028,6 +1124,10 @@ tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) { res = tcp_test_fifo1 (vm, input); } + else if (unformat (input, "fifo4")) + { + res = tcp_test_fifo4 (vm, input); + } } return res; -- cgit 1.2.3-korg From e72be39cd0f498178fd62dfc0a0b0daa2b633f62 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Wed, 26 Apr 2017 13:59:20 -0700 Subject: A sprinkling of const in vlibmemory/api.h and friends Change-Id: I953ebb37eeec7de0c4a6b00258c3c67a83cbc020 Signed-off-by: Neale Ranns --- src/svm/svm.c | 6 +++--- src/svm/svm.h | 10 +++++----- src/svm/svmdb.h | 2 +- src/vlibapi/api.h | 11 ++++++----- src/vlibapi/api_shared.c | 4 ++-- src/vlibmemory/api.h | 20 ++++++++++---------- src/vlibmemory/memory_client.c | 16 +++++++++------- src/vlibmemory/memory_shared.c | 4 ++-- src/vlibmemory/memory_vlib.c | 4 ++-- src/vpp/api/api_main.c | 2 +- 10 files changed, 41 insertions(+), 38 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm.c b/src/svm/svm.c index e4ca98e1..97add5a7 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -796,7 +796,7 @@ svm_region_init (void) } void -svm_region_init_chroot (char *root_path) +svm_region_init_chroot (const char *root_path) { svm_map_region_args_t _a, *a = &_a; @@ -813,7 +813,7 @@ svm_region_init_chroot (char *root_path) } void -svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid) +svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid) { svm_map_region_args_t _a, *a = &_a; @@ -1151,7 +1151,7 @@ svm_client_scan_this_region_nolock (svm_region_t * rp) * Scan svm regions for dead clients */ void -svm_client_scan (char *root_path) +svm_client_scan (const char *root_path) { int i, j; svm_main_region_t *mp; diff --git a/src/svm/svm.h b/src/svm/svm.h index 0b87dbcb..06797fa1 100644 --- a/src/svm/svm.h +++ b/src/svm/svm.h @@ -69,8 +69,8 @@ typedef struct svm_region_ typedef struct svm_map_region_args_ { - char *root_path; /* NULL means use the truly global arena */ - char *name; + const char *root_path; /* NULL means use the truly global arena */ + const char *name; u64 baseva; u64 size; u64 pvt_heap_size; @@ -115,12 +115,12 @@ typedef struct void *svm_region_find_or_create (svm_map_region_args_t * a); void svm_region_init (void); -void svm_region_init_chroot (char *root_path); -void svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid); +void svm_region_init_chroot (const char *root_path); +void svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid); void svm_region_init_args (svm_map_region_args_t * a); void svm_region_exit (void); void svm_region_unmap (void *rp_arg); -void svm_client_scan (char *root_path); +void svm_client_scan (const char *root_path); void svm_client_scan_this_region_nolock (svm_region_t * rp); u8 *shm_name_from_svm_map_region_args (svm_map_region_args_t * a); diff --git a/src/svm/svmdb.h b/src/svm/svmdb.h index e02628a0..e35be8aa 100644 --- a/src/svm/svmdb.h +++ b/src/svm/svmdb.h @@ -83,7 +83,7 @@ typedef struct typedef struct { - char *root_path; + const char *root_path; uword size; u32 uid; u32 gid; diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index a62fa644..7538050e 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -127,7 +127,7 @@ typedef struct void (**msg_cleanup_handlers) (void *); void (**msg_endian_handlers) (void *); void (**msg_print_handlers) (void *, void *); - char **msg_names; + const char **msg_names; u8 *message_bounce; u8 *is_mp_safe; struct ring_alloc_ *arings; @@ -195,8 +195,8 @@ typedef struct /* client side message index 
hash table */ uword *msg_index_by_name_and_crc; - char *region_name; - char *root_path; + const char *region_name; + const char *root_path; /* Replay in progress? */ int replay_in_progress; @@ -276,8 +276,9 @@ void vl_msg_api_register_pd_handler (void *handler, int vl_msg_api_pd_handler (void *mp, int rv); void vl_msg_api_set_first_available_msg_id (u16 first_avail); -u16 vl_msg_api_get_msg_ids (char *name, int n); -void vl_msg_api_add_msg_name_crc (api_main_t * am, char *string, u32 id); +u16 vl_msg_api_get_msg_ids (const char *name, int n); +void vl_msg_api_add_msg_name_crc (api_main_t * am, const char *string, + u32 id); u32 vl_api_get_msg_index (u8 * name_and_crc); /* node_serialize.c prototypes */ diff --git a/src/vlibapi/api_shared.c b/src/vlibapi/api_shared.c index 6774e3dd..0817f38e 100644 --- a/src/vlibapi/api_shared.c +++ b/src/vlibapi/api_shared.c @@ -828,7 +828,7 @@ vl_msg_api_set_first_available_msg_id (u16 first_avail) } u16 -vl_msg_api_get_msg_ids (char *name, int n) +vl_msg_api_get_msg_ids (const char *name, int n) { api_main_t *am = &api_main; u8 *name_copy; @@ -872,7 +872,7 @@ vl_msg_api_get_msg_ids (char *name, int n) } void -vl_msg_api_add_msg_name_crc (api_main_t * am, char *string, u32 id) +vl_msg_api_add_msg_name_crc (api_main_t * am, const char *string, u32 id) { uword *p; diff --git a/src/vlibmemory/api.h b/src/vlibmemory/api.h index 8e44c20d..c195e181 100644 --- a/src/vlibmemory/api.h +++ b/src/vlibmemory/api.h @@ -123,20 +123,20 @@ void *vl_msg_api_alloc_or_null (int nbytes); void *vl_msg_api_alloc_as_if_client (int nbytes); void *vl_msg_api_alloc_as_if_client_or_null (int nbytes); void vl_msg_api_free (void *a); -int vl_map_shmem (char *region_name, int is_vlib); +int vl_map_shmem (const char *region_name, int is_vlib); void vl_register_mapped_shmem_region (svm_region_t * rp); void vl_unmap_shmem (void); void vl_msg_api_send_shmem (unix_shared_memory_queue_t * q, u8 * elem); void vl_msg_api_send_shmem_nolock (unix_shared_memory_queue_t * q, u8 * elem); void vl_msg_api_send (vl_api_registration_t * rp, u8 * elem); -int vl_client_connect (char *name, int ctx_quota, int input_queue_size); +int vl_client_connect (const char *name, int ctx_quota, int input_queue_size); void vl_client_disconnect (void); unix_shared_memory_queue_t *vl_api_client_index_to_input_queue (u32 index); vl_api_registration_t *vl_api_client_index_to_registration (u32 index); -int vl_client_api_map (char *region_name); +int vl_client_api_map (const char *region_name); void vl_client_api_unmap (void); -void vl_set_memory_region_name (char *name); -void vl_set_memory_root_path (char *root_path); +void vl_set_memory_region_name (const char *name); +void vl_set_memory_root_path (const char *root_path); void vl_set_memory_uid (int uid); void vl_set_memory_gid (int gid); void vl_set_global_memory_baseva (u64 baseva); @@ -146,12 +146,12 @@ void vl_set_global_pvt_heap_size (u64 size); void vl_set_api_pvt_heap_size (u64 size); void vl_enable_disable_memory_api (vlib_main_t * vm, int yesno); void vl_client_disconnect_from_vlib (void); -int vl_client_connect_to_vlib (char *svm_name, char *client_name, - int rx_queue_size); -int vl_client_connect_to_vlib_no_rx_pthread (char *svm_name, - char *client_name, +int vl_client_connect_to_vlib (const char *svm_name, + const char *client_name, int rx_queue_size); +int vl_client_connect_to_vlib_no_rx_pthread (const char *svm_name, + const char *client_name, int rx_queue_size); -u16 vl_client_get_first_plugin_msg_id (char *plugin_name); +u16 
vl_client_get_first_plugin_msg_id (const char *plugin_name); void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length); diff --git a/src/vlibmemory/memory_client.c b/src/vlibmemory/memory_client.c index 25b06f65..d48a4fa1 100644 --- a/src/vlibmemory/memory_client.c +++ b/src/vlibmemory/memory_client.c @@ -155,7 +155,7 @@ noop_handler (void *notused) } int -vl_client_connect (char *name, int ctx_quota, int input_queue_size) +vl_client_connect (const char *name, int ctx_quota, int input_queue_size) { svm_region_t *svm; vl_api_memclnt_create_t *mp; @@ -326,7 +326,7 @@ _(MEMCLNT_DELETE_REPLY, memclnt_delete_reply) int -vl_client_api_map (char *region_name) +vl_client_api_map (const char *region_name) { int rv; @@ -354,7 +354,8 @@ vl_client_api_unmap (void) } static int -connect_to_vlib_internal (char *svm_name, char *client_name, +connect_to_vlib_internal (const char *svm_name, + const char *client_name, int rx_queue_size, int want_pthread) { int rv = 0; @@ -388,15 +389,16 @@ connect_to_vlib_internal (char *svm_name, char *client_name, } int -vl_client_connect_to_vlib (char *svm_name, char *client_name, - int rx_queue_size) +vl_client_connect_to_vlib (const char *svm_name, + const char *client_name, int rx_queue_size) { return connect_to_vlib_internal (svm_name, client_name, rx_queue_size, 1 /* want pthread */ ); } int -vl_client_connect_to_vlib_no_rx_pthread (char *svm_name, char *client_name, +vl_client_connect_to_vlib_no_rx_pthread (const char *svm_name, + const char *client_name, int rx_queue_size) { return connect_to_vlib_internal (svm_name, client_name, rx_queue_size, @@ -437,7 +439,7 @@ static void vl_api_get_first_msg_id_reply_t_handler } u16 -vl_client_get_first_plugin_msg_id (char *plugin_name) +vl_client_get_first_plugin_msg_id (const char *plugin_name) { vl_api_get_first_msg_id_t *mp; api_main_t *am = &api_main; diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index 6cea5df9..aea90330 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -257,7 +257,7 @@ vl_msg_api_free_nolock (void *a) } void -vl_set_memory_root_path (char *name) +vl_set_memory_root_path (const char *name) { api_main_t *am = &api_main; @@ -321,7 +321,7 @@ vl_set_api_pvt_heap_size (u64 size) } int -vl_map_shmem (char *region_name, int is_vlib) +vl_map_shmem (const char *region_name, int is_vlib) { svm_map_region_args_t _a, *a = &_a; svm_region_t *vlib_rp, *root_rp; diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index 43574dea..29a5c2c2 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -361,7 +361,7 @@ _(GET_FIRST_MSG_ID, get_first_msg_id) * vl_api_init */ static int -memory_api_init (char *region_name) +memory_api_init (const char *region_name) { int rv; vl_msg_api_msg_config_t cfg; @@ -1202,7 +1202,7 @@ vlibmemory_init (vlib_main_t * vm) VLIB_INIT_FUNCTION (vlibmemory_init); void -vl_set_memory_region_name (char *name) +vl_set_memory_region_name (const char *name) { api_main_t *am = &api_main; diff --git a/src/vpp/api/api_main.c b/src/vpp/api/api_main.c index d48e4eff..ac09cd15 100644 --- a/src/vpp/api/api_main.c +++ b/src/vpp/api/api_main.c @@ -211,7 +211,7 @@ api_cli_output (void *notused, const char *fmt, ...) 
} u16 -vl_client_get_first_plugin_msg_id (char *plugin_name) +vl_client_get_first_plugin_msg_id (const char *plugin_name) { api_main_t *am = &api_main; vl_api_msg_range_t *rp; -- cgit 1.2.3-korg From 7bee80c823ca77de3aca803fdede77e4c7385a52 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Wed, 26 Apr 2017 15:32:12 +0200 Subject: Fix remaining 32-bit compile issues Change-Id: I9664214652229b663c3e3ba7406b4ede96bfb123 Signed-off-by: Damjan Marion --- Makefile | 8 ++++---- build-data/platforms/vpp.mk | 5 +++++ src/plugins/dpdk/buffer.c | 6 +++--- src/plugins/ixge/ixge.c | 5 +++-- src/svm/svm.c | 11 ++++++----- src/svm/svmtool.c | 4 ++-- src/tools/vppapigen/gram.y | 6 +++--- src/tools/vppapigen/node.c | 4 ++-- src/uri/uri_tcp_test.c | 21 ++++++++++++--------- src/uri/uri_udp_test.c | 15 +++++++++------ src/vat/api_format.c | 10 +++++----- src/vlib/threads.c | 2 +- src/vlibmemory/memory_client.c | 2 +- src/vlibmemory/memory_vlib.c | 3 ++- src/vnet/devices/virtio/vhost-user.c | 2 +- src/vnet/session/application_interface.c | 2 +- src/vnet/session/session_api.c | 14 +++++++------- src/vnet/tcp/builtin_client.c | 19 +++++++++++-------- src/vnet/tcp/builtin_server.c | 2 +- src/vppinfra/mheap.c | 2 +- 20 files changed, 80 insertions(+), 63 deletions(-) (limited to 'src/svm') diff --git a/Makefile b/Makefile index 8240e789..b344f377 100644 --- a/Makefile +++ b/Makefile @@ -263,9 +263,9 @@ define test TEST_DIR=$(WS_ROOT)/test \ VPP_TEST_BUILD_DIR=$(BR)/build-$(2)-native \ VPP_TEST_BIN=$(BR)/install-$(2)-native/vpp/bin/vpp \ - VPP_TEST_PLUGIN_PATH=$(BR)/install-$(2)-native/vpp/lib64/vpp_plugins \ + VPP_TEST_PLUGIN_PATH=$(wildcard $(BR)/install-$(2)-native/vpp/lib*/vpp_plugins) \ VPP_TEST_INSTALL_PATH=$(BR)/install-$(2)-native/ \ - LD_LIBRARY_PATH=$(BR)/install-$(2)-native/vpp/lib64/ \ + LD_LIBRARY_PATH=$(subst $(subst ,, ),:,$(wildcard $(BR)/install-$(2)-native/vpp/lib*/)) \ EXTENDED_TESTS=$(EXTENDED_TESTS) \ PYTHON=$(PYTHON) \ $(3) @@ -325,12 +325,12 @@ define run @echo "WARNING: STARTUP_CONF not defined or file doesn't exist." 
@echo " Running with minimal startup config: $(MINIMAL_STARTUP_CONF)\n" @cd $(STARTUP_DIR) && \ - sudo $(2) $(1)/vpp/bin/vpp $(MINIMAL_STARTUP_CONF) plugin_path $(1)/vpp/lib64/vpp_plugins + sudo $(2) $(1)/vpp/bin/vpp $(MINIMAL_STARTUP_CONF) plugin_path $(wildcard $(1)/vpp/lib*/vpp_plugins) endef else define run @cd $(STARTUP_DIR) && \ - sudo $(2) $(1)/vpp/bin/vpp $(shell cat $(STARTUP_CONF) | sed -e 's/#.*//') plugin_path $(1)/vpp/lib64/vpp_plugins + sudo $(2) $(1)/vpp/bin/vpp $(shell cat $(STARTUP_CONF) | sed -e 's/#.*//') plugin_path $(wildcard $(1)/vpp/lib*/vpp_plugins) endef endif diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index 5aafdd76..4577fa2e 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -46,6 +46,11 @@ vpp_root_packages = vpp gmod # vpp_dpdk_lib_dir = /usr/lib # vpp_dpdk_shared_lib = yes +# load balancer plugin is not portable on 32 bit platform +ifeq ($(MACHINE),i686) +vpp_configure_args_vpp = --disable-lb-plugin +endif + vpp_debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ -fstack-protector-all -fPIC -Werror vpp_debug_TAG_LDFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index c80b3fa8..2d4762ab 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -455,8 +455,8 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, uword save_vpm_start, save_vpm_end, save_vpm_size; struct rte_mempool_memhdr *memhdr; - this_pool_start = ~0ULL; - this_pool_end = 0LL; + this_pool_start = ~0; + this_pool_end = 0; STAILQ_FOREACH (memhdr, &rmp->mem_list, next) { @@ -465,7 +465,7 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, if (((uword) memhdr->addr) < this_pool_start) this_pool_start = (uword) (memhdr->addr); } - ASSERT (this_pool_start < ~0ULL && this_pool_end > 0); + ASSERT (this_pool_start < ~0 && this_pool_end > 0); this_pool_size = this_pool_end - this_pool_start; if (CLIB_DEBUG > 1) diff --git a/src/plugins/ixge/ixge.c b/src/plugins/ixge/ixge.c index 08f5b692..0d287250 100644 --- a/src/plugins/ixge/ixge.c +++ b/src/plugins/ixge/ixge.c @@ -20,7 +20,7 @@ * Please use supported DPDK driver instead. */ -#if __x86_64__ +#if __x86_64__ || __i386__ #include #ifndef CLIB_HAVE_VEC128 @@ -2929,7 +2929,6 @@ ixge_set_next_node (ixge_rx_next_t next, char *name) break; } } -#endif /* *INDENT-OFF* */ VLIB_PLUGIN_REGISTER () = { @@ -2937,8 +2936,10 @@ VLIB_PLUGIN_REGISTER () = { .default_disabled = 1, .description = "Intel 82599 Family Native Driver (experimental)", }; +#endif /* *INDENT-ON* */ + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm.c b/src/svm/svm.c index 97add5a7..c96135cf 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -491,7 +491,7 @@ svm_map_region (svm_map_region_args_t * a) return (0); } - rp = mmap ((void *) a->baseva, a->size, + rp = mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if (rp == (svm_region_t *) MAP_FAILED) @@ -533,9 +533,10 @@ svm_map_region (svm_map_region_args_t * a) rp->virtual_size = a->size; rp->region_heap = - mheap_alloc_with_flags ((void *) (a->baseva + MMAP_PAGESIZE), - (a->pvt_heap_size != 0) ? - a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, + mheap_alloc_with_flags (uword_to_pointer + (a->baseva + MMAP_PAGESIZE, void *), + (a->pvt_heap_size != + 0) ? 
a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, MHEAP_FLAG_DISABLE_VM); oldheap = svm_push_pvt_heap (rp); @@ -661,7 +662,7 @@ svm_map_region (svm_map_region_args_t * a) a->size = rp->virtual_size; munmap (rp, MMAP_PAGESIZE); - rp = (void *) mmap ((void *) a->baseva, a->size, + rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if ((uword) rp == (uword) MAP_FAILED) diff --git a/src/svm/svmtool.c b/src/svm/svmtool.c index b3195514..01ae4221 100644 --- a/src/svm/svmtool.c +++ b/src/svm/svmtool.c @@ -172,7 +172,7 @@ svm_map_region_nolock (svm_map_region_args_t * a) a->size = rp->virtual_size; munmap (rp, MMAP_PAGESIZE); - rp = (void *) mmap ((void *) a->baseva, a->size, + rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if ((uword) rp == (uword) MAP_FAILED) @@ -401,7 +401,7 @@ repair (char *chroot_path, int crash_root_region) a->size = root_rp->virtual_size; munmap (root_rp, MMAP_PAGESIZE); - root_rp = (void *) mmap ((void *) a->baseva, a->size, + root_rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if ((uword) root_rp == (uword) MAP_FAILED) diff --git a/src/tools/vppapigen/gram.y b/src/tools/vppapigen/gram.y index 9cea6023..52bb65c5 100644 --- a/src/tools/vppapigen/gram.y +++ b/src/tools/vppapigen/gram.y @@ -53,9 +53,9 @@ stmt: flist defn {$$ = set_flags($1, $2);} | defn {$$ = $1;} ; -flist: flist flag {$$ = (YYSTYPE)(unsigned long long) - ((unsigned long long) $1 - | (unsigned long long) $2);} +flist: flist flag {$$ = (YYSTYPE)(unsigned long) + ((unsigned long) $1 + | (unsigned long) $2);} | flag {$$ = $1;} ; diff --git a/src/tools/vppapigen/node.c b/src/tools/vppapigen/node.c index 9f234037..15868ee5 100644 --- a/src/tools/vppapigen/node.c +++ b/src/tools/vppapigen/node.c @@ -397,7 +397,7 @@ void node_define_generate (node_t *this, enum passid which, FILE *fp) fprintf(fp, ",\n"); } indent_me(fp); - fprintf (fp, "{\"crc\" : \"0x%08x\"}\n", (u32)(u64)CDATA3); + fprintf (fp, "{\"crc\" : \"0x%08x\"}\n", (u32)(uword)CDATA3); indent -= 4; indent_me(fp); fprintf(fp, "]"); @@ -1219,7 +1219,7 @@ void generate_msg_name_crc_list (YYSTYPE a1, FILE *fp) if (!(np->flags & NODE_FLAG_TYPEONLY)) { fprintf (fp, "\\\n_(VL_API_%s, %s, %08x) ", uppercase (np->data[0]), (i8 *) np->data[0], - (u32)(u64)np->data[3]); + (u32)(uword)np->data[3]); } } np = np->peer; diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index b15fd6ce..22f246e5 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -262,7 +262,8 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * } utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->app_event_queue_address; + uword_to_pointer (mp->app_event_queue_address, + unix_shared_memory_queue_t *); utm->state = STATE_ATTACHED; } @@ -524,8 +525,9 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + utm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); /* * Setup session @@ -534,9 +536,9 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); 
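/*
 * Editorial aside -- not part of the original patch.  The 32-bit fixes in
 * this commit replace direct (u64) pointer casts with clib's
 * uword_to_pointer () and pointer_to_uword () so that shared-memory
 * addresses carried in u64 API fields stay valid when sizeof (void *) is
 * not sizeof (u64).  A minimal sketch of the two directions, assuming a
 * reply message 'mp' and a session 's' as in the surrounding hunks (the
 * local variable names rx and evtq are illustrative only):
 *
 *   svm_fifo_t *rx = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
 *   unix_shared_memory_queue_t *evtq =
 *     uword_to_pointer (mp->vpp_event_queue_address,
 *                       unix_shared_memory_queue_t *);
 *   ...
 *   mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo);
 *
 * Using uword as the intermediate type keeps the conversion lossless on
 * both 32-bit and 64-bit builds.
 */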
rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; @@ -858,16 +860,17 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) ip_str = format (0, "%U", format_ip46_address, &mp->ip, mp->is_ip4); clib_warning ("Accepted session from: %s:%d", ip_str, clib_net_to_host_u16 (mp->port)); - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + utm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); /* Allocate local session and set it up */ pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 266215c8..8fb12ed2 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -232,7 +232,8 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * } utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->app_event_queue_address; + uword_to_pointer (mp->app_event_queue_address, + unix_shared_memory_queue_t *); } static void @@ -581,7 +582,8 @@ send_reply: vec_free (a->segment_name); - client_q = (unix_shared_memory_queue_t *) mp->client_queue_address; + client_q = + uword_to_pointer (mp->client_queue_address, unix_shared_memory_queue_t *); vl_msg_api_send_shmem (client_q, (u8 *) & rmp); } @@ -608,14 +610,15 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) if (start_time == 0.0) start_time = clib_time_now (&utm->clib_time); - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + utm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); pool_get (utm->sessions, session); - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); rx_fifo->client_session_index = session - utm->sessions; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); tx_fifo->client_session_index = session - utm->sessions; session->server_rx_fifo = rx_fifo; diff --git a/src/vat/api_format.c b/src/vat/api_format.c index 28b227b4..495b660e 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -1037,7 +1037,7 @@ vl_api_cli_reply_t_handler (vl_api_cli_reply_t * mp) i32 retval = ntohl (mp->retval); vam->retval = retval; - vam->shmem_result = (u8 *) mp->reply_in_shmem; + vam->shmem_result = uword_to_pointer (mp->reply_in_shmem, u8 *); vam->result_ready = 1; } @@ -1058,7 +1058,7 @@ vl_api_cli_reply_t_handler_json (vl_api_cli_reply_t * mp) pthread_mutex_lock (&am->vlib_rp->mutex); oldheap = svm_push_data_heap (am->vlib_rp); - reply = (u8 *) (mp->reply_in_shmem); + reply = uword_to_pointer (mp->reply_in_shmem, u8 *); vec_free (reply); svm_pop_heap (oldheap); @@ -2405,7 +2405,7 @@ static void vl_api_get_node_graph_reply_t_handler if (retval != 0) return; - reply = (u8 *) (mp->reply_in_shmem); + reply = uword_to_pointer (mp->reply_in_shmem, u8 *); pvt_copy = vec_dup (reply); /* Toss the 
shared-memory original... */ @@ -2456,7 +2456,7 @@ static void vl_api_get_node_graph_reply_t_handler_json vat_json_object_add_int (&node, "retval", ntohl (mp->retval)); vat_json_object_add_uint (&node, "reply_in_shmem", mp->reply_in_shmem); - reply = (u8 *) (mp->reply_in_shmem); + reply = uword_to_pointer (mp->reply_in_shmem, u8 *); /* Toss the shared-memory original... */ pthread_mutex_lock (&am->vlib_rp->mutex); @@ -4959,7 +4959,7 @@ exec (vat_main_t * vam) svm_pop_heap (oldheap); pthread_mutex_unlock (&am->vlib_rp->mutex); - mp->cmd_in_shmem = (u64) cmd; + mp->cmd_in_shmem = pointer_to_uword (cmd); S (mp); timeout = vat_time_now (vam) + 10.0; diff --git a/src/vlib/threads.c b/src/vlib/threads.c index 4a111f8d..9ccfd3a2 100644 --- a/src/vlib/threads.c +++ b/src/vlib/threads.c @@ -1125,7 +1125,7 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input) VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu"); -#if !defined (__x86_64__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__) +#if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__) void __sync_fetch_and_add_8 (void) { diff --git a/src/vlibmemory/memory_client.c b/src/vlibmemory/memory_client.c index d48a4fa1..a162d6bb 100644 --- a/src/vlibmemory/memory_client.c +++ b/src/vlibmemory/memory_client.c @@ -137,7 +137,7 @@ vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) am->msg_index_by_name_and_crc = hash_create_string (0, sizeof (uword)); /* Recreate the vnet-side API message handler table */ - tblv = (u8 *) mp->message_table; + tblv = uword_to_pointer (mp->message_table, u8 *); serialize_open_vector (sm, tblv); unserialize_integer (sm, &nmsgs, sizeof (u32)); diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index 29a5c2c2..acba8b3f 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -216,7 +216,8 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp) am->shmem_hdr->application_restarts); rp->context = mp->context; rp->response = ntohl (rv); - rp->message_table = (u64) am->serialized_message_table_in_shmem; + rp->message_table = + pointer_to_uword (am->serialized_message_table_in_shmem); vl_msg_api_send_shmem (q, (u8 *) & rp); } diff --git a/src/vnet/devices/virtio/vhost-user.c b/src/vnet/devices/virtio/vhost-user.c index acc7bf82..6ccc0d87 100644 --- a/src/vnet/devices/virtio/vhost-user.c +++ b/src/vnet/devices/virtio/vhost-user.c @@ -719,7 +719,7 @@ vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui, } if (is_host_address) { - addr = (u64) map_user_mem (vui, (uword) addr); + addr = pointer_to_uword (map_user_mem (vui, (uword) addr)); } if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size)) { diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index ad44baa1..f74b0cfe 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -247,7 +247,7 @@ vnet_application_attach (vnet_app_attach_args_t * a) a->session_cb_vft))) return rv; - a->app_event_queue_address = (u64) app->event_queue; + a->app_event_queue_address = pointer_to_uword (app->event_queue); sm = segment_manager_get (app->first_segment_manager); segment_manager_get_segment_info (sm->segment_indices[0], &seg_name, &a->segment_size); diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 5a02a08e..8266922c 100755 --- a/src/vnet/session/session_api.c +++ 
b/src/vnet/session/session_api.c @@ -102,9 +102,9 @@ send_session_accept_callback (stream_session_t * s) tc = tp_vft->get_connection (s->connection_index, s->thread_index); mp->listener_handle = listen_session_get_handle (listener); mp->handle = stream_session_handle (s); - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->vpp_event_queue_address = (u64) vpp_queue; + mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo); + mp->server_tx_fifo = pointer_to_uword (s->server_tx_fifo); + mp->vpp_event_queue_address = pointer_to_uword (vpp_queue); mp->port = tc->rmt_port; mp->is_ip4 = tc->is_ip4; clib_memcpy (&mp->ip, &tc->rmt_ip, sizeof (tc->rmt_ip)); @@ -172,10 +172,10 @@ send_session_connected_callback (u32 app_index, u32 api_context, if (!is_fail) { vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; + mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo); + mp->server_tx_fifo = pointer_to_uword (s->server_tx_fifo); mp->handle = stream_session_handle (s); - mp->vpp_event_queue_address = (u64) vpp_queue; + mp->vpp_event_queue_address = pointer_to_uword (vpp_queue); mp->retval = 0; } else @@ -225,7 +225,7 @@ redirect_connect_callback (u32 server_api_client_index, void *mp_arg) } /* Tell the server the client's API queue address, so it can reply */ - mp->client_queue_address = (u64) client_q; + mp->client_queue_address = pointer_to_uword (client_q); app = application_lookup (mp->client_index); if (!app) { diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 32d69a96..6f890874 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -274,11 +274,12 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - tm->our_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; - - tm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + tm->our_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); + tm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); /* * Setup session @@ -288,9 +289,11 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session_index = session - tm->sessions; session->bytes_to_receive = session->bytes_to_send = tm->bytes_to_send; - session->server_rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + session->server_rx_fifo = + uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); session->server_rx_fifo->client_session_index = session_index; - session->server_tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + session->server_tx_fifo = + uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); session->server_tx_fifo->client_session_index = session_index; session->vpp_session_handle = mp->handle; @@ -321,7 +324,7 @@ create_api_loopback (tclient_main_t * tm) memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; mp->context = 0xFEEDFACE; - mp->input_queue = (u64) tm->vl_input_queue; + mp->input_queue = pointer_to_uword (tm->vl_input_queue); strncpy ((char *) mp->name, "tcp_tester", sizeof (mp->name) - 1); vl_api_memclnt_create_t_handler (mp); diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 34682699..621ce02a 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -244,7 +244,7 @@ create_api_loopback (vlib_main_t * vm) memset 
(mp, 0, sizeof (*mp)); mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; mp->context = 0xFEEDFACE; - mp->input_queue = (u64) bsm->vl_input_queue; + mp->input_queue = pointer_to_uword (bsm->vl_input_queue); strncpy ((char *) mp->name, "tcp_test_server", sizeof (mp->name) - 1); vl_api_memclnt_create_t_handler (mp); diff --git a/src/vppinfra/mheap.c b/src/vppinfra/mheap.c index b8828f9e..192732db 100644 --- a/src/vppinfra/mheap.c +++ b/src/vppinfra/mheap.c @@ -304,7 +304,7 @@ mheap_small_object_cache_mask (mheap_small_object_cache_t * c, uword bin) uword mask; /* $$$$ ELIOT FIXME: add Altivec version of this routine */ -#if !defined (CLIB_HAVE_VEC128) || defined (__ALTIVEC__) +#if !defined (CLIB_HAVE_VEC128) || defined (__ALTIVEC__) || defined (__i386__) mask = 0; #else u8x16 b = u8x16_splat (bin); -- cgit 1.2.3-korg From acd2a6a32cebab70a054e9335e446798538d0524 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 16 May 2017 17:41:34 -0400 Subject: VPP-846: tcp perf / scale / hardening Fix builtin server event queue handling Change-Id: I21b49c37188746cadb2fd9423291c5dc1335798c Signed-off-by: Dave Barach --- src/svm/svm_fifo.h | 4 +- src/vnet/session/node.c | 108 ++++++++++++++++++++++++++++++------------ src/vnet/session/session.c | 16 ++++--- src/vnet/session/session.h | 4 +- src/vnet/tcp/builtin_client.c | 2 +- src/vnet/tcp/builtin_server.c | 12 ++--- src/vnet/unix/gdb_funcs.c | 12 ++++- 7 files changed, 108 insertions(+), 50 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 36158dc5..69369163 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -44,7 +44,7 @@ typedef struct u32 nitems; CLIB_CACHE_LINE_ALIGN_MARK (end_cursize); - volatile u8 has_event; /**< non-zero if deq event exists */ + volatile u32 has_event; /**< non-zero if deq event exists */ /* Backpointers */ u32 master_session_index; @@ -103,7 +103,7 @@ always_inline void svm_fifo_unset_event (svm_fifo_t * f) { /* Probably doesn't need to be atomic. 
Still, better avoid surprises */ - __sync_lock_test_and_set (&f->has_event, 0); + __sync_lock_release (&f->has_event); } svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes); diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index fffc8eb3..3053ccc2 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -154,7 +154,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* Can't make any progress */ if (snd_space0 == 0 || snd_mss0 == 0) { - vec_add1 (smm->evts_partially_read[thread_index], *e0); + vec_add1 (smm->pending_event_vector[thread_index], *e0); return 0; } @@ -216,7 +216,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, { if (svm_fifo_set_event (s0->server_tx_fifo)) { - vec_add1 (smm->evts_partially_read[thread_index], *e0); + vec_add1 (smm->pending_event_vector[thread_index], *e0); } return -1; } @@ -324,7 +324,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* If we don't already have new event */ if (svm_fifo_set_event (s0->server_tx_fifo)) { - vec_add1 (smm->evts_partially_read[thread_index], *e0); + vec_add1 (smm->pending_event_vector[thread_index], *e0); } } return 0; @@ -338,7 +338,7 @@ dequeue_fail: if (svm_fifo_set_event (s0->server_tx_fifo)) { - vec_add1 (smm->evts_partially_read[thread_index], *e0); + vec_add1 (smm->pending_event_vector[thread_index], *e0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); _vec_len (smm->tx_buffers[thread_index]) += 1; @@ -388,12 +388,70 @@ session_event_get_session (session_fifo_event_t * e0, u8 thread_index) return s0; } +void +dump_thread_0_event_queue (void) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + vlib_main_t *vm = &vlib_global_main; + u32 my_thread_index = vm->thread_index; + session_fifo_event_t _e, *e = &_e; + stream_session_t *s0; + int i, index; + i8 *headp; + + unix_shared_memory_queue_t *q; + q = smm->vpp_event_queues[my_thread_index]; + + index = q->head; + + for (i = 0; i < q->cursize; i++) + { + headp = (i8 *) (&q->data[0] + q->elsize * index); + clib_memcpy (e, headp, q->elsize); + + switch (e->event_type) + { + case FIFO_EVENT_APP_TX: + s0 = session_event_get_session (e, my_thread_index); + fformat (stdout, "[%04d] TX session %d\n", i, s0->session_index); + break; + + case FIFO_EVENT_DISCONNECT: + s0 = stream_session_get_from_handle (e->session_handle); + fformat (stdout, "[%04d] disconnect session %d\n", i, + s0->session_index); + break; + + case FIFO_EVENT_BUILTIN_RX: + s0 = session_event_get_session (e, my_thread_index); + fformat (stdout, "[%04d] builtin_rx %d\n", i, s0->session_index); + break; + + case FIFO_EVENT_RPC: + fformat (stdout, "[%04d] RPC call %llx with %llx\n", + i, (u64) (e->rpc_args.fp), (u64) (e->rpc_args.arg)); + break; + + default: + fformat (stdout, "[%04d] unhandled event type %d\n", + i, e->event_type); + break; + } + + index++; + + if (index == q->maxsize) + index = 0; + } +} + static uword session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { session_manager_main_t *smm = vnet_get_session_manager_main (); - session_fifo_event_t *my_fifo_events, *e; + session_fifo_event_t *my_pending_event_vector, *e; + session_fifo_event_t *my_fifo_events; u32 n_to_dequeue, n_events; unix_shared_memory_queue_t *q; application_t *app; @@ -417,11 +475,13 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (q == 0)) return 0; + my_fifo_events = 
smm->free_event_vector[my_thread_index]; + /* min number of events we can dequeue without blocking */ n_to_dequeue = q->cursize; - my_fifo_events = smm->fifo_events[my_thread_index]; + my_pending_event_vector = smm->pending_event_vector[my_thread_index]; - if (n_to_dequeue == 0 && vec_len (my_fifo_events) == 0) + if (n_to_dequeue == 0 && vec_len (my_pending_event_vector) == 0) return 0; SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 0); @@ -431,7 +491,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, * over them again without dequeuing new ones. */ /* XXX: Block senders to sessions that can't keep up */ - if (vec_len (my_fifo_events) >= 100) + if (0 && vec_len (my_pending_event_vector) >= 100) { clib_warning ("too many fifo events unsolved"); goto skip_dequeue; @@ -452,7 +512,10 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, (void) pthread_cond_broadcast (&q->condvar); pthread_mutex_unlock (&q->mutex); - smm->fifo_events[my_thread_index] = my_fifo_events; + vec_append (my_fifo_events, my_pending_event_vector); + + _vec_len (my_pending_event_vector) = 0; + smm->pending_event_vector[my_thread_index] = my_pending_event_vector; skip_dequeue: n_events = vec_len (my_fifo_events); @@ -483,8 +546,10 @@ skip_dequeue: &n_tx_packets); /* Out of buffers */ if (rv < 0) - goto done; - + { + vec_add1 (smm->pending_event_vector[my_thread_index], *e0); + continue; + } break; case FIFO_EVENT_DISCONNECT: s0 = stream_session_get_from_handle (e0->session_handle); @@ -507,25 +572,8 @@ skip_dequeue: } } -done: - - /* Couldn't process all events. Probably out of buffers */ - if (PREDICT_FALSE (i < n_events)) - { - session_fifo_event_t *partially_read = - smm->evts_partially_read[my_thread_index]; - vec_add (partially_read, &my_fifo_events[i], n_events - i); - vec_free (my_fifo_events); - smm->fifo_events[my_thread_index] = partially_read; - smm->evts_partially_read[my_thread_index] = 0; - } - else - { - vec_free (smm->fifo_events[my_thread_index]); - smm->fifo_events[my_thread_index] = - smm->evts_partially_read[my_thread_index]; - smm->evts_partially_read[my_thread_index] = 0; - } + _vec_len (my_fifo_events) = 0; + smm->free_event_vector[my_thread_index] = my_fifo_events; vlib_node_increment_counter (vm, session_queue_node.index, SESSION_QUEUE_ERROR_TX, n_tx_packets); diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index d0792fae..c5aaf2e2 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -732,10 +732,6 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, /* Cleanup session lookup */ stream_session_half_open_table_del (smm, sst, tc); - - /* Add to established lookup table */ - handle = (((u64) tc->thread_index) << 32) | (u64) new_s->session_index; - stream_session_table_add_for_tc (tc, handle); } void @@ -1091,11 +1087,19 @@ session_manager_main_enable (vlib_main_t * vm) vec_validate (smm->sessions, num_threads - 1); vec_validate (smm->session_indices_to_enqueue_by_thread, num_threads - 1); vec_validate (smm->tx_buffers, num_threads - 1); - vec_validate (smm->fifo_events, num_threads - 1); - vec_validate (smm->evts_partially_read, num_threads - 1); + vec_validate (smm->pending_event_vector, num_threads - 1); + vec_validate (smm->free_event_vector, num_threads - 1); vec_validate (smm->current_enqueue_epoch, num_threads - 1); vec_validate (smm->vpp_event_queues, num_threads - 1); + for (i = 0; i < num_threads; i++) + { + vec_validate (smm->free_event_vector[i], 0); + _vec_len (smm->free_event_vector[i]) = 0; + 
vec_validate (smm->pending_event_vector[i], 0); + _vec_len (smm->pending_event_vector[i]) = 0; + } + #if SESSION_DBG vec_validate (smm->last_event_poll_by_thread, num_threads - 1); #endif diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index a08fa231..d60cca29 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -197,10 +197,10 @@ struct _session_manager_main u32 **tx_buffers; /** Per worker-thread vector of partially read events */ - session_fifo_event_t **evts_partially_read; + session_fifo_event_t **free_event_vector; /** per-worker active event vectors */ - session_fifo_event_t **fifo_events; + session_fifo_event_t **pending_event_vector; /** vpp fifo event queue */ unix_shared_memory_queue_t **vpp_event_queues; diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index a0e61f42..d13fd446 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -44,7 +44,7 @@ #undef vl_printfun #define TCP_BUILTIN_CLIENT_DBG (1) -#define TCP_BUILTIN_CLIENT_VPP_THREAD (1) +#define TCP_BUILTIN_CLIENT_VPP_THREAD (0) #define TCP_BUILTIN_CLIENT_PTHREAD (!TCP_BUILTIN_CLIENT_VPP_THREAD) static void diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 621ce02a..64fc4a71 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -141,16 +141,14 @@ builtin_server_rx_callback (stream_session_t * s) session_fifo_event_t evt; static int serial_number = 0; + tx_fifo = s->server_tx_fifo; + rx_fifo = s->server_rx_fifo; + max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo); if (PREDICT_FALSE (max_dequeue == 0)) - { - return 0; - } - - tx_fifo = s->server_tx_fifo; - rx_fifo = s->server_rx_fifo; + return 0; /* Number of bytes we're going to copy */ max_transfer = (max_dequeue < max_enqueue) ? max_dequeue : max_enqueue; @@ -175,8 +173,6 @@ builtin_server_rx_callback (stream_session_t * s) return 0; } - svm_fifo_unset_event (rx_fifo); - vec_validate (bsm->rx_buf, max_transfer - 1); _vec_len (bsm->rx_buf) = max_transfer; diff --git a/src/vnet/unix/gdb_funcs.c b/src/vnet/unix/gdb_funcs.c index cfb4b247..cca2e420 100644 --- a/src/vnet/unix/gdb_funcs.c +++ b/src/vnet/unix/gdb_funcs.c @@ -20,7 +20,7 @@ #include #include - +#include /** @@ -164,6 +164,16 @@ VLIB_CLI_COMMAND (show_gdb_funcs_command, static) = { .function = show_gdb_command_fn, }; +vnet_buffer_opaque_t *vb (void *vb_arg) +{ + vlib_buffer_t *b = (vlib_buffer_t *)vb_arg; + vnet_buffer_opaque_t *rv; + + rv = vnet_buffer (b); + + return rv; +} + /* Cafeteria plan, maybe you don't want these functions */ clib_error_t * gdb_func_init (vlib_main_t * vm) { return 0; } -- cgit 1.2.3-korg From bb292f4d3fbecfc6b1bac695f833b0da78369116 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Fri, 19 May 2017 09:49:19 -0700 Subject: Improve session debugging Also improves builtin client code. 
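The patch below threads a verbose level through the connection formatters using vppinfra's %U convention: a format function pulls its operands, including the verbose flag, off the caller-supplied va_list, and callers pass those operands inline after the function pointer. A minimal sketch of that convention follows; my_thing_t and format_my_thing are hypothetical names for illustration, not code from this commit.

#include <vppinfra/format.h>

typedef struct { u32 id; u32 state; } my_thing_t;

static u8 *
format_my_thing (u8 * s, va_list * args)
{
  my_thing_t *t = va_arg (*args, my_thing_t *);
  u32 verbose = va_arg (*args, u32);	/* caller passes the level explicitly */

  s = format (s, "thing %u", t->id);
  if (verbose)
    s = format (s, " state %u", t->state);
  return s;
}

/* usage: u8 *s = format (0, "%U", format_my_thing, &a_thing, 2); */
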
Change-Id: I8bca1aa632028f95c373726efb0abf2ee0eff414 Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 9 +++-- src/vnet/session/session_cli.c | 47 +++++++++++++-------- src/vnet/tcp/builtin_client.c | 73 ++++++++++++++++----------------- src/vnet/tcp/builtin_client.h | 3 -- src/vnet/tcp/tcp.c | 92 +++++++++++++++++++++++++++++++++++------- src/vnet/tcp/tcp.h | 4 +- src/vnet/tcp/tcp_input.c | 25 +++++++----- src/vnet/tcp/tcp_output.c | 12 +++++- 8 files changed, 176 insertions(+), 89 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 9b09d0c2..f13f6fea 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -54,19 +54,20 @@ format_svm_fifo (u8 * s, va_list * args) s = format (s, "cursize %u nitems %u has_event %d\n", f->cursize, f->nitems, f->has_event); - s = format (s, "head %d tail %d\n", f->head, f->tail); + s = format (s, " head %d tail %d\n", f->head, f->tail); if (verbose > 1) s = format - (s, "server session %d thread %d client session %d thread %d\n", + (s, " server session %d thread %d client session %d thread %d\n", f->master_session_index, f->master_thread_index, f->client_session_index, f->client_thread_index); if (verbose) { - s = format (s, "ooo pool %d active elts\n", + s = format (s, " ooo pool %d active elts\n", pool_elts (f->ooo_segments)); - s = format (s, "%U", format_ooo_list, f); + if (svm_fifo_has_ooo_data (f)) + s = format (s, " %U", format_ooo_list, f); } return s; } diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index f5990c81..509eedbb 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -30,33 +30,35 @@ format_stream_session (u8 * s, va_list * args) int verbose = va_arg (*args, int); transport_proto_vft_t *tp_vft; u8 *str = 0; - tp_vft = session_get_transport_vft (ss->session_type); - if (verbose) - str = format (0, "%-20llp%-20llp%-15lld", ss->server_rx_fifo, - ss->server_tx_fifo, stream_session_get_index (ss)); + if (verbose == 1) + str = format (0, "%-10u%-10u%-10lld", + svm_fifo_max_dequeue (ss->server_rx_fifo), + svm_fifo_max_enqueue (ss->server_tx_fifo), + stream_session_get_index (ss)); if (ss->session_state == SESSION_STATE_READY) { - s = format (s, "%-40U%v", tp_vft->format_connection, - ss->connection_index, ss->thread_index, str); + s = format (s, "%U", tp_vft->format_connection, ss->connection_index, + ss->thread_index, verbose); + if (verbose == 1) + s = format (s, "%v", str); } else if (ss->session_state == SESSION_STATE_LISTENING) { s = format (s, "%-40U%v", tp_vft->format_listener, ss->connection_index, str); } - else if (ss->session_state == SESSION_STATE_READY) + else if (ss->session_state == SESSION_STATE_CONNECTING) { - s = - format (s, "%-40U%v", tp_vft->format_half_open, ss->connection_index, - str); + s = format (s, "%-40U%v", tp_vft->format_half_open, + ss->connection_index, str); } else if (ss->session_state == SESSION_STATE_CLOSED) { s = format (s, "[CL] %-40U%v", tp_vft->format_connection, - ss->connection_index, ss->thread_index, str); + ss->connection_index, ss->thread_index, verbose, str); } else { @@ -85,7 +87,9 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { - if (unformat (input, "verbose")) + if (unformat (input, "verbose %d", &verbose)) + ; + else if (unformat (input, "verbose")) verbose = 1; else break; @@ -105,11 +109,11 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, i, pool_elts (pool)); if (verbose) { - 
if (once_per_pool) + if (once_per_pool && verbose == 1) { - str = format (str, "%-50s%-20s%-20s%-15s", - "Connection", "Rx fifo", "Tx fifo", - "Session Index"); + str = + format (str, "%-50s%-15s%-10s%-10s%-10s", "Connection", + "State", "Rx-f", "Tx-f", "S-idx"); vlib_cli_output (vm, "%v", str); vec_reset_length (str); once_per_pool = 0; @@ -118,7 +122,16 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, /* *INDENT-OFF* */ pool_foreach (s, pool, ({ - vlib_cli_output (vm, "%U", format_stream_session, s, verbose); + vec_reset_length (str); + str = format (str, "%U", format_stream_session, s, verbose); + if (verbose > 1) + { + str = format (str, " Rx fifo: %U", format_svm_fifo, + s->server_rx_fifo, 1); + str = format (str, " Tx fifo: %U", format_svm_fifo, + s->server_tx_fifo, 1); + } + vlib_cli_output (vm, "%v", str); })); /* *INDENT-ON* */ } diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index d13fd446..aaefa7eb 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -187,6 +187,9 @@ tclient_thread_fn (void *arg) clib_per_cpu_mheaps[vlib_get_thread_index ()] = clib_per_cpu_mheaps[0]; + vec_validate (session_indices, 0); + vec_reset_length (session_indices); + while (1) { /* Wait until we're told to get busy */ @@ -201,14 +204,12 @@ tclient_thread_fn (void *arg) tm->run_test = 0; rx_total = 0; - clib_warning ("Run %d iterations", tm->n_iterations); + clib_warning ("Start test..."); before = clib_time_now (&ttime); - for (i = 0; i < tm->n_iterations; i++) + do { - session_t *sp; - do { try_tx = try_rx = 0; @@ -229,24 +230,38 @@ tclient_thread_fn (void *arg) receive_test_chunk (tm, sp); try_rx = 1; } + else + { + /* Session is complete */ + vec_add1 (session_indices, sp - tm->sessions); + } })); + /* Terminate any completed sessions */ + if (PREDICT_FALSE (_vec_len(session_indices) != 0)) + { + for (i = 0; i < _vec_len (session_indices); i++) + { + sp = pool_elt_at_index (tm->sessions, session_indices[i]); + rx_total += sp->bytes_received; + dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); + memset (dmp, 0, sizeof (*dmp)); + dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); + dmp->client_index = tm->my_client_index; + dmp->handle = sp->vpp_session_handle; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); + pool_put (tm->sessions, sp); + } + _vec_len(session_indices) = 0; + } /* *INDENT-ON* */ } while (try_tx || try_rx); - - /* *INDENT-OFF* */ - pool_foreach (sp, tm->sessions, - ({ - rx_total += sp->bytes_received; - sp->bytes_received = 0; - sp->bytes_to_send = tm->bytes_to_send; - })); - /* *INDENT-ON* */ } + while (0); after = clib_time_now (&ttime); - clib_warning ("Done %d iterations, %lld bytes in %.2f secs", - tm->n_iterations, rx_total, (after - before)); + clib_warning ("Test complete %lld bytes in %.2f secs", + rx_total, (after - before)); if ((after - before) != 0.0) { clib_warning ("%.2f bytes/second full-duplex", @@ -255,28 +270,11 @@ tclient_thread_fn (void *arg) (((f64) rx_total * 8.0) / (after - before)) / 1e9); } - /* Disconnect sessions... 
*/ - vec_reset_length (session_indices); - - /* *INDENT-OFF* */ - pool_foreach (sp, tm->sessions, - ({ - vec_add1 (session_indices, sp - tm->sessions); - })); - /* *INDENT-ON* */ - - for (i = 0; i < vec_len (session_indices); i++) - { - sp = pool_elt_at_index (tm->sessions, session_indices[i]); - dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); - memset (dmp, 0, sizeof (*dmp)); - dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); - dmp->client_index = tm->my_client_index; - dmp->handle = sp->vpp_session_handle; - vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); - pool_put (tm->sessions, sp); - } + if (pool_elts (tm->sessions)) + clib_warning ("BUG: %d active sessions remain...", + pool_elts (tm->sessions)); } + while (0); /* NOTREACHED */ #if TCP_BUILTIN_CLIENT_PTHREAD return 0; @@ -511,15 +509,12 @@ test_tcp_clients_command_fn (vlib_main_t * vm, u64 tmp; tm->bytes_to_send = 8192; - tm->n_iterations = 1; vec_free (tm->connect_uri); while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "nclients %d", &n_clients)) ; - else if (unformat (input, "iterations %d", &tm->n_iterations)) - ; else if (unformat (input, "mbytes %lld", &tmp)) tm->bytes_to_send = tmp << 20; else if (unformat (input, "gbytes %lld", &tmp)) diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 756b3d18..57d112e6 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -93,9 +93,6 @@ typedef struct /* Signal variables */ volatile int run_test; - /* Number of iterations */ - int n_iterations; - /* Bytes to send */ u64 bytes_to_send; diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 36d85e46..9b7b2f65 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -461,13 +461,12 @@ const char *tcp_fsm_states[] = { u8 * format_tcp_state (u8 * s, va_list * args) { - tcp_state_t *state = va_arg (*args, tcp_state_t *); + u32 state = va_arg (*args, u32); - if (*state < TCP_N_STATES) - s = format (s, "%s", tcp_fsm_states[*state]); + if (state < TCP_N_STATES) + s = format (s, "%s", tcp_fsm_states[state]); else - s = format (s, "UNKNOWN (%d (0x%x))", *state, *state); - + s = format (s, "UNKNOWN (%d (0x%x))", state, state); return s; } @@ -503,7 +502,51 @@ format_tcp_timers (u8 * s, va_list * args) } u8 * -format_tcp_connection (u8 * s, va_list * args) +format_tcp_congestion_status (u8 * s, va_list * args) +{ + tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); + if (tcp_in_recovery (tc)) + s = format (s, "recovery"); + else if (tcp_in_fastrecovery (tc)) + s = format (s, "fastrecovery"); + else + s = format (s, "none"); + return s; +} + +u8 * +format_tcp_vars (u8 * s, va_list * args) +{ + tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); + s = format (s, " snd_una %u snd_nxt %u snd_una_max %u\n", + tc->snd_una - tc->iss, tc->snd_nxt - tc->iss, + tc->snd_una_max - tc->iss); + s = format (s, " rcv_nxt %u rcv_las %u\n", + tc->rcv_nxt - tc->irs, tc->rcv_las - tc->irs); + s = format (s, " snd_wnd %u rcv_wnd %u snd_wl1 %u snd_wl2 %u\n", + tc->snd_wnd, tc->rcv_wnd, tc->snd_wl1 - tc->irs, + tc->snd_wl2 - tc->iss); + s = format (s, " flight size %u send space %u rcv_wnd available %d\n", + tcp_flight_size (tc), tcp_snd_space (tc), + tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las)); + s = format (s, " cong %U ", format_tcp_congestion_status, tc); + s = format (s, "cwnd %u ssthresh %u rtx_bytes %u bytes_acked %u\n", + tc->cwnd, tc->ssthresh, tc->rtx_bytes, tc->bytes_acked); + s = format (s, " prev_ssthresh %u snd_congestion %u\n", 
tc->prev_ssthresh, + tc->snd_congestion - tc->iss); + s = format (s, " rto %u rto_boff %u srtt %u rttvar %u rtt_ts %u ", tc->rto, + tc->rto_boff, tc->srtt, tc->rttvar, tc->rtt_ts); + s = format (s, "rtt_seq %u\n", tc->rtt_seq); + if (scoreboard_first_hole (&tc->sack_sb)) + s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb); + if (vec_len (tc->snd_sacks)) + s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc); + + return s; +} + +u8 * +format_tcp_connection_id (u8 * s, va_list * args) { tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); if (!tc) @@ -527,11 +570,18 @@ format_tcp_connection (u8 * s, va_list * args) } u8 * -format_tcp_connection_verbose (u8 * s, va_list * args) +format_tcp_connection (u8 * s, va_list * args) { tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); - s = format (s, "%U %U %U", format_tcp_connection, tc, format_tcp_state, - &tc->state, format_tcp_timers, tc); + u32 verbose = va_arg (*args, u32); + + s = format (s, "%-50U", format_tcp_connection_id, tc); + if (verbose) + { + s = format (s, "%-15U", format_tcp_state, tc->state); + if (verbose > 1) + s = format (s, " %U\n%U", format_tcp_timers, tc, format_tcp_vars, tc); + } return s; } @@ -540,11 +590,12 @@ format_tcp_session (u8 * s, va_list * args) { u32 tci = va_arg (*args, u32); u32 thread_index = va_arg (*args, u32); + u32 verbose = va_arg (*args, u32); tcp_connection_t *tc; tc = tcp_connection_get (tci, thread_index); if (tc) - return format (s, "%U", format_tcp_connection, tc); + return format (s, "%U", format_tcp_connection, tc, verbose); else return format (s, "empty"); } @@ -554,7 +605,7 @@ format_tcp_listener_session (u8 * s, va_list * args) { u32 tci = va_arg (*args, u32); tcp_connection_t *tc = tcp_listener_get (tci); - return format (s, "%U", format_tcp_connection, tc); + return format (s, "%U", format_tcp_connection_id, tc); } u8 * @@ -562,7 +613,7 @@ format_tcp_half_open_session (u8 * s, va_list * args) { u32 tci = va_arg (*args, u32); tcp_connection_t *tc = tcp_half_open_connection_get (tci); - return format (s, "%U", format_tcp_connection, tc); + return format (s, "%U", format_tcp_connection_id, tc); } u8 * @@ -659,12 +710,18 @@ tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space) /** * Compute tx window session is allowed to fill. + * + * Takes into account available send space, snd_mss and the congestion + * state of the connection. If possible, the value returned is a multiple + * of snd_mss. 
+ * + * @param tc tcp connection + * @return number of bytes session is allowed to write */ u32 -tcp_session_send_space (transport_connection_t * trans_conn) +tcp_snd_space (tcp_connection_t * tc) { int snd_space; - tcp_connection_t *tc = (tcp_connection_t *) trans_conn; /* If we haven't gotten dupacks or if we did and have gotten sacked bytes * then we can still send */ @@ -697,6 +754,13 @@ tcp_session_send_space (transport_connection_t * trans_conn) return 0; } +u32 +tcp_session_send_space (transport_connection_t * trans_conn) +{ + tcp_connection_t *tc = (tcp_connection_t *) trans_conn; + return tcp_snd_space (tc); +} + u32 tcp_session_tx_fifo_offset (transport_connection_t * trans_conn) { diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 8d24a70b..c3ebe22b 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -387,8 +387,8 @@ void tcp_connection_cleanup (tcp_connection_t * tc); void tcp_connection_del (tcp_connection_t * tc); void tcp_connection_reset (tcp_connection_t * tc); +u8 *format_tcp_connection_id (u8 * s, va_list * args); u8 *format_tcp_connection (u8 * s, va_list * args); -u8 *format_tcp_connection_verbose (u8 * s, va_list * args); u8 *format_tcp_scoreboard (u8 * s, va_list * args); always_inline tcp_connection_t * @@ -481,6 +481,8 @@ tcp_available_snd_space (const tcp_connection_t * tc) return available_wnd - flight_size; } +u32 tcp_rcv_wnd_available (tcp_connection_t * tc); +u32 tcp_snd_space (tcp_connection_t * tc); void tcp_update_rcv_wnd (tcp_connection_t * tc); void tcp_retransmit_first_unacked (tcp_connection_t * tc); diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 9d3f4cc3..35bc9094 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -722,9 +722,11 @@ tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd) TCP_EVT_DBG (TCP_EVT_SND_WND, tc); /* Set probe timer if we just got 0 wnd */ - if (tc->snd_wnd < tc->snd_mss - && !tcp_timer_is_active (tc, TCP_TIMER_PERSIST)) - tcp_persist_timer_set (tc); + if (tc->snd_wnd < tc->snd_mss) + { + if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)) + tcp_persist_timer_set (tc); + } else tcp_persist_timer_reset (tc); } @@ -763,6 +765,7 @@ static void tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b) { u8 partial_ack; + u32 bytes_advanced; if (tcp_in_fastrecovery (tc)) { @@ -804,10 +807,14 @@ tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b) tc->rcv_dupacks = 0; if (tcp_in_recovery (tc)) { - tc->rtx_bytes -= clib_min (tc->bytes_acked, tc->rtx_bytes); + bytes_advanced = tc->bytes_acked + tc->sack_sb.snd_una_adv; + tc->rtx_bytes -= clib_min (bytes_advanced, tc->rtx_bytes); tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX); if (seq_geq (tc->snd_una, tc->snd_congestion)) - tcp_recovery_off (tc); + { + tc->rtx_bytes = 0; + tcp_recovery_off (tc); + } } } } @@ -1221,7 +1228,7 @@ format_tcp_rx_trace (u8 * s, va_list * args) s = format (s, "%U\n%U%U", format_tcp_header, &t->tcp_header, 128, format_white_space, indent, - format_tcp_connection_verbose, &t->tcp_connection); + format_tcp_connection, &t->tcp_connection, 1); return s; } @@ -1236,7 +1243,7 @@ format_tcp_rx_trace_short (u8 * s, va_list * args) s = format (s, "%d -> %d (%U)", clib_net_to_host_u16 (t->tcp_header.src_port), clib_net_to_host_u16 (t->tcp_header.dst_port), format_tcp_state, - &t->tcp_connection.state); + t->tcp_connection.state); return s; } @@ -2165,6 +2172,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, child0->c_rmt_port = th0->src_port; 
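/*
 * Illustrative aside, not part of this commit: the tcp_snd_space() doc
 * comment added earlier in this patch says the returned value is, when
 * possible, a multiple of snd_mss.  Rounding a send space down to whole
 * segments is simply
 *
 *     rounded = snd_space - (snd_space % snd_mss);
 *
 * e.g. 5000 bytes of space with a 1460-byte MSS rounds to 4380 (3 segments).
 */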
child0->c_is_ip4 = is_ip4; child0->c_thread_index = my_thread_index; + child0->state = TCP_STATE_SYN_RCVD; if (is_ip4) { @@ -2194,7 +2202,6 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, child0->irs = vnet_buffer (b0)->tcp.seq_number; child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1; child0->rcv_las = child0->rcv_nxt; - child0->state = TCP_STATE_SYN_RCVD; /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK} * segments are used to initialize PAWS. */ @@ -2450,7 +2457,7 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (error0 == TCP_ERROR_DISPATCH) clib_warning ("disp error state %U flags %U", - format_tcp_state, &state0, format_tcp_flags, + format_tcp_state, state0, format_tcp_flags, (int) flags0); } } diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index a462d8da..49fd6bef 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -59,7 +59,7 @@ format_tcp_tx_trace (u8 * s, va_list * args) s = format (s, "%U\n%U%U", format_tcp_header, &t->tcp_header, 128, format_white_space, indent, - format_tcp_connection_verbose, &t->tcp_connection); + format_tcp_connection, &t->tcp_connection, 1); return s; } @@ -1024,6 +1024,11 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, ASSERT (n_bytes != 0); b->current_length = n_bytes; tcp_push_hdr_i (tc, b, tc->state, 0); + + /* Don't count multiple retransmits of the same segment */ + if (tc->rto_boff > 1) + goto done; + tc->rtx_bytes += n_bytes; done: @@ -1103,7 +1108,9 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) if (n_bytes == 0) { - clib_warning ("could not retransmit"); + clib_warning ("could not retransmit anything"); + /* Try again eventually */ + tcp_retransmit_timer_set (tc); return; } } @@ -1203,6 +1210,7 @@ tcp_timer_persist_handler (u32 index) /* Nothing to send */ if (n_bytes == 0) { + clib_warning ("persist found nothing to send"); tcp_return_buffer (tm); return; } -- cgit 1.2.3-korg From 10d8cc6bf92851fcaec4a6b4c6d3554dc1eb2386 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 30 May 2017 09:30:07 -0400 Subject: Improve fifo allocator performance - add option to preallocate fifos in a segment - track active fifos with doubly linked list instead of vector - update udp redirect test code to read fifo pointers from API call instead of digging them up from fifo segment header - input-node based active-open session generator Change-Id: I804b81e99d95f8690d17e12660c6645995e28a9a Signed-off-by: Dave Barach Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/svm/svm_fifo.h | 5 +- src/svm/svm_fifo_segment.c | 146 ++++++++++++-- src/svm/svm_fifo_segment.h | 30 ++- src/svm/test_svm_fifo1.c | 23 ++- src/uri/uri_tcp_test.c | 1 + src/uri/uri_udp_test.c | 39 ++-- src/vlibapi/api.h | 3 + src/vlibmemory/memory_shared.c | 22 ++- src/vlibmemory/memory_vlib.c | 26 +++ src/vnet/session/application.c | 18 +- src/vnet/session/application_interface.h | 2 + src/vnet/session/segment_manager.c | 79 ++++---- src/vnet/session/segment_manager.h | 3 + src/vnet/session/session.c | 38 +++- src/vnet/session/session.h | 8 +- src/vnet/session/session_api.c | 2 +- src/vnet/tcp/builtin_client.c | 330 +++++++++++++++++++------------ src/vnet/tcp/builtin_client.h | 13 +- src/vnet/tcp/builtin_http_server.c | 1 + src/vnet/tcp/builtin_server.c | 9 +- src/vnet/udp/builtin_server.c | 1 + 21 files changed, 534 insertions(+), 265 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 69369163..9cb93ff4 100644 
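The svm_fifo.h and svm_fifo_segment.c changes below replace the per-segment fifo vector with free lists: one singly-linked free list per direction, threaded through the new svm_fifo_t next pointer, plus a doubly-linked list of active rx fifos so segment cleanup can still find the sessions it has to disconnect. A rough sketch of the push/pop discipline the diff implements (simplified, not the literal code):

/* free (or preallocate): push onto the per-direction free list */
f->next = fsh->free_fifos[list_index];
fsh->free_fifos[list_index] = f;

/* allocate: pop the head back off */
f = fsh->free_fifos[list_index];
if (f)
  fsh->free_fifos[list_index] = f->next;

This makes fifo allocation and free O(1), in place of the vec_delete() scan the old svm_fifo_segment_free_fifo() performed over the fsh->fifos vector.
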
--- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -38,7 +38,7 @@ format_function_t format_ooo_list; #define OOO_SEGMENT_INVALID_INDEX ((u32)~0) -typedef struct +typedef struct _svm_fifo { volatile u32 cursize; /**< current fifo size */ u32 nitems; @@ -62,7 +62,8 @@ typedef struct ooo_segment_t *ooo_segments; /**< Pool of ooo segments */ u32 ooos_list_head; /**< Head of out-of-order linked-list */ u32 ooos_newest; /**< Last segment to have been updated */ - + struct _svm_fifo *next; /**< next in freelist/active chain */ + struct _svm_fifo *prev; /**< prev in active chain */ CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 281fae27..eef2168c 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -17,6 +17,71 @@ svm_fifo_segment_main_t svm_fifo_segment_main; +static void +preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, + svm_fifo_segment_create_args_t * a) +{ + u32 rx_fifo_size, tx_fifo_size; + svm_fifo_t *f; + u8 *rx_fifo_space, *tx_fifo_space; + int i; + + /* Parameter check */ + if (a->rx_fifo_size == 0 || a->tx_fifo_size == 0 + || a->preallocated_fifo_pairs == 0) + return; + + /* Calculate space requirements */ + rx_fifo_size = (sizeof (*f) + a->rx_fifo_size) * a->preallocated_fifo_pairs; + tx_fifo_size = (sizeof (*f) + a->tx_fifo_size) * a->preallocated_fifo_pairs; + + /* Allocate rx fifo space. May fail. */ + rx_fifo_space = clib_mem_alloc_aligned_at_offset + (rx_fifo_size, CLIB_CACHE_LINE_BYTES, 0 /* align_offset */ , + 0 /* os_out_of_memory */ ); + + /* Same for TX */ + tx_fifo_space = clib_mem_alloc_aligned_at_offset + (tx_fifo_size, CLIB_CACHE_LINE_BYTES, 0 /* align_offset */ , + 0 /* os_out_of_memory */ ); + + /* Make sure it worked. Clean up if it didn't... */ + if (rx_fifo_space == 0 || tx_fifo_space == 0) + { + if (rx_fifo_space) + clib_mem_free (rx_fifo_space); + else + clib_warning ("rx fifo preallocation failure: size %d npairs %d", + a->rx_fifo_size, a->preallocated_fifo_pairs); + + if (tx_fifo_space) + clib_mem_free (tx_fifo_space); + else + clib_warning ("tx fifo preallocation failure: size %d nfifos %d", + a->tx_fifo_size, a->preallocated_fifo_pairs); + return; + } + + /* Carve rx fifo space */ + f = (svm_fifo_t *) rx_fifo_space; + for (i = 0; i < a->preallocated_fifo_pairs; i++) + { + f->next = fsh->free_fifos[FIFO_SEGMENT_RX_FREELIST]; + fsh->free_fifos[FIFO_SEGMENT_RX_FREELIST] = f; + rx_fifo_space += sizeof (*f) + a->rx_fifo_size; + f = (svm_fifo_t *) rx_fifo_space; + } + /* Carve tx fifo space */ + f = (svm_fifo_t *) tx_fifo_space; + for (i = 0; i < a->preallocated_fifo_pairs; i++) + { + f->next = fsh->free_fifos[FIFO_SEGMENT_TX_FREELIST]; + fsh->free_fifos[FIFO_SEGMENT_TX_FREELIST] = f; + tx_fifo_space += sizeof (*f) + a->tx_fifo_size; + f = (svm_fifo_t *) tx_fifo_space; + } +} + /** (master) create an svm fifo segment */ int svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) @@ -59,9 +124,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) s->h = fsh; fsh->segment_name = format (0, "%s%c", a->segment_name, 0); - /* Avoid vec_add1(...) failure when adding a fifo, etc. 
*/ - vec_validate (fsh->fifos, 64); - _vec_len (fsh->fifos) = 0; + preallocate_fifo_pairs (fsh, a); ssvm_pop_heap (oldheap); @@ -103,6 +166,8 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) s->h = fsh; fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + preallocate_fifo_pairs (fsh, a); + sh->ready = 1; a->new_segment_index = s - sm->segments; return (0); @@ -154,7 +219,8 @@ svm_fifo_segment_delete (svm_fifo_segment_private_t * s) svm_fifo_t * svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, - u32 data_size_in_bytes) + u32 data_size_in_bytes, + svm_fifo_segment_freelist_t list_index) { ssvm_shared_header_t *sh; svm_fifo_segment_header_t *fsh; @@ -167,6 +233,29 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, ssvm_lock (sh, 1, 0); oldheap = ssvm_push_heap (sh); + switch (list_index) + { + case FIFO_SEGMENT_RX_FREELIST: + case FIFO_SEGMENT_TX_FREELIST: + f = fsh->free_fifos[list_index]; + if (f) + { + fsh->free_fifos[list_index] = f->next; + /* (re)initialize the fifo, as in svm_fifo_create */ + memset (f, 0, sizeof (*f)); + f->nitems = data_size_in_bytes; + f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; + goto found; + } + /* FALLTHROUGH */ + case FIFO_SEGMENT_FREELIST_NONE: + break; + + default: + clib_warning ("ignore bogus freelist %d", list_index); + break; + } + /* Note: this can fail, in which case: create another segment */ f = svm_fifo_create (data_size_in_bytes); if (PREDICT_FALSE (f == 0)) @@ -176,37 +265,62 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, return (0); } - vec_add1 (fsh->fifos, f); +found: + /* If rx_freelist add to active fifos list. When cleaning up segment, + * we need a list of active sessions that should be disconnected. Since + * both rx and tx fifos keep pointers to the session, it's enough to track + * only one. 
*/ + if (list_index == FIFO_SEGMENT_RX_FREELIST) + { + if (fsh->fifos) + { + fsh->fifos->prev = f; + f->next = fsh->fifos; + } + fsh->fifos = f; + } + ssvm_pop_heap (oldheap); ssvm_unlock (sh); return (f); } void -svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f) +svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, + svm_fifo_segment_freelist_t list_index) { ssvm_shared_header_t *sh; svm_fifo_segment_header_t *fsh; void *oldheap; - int i; sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; ssvm_lock (sh, 1, 0); oldheap = ssvm_push_heap (sh); - for (i = 0; i < vec_len (fsh->fifos); i++) + + switch (list_index) { - if (fsh->fifos[i] == f) - { - vec_delete (fsh->fifos, 1, i); - goto found; - } + case FIFO_SEGMENT_RX_FREELIST: + /* Remove from active list */ + if (f->prev) + f->prev->next = f->next; + if (f->next) + f->next->prev = f->prev; + /* FALLTHROUGH */ + case FIFO_SEGMENT_TX_FREELIST: + /* Add to free list */ + f->next = fsh->free_fifos[list_index]; + fsh->free_fifos[list_index] = f; + /* FALLTHROUGH */ + case FIFO_SEGMENT_FREELIST_NONE: + break; + + default: + clib_warning ("ignore bogus freelist %d", list_index); + break; } - clib_warning ("fifo 0x%llx not found in fifo table...", f); -found: - clib_mem_free (f); ssvm_pop_heap (oldheap); ssvm_unlock (sh); } diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 4218013a..31e14db5 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -19,10 +19,19 @@ #include #include +typedef enum +{ + FIFO_SEGMENT_FREELIST_NONE = -1, + FIFO_SEGMENT_RX_FREELIST = 0, + FIFO_SEGMENT_TX_FREELIST, + FIFO_SEGMENT_N_FREELISTS +} svm_fifo_segment_freelist_t; + typedef struct { - volatile svm_fifo_t **fifos; - u8 *segment_name; + svm_fifo_t *fifos; /**< Linked list of active RX fifos */ + u8 *segment_name; /**< Segment name */ + svm_fifo_t *free_fifos[FIFO_SEGMENT_N_FREELISTS]; /**< Free lists */ } svm_fifo_segment_header_t; typedef struct @@ -49,6 +58,9 @@ typedef struct char *segment_name; u32 segment_size; u32 new_segment_index; + u32 rx_fifo_size; + u32 tx_fifo_size; + u32 preallocated_fifo_pairs; } svm_fifo_segment_create_args_t; static inline svm_fifo_segment_private_t * @@ -61,13 +73,13 @@ svm_fifo_get_segment (u32 segment_index) static inline u8 svm_fifo_segment_has_fifos (svm_fifo_segment_private_t * fifo_segment) { - return vec_len ((svm_fifo_t **) fifo_segment->h->fifos) != 0; + return fifo_segment->h->fifos != 0; } -static inline svm_fifo_t ** -svm_fifo_segment_get_fifos (svm_fifo_segment_private_t * fifo_segment) +static inline svm_fifo_t * +svm_fifo_segment_get_fifo_list (svm_fifo_segment_private_t * fifo_segment) { - return (svm_fifo_t **) fifo_segment->h->fifos; + return fifo_segment->h->fifos; } #define foreach_ssvm_fifo_segment_api_error \ @@ -87,9 +99,11 @@ int svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a); void svm_fifo_segment_delete (svm_fifo_segment_private_t * s); svm_fifo_t *svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, - u32 data_size_in_bytes); + u32 data_size_in_bytes, + svm_fifo_segment_freelist_t index); void svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, - svm_fifo_t * f); + svm_fifo_t * f, + svm_fifo_segment_freelist_t index); void svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds); u32 svm_fifo_segment_index (svm_fifo_segment_private_t * s); diff --git a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c index 398dd6d7..63b4a9b7 100644 --- a/src/svm/test_svm_fifo1.c 
+++ b/src/svm/test_svm_fifo1.c @@ -30,6 +30,9 @@ hello_world (int verbose) a->segment_name = "fifo-test1"; a->segment_size = 256 << 10; + a->rx_fifo_size = 4096; + a->tx_fifo_size = 4096; + a->preallocated_fifo_pairs = 4; rv = svm_fifo_segment_create (a); @@ -38,7 +41,7 @@ hello_world (int verbose) sp = svm_fifo_get_segment (a->new_segment_index); - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); @@ -63,7 +66,7 @@ hello_world (int verbose) else error = clib_error_return (0, "data test FAIL!"); - svm_fifo_segment_free_fifo (sp, f); + svm_fifo_segment_free_fifo (sp, f, FIFO_SEGMENT_RX_FREELIST); return error; } @@ -91,7 +94,7 @@ master (int verbose) sp = svm_fifo_get_segment (a->new_segment_index); - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); @@ -129,7 +132,7 @@ mempig (int verbose) for (i = 0; i < 1000; i++) { - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) break; vec_add1 (flist, f); @@ -139,14 +142,14 @@ mempig (int verbose) for (i = 0; i < vec_len (flist); i++) { f = flist[i]; - svm_fifo_segment_free_fifo (sp, f); + svm_fifo_segment_free_fifo (sp, f, FIFO_SEGMENT_RX_FREELIST); } _vec_len (flist) = 0; for (i = 0; i < 1000; i++) { - f = svm_fifo_segment_alloc_fifo (sp, 4096); + f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); if (f == 0) break; vec_add1 (flist, f); @@ -156,7 +159,7 @@ mempig (int verbose) for (i = 0; i < vec_len (flist); i++) { f = flist[i]; - svm_fifo_segment_free_fifo (sp, f); + svm_fifo_segment_free_fifo (sp, f, FIFO_SEGMENT_RX_FREELIST); } return 0; @@ -185,7 +188,7 @@ offset (int verbose) sp = svm_fifo_get_segment (a->new_segment_index); - f = svm_fifo_segment_alloc_fifo (sp, 200 << 10); + f = svm_fifo_segment_alloc_fifo (sp, 200 << 10, FIFO_SEGMENT_RX_FREELIST); if (f == 0) return clib_error_return (0, "svm_fifo_segment_alloc_fifo failed"); @@ -226,9 +229,9 @@ slave (int verbose) { svm_fifo_segment_create_args_t _a, *a = &_a; svm_fifo_segment_private_t *sp; - svm_fifo_segment_header_t *fsh; svm_fifo_t *f; ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; int rv; u8 *test_data; u8 *retrieved_data = 0; @@ -248,7 +251,7 @@ slave (int verbose) fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; /* might wanna wait.. 
*/ - f = (svm_fifo_t *) fsh->fifos[0]; + f = fsh->fifos; /* Lazy bastards united */ test_data = format (0, "Hello world%c", 0); diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index 22f246e5..e201a359 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -193,6 +193,7 @@ application_send_attach (uri_tcp_test_main_t * utm) bmp->context = ntohl (0xfeedface); bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 8fb12ed2..45ad35a4 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -176,6 +176,7 @@ application_send_attach (uri_udp_test_main_t * utm) bmp->context = ntohl (0xfeedface); bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; @@ -522,7 +523,7 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) svm_fifo_segment_private_t *seg; unix_shared_memory_queue_t *client_q; vl_api_connect_uri_reply_t *rmp; - session_t *session; + session_t *session = 0; int rv = 0; /* Create the segment */ @@ -545,17 +546,12 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) pool_get (utm->sessions, session); - /* - * By construction the master's idea of the rx fifo ends up in - * fsh->fifos[0], and the master's idea of the tx fifo ends up in - * fsh->fifos[1]. 
- */ - session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); + session->server_rx_fifo = svm_fifo_segment_alloc_fifo + (utm->seg, 128 * 1024, FIFO_SEGMENT_RX_FREELIST); ASSERT (session->server_rx_fifo); - session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); + session->server_tx_fifo = svm_fifo_segment_alloc_fifo + (utm->seg, 128 * 1024, FIFO_SEGMENT_TX_FREELIST); ASSERT (session->server_tx_fifo); session->server_rx_fifo->master_session_index = session - utm->sessions; @@ -578,6 +574,12 @@ send_reply: rmp->context = mp->context; rmp->retval = ntohl (rv); rmp->segment_name_length = vec_len (a->segment_name); + if (session) + { + rmp->server_rx_fifo = pointer_to_uword (session->server_rx_fifo); + rmp->server_tx_fifo = pointer_to_uword (session->server_tx_fifo); + } + memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); vec_free (a->segment_name); @@ -689,9 +691,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) svm_fifo_segment_create_args_t _a, *a = &_a; u32 segment_index; session_t *session; - ssvm_shared_header_t *sh; svm_fifo_segment_private_t *seg; - svm_fifo_segment_header_t *fsh; int rv; memset (a, 0, sizeof (*a)); @@ -707,22 +707,19 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - segment_index = vec_len (sm->segments) - 1; + segment_index = a->new_segment_index; vec_add2 (utm->seg, seg, 1); - memcpy (seg, sm->segments + segment_index, sizeof (*seg)); - sh = seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - - while (vec_len (fsh->fifos) < 2) - sleep (1); + sleep (1); pool_get (utm->sessions, session); utm->cut_through_session_index = session - utm->sessions; - session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; + session->server_rx_fifo = uword_to_pointer (mp->server_rx_fifo, + svm_fifo_t *); ASSERT (session->server_rx_fifo); - session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; + session->server_tx_fifo = uword_to_pointer (mp->server_tx_fifo, + svm_fifo_t *); ASSERT (session->server_tx_fifo); } diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index 3403e1c6..0e2c2101 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -193,6 +193,9 @@ typedef struct i32 vlib_signal; + /* vlib input queue length */ + u32 vlib_input_queue_length; + /* client side message index hash table */ uword *msg_index_by_name_and_crc; diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index aea90330..41aa1231 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -104,8 +104,17 @@ vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null) if (now - rv->gc_mark_timestamp > 10) { if (CLIB_DEBUG > 0) - clib_warning ("garbage collect pool %d ring %d index %d", - pool, i, q->head); + { + u16 *msg_idp, msg_id; + clib_warning + ("garbage collect pool %d ring %d index %d", pool, i, + q->head); + msg_idp = (u16 *) (rv->data); + msg_id = clib_net_to_host_u16 (*msg_idp); + if (msg_id < vec_len (api_main.msg_names)) + clib_warning ("msg id %d name %s", (u32) msg_id, + api_main.msg_names[msg_id]); + } shmem_hdr->garbage_collects++; goto collected; } @@ -330,6 +339,7 @@ vl_map_shmem (const char *region_name, int is_vlib) api_main_t *am = &api_main; int i; struct timespec ts, tsrem; + u32 vlib_input_queue_length; if (is_vlib == 0) svm_region_init_chroot (am->root_path); @@ -449,9 +459,13 @@ vl_map_shmem (const char *region_name, int is_vlib) shmem_hdr->version = VL_SHM_VERSION; /* vlib main input 
queue */ + vlib_input_queue_length = 1024; + if (am->vlib_input_queue_length) + vlib_input_queue_length = am->vlib_input_queue_length; + shmem_hdr->vl_input_queue = - unix_shared_memory_queue_init (1024, sizeof (uword), getpid (), - am->vlib_signal); + unix_shared_memory_queue_init (vlib_input_queue_length, sizeof (uword), + getpid (), am->vlib_signal); /* Set up the msg ring allocator */ #define _(sz,n) \ diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index e5d88732..004a9974 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -1917,6 +1917,32 @@ api_config_fn (vlib_main_t * vm, unformat_input_t * input) VLIB_CONFIG_FUNCTION (api_config_fn, "api-trace"); +static clib_error_t * +api_queue_config_fn (vlib_main_t * vm, unformat_input_t * input) +{ + api_main_t *am = &api_main; + u32 nitems; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "length %d", &nitems) || + (unformat (input, "len %d", &nitems))) + { + if (nitems >= 1024) + am->vlib_input_queue_length = nitems; + else + clib_warning ("vlib input queue length %d too small, ignored", + nitems); + } + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_CONFIG_FUNCTION (api_queue_config_fn, "api-queue"); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index ccf9837f..c679b1f5 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -160,6 +160,7 @@ application_init (application_t * app, u32 api_client_index, u64 * options, props->rx_fifo_size = options[SESSION_OPTIONS_RX_FIFO_SIZE]; props->tx_fifo_size = options[SESSION_OPTIONS_TX_FIFO_SIZE]; props->add_segment = props->add_segment_size != 0; + props->preallocated_fifo_pairs = options[APP_OPTIONS_PREALLOC_FIFO_PAIRS]; props->use_private_segment = options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_BUILTIN_APP; @@ -395,7 +396,7 @@ application_format_connects (application_t * app, int verbose) vlib_main_t *vm = vlib_get_main (); segment_manager_t *sm; u8 *app_name, *s = 0; - int i, j; + int j; /* Header */ if (app == 0) @@ -419,22 +420,16 @@ application_format_connects (application_t * app, int verbose) for (j = 0; j < vec_len (sm->segment_indices); j++) { svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; + svm_fifo_t *fifo; u8 *str; fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); - fifos = svm_fifo_segment_get_fifos (fifo_segment); - for (i = 0; i < vec_len (fifos); i++) + fifo = svm_fifo_segment_get_fifo_list (fifo_segment); + while (fifo) { - svm_fifo_t *fifo; u32 session_index, thread_index; stream_session_t *session; - /* There are 2 fifos/session. Avoid printing twice. 
*/ - if (i % 2) - continue; - - fifo = fifos[i]; session_index = fifo->master_session_index; thread_index = fifo->master_thread_index; @@ -448,9 +443,10 @@ application_format_connects (application_t * app, int verbose) s = format (s, "%-40s%-20s", str, app_name); vlib_cli_output (vm, "%v", s); - vec_reset_length (s); vec_free (str); + + fifo = fifo->next; } vec_free (s); } diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 7d924c14..4d6f9def 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -119,10 +119,12 @@ typedef enum { APP_EVT_QUEUE_SIZE, APP_OPTIONS_FLAGS, + APP_OPTIONS_PREALLOC_FIFO_PAIRS, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, SESSION_OPTIONS_RX_FIFO_SIZE, SESSION_OPTIONS_TX_FIFO_SIZE, + SESSION_OPTIONS_PREALLOCATED_FIFO_PAIRS, SESSION_OPTIONS_ACCEPT_COOKIE, SESSION_OPTIONS_N_OPTIONS } app_attach_options_index_t; diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index b13df21c..caf8eaa3 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -58,6 +58,9 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, ca->segment_name = (char *) segment_name; ca->segment_size = segment_size; + ca->rx_fifo_size = sm->properties->rx_fifo_size; + ca->tx_fifo_size = sm->properties->tx_fifo_size; + ca->preallocated_fifo_pairs = sm->properties->preallocated_fifo_pairs; rv = svm_fifo_segment_create (ca); if (rv) @@ -104,7 +107,8 @@ session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size) } static void -segment_manager_alloc_process_private_segment () + segment_manager_alloc_process_private_segment + (segment_manager_properties_t * props) { svm_fifo_segment_create_args_t _a, *a = &_a; @@ -115,6 +119,9 @@ segment_manager_alloc_process_private_segment () a->segment_name = "process-private-segment"; a->segment_size = ~0; a->new_segment_index = ~0; + a->rx_fifo_size = props->rx_fifo_size; + a->tx_fifo_size = props->tx_fifo_size; + a->preallocated_fifo_pairs = props->preallocated_fifo_pairs; if (svm_fifo_segment_create_process_private (a)) clib_warning ("Failed to create process private segment"); @@ -151,7 +158,7 @@ segment_manager_init (segment_manager_t * sm, else { if (private_segment_index == ~0) - segment_manager_alloc_process_private_segment (); + segment_manager_alloc_process_private_segment (properties); ASSERT (private_segment_index != ~0); vec_add1 (sm->segment_indices, private_segment_index); } @@ -170,74 +177,46 @@ segment_manager_init (segment_manager_t * sm, void segment_manager_del (segment_manager_t * sm) { - u32 *deleted_sessions = 0; - u32 *deleted_thread_indices = 0; - int i, j; + int j; /* Across all fifo segments used by the server */ for (j = 0; j < vec_len (sm->segment_indices); j++) { svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; + svm_fifo_t *fifo; + /* Vector of fifos allocated in the segment */ fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); - fifos = svm_fifo_segment_get_fifos (fifo_segment); + fifo = svm_fifo_segment_get_fifo_list (fifo_segment); /* * Remove any residual sessions from the session lookup table * Don't bother deleting the individual fifos, we're going to * throw away the fifo segment in a minute. 
*/ - for (i = 0; i < vec_len (fifos); i++) + while (fifo) { - svm_fifo_t *fifo; u32 session_index, thread_index; stream_session_t *session; - fifo = fifos[i]; session_index = fifo->master_session_index; thread_index = fifo->master_thread_index; session = stream_session_get (session_index, thread_index); - /* Add to the deleted_sessions vector (once!) */ - if (!session->is_deleted) - { - session->is_deleted = 1; - vec_add1 (deleted_sessions, session_index); - vec_add1 (deleted_thread_indices, thread_index); - } - } - - for (i = 0; i < vec_len (deleted_sessions); i++) - { - stream_session_t *session; - session = stream_session_get (deleted_sessions[i], - deleted_thread_indices[i]); - /* Instead of directly removing the session call disconnect */ session_send_session_evt_to_thread (stream_session_handle (session), FIFO_EVENT_DISCONNECT, - deleted_thread_indices[i]); - - /* - stream_session_table_del (smm, session); - pool_put(smm->sessions[deleted_thread_indices[i]], session); - */ + thread_index); + fifo = fifo->next; } - vec_reset_length (deleted_sessions); - vec_reset_length (deleted_thread_indices); - - /* Instead of removing the segment, test when removing the session if - * the segment can be removed + /* Instead of removing the segment, test when cleaning up disconnected + * sessions if the segment can be removed. */ - /* svm_fifo_segment_delete (fifo_segment); */ } clib_spinlock_free (&sm->lockp); - vec_free (deleted_sessions); - vec_free (deleted_thread_indices); pool_put (segment_managers, sm); } @@ -281,20 +260,27 @@ again: *fifo_segment_index = sm->segment_indices[i]; fifo_segment = svm_fifo_get_segment (*fifo_segment_index); + /* FC: cleanup, make sure sm->properties->xxx_fifo_size always set */ fifo_size = sm->properties->rx_fifo_size; fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + *server_rx_fifo = + svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size, + FIFO_SEGMENT_RX_FREELIST); + /* FC: cleanup, make sure sm->properties->xxx_fifo_size always set */ fifo_size = sm->properties->tx_fifo_size; fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + *server_tx_fifo = + svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size, + FIFO_SEGMENT_TX_FREELIST); if (*server_rx_fifo == 0) { /* This would be very odd, but handle it... 
*/ if (*server_tx_fifo != 0) { - svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo, + FIFO_SEGMENT_TX_FREELIST); *server_tx_fifo = 0; } continue; @@ -303,7 +289,8 @@ again: { if (*server_rx_fifo != 0) { - svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo, + FIFO_SEGMENT_RX_FREELIST); *server_rx_fifo = 0; } continue; @@ -365,8 +352,10 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, return; fifo_segment = svm_fifo_get_segment (svm_segment_index); - svm_fifo_segment_free_fifo (fifo_segment, rx_fifo); - svm_fifo_segment_free_fifo (fifo_segment, tx_fifo); + svm_fifo_segment_free_fifo (fifo_segment, rx_fifo, + FIFO_SEGMENT_RX_FREELIST); + svm_fifo_segment_free_fifo (fifo_segment, tx_fifo, + FIFO_SEGMENT_TX_FREELIST); /* Remove segment only if it holds no fifos and not the first */ if (sm->segment_indices[0] != svm_segment_index diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h index 2710bb54..d4b73208 100644 --- a/src/vnet/session/segment_manager.h +++ b/src/vnet/session/segment_manager.h @@ -28,6 +28,9 @@ typedef struct _segment_manager_properties u32 rx_fifo_size; u32 tx_fifo_size; + /** Preallocated pool sizes */ + u32 preallocated_fifo_pairs; + /** Configured additional segment size */ u32 add_segment_size; diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index c5aaf2e2..02b0cced 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -1048,19 +1048,21 @@ session_vpp_event_queue_allocate (session_manager_main_t * smm, { api_main_t *am = &api_main; void *oldheap; + u32 event_queue_length = 2048; if (smm->vpp_event_queues[thread_index] == 0) { /* Allocate event fifo in the /vpe-api shared-memory segment */ oldheap = svm_push_data_heap (am->vlib_rp); + if (smm->configured_event_queue_length) + event_queue_length = smm->configured_event_queue_length; + smm->vpp_event_queues[thread_index] = - unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , - sizeof (session_fifo_event_t), - 0 /* consumer pid */ , - 0 - /* (do not) send signal when queue non-empty */ - ); + unix_shared_memory_queue_init + (event_queue_length, + sizeof (session_fifo_event_t), 0 /* consumer pid */ , + 0 /* (do not) send signal when queue non-empty */ ); svm_pop_heap (oldheap); } @@ -1187,6 +1189,30 @@ session_manager_main_init (vlib_main_t * vm) } VLIB_INIT_FUNCTION (session_manager_main_init) + static clib_error_t *session_config_fn (vlib_main_t * vm, + unformat_input_t * input) +{ + session_manager_main_t *smm = &session_manager_main; + u32 nitems; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "event-queue-length %d", &nitems)) + { + if (nitems >= 2048) + smm->configured_event_queue_length = nitems; + else + clib_warning ("event queue length %d too small, ignored", nitems); + } + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_CONFIG_FUNCTION (session_config_fn, "session"); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index d60cca29..a8728649 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -125,14 +125,11 @@ typedef struct _stream_session_t u8 thread_index; - /** used during unbind processing */ - u8 is_deleted; - /** To avoid n**2 "one event per frame" check 
*/ u8 enqueue_epoch; /** Pad to a multiple of 8 octets */ - u8 align_pad[2]; + u8 align_pad[4]; /** svm segment index where fifos were allocated */ u32 svm_segment_index; @@ -205,6 +202,9 @@ struct _session_manager_main /** vpp fifo event queue */ unix_shared_memory_queue_t **vpp_event_queues; + /** vpp fifo event queue configured length */ + u32 configured_event_queue_length; + /** Unique segment name counter */ u32 unique_segment_name_counter; diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 8c073a08..98d6946a 100755 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -419,7 +419,7 @@ done: REPLY_MACRO (VL_API_UNBIND_URI_REPLY); } -static void +void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { vl_api_connect_uri_reply_t *rmp; diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index aaefa7eb..768f0c3c 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -44,8 +44,6 @@ #undef vl_printfun #define TCP_BUILTIN_CLIENT_DBG (1) -#define TCP_BUILTIN_CLIENT_VPP_THREAD (0) -#define TCP_BUILTIN_CLIENT_PTHREAD (!TCP_BUILTIN_CLIENT_VPP_THREAD) static void send_test_chunk (tclient_main_t * tm, session_t * s) @@ -156,131 +154,76 @@ receive_test_chunk (tclient_main_t * tm, session_t * s) } } -#if TCP_BUILTIN_CLIENT_VPP_THREAD -#define THREAD_PROTOTYPE static void -#else -#define THREAD_PROTOTYPE static void * -#endif - -THREAD_PROTOTYPE -tclient_thread_fn (void *arg) +static uword +builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) { tclient_main_t *tm = &tclient_main; + int my_thread_index = vlib_get_thread_index (); vl_api_disconnect_session_t *dmp; session_t *sp; - struct timespec ts, tsrem; int i; - int try_tx, try_rx; - u32 *session_indices = 0; - clib_time_t ttime; - f64 before, after; - u64 rx_total; + int delete_session; + u32 *connection_indices; - clib_time_init (&ttime); + connection_indices = tm->connection_index_by_thread[my_thread_index]; - /* stats thread wants no signals. 
*/ - { - sigset_t s; - sigfillset (&s); - pthread_sigmask (SIG_SETMASK, &s, 0); - } + if (tm->run_test == 0 || vec_len (connection_indices) == 0) + return 0; - clib_per_cpu_mheaps[vlib_get_thread_index ()] = clib_per_cpu_mheaps[0]; + for (i = 0; i < vec_len (connection_indices); i++) + { + delete_session = 1; - vec_validate (session_indices, 0); - vec_reset_length (session_indices); + sp = pool_elt_at_index (tm->sessions, connection_indices[i]); - while (1) - { - /* Wait until we're told to get busy */ - while (tm->run_test == 0 - || (tm->ready_connections != tm->expected_connections)) + if (sp->bytes_to_send > 0) { - ts.tv_sec = 0; - ts.tv_nsec = 100000000; - while (nanosleep (&ts, &tsrem) < 0) - ts = tsrem; + send_test_chunk (tm, sp); + delete_session = 0; } - tm->run_test = 0; - rx_total = 0; - - clib_warning ("Start test..."); - - before = clib_time_now (&ttime); - - do + if (sp->bytes_to_receive > 0) { - do - { - try_tx = try_rx = 0; - - /* *INDENT-OFF* */ - pool_foreach (sp, tm->sessions, - ({ - if (sp->bytes_to_send > 0) - { - send_test_chunk (tm, sp); - try_tx = 1; - } - })); - pool_foreach (sp, tm->sessions, - ({ - if (sp->bytes_to_receive > 0) - { - receive_test_chunk (tm, sp); - try_rx = 1; - } - else - { - /* Session is complete */ - vec_add1 (session_indices, sp - tm->sessions); - } - })); - /* Terminate any completed sessions */ - if (PREDICT_FALSE (_vec_len(session_indices) != 0)) - { - for (i = 0; i < _vec_len (session_indices); i++) - { - sp = pool_elt_at_index (tm->sessions, session_indices[i]); - rx_total += sp->bytes_received; - dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); - memset (dmp, 0, sizeof (*dmp)); - dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); - dmp->client_index = tm->my_client_index; - dmp->handle = sp->vpp_session_handle; - vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); - pool_put (tm->sessions, sp); - } - _vec_len(session_indices) = 0; - } - /* *INDENT-ON* */ - } - while (try_tx || try_rx); + receive_test_chunk (tm, sp); + delete_session = 0; } - while (0); - after = clib_time_now (&ttime); - - clib_warning ("Test complete %lld bytes in %.2f secs", - rx_total, (after - before)); - if ((after - before) != 0.0) + if (PREDICT_FALSE (delete_session == 1)) { - clib_warning ("%.2f bytes/second full-duplex", - ((f64) rx_total) / (after - before)); - clib_warning ("%.4f gbit/second full-duplex", - (((f64) rx_total * 8.0) / (after - before)) / 1e9); + __sync_fetch_and_add (&tm->rx_total, sp->bytes_received); + dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); + memset (dmp, 0, sizeof (*dmp)); + dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); + dmp->client_index = tm->my_client_index; + dmp->handle = sp->vpp_session_handle; + vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); + vec_delete (connection_indices, 1, i); + tm->connection_index_by_thread[my_thread_index] = + connection_indices; + __sync_fetch_and_add (&tm->ready_connections, -1); + + /* Kick the debug CLI process */ + if (tm->ready_connections == 0) + { + tm->test_end_time = vlib_time_now (vm); + vlib_process_signal_event (vm, tm->cli_node_index, + 2, 0 /* data */ ); + } } - - if (pool_elts (tm->sessions)) - clib_warning ("BUG: %d active sessions remain...", - pool_elts (tm->sessions)); } - while (0); - /* NOTREACHED */ -#if TCP_BUILTIN_CLIENT_PTHREAD return 0; -#endif } +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (builtin_client_node) = +{ + .function = builtin_client_node_fn, + .name = "builtin-tcp-client", + .type = VLIB_NODE_TYPE_INPUT, + .state = 
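  /* The client input node starts out disabled; the "test tcp clients"
   * CLI command flips it to VLIB_NODE_STATE_POLLING on every worker
   * before the connect requests are fired off. */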
VLIB_NODE_STATE_DISABLED, +}; +/* *INDENT-ON* */ + + /* So we don't get "no handler for... " msgs */ static void vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) @@ -299,6 +242,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session_t *session; u32 session_index; i32 retval = /* clib_net_to_host_u32 ( */ mp->retval /*) */ ; + int i; if (retval < 0) { @@ -332,7 +276,29 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) /* Add it to the session lookup table */ hash_set (tm->session_index_by_vpp_handles, mp->handle, session_index); - tm->ready_connections++; + if (tm->ready_connections == tm->expected_connections - 1) + { + vlib_thread_main_t *thread_main = vlib_get_thread_main (); + int thread_index; + + thread_index = 0; + for (i = 0; i < pool_elts (tm->sessions); i++) + { + vec_add1 (tm->connection_index_by_thread[thread_index], i); + thread_index++; + if (thread_index == thread_main->n_vlib_mains) + thread_index = 0; + } + } + __sync_fetch_and_add (&tm->ready_connections, 1); + if (tm->ready_connections == tm->expected_connections) + { + tm->run_test = 1; + tm->test_start_time = vlib_time_now (tm->vlib_main); + /* Signal the CLI process that the action is starting... */ + vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, + 1, 0 /* data */ ); + } } static int @@ -414,6 +380,7 @@ static int tcp_test_clients_init (vlib_main_t * vm) { tclient_main_t *tm = &tclient_main; + vlib_thread_main_t *thread_main = vlib_get_thread_main (); int i; tclient_api_hookup (vm); @@ -429,6 +396,46 @@ tcp_test_clients_init (vlib_main_t * vm) vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); tm->is_init = 1; + tm->vlib_main = vm; + + vec_validate (tm->connection_index_by_thread, thread_main->n_vlib_mains); + return 0; +} + +static int +builtin_session_connected_callback (u32 app_index, u32 api_context, + stream_session_t * s, u8 is_fail) +{ + vl_api_connect_uri_reply_t _m, *mp = &_m; + unix_shared_memory_queue_t *q; + application_t *app; + unix_shared_memory_queue_t *vpp_queue; + + app = application_get (app_index); + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return -1; + + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); + mp->context = api_context; + if (!is_fail) + { + vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); + mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo); + mp->server_tx_fifo = pointer_to_uword (s->server_tx_fifo); + mp->handle = stream_session_handle (s); + mp->vpp_event_queue_address = pointer_to_uword (vpp_queue); + mp->retval = 0; + s->session_state = SESSION_STATE_READY; + } + else + { + mp->retval = clib_host_to_net_u32 (VNET_API_ERROR_SESSION_CONNECT_FAIL); + } + + vl_api_connect_uri_reply_t_handler (mp); return 0; } @@ -461,7 +468,7 @@ builtin_server_rx_callback (stream_session_t * s) static session_cb_vft_t builtin_clients = { .session_reset_callback = builtin_session_reset_callback, - .session_connected_callback = send_session_connected_callback, + .session_connected_callback = builtin_session_connected_callback, .session_accept_callback = builtin_session_create_callback, .session_disconnect_callback = builtin_session_disconnect_callback, .builtin_server_rx_callback = builtin_server_rx_callback @@ -502,11 +509,16 @@ test_tcp_clients_command_fn (vlib_main_t * vm, vlib_cli_command_t * cmd) { tclient_main_t *tm = &tclient_main; + vlib_thread_main_t *thread_main = vlib_get_thread_main 
(); + uword *event_data = 0; + uword event_type; u8 *connect_uri = (u8 *) "tcp://6.0.1.1/1234"; u8 *uri; u32 n_clients = 1; int i; u64 tmp; + f64 cli_timeout = 20.0; + f64 delta; tm->bytes_to_send = 8192; vec_free (tm->connect_uri); @@ -523,6 +535,8 @@ test_tcp_clients_command_fn (vlib_main_t * vm, ; else if (unformat (input, "uri %s", &tm->connect_uri)) ; + else if (unformat (input, "cli-timeout %f", &cli_timeout)) + ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); @@ -536,6 +550,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->ready_connections = 0; tm->expected_connections = n_clients; + tm->rx_total = 0; uri = connect_uri; if (tm->connect_uri) @@ -556,40 +571,99 @@ test_tcp_clients_command_fn (vlib_main_t * vm, } #endif vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); - attach_builtin_test_clients (); + if (tm->test_client_attached == 0) + attach_builtin_test_clients (); + tm->test_client_attached = 1; + + /* Turn on the builtin client input nodes */ + for (i = 0; i < thread_main->n_vlib_mains; i++) + vlib_node_set_state (vlib_mains[i], builtin_client_node.index, + VLIB_NODE_STATE_POLLING); - /* Fire off connect requests, in something approaching a normal manner */ + tm->cli_node_index = vlib_get_current_process (vm)->node_runtime.node_index; + + /* Fire off connect requests */ for (i = 0; i < n_clients; i++) { - vl_api_connect_uri_t *cmp; - cmp = vl_msg_api_alloc_as_if_client (sizeof (*cmp)); + vl_api_connect_uri_t _cmp, *cmp = &_cmp; + void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * cmp); + memset (cmp, 0, sizeof (*cmp)); cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); cmp->client_index = tm->my_client_index; cmp->context = ntohl (0xfeedface); memcpy (cmp->uri, uri, strlen ((char *) uri) + 1); - vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & cmp); + + vl_api_connect_uri_t_handler (cmp); + /* Crude pacing for call setups, 100k/sec */ + vlib_process_suspend (vm, 10e-6); + } + + /* Park until the sessions come up, or ten seconds elapse... */ + vlib_process_wait_for_event_or_clock (vm, 10.0 /* timeout, seconds */ ); + event_type = vlib_process_get_events (vm, &event_data); + + switch (event_type) + { + case ~0: + vlib_cli_output (vm, "Timeout with only %d sessions active...", + tm->ready_connections); + goto cleanup; + + case 1: + vlib_cli_output (vm, "Test started at %.6f", tm->test_start_time); + break; + + default: + vlib_cli_output (vm, "unexpected event(1): %d", event_type); + goto cleanup; } - tm->run_test = 1; + /* Now wait for the sessions to finish... 
*/ + vlib_process_wait_for_event_or_clock (vm, cli_timeout); + event_type = vlib_process_get_events (vm, &event_data); + + switch (event_type) + { + case ~0: + vlib_cli_output (vm, "Timeout with %d sessions still active...", + tm->ready_connections); + goto cleanup; + + case 2: + vlib_cli_output (vm, "Test finished at %.6f", tm->test_end_time); + break; + + default: + vlib_cli_output (vm, "unexpected event(2): %d", event_type); + goto cleanup; + } + + delta = tm->test_end_time - tm->test_start_time; + + if (delta != 0.0) + { + vlib_cli_output (vm, + "%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds", + tm->rx_total, tm->rx_total / (1ULL << 20), + tm->rx_total / (1ULL << 30), delta); + vlib_cli_output (vm, "%.2f bytes/second full-duplex", + ((f64) tm->rx_total) / (delta)); + vlib_cli_output (vm, "%.4f gbit/second full-duplex", + (((f64) tm->rx_total * 8.0) / delta / 1e9)); + } + else + vlib_cli_output (vm, "zero delta-t?"); + +cleanup: + pool_free (tm->sessions); + for (i = 0; i < vec_len (tm->connection_index_by_thread); i++) + vec_reset_length (tm->connection_index_by_thread[i]); return 0; } -/* *INDENT-OFF* */ -#if TCP_BUILTIN_CLIENT_VPP_THREAD -VLIB_REGISTER_THREAD (builtin_client_reg, static) = -{ - .name = "tcp-builtin-client", - .function = tclient_thread_fn, - .fixed_count = 1, - .count = 1, - .no_data_structure_clone = 1, -}; -#endif -/* *INDENT-ON* */ - /* *INDENT-OFF* */ VLIB_CLI_COMMAND (test_clients_command, static) = { diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 57d112e6..d5d79e53 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -83,14 +83,18 @@ typedef struct pid_t my_pid; - /* For deadman timers */ - clib_time_t clib_time; + f64 test_start_time; + f64 test_end_time; - /* Connection counts */ u32 expected_connections; + u32 **connection_index_by_thread; volatile u32 ready_connections; + volatile u32 finished_connections; - /* Signal variables */ + volatile u64 rx_total; + u32 cli_node_index; + + /* Signal variable */ volatile int run_test; /* Bytes to send */ @@ -107,6 +111,7 @@ typedef struct u8 test_return_packets; u8 is_init; + u8 test_client_attached; u32 node_index; diff --git a/src/vnet/tcp/builtin_http_server.c b/src/vnet/tcp/builtin_http_server.c index 763a46e9..8b4801cd 100644 --- a/src/vnet/tcp/builtin_http_server.c +++ b/src/vnet/tcp/builtin_http_server.c @@ -513,6 +513,7 @@ server_attach () a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 8 << 10; a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 32 << 10; a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 64fc4a71..4f0e211c 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -62,7 +62,6 @@ int builtin_session_accept_callback (stream_session_t * s) { builtin_server_main_t *bsm = &builtin_server_main; - clib_warning ("called..."); bsm->vpp_queue[s->thread_index] = session_manager_get_vpp_event_queue (s->thread_index); @@ -76,7 +75,6 @@ builtin_session_disconnect_callback (stream_session_t * s) { builtin_server_main_t *bsm = &builtin_server_main; vnet_disconnect_args_t _a, *a = &_a; - clib_warning ("called..."); a->handle = stream_session_handle (s); a->app_index = bsm->app_index; @@ -280,10 +278,11 @@ server_attach () a->api_client_index = bsm->my_client_index; a->session_cb_vft = 
&builtin_session_cb_vft; a->options = options; - a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; - a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 1 << 16; - a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 1 << 16; + a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 512 << 20; + a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10; + a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10; a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 8192; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index 18684d54..7dd03670 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -111,6 +111,7 @@ attach_builtin_uri_server () options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 1024; a->options = options; -- cgit 1.2.3-korg From 93992a9048cb6e5dcd22de5091e72de778122627 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 24 May 2017 18:03:56 -0700 Subject: Implement sack based tcp loss recovery (RFC 6675) - refactor existing congestion control code (RFC 6582/5681). Handling of ack feedback now consists of: ack parsing, cc event detection, event handling, congestion control update - extend sack scoreboard to support sack based retransmissions - basic implementation of Eifel detection algorithm (RFC 3522) for detecting spurious retransmissions - actually initialize the per-thread frame freelist hash tables - increase worker stack size to 2mb - fix session queue node out-of-buffer handling - ensure that the local buffer cache vec_len matches reality - avoid 2x spurious event requeues when short of buffers - count out-of-buffer events - make the builtin server thread-safe - fix bihash template threading issue: need to paint -1 across uninitialized working_copy_length vector elements (via rebase from master) Change-Id: I646cb9f1add9a67d08f4a87badbcb117980ebfc4 Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/svm/svm_fifo.c | 5 +- src/vlib/node.c | 1 + src/vlib/threads.c | 2 +- src/vlib/threads.h | 2 +- src/vnet/session/node.c | 53 ++-- src/vnet/session/session.c | 11 +- src/vnet/session/session.h | 6 +- src/vnet/session/session_cli.c | 26 +- src/vnet/tcp/builtin_client.c | 40 ++- src/vnet/tcp/builtin_server.c | 20 +- src/vnet/tcp/tcp.c | 57 ++-- src/vnet/tcp/tcp.h | 112 +++++-- src/vnet/tcp/tcp_debug.h | 16 +- src/vnet/tcp/tcp_input.c | 671 +++++++++++++++++++++++++++++------------ src/vnet/tcp/tcp_newreno.c | 20 +- src/vnet/tcp/tcp_output.c | 287 ++++++++++++------ src/vnet/tcp/tcp_test.c | 53 ++-- 17 files changed, 973 insertions(+), 409 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index f13f6fea..5c8f244a 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -540,7 +540,7 @@ svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, /* read cursize, which can only increase while we're working */ cursize = svm_fifo_max_dequeue (f); - if (PREDICT_FALSE (cursize == 0)) + if (PREDICT_FALSE (cursize < relative_offset)) return -2; /* nothing in the fifo */ nitems = f->nitems; @@ -548,7 +548,8 @@ svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, real_head = real_head >= nitems ? 
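  /* relative_offset is now honored (the cursize check above, the copy
   * length below), so svm_fifo_peek can read from the middle of the tx
   * fifo; the SACK-based retransmit path relies on this. */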
real_head - nitems : real_head; /* Number of bytes we're going to copy */ - total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes; + total_copy_bytes = (cursize - relative_offset < max_bytes) ? + cursize - relative_offset : max_bytes; if (PREDICT_TRUE (copy_here != 0)) { diff --git a/src/vlib/node.c b/src/vlib/node.c index bbd3a42e..eecad274 100644 --- a/src/vlib/node.c +++ b/src/vlib/node.c @@ -502,6 +502,7 @@ vlib_node_main_init (vlib_main_t * vm) vlib_node_t *n; uword ni; + nm->frame_size_hash = hash_create (0, sizeof (uword)); nm->flags |= VLIB_NODE_MAIN_RUNTIME_STARTED; /* Generate sibling relationships */ diff --git a/src/vlib/threads.c b/src/vlib/threads.c index b7bc9e26..0c775e2d 100644 --- a/src/vlib/threads.c +++ b/src/vlib/threads.c @@ -670,7 +670,7 @@ start_workers (vlib_main_t * vm) /* zap the (per worker) frame freelists, etc */ nm_clone->frame_sizes = 0; - nm_clone->frame_size_hash = 0; + nm_clone->frame_size_hash = hash_create (0, sizeof (uword)); /* Packet trace buffers are guaranteed to be empty, nothing to do here */ diff --git a/src/vlib/threads.h b/src/vlib/threads.h index 17d35a24..572ce77f 100644 --- a/src/vlib/threads.h +++ b/src/vlib/threads.h @@ -62,7 +62,7 @@ typedef struct vlib_thread_registration_ #define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1) /* 0x3f, max */ #define VLIB_OFFSET_MASK (~VLIB_CPU_MASK) -#define VLIB_LOG2_THREAD_STACK_SIZE (20) +#define VLIB_LOG2_THREAD_STACK_SIZE (21) #define VLIB_THREAD_STACK_SIZE (1<session_type]; @@ -167,9 +169,6 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* Check how much we can pull. If buffering, subtract the offset */ max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - rx_offset; - /* Allow enqueuing of a new event */ - svm_fifo_unset_event (s0->server_tx_fifo); - /* Nothing to read return */ if (max_dequeue0 == 0) return 0; @@ -187,8 +186,8 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, max_len_to_snd0 = snd_space0; } - n_bytes_per_buf = vlib_buffer_free_list_buffer_size (vm, - VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); + n_bytes_per_buf = vlib_buffer_free_list_buffer_size + (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); n_bytes_per_seg = MAX_HDRS_LEN + snd_mss0; n_bufs_per_seg = ceil ((double) n_bytes_per_seg / n_bytes_per_buf); n_bufs_per_evt = (ceil ((double) max_len_to_snd0 / n_bytes_per_seg)) @@ -205,24 +204,33 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (n_bufs < VLIB_FRAME_SIZE)) { vec_validate (smm->tx_buffers[thread_index], - n_bufs + VLIB_FRAME_SIZE - 1); - n_bufs += vlib_buffer_alloc (vm, - &smm->tx_buffers[thread_index][n_bufs], - VLIB_FRAME_SIZE); - - /* buffer shortage - * XXX 0.9 because when debugging we might not get a full frame */ - if (PREDICT_FALSE (n_bufs < 0.9 * VLIB_FRAME_SIZE)) + n_bufs + 2 * VLIB_FRAME_SIZE - 1); + + buffers_allocated = 0; + do { - if (svm_fifo_set_event (s0->server_tx_fifo)) - { - vec_add1 (smm->pending_event_vector[thread_index], *e0); - } - return -1; + buffers_allocated_this_call = + vlib_buffer_alloc + (vm, + &smm->tx_buffers[thread_index][n_bufs + buffers_allocated], + 2 * VLIB_FRAME_SIZE - buffers_allocated); + buffers_allocated += buffers_allocated_this_call; } + while (buffers_allocated_this_call > 0 + && ((buffers_allocated + n_bufs < VLIB_FRAME_SIZE))); + + n_bufs += buffers_allocated; _vec_len (smm->tx_buffers[thread_index]) = n_bufs; + + if (PREDICT_FALSE (n_bufs < VLIB_FRAME_SIZE)) + { + vec_add1 (smm->pending_event_vector[thread_index], 
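	      /* Requeue the event exactly once on buffer shortage; the
	       * dispatch loop now counts SESSION_QUEUE_ERROR_NO_BUFFER
	       * instead of queueing the same event a second time. */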
*e0); + return -1; + } } + /* Allow enqueuing of a new event */ + svm_fifo_unset_event (s0->server_tx_fifo); vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (left_to_snd0 && n_left_to_next >= n_bufs_per_seg) @@ -232,7 +240,9 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, */ /* Get free buffer */ + ASSERT (n_bufs >= 1); bi0 = smm->tx_buffers[thread_index][--n_bufs]; + ASSERT (bi0); _vec_len (smm->tx_buffers[thread_index]) = n_bufs; b0 = vlib_get_buffer (vm, bi0); @@ -545,9 +555,10 @@ skip_dequeue: my_thread_index, &n_tx_packets); /* Out of buffers */ - if (rv < 0) + if (PREDICT_FALSE (rv < 0)) { - vec_add1 (smm->pending_event_vector[my_thread_index], *e0); + vlib_node_increment_counter (vm, node->node_index, + SESSION_QUEUE_ERROR_NO_BUFFER, 1); continue; } break; diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 02b0cced..534598d6 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -551,7 +551,7 @@ u8 stream_session_no_space (transport_connection_t * tc, u32 thread_index, u16 data_len) { - stream_session_t *s = stream_session_get (tc->c_index, thread_index); + stream_session_t *s = stream_session_get (tc->s_index, thread_index); if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY)) return 1; @@ -563,6 +563,15 @@ stream_session_no_space (transport_connection_t * tc, u32 thread_index, } u32 +stream_session_tx_fifo_max_dequeue (transport_connection_t * tc) +{ + stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); + if (s->session_state != SESSION_STATE_READY) + return 0; + return svm_fifo_max_dequeue (s->server_tx_fifo); +} + +int stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes) { diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index a8728649..d9c38bd1 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -352,16 +352,18 @@ stream_session_max_rx_enqueue (transport_connection_t * tc) } always_inline u32 -stream_session_fifo_size (transport_connection_t * tc) +stream_session_rx_fifo_size (transport_connection_t * tc) { stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); return s->server_rx_fifo->nitems; } +u32 stream_session_tx_fifo_max_dequeue (transport_connection_t * tc); + int stream_session_enqueue_data (transport_connection_t * tc, vlib_buffer_t * b, u32 offset, u8 queue_event, u8 is_in_order); -u32 +int stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes); u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes); diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index 509eedbb..6b8341aa 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -15,6 +15,15 @@ #include #include +u8 * +format_stream_session_fifos (u8 * s, va_list * args) +{ + stream_session_t *ss = va_arg (*args, stream_session_t *); + s = format (s, " Rx fifo: %U", format_svm_fifo, ss->server_rx_fifo, 1); + s = format (s, " Tx fifo: %U", format_svm_fifo, ss->server_tx_fifo, 1); + return s; +} + /** * Format stream session as per the following format * @@ -44,6 +53,8 @@ format_stream_session (u8 * s, va_list * args) ss->thread_index, verbose); if (verbose == 1) s = format (s, "%v", str); + if (verbose > 1) + s = format (s, "%U", format_stream_session_fifos, ss); } else if (ss->session_state == SESSION_STATE_LISTENING) { @@ -57,8 +68,12 @@ format_stream_session (u8 
* s, va_list * args) } else if (ss->session_state == SESSION_STATE_CLOSED) { - s = format (s, "[CL] %-40U%v", tp_vft->format_connection, - ss->connection_index, ss->thread_index, verbose, str); + s = format (s, "[CL] %-40U", tp_vft->format_connection, + ss->connection_index, ss->thread_index, verbose); + if (verbose == 1) + s = format (s, "%v", str); + if (verbose > 1) + s = format (s, "%U", format_stream_session_fifos, ss); } else { @@ -124,13 +139,6 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, ({ vec_reset_length (str); str = format (str, "%U", format_stream_session, s, verbose); - if (verbose > 1) - { - str = format (str, " Rx fifo: %U", format_svm_fifo, - s->server_rx_fifo, 1); - str = format (str, " Tx fifo: %U", format_svm_fifo, - s->server_tx_fifo, 1); - } vlib_cli_output (vm, "%v", str); })); /* *INDENT-ON* */ diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 768f0c3c..7238cda3 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -115,8 +115,17 @@ receive_test_chunk (tclient_main_t * tm, session_t * s) /* Allow enqueuing of new event */ // svm_fifo_unset_event (rx_fifo); - n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf), - tm->rx_buf); + if (test_bytes) + { + n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf), + tm->rx_buf); + } + else + { + n_read = svm_fifo_max_dequeue (rx_fifo); + svm_fifo_dequeue_drop (rx_fifo, n_read); + } + if (n_read > 0) { if (TCP_BUILTIN_CLIENT_DBG) @@ -165,6 +174,8 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, int i; int delete_session; u32 *connection_indices; + u32 tx_quota = 0; + u32 delta, prev_bytes_received_this_session; connection_indices = tm->connection_index_by_thread[my_thread_index]; @@ -177,14 +188,19 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, sp = pool_elt_at_index (tm->sessions, connection_indices[i]); - if (sp->bytes_to_send > 0) + if (tx_quota < 60 && sp->bytes_to_send > 0) { send_test_chunk (tm, sp); delete_session = 0; + tx_quota++; } if (sp->bytes_to_receive > 0) { + prev_bytes_received_this_session = sp->bytes_received; receive_test_chunk (tm, sp); + delta = sp->bytes_received - prev_bytes_received_this_session; + if (delta > 0) + tx_quota--; delete_session = 0; } if (PREDICT_FALSE (delete_session == 1)) @@ -195,11 +211,19 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); dmp->client_index = tm->my_client_index; dmp->handle = sp->vpp_session_handle; - vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); - vec_delete (connection_indices, 1, i); - tm->connection_index_by_thread[my_thread_index] = - connection_indices; - __sync_fetch_and_add (&tm->ready_connections, -1); +// vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); + if (!unix_shared_memory_queue_add (tm->vl_input_queue, (u8 *) & dmp, + 1)) + { + vec_delete (connection_indices, 1, i); + tm->connection_index_by_thread[my_thread_index] = + connection_indices; + __sync_fetch_and_add (&tm->ready_connections, -1); + } + else + { + vl_msg_api_free (dmp); + } /* Kick the debug CLI process */ if (tm->ready_connections == 0) diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 4f0e211c..8bd2f360 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -39,7 +39,8 @@ typedef struct { - u8 *rx_buf; + /* Per-thread RX buffer */ + u8 **rx_buf; unix_shared_memory_queue_t **vpp_queue; 
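  /* rx_buf is indexed by vlib_get_thread_index () in
   * builtin_server_rx_callback () and vpp_queue by the session's
   * thread_index; the per-thread rx_buf is what lets the builtin echo
   * server run safely from multiple workers. */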
u64 byte_index; @@ -117,13 +118,15 @@ void test_bytes (builtin_server_main_t * bsm, int actual_transfer) { int i; + u32 my_thread_id = vlib_get_thread_index (); for (i = 0; i < actual_transfer; i++) { - if (bsm->rx_buf[i] != ((bsm->byte_index + i) & 0xff)) + if (bsm->rx_buf[my_thread_id][i] != ((bsm->byte_index + i) & 0xff)) { clib_warning ("at %lld expected %d got %d", bsm->byte_index + i, - (bsm->byte_index + i) & 0xff, bsm->rx_buf[i]); + (bsm->byte_index + i) & 0xff, + bsm->rx_buf[my_thread_id][i]); } } bsm->byte_index += actual_transfer; @@ -138,6 +141,7 @@ builtin_server_rx_callback (stream_session_t * s) builtin_server_main_t *bsm = &builtin_server_main; session_fifo_event_t evt; static int serial_number = 0; + u32 my_thread_id = vlib_get_thread_index (); tx_fifo = s->server_tx_fifo; rx_fifo = s->server_rx_fifo; @@ -171,11 +175,12 @@ builtin_server_rx_callback (stream_session_t * s) return 0; } - vec_validate (bsm->rx_buf, max_transfer - 1); - _vec_len (bsm->rx_buf) = max_transfer; + vec_validate (bsm->rx_buf, my_thread_id); + vec_validate (bsm->rx_buf[my_thread_id], max_transfer - 1); + _vec_len (bsm->rx_buf[my_thread_id]) = max_transfer; actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, - bsm->rx_buf); + bsm->rx_buf[my_thread_id]); ASSERT (actual_transfer == max_transfer); // test_bytes (bsm, actual_transfer); @@ -184,7 +189,8 @@ builtin_server_rx_callback (stream_session_t * s) * Echo back */ - n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, bsm->rx_buf); + n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, + bsm->rx_buf[my_thread_id]); if (n_written != max_transfer) clib_warning ("short trout!"); diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 9b7b2f65..e0b67a8e 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -195,8 +195,8 @@ tcp_connection_close (tcp_connection_t * tc) TCP_EVT_DBG (TCP_EVT_CLOSE, tc); /* Send FIN if needed */ - if (tc->state == TCP_STATE_ESTABLISHED || tc->state == TCP_STATE_SYN_RCVD - || tc->state == TCP_STATE_CLOSE_WAIT) + if (tc->state == TCP_STATE_ESTABLISHED + || tc->state == TCP_STATE_SYN_RCVD || tc->state == TCP_STATE_CLOSE_WAIT) tcp_send_fin (tc); /* Switch state */ @@ -480,7 +480,7 @@ u8 * format_tcp_timers (u8 * s, va_list * args) { tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); - int i, last = 0; + int i, last = -1; for (i = 0; i < TCP_N_TIMERS; i++) if (tc->timers[i] != TCP_TIMER_HANDLE_INVALID) @@ -493,7 +493,7 @@ format_tcp_timers (u8 * s, va_list * args) s = format (s, "%s,", tcp_conn_timers[i]); } - if (last > 0) + if (last >= 0) s = format (s, "%s]", tcp_conn_timers[i]); else s = format (s, "]"); @@ -526,19 +526,19 @@ format_tcp_vars (u8 * s, va_list * args) s = format (s, " snd_wnd %u rcv_wnd %u snd_wl1 %u snd_wl2 %u\n", tc->snd_wnd, tc->rcv_wnd, tc->snd_wl1 - tc->irs, tc->snd_wl2 - tc->iss); - s = format (s, " flight size %u send space %u rcv_wnd available %d\n", - tcp_flight_size (tc), tcp_snd_space (tc), - tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las)); + s = format (s, " flight size %u send space %u rcv_wnd_av %d\n", + tcp_flight_size (tc), tcp_available_snd_space (tc), + tcp_rcv_wnd_available (tc)); s = format (s, " cong %U ", format_tcp_congestion_status, tc); s = format (s, "cwnd %u ssthresh %u rtx_bytes %u bytes_acked %u\n", - tc->cwnd, tc->ssthresh, tc->rtx_bytes, tc->bytes_acked); - s = format (s, " prev_ssthresh %u snd_congestion %u\n", tc->prev_ssthresh, - tc->snd_congestion - tc->iss); + tc->cwnd, tc->ssthresh, tc->snd_rxt_bytes, tc->bytes_acked); + s = 
format (s, " prev_ssthresh %u snd_congestion %u dupack %u\n", + tc->prev_ssthresh, tc->snd_congestion - tc->iss, + tc->rcv_dupacks); s = format (s, " rto %u rto_boff %u srtt %u rttvar %u rtt_ts %u ", tc->rto, tc->rto_boff, tc->srtt, tc->rttvar, tc->rtt_ts); s = format (s, "rtt_seq %u\n", tc->rtt_seq); - if (scoreboard_first_hole (&tc->sack_sb)) - s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb); + s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb); if (vec_len (tc->snd_sacks)) s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc); @@ -595,9 +595,10 @@ format_tcp_session (u8 * s, va_list * args) tc = tcp_connection_get (tci, thread_index); if (tc) - return format (s, "%U", format_tcp_connection, tc, verbose); + s = format (s, "%U", format_tcp_connection, tc, verbose); else - return format (s, "empty"); + s = format (s, "empty"); + return s; } u8 * @@ -643,13 +644,17 @@ format_tcp_scoreboard (u8 * s, va_list * args) { sack_scoreboard_t *sb = va_arg (*args, sack_scoreboard_t *); sack_scoreboard_hole_t *hole; - s = format (s, "head %u tail %u snd_una_adv %u\n", sb->head, sb->tail, - sb->snd_una_adv); - s = format (s, "sacked_bytes %u last_sacked_bytes %u", sb->sacked_bytes, - sb->last_sacked_bytes); - s = format (s, " max_byte_sacked %u\n", sb->max_byte_sacked); - s = format (s, "holes:\n"); + s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n", + sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes); + s = format (s, " last_bytes_delivered %u high_sacked %u snd_una_adv %u\n", + sb->last_bytes_delivered, sb->high_sacked, sb->snd_una_adv); + s = format (s, " cur_rxt_hole %u high_rxt %u rescue_rxt %u", + sb->cur_rxt_hole, sb->high_rxt, sb->rescue_rxt); + hole = scoreboard_first_hole (sb); + if (hole) + s = format (s, "\n head %u tail %u holes:\n", sb->head, sb->tail); + while (hole) { s = format (s, "%U", format_tcp_sack_hole, hole); @@ -736,7 +741,7 @@ tcp_snd_space (tcp_connection_t * tc) if (tcp_in_recovery (tc)) { tc->snd_nxt = tc->snd_una_max; - snd_space = tcp_available_wnd (tc) - tc->rtx_bytes + snd_space = tcp_available_wnd (tc) - tc->snd_rxt_bytes - (tc->snd_una_max - tc->snd_congestion); if (snd_space <= 0 || (tc->snd_una_max - tc->snd_una) >= tc->snd_wnd) return 0; @@ -744,8 +749,8 @@ tcp_snd_space (tcp_connection_t * tc) } /* If in fast recovery, send 1 SMSS if wnd allows */ - if (tcp_in_fastrecovery (tc) && tcp_available_snd_space (tc) - && tcp_fastrecovery_sent_1_smss (tc)) + if (tcp_in_fastrecovery (tc) + && tcp_available_snd_space (tc) && !tcp_fastrecovery_sent_1_smss (tc)) { tcp_fastrecovery_1_smss_on (tc); return tc->snd_mss; @@ -761,6 +766,12 @@ tcp_session_send_space (transport_connection_t * trans_conn) return tcp_snd_space (tc); } +i32 +tcp_rcv_wnd_available (tcp_connection_t * tc) +{ + return (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las); +} + u32 tcp_session_tx_fifo_offset (transport_connection_t * trans_conn) { diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index c3ebe22b..071f1ab1 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -34,6 +34,7 @@ #define TCP_MAX_RX_FIFO_SIZE 2 << 20 #define TCP_IW_N_SEGMENTS 10 #define TCP_ALWAYS_ACK 0 /**< If on, we always ack */ +#define TCP_USE_SACKS 1 /**< Disable only for testing */ /** TCP FSM state definitions as per RFC793. 
*/ #define foreach_tcp_fsm_state \ @@ -94,7 +95,7 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler; #define TCP_DELACK_TIME 1 /* 0.1s */ #define TCP_ESTABLISH_TIME 750 /* 75s */ #define TCP_2MSL_TIME 300 /* 30s */ -#define TCP_CLOSEWAIT_TIME 1 /* 0.1s */ +#define TCP_CLOSEWAIT_TIME 20 /* 0.1s */ #define TCP_CLEANUP_TIME 5 /* 0.5s Time to wait before cleanup */ #define TCP_TIMER_PERSIST_MIN 2 /* 0.2s */ @@ -157,6 +158,7 @@ typedef struct _sack_scoreboard_hole u32 prev; /**< Index for previous entry in linked list */ u32 start; /**< Start sequence number */ u32 end; /**< End sequence number */ + u8 is_lost; /**< Mark hole as lost */ } sack_scoreboard_hole_t; typedef struct _sack_scoreboard @@ -166,8 +168,13 @@ typedef struct _sack_scoreboard u32 tail; /**< Index of last entry */ u32 sacked_bytes; /**< Number of bytes sacked in sb */ u32 last_sacked_bytes; /**< Number of bytes last sacked */ + u32 last_bytes_delivered; /**< Number of sack bytes delivered */ u32 snd_una_adv; /**< Bytes to add to snd_una */ - u32 max_byte_sacked; /**< Highest byte acked */ + u32 high_sacked; /**< Highest byte sacked (fack) */ + u32 high_rxt; /**< Highest retransmitted sequence */ + u32 rescue_rxt; /**< Rescue sequence number */ + u32 lost_bytes; /**< Bytes lost as per RFC6675 */ + u32 cur_rxt_hole; /**< Retransmitting from this hole */ } sack_scoreboard_t; typedef enum _tcp_cc_algorithm_type @@ -211,7 +218,7 @@ typedef struct _tcp_connection u32 irs; /**< initial remote sequence */ /* Options */ - tcp_options_t opt; /**< TCP connection options parsed */ + tcp_options_t rcv_opts; /**< Rx options for connection */ tcp_options_t snd_opts; /**< Tx options for connection */ u8 snd_opts_len; /**< Tx options len */ u8 rcv_wscale; /**< Window scale to advertise to peer */ @@ -229,8 +236,10 @@ typedef struct _tcp_connection u32 cwnd; /**< Congestion window */ u32 ssthresh; /**< Slow-start threshold */ u32 prev_ssthresh; /**< ssthresh before congestion */ + u32 prev_cwnd; /**< ssthresh before congestion */ u32 bytes_acked; /**< Bytes acknowledged by current segment */ - u32 rtx_bytes; /**< Retransmitted bytes */ + u32 snd_rxt_bytes; /**< Retransmitted bytes */ + u32 snd_rxt_ts; /**< Timestamp when first packet is retransmitted */ u32 tsecr_last_ack; /**< Timestamp echoed to us in last healthy ACK */ u32 snd_congestion; /**< snd_una_max when congestion is detected */ tcp_cc_algorithm_t *cc_algo; /**< Congestion control algorithm */ @@ -411,6 +420,7 @@ void tcp_send_syn (tcp_connection_t * tc); void tcp_send_fin (tcp_connection_t * tc); void tcp_init_mss (tcp_connection_t * tc); void tcp_update_snd_mss (tcp_connection_t * tc); +void tcp_update_rto (tcp_connection_t * tc); always_inline u32 tcp_end_seq (tcp_header_t * th, u32 len) @@ -428,17 +438,39 @@ tcp_end_seq (tcp_header_t * th, u32 len) #define timestamp_lt(_t1, _t2) ((i32)((_t1)-(_t2)) < 0) #define timestamp_leq(_t1, _t2) ((i32)((_t1)-(_t2)) <= 0) +/** + * Our estimate of the number of bytes that have left the network + */ +always_inline u32 +tcp_bytes_out (const tcp_connection_t * tc) +{ + if (tcp_opts_sack_permitted (&tc->rcv_opts)) + return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes; + else + return tc->rcv_dupacks * tc->snd_mss; +} + +/** + * Our estimate of the number of bytes in flight (pipe size) + */ always_inline u32 tcp_flight_size (const tcp_connection_t * tc) { int flight_size; - flight_size = (int) ((tc->snd_una_max - tc->snd_una) + tc->rtx_bytes) - - (tc->rcv_dupacks * tc->snd_mss) /* - tc->sack_sb.sacked_bytes */ ; + 
flight_size = (int) (tc->snd_una_max - tc->snd_una) - tcp_bytes_out (tc) + + tc->snd_rxt_bytes; - /* Happens if we don't clear sacked bytes */ if (flight_size < 0) - return 0; + { + if (0) + clib_warning + ("Negative: %u %u %u dupacks %u sacked bytes %u flags %d", + tc->snd_una_max - tc->snd_una, tcp_bytes_out (tc), + tc->snd_rxt_bytes, tc->rcv_dupacks, tc->sack_sb.sacked_bytes, + tc->rcv_opts.flags); + return 0; + } return flight_size; } @@ -481,14 +513,17 @@ tcp_available_snd_space (const tcp_connection_t * tc) return available_wnd - flight_size; } -u32 tcp_rcv_wnd_available (tcp_connection_t * tc); +i32 tcp_rcv_wnd_available (tcp_connection_t * tc); u32 tcp_snd_space (tcp_connection_t * tc); void tcp_update_rcv_wnd (tcp_connection_t * tc); void tcp_retransmit_first_unacked (tcp_connection_t * tc); +void tcp_fast_retransmit_no_sack (tcp_connection_t * tc); +void tcp_fast_retransmit_sack (tcp_connection_t * tc); void tcp_fast_retransmit (tcp_connection_t * tc); -void tcp_cc_congestion (tcp_connection_t * tc); -void tcp_cc_recover (tcp_connection_t * tc); +void tcp_cc_init_congestion (tcp_connection_t * tc); +int tcp_cc_recover (tcp_connection_t * tc); +void tcp_cc_fastrecovery_exit (tcp_connection_t * tc); /* Made public for unit testing only */ void tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end); @@ -563,16 +598,16 @@ tcp_retransmit_timer_set (tcp_connection_t * tc) } always_inline void -tcp_retransmit_timer_update (tcp_connection_t * tc) +tcp_retransmit_timer_reset (tcp_connection_t * tc) { - tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, - clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); + tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT); } always_inline void -tcp_retransmit_timer_reset (tcp_connection_t * tc) +tcp_retransmit_timer_force_update (tcp_connection_t * tc) { - tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT); + tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, + clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); } always_inline void @@ -598,15 +633,43 @@ tcp_persist_timer_reset (tcp_connection_t * tc) tcp_timer_reset (tc, TCP_TIMER_PERSIST); } +always_inline void +tcp_retransmit_timer_update (tcp_connection_t * tc) +{ + if (tc->snd_una == tc->snd_una_max) + { + tcp_retransmit_timer_reset (tc); + if (tc->snd_wnd < tc->snd_mss) + tcp_persist_timer_set (tc); + } + else + tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, + clib_max (tc->rto * TCP_TO_TIMER_TICK, 1)); +} + always_inline u8 tcp_timer_is_active (tcp_connection_t * tc, tcp_timers_e timer) { return tc->timers[timer] != TCP_TIMER_HANDLE_INVALID; } +#define tcp_validate_txf_size(_tc, _a) \ + ASSERT(_tc->state != TCP_STATE_ESTABLISHED \ + || stream_session_tx_fifo_max_dequeue (&_tc->connection) >= _a) + void scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole); +void scoreboard_update_lost (tcp_connection_t * tc, sack_scoreboard_t * sb); +sack_scoreboard_hole_t *scoreboard_insert_hole (sack_scoreboard_t * sb, + u32 prev_index, u32 start, + u32 end); +sack_scoreboard_hole_t *scoreboard_next_rxt_hole (sack_scoreboard_t * sb, + sack_scoreboard_hole_t * + start, u8 have_sent_1_smss, + u8 * can_rescue, + u8 * snd_limited); +void scoreboard_init_high_rxt (sack_scoreboard_t * sb); always_inline sack_scoreboard_hole_t * scoreboard_get_hole (sack_scoreboard_t * sb, u32 index) @@ -624,6 +687,14 @@ scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) return 0; } +always_inline sack_scoreboard_hole_t * +scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) +{ + if (hole->prev != 
TCP_INVALID_SACK_HOLE_INDEX) + return pool_elt_at_index (sb->holes, hole->prev); + return 0; +} + always_inline sack_scoreboard_hole_t * scoreboard_first_hole (sack_scoreboard_t * sb) { @@ -643,15 +714,19 @@ scoreboard_last_hole (sack_scoreboard_t * sb) always_inline void scoreboard_clear (sack_scoreboard_t * sb) { - sack_scoreboard_hole_t *hole = scoreboard_first_hole (sb); + sack_scoreboard_hole_t *hole; while ((hole = scoreboard_first_hole (sb))) { scoreboard_remove_hole (sb, hole); } sb->sacked_bytes = 0; sb->last_sacked_bytes = 0; + sb->last_bytes_delivered = 0; sb->snd_una_adv = 0; - sb->max_byte_sacked = 0; + sb->high_sacked = 0; + sb->high_rxt = 0; + sb->lost_bytes = 0; + sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX; } always_inline u32 @@ -671,6 +746,7 @@ scoreboard_init (sack_scoreboard_t * sb) { sb->head = TCP_INVALID_SACK_HOLE_INDEX; sb->tail = TCP_INVALID_SACK_HOLE_INDEX; + sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX; } void tcp_rcv_sacks (tcp_connection_t * tc, u32 ack); diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index b4497a3b..3a16cf63 100755 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -393,7 +393,7 @@ typedef enum _tcp_dbg_evt DECLARE_ETD(_tc, _e, 4); \ ed->data[0] = _seq - _tc->irs; \ ed->data[1] = _end - _tc->irs; \ - ed->data[2] = _tc->opt.tsval; \ + ed->data[2] = _tc->rcv_opts.tsval; \ ed->data[3] = _tc->tsval_recent; \ } @@ -427,27 +427,27 @@ typedef enum _tcp_dbg_evt { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "rtx: snd_nxt %u offset %u snd %u rtx %u", \ + .format = "rxt: snd_nxt %u offset %u snd %u rxt %u", \ .format_args = "i4i4i4i4", \ }; \ DECLARE_ETD(_tc, _e, 4); \ ed->data[0] = _tc->snd_nxt - _tc->iss; \ ed->data[1] = offset; \ ed->data[2] = n_bytes; \ - ed->data[3] = _tc->rtx_bytes; \ + ed->data[3] = _tc->snd_rxt_bytes; \ } #define TCP_EVT_CC_EVT_HANDLER(_tc, _sub_evt, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "cc: %s wnd %u snd_cong %u rtx_bytes %u", \ + .format = "cc: %s wnd %u snd_cong %u rxt_bytes %u", \ .format_args = "t4i4i4i4", \ .n_enum_strings = 5, \ .enum_strings = { \ - "fast-rtx", \ - "rtx-timeout", \ - "first-rtx", \ + "fast-rxt", \ + "rxt-timeout", \ + "first-rxt", \ "recovered", \ "congestion", \ }, \ @@ -456,7 +456,7 @@ typedef enum _tcp_dbg_evt ed->data[0] = _sub_evt; \ ed->data[1] = tcp_available_snd_space (_tc); \ ed->data[2] = _tc->snd_congestion - _tc->iss; \ - ed->data[3] = _tc->rtx_bytes; \ + ed->data[3] = _tc->snd_rxt_bytes; \ } #define TCP_EVT_CC_PACK_HANDLER(_tc, ...) \ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 35bc9094..ff2229b3 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -231,8 +231,8 @@ tcp_options_parse (tcp_header_t * th, tcp_options_t * to) always_inline int tcp_segment_check_paws (tcp_connection_t * tc) { - return tcp_opts_tstamp (&tc->opt) && tc->tsval_recent - && timestamp_lt (tc->opt.tsval, tc->tsval_recent); + return tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent + && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent); } /** @@ -248,10 +248,10 @@ tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end) * then the TSval from the segment is copied to TS.Recent; * otherwise, the TSval is ignored. 
*/ - if (tcp_opts_tstamp (&tc->opt) && tc->tsval_recent + if (tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent && seq_leq (seq, tc->rcv_las) && seq_leq (tc->rcv_las, seq_end)) { - tc->tsval_recent = tc->opt.tsval; + tc->tsval_recent = tc->rcv_opts.tsval; tc->tsval_recent_age = tcp_time_now (); } } @@ -272,14 +272,21 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0))) return -1; - if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->opt))) + if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts))) { return -1; } if (tcp_segment_check_paws (tc0)) { - clib_warning ("paws failed"); + if (CLIB_DEBUG > 2) + { + clib_warning ("paws failed\n%U", format_tcp_connection, tc0, 2); + clib_warning ("seq %u seq_end %u ack %u", + vnet_buffer (b0)->tcp.seq_number - tc0->irs, + vnet_buffer (b0)->tcp.seq_end - tc0->irs, + vnet_buffer (b0)->tcp.ack_number - tc0->iss); + } TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number, vnet_buffer (b0)->tcp.seq_end); @@ -348,7 +355,6 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, /* If segment in window, save timestamp */ tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number, vnet_buffer (b0)->tcp.seq_end); - return 0; } @@ -391,6 +397,12 @@ tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt) } } +void +tcp_update_rto (tcp_connection_t * tc) +{ + tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX); +} + /** Update RTT estimate and RTO timer * * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK @@ -405,7 +417,7 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) u32 mrtt = 0; u8 rtx_acked; - /* Determine if only rtx bytes are acked. TODO fast retransmit */ + /* Determine if only rtx bytes are acked. TODO XXX fast retransmit */ rtx_acked = tc->rto_boff && (tc->bytes_acked <= tc->snd_mss); /* Karn's rule, part 1. Don't use retransmitted segments to estimate @@ -418,9 +430,10 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) * snd_una, i.e., the left side of the send window: * seq_lt (tc->snd_una, ack). Note: last condition could be dropped, we don't * try to update rtt for dupacks */ - else if (tcp_opts_tstamp (&tc->opt) && tc->opt.tsecr && tc->bytes_acked) + else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr + && tc->bytes_acked) { - mrtt = tcp_time_now () - tc->opt.tsecr; + mrtt = tcp_time_now () - tc->rcv_opts.tsecr; } /* Allow measuring of a new RTT */ @@ -436,7 +449,7 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) return 0; tcp_estimate_rtt (tc, mrtt); - tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX); + tcp_update_rto (tc); return 0; } @@ -447,25 +460,46 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) static void tcp_dequeue_acked (tcp_connection_t * tc, u32 ack) { - /* Dequeue the newly ACKed bytes */ - stream_session_dequeue_drop (&tc->connection, tc->bytes_acked); + /* Dequeue the newly ACKed add SACKed bytes */ + stream_session_dequeue_drop (&tc->connection, + tc->bytes_acked + tc->sack_sb.snd_una_adv); + + tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una); /* Update rtt and rto */ tcp_update_rtt (tc, ack); + + /* If everything has been acked, stop retransmit timer + * otherwise update. */ + tcp_retransmit_timer_update (tc); } /** - * Check if dupack as per RFC5681 Sec. 2 - * - * This works only if called before updating snd_wnd. 
- * */ -always_inline u8 -tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 new_snd_wnd) + * Check if duplicate ack as per RFC5681 Sec. 2 + */ +static u8 +tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd, + u32 prev_snd_una) { - return ((vnet_buffer (b)->tcp.ack_number == tc->snd_una) + return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una) && seq_gt (tc->snd_una_max, tc->snd_una) && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number) - && (new_snd_wnd == tc->snd_wnd)); + && (prev_snd_wnd == tc->snd_wnd)); +} + +/** + * Checks if ack is a congestion control event. + */ +static u8 +tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b, + u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack) +{ + /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are + * defined to be 'duplicate' */ + *is_dack = tc->sack_sb.last_sacked_bytes + || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una); + + return (*is_dack || tcp_in_cong_recovery (tc)); } void @@ -478,6 +512,10 @@ scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) next = pool_elt_at_index (sb->holes, hole->next); next->prev = hole->prev; } + else + { + sb->tail = hole->prev; + } if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX) { @@ -489,6 +527,9 @@ scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) sb->head = hole->next; } + if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole) + sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX; + pool_put (sb->holes, hole); } @@ -527,26 +568,131 @@ scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index, return hole; } +void +scoreboard_update_lost (tcp_connection_t * tc, sack_scoreboard_t * sb) +{ + sack_scoreboard_hole_t *hole, *prev; + u32 bytes = 0, blks = 0; + + sb->lost_bytes = 0; + hole = scoreboard_last_hole (sb); + if (!hole) + return; + + if (seq_gt (sb->high_sacked, hole->end)) + { + bytes = sb->high_sacked - hole->end; + blks = 1; + } + + while ((prev = scoreboard_prev_hole (sb, hole)) + && (bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss + && blks < TCP_DUPACK_THRESHOLD)) + { + bytes += hole->start - prev->end; + blks++; + hole = prev; + } + + hole = prev; + while (hole) + { + sb->lost_bytes += scoreboard_hole_bytes (hole); + hole->is_lost = 1; + hole = scoreboard_prev_hole (sb, hole); + } +} + +/** + * Figure out the next hole to retransmit + * + * Follows logic proposed in RFC6675 Sec. 4, NextSeg() + */ +sack_scoreboard_hole_t * +scoreboard_next_rxt_hole (sack_scoreboard_t * sb, + sack_scoreboard_hole_t * start, + u8 have_sent_1_smss, + u8 * can_rescue, u8 * snd_limited) +{ + sack_scoreboard_hole_t *hole = 0; + + hole = start ? 
start : scoreboard_first_hole (sb); + while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost) + hole = scoreboard_next_hole (sb, hole); + + /* Nothing, return */ + if (!hole) + { + sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX; + return 0; + } + + /* Rule (1): if higher than rxt, less than high_sacked and lost */ + if (hole->is_lost && seq_lt (hole->start, sb->high_sacked)) + { + sb->cur_rxt_hole = scoreboard_hole_index (sb, hole); + } + else + { + /* Rule (2): output takes care of transmitting new data */ + if (!have_sent_1_smss) + { + hole = 0; + sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX; + } + /* Rule (3): if hole not lost */ + else if (seq_lt (hole->start, sb->high_sacked)) + { + *snd_limited = 1; + sb->cur_rxt_hole = scoreboard_hole_index (sb, hole); + } + /* Rule (4): if hole beyond high_sacked */ + else + { + ASSERT (seq_geq (hole->start, sb->high_sacked)); + *snd_limited = 1; + *can_rescue = 1; + /* HighRxt MUST NOT be updated */ + return 0; + } + } + + if (hole && seq_lt (sb->high_rxt, hole->start)) + sb->high_rxt = hole->start; + + return hole; +} + +void +scoreboard_init_high_rxt (sack_scoreboard_t * sb) +{ + sack_scoreboard_hole_t *hole; + hole = scoreboard_first_hole (sb); + sb->high_rxt = hole->start; + sb->cur_rxt_hole = sb->head; +} + void tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { sack_scoreboard_t *sb = &tc->sack_sb; sack_block_t *blk, tmp; sack_scoreboard_hole_t *hole, *next_hole, *last_hole, *new_hole; - u32 blk_index = 0, old_sacked_bytes, delivered_bytes, hole_index; + u32 blk_index = 0, old_sacked_bytes, hole_index; int i, j; sb->last_sacked_bytes = 0; sb->snd_una_adv = 0; old_sacked_bytes = sb->sacked_bytes; - delivered_bytes = 0; + sb->last_bytes_delivered = 0; - if (!tcp_opts_sack (&tc->opt) && sb->head == TCP_INVALID_SACK_HOLE_INDEX) + if (!tcp_opts_sack (&tc->rcv_opts) + && sb->head == TCP_INVALID_SACK_HOLE_INDEX) return; /* Remove invalid blocks */ - blk = tc->opt.sacks; - while (blk < vec_end (tc->opt.sacks)) + blk = tc->rcv_opts.sacks; + while (blk < vec_end (tc->rcv_opts.sacks)) { if (seq_lt (blk->start, blk->end) && seq_gt (blk->start, tc->snd_una) @@ -555,7 +701,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) blk++; continue; } - vec_del1 (tc->opt.sacks, blk - tc->opt.sacks); + vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks); } /* Add block for cumulative ack */ @@ -563,20 +709,20 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { tmp.start = tc->snd_una; tmp.end = ack; - vec_add1 (tc->opt.sacks, tmp); + vec_add1 (tc->rcv_opts.sacks, tmp); } - if (vec_len (tc->opt.sacks) == 0) + if (vec_len (tc->rcv_opts.sacks) == 0) return; /* Make sure blocks are ordered */ - for (i = 0; i < vec_len (tc->opt.sacks); i++) - for (j = i + 1; j < vec_len (tc->opt.sacks); j++) - if (seq_lt (tc->opt.sacks[j].start, tc->opt.sacks[i].start)) + for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++) + for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++) + if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start)) { - tmp = tc->opt.sacks[i]; - tc->opt.sacks[i] = tc->opt.sacks[j]; - tc->opt.sacks[j] = tmp; + tmp = tc->rcv_opts.sacks[i]; + tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j]; + tc->rcv_opts.sacks[j] = tmp; } if (sb->head == TCP_INVALID_SACK_HOLE_INDEX) @@ -585,25 +731,25 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX, tc->snd_una, tc->snd_una_max); sb->tail = scoreboard_hole_index (sb, last_hole); - tmp = tc->opt.sacks[vec_len (tc->opt.sacks) - 1]; - 
sb->max_byte_sacked = tmp.end; + tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1]; + sb->high_sacked = tmp.end; } else { /* If we have holes but snd_una_max is beyond the last hole, update * last hole end */ - tmp = tc->opt.sacks[vec_len (tc->opt.sacks) - 1]; + tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1]; last_hole = scoreboard_last_hole (sb); - if (seq_gt (tc->snd_una_max, sb->max_byte_sacked) + if (seq_gt (tc->snd_una_max, sb->high_sacked) && seq_gt (tc->snd_una_max, last_hole->end)) last_hole->end = tc->snd_una_max; } /* Walk the holes with the SACK blocks */ hole = pool_elt_at_index (sb->holes, sb->head); - while (hole && blk_index < vec_len (tc->opt.sacks)) + while (hole && blk_index < vec_len (tc->rcv_opts.sacks)) { - blk = &tc->opt.sacks[blk_index]; + blk = &tc->rcv_opts.sacks[blk_index]; if (seq_leq (blk->start, hole->start)) { @@ -617,9 +763,9 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { /* Bytes lost because snd_wnd left edge advances */ if (next_hole && seq_leq (next_hole->start, ack)) - delivered_bytes += next_hole->start - hole->end; + sb->last_bytes_delivered += next_hole->start - hole->end; else - delivered_bytes += ack - hole->end; + sb->last_bytes_delivered += ack - hole->end; } else { @@ -633,8 +779,8 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) last_hole = scoreboard_last_hole (sb); /* keep track of max byte sacked for when the last hole * is acked */ - if (seq_gt (hole->end, sb->max_byte_sacked)) - sb->max_byte_sacked = hole->end; + if (seq_gt (hole->end, sb->high_sacked)) + sb->high_sacked = hole->end; } /* snd_una needs to be advanced */ @@ -645,12 +791,12 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) sb->snd_una_adv = next_hole->start - ack; /* all these can be delivered */ - delivered_bytes += sb->snd_una_adv; + sb->last_bytes_delivered += sb->snd_una_adv; } else if (!next_hole) { - sb->snd_una_adv = sb->max_byte_sacked - ack; - delivered_bytes += sb->snd_una_adv; + sb->snd_una_adv = sb->high_sacked - ack; + sb->last_bytes_delivered += sb->snd_una_adv; } } @@ -691,28 +837,33 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) } blk_index++; - hole = scoreboard_next_hole (sb, hole); } - else + else if (seq_leq (blk->start, hole->end)) { sb->sacked_bytes += hole->end - blk->start; hole->end = blk->start; - hole = scoreboard_next_hole (sb, hole); } + + hole = scoreboard_next_hole (sb, hole); } } sb->last_sacked_bytes = sb->sacked_bytes - old_sacked_bytes; - sb->sacked_bytes -= delivered_bytes; + sb->sacked_bytes -= sb->last_bytes_delivered; + scoreboard_update_lost (tc, sb); } -/** Update snd_wnd +/** + * Try to update snd_wnd based on feedback received from peer. * - * If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set - * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */ + * If successful, and new window is 'effectively' 0, activate persist + * timer. 
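+ * ('Effectively' zero here means smaller than one snd_mss; that is the
+ * threshold the code below uses before arming the persist timer.)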
+ */ static void tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd) { + /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set + * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */ if (seq_lt (tc->snd_wl1, seq) || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack))) { @@ -721,138 +872,269 @@ tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd) tc->snd_wl2 = ack; TCP_EVT_DBG (TCP_EVT_SND_WND, tc); - /* Set probe timer if we just got 0 wnd */ if (tc->snd_wnd < tc->snd_mss) { - if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)) + /* Set persist timer if not set and we just got 0 wnd */ + if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST) + && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)) tcp_persist_timer_set (tc); } else - tcp_persist_timer_reset (tc); + { + tcp_persist_timer_reset (tc); + if (!tcp_in_recovery (tc) && tc->rto_boff > 0) + { + tc->rto_boff = 0; + tcp_update_rto (tc); + } + } } } void -tcp_cc_congestion (tcp_connection_t * tc) +tcp_cc_init_congestion (tcp_connection_t * tc) { - tc->snd_congestion = tc->snd_nxt; + tcp_fastrecovery_on (tc); + tc->snd_congestion = tc->snd_una_max; tc->cc_algo->congestion (tc); TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4); } -void -tcp_cc_recover (tcp_connection_t * tc) +static void +tcp_cc_recovery_exit (tcp_connection_t * tc) { - /* TODO: check if time to recover was small. It might be that RTO popped - * too soon. - */ + /* Deflate rto */ + tcp_update_rto (tc); + tc->rto_boff = 0; + tc->snd_rxt_ts = 0; + tcp_recovery_off (tc); +} +void +tcp_cc_fastrecovery_exit (tcp_connection_t * tc) +{ tc->cc_algo->recovered (tc); + tc->snd_rxt_bytes = 0; + tc->rcv_dupacks = 0; + tcp_fastrecovery_off (tc); + tcp_fastrecovery_1_smss_off (tc); +} - tc->rtx_bytes = 0; +static void +tcp_cc_congestion_undo (tcp_connection_t * tc) +{ + tc->cwnd = tc->prev_cwnd; + tc->ssthresh = tc->prev_ssthresh; + tc->snd_nxt = tc->snd_una_max; tc->rcv_dupacks = 0; - tc->snd_nxt = tc->snd_una; + if (tcp_in_recovery (tc)) + tcp_cc_recovery_exit (tc); + ASSERT (tc->rto_boff == 0); + /* TODO extend for fastrecovery */ +} - tc->cc_algo->rcv_ack (tc); - tc->tsecr_last_ack = tc->opt.tsecr; +static u8 +tcp_cc_is_spurious_retransmit (tcp_connection_t * tc) +{ + return (tc->snd_rxt_ts + && tcp_opts_tstamp (&tc->rcv_opts) + && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts)); +} - tcp_cong_recovery_off (tc); +int +tcp_cc_recover (tcp_connection_t * tc) +{ + ASSERT (tcp_in_cong_recovery (tc)); + if (tcp_cc_is_spurious_retransmit (tc)) + { + tcp_cc_congestion_undo (tc); + return 1; + } + + if (tcp_in_recovery (tc)) + tcp_cc_recovery_exit (tc); + else if (tcp_in_fastrecovery (tc)) + tcp_cc_fastrecovery_exit (tc); + + ASSERT (tc->rto_boff == 0); + ASSERT (!tcp_in_cong_recovery (tc)); TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3); + return 0; } static void -tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b) +tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b) +{ + ASSERT (!tcp_in_cong_recovery (tc)); + + /* Congestion avoidance */ + tc->cc_algo->rcv_ack (tc); + tc->tsecr_last_ack = tc->rcv_opts.tsecr; + + /* If a cumulative ack, make sure dupacks is 0 */ + tc->rcv_dupacks = 0; + + /* When dupacks hits the threshold we only enter fast retransmit if + * cumulative ack covers more than snd_congestion. Should snd_una + * wrap this test may fail under otherwise valid circumstances. + * Therefore, proactively update snd_congestion when wrap detected. 
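+ * Illustrative numbers only: with 32-bit sequence arithmetic, if
+ * snd_congestion sits at byte 1000 and snd_una later advances a little
+ * past 1000 + 2^31, seq_gt (snd_congestion, snd_una) flips back to true
+ * even though that point was acked long ago. The clamp below moves
+ * snd_congestion to snd_una - 1 so the dupack test stays meaningful.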
*/ + if (PREDICT_FALSE + (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked) + && seq_gt (tc->snd_congestion, tc->snd_una))) + tc->snd_congestion = tc->snd_una - 1; +} + +static u8 +tcp_should_fastrecover_sack (tcp_connection_t * tc) { - u8 partial_ack; - u32 bytes_advanced; + return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes; +} - if (tcp_in_fastrecovery (tc)) +static u8 +tcp_should_fastrecover (tcp_connection_t * tc) +{ + return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD + || tcp_should_fastrecover_sack (tc)); +} + +static void +tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) +{ + /* + * Duplicate ACK. Check if we should enter fast recovery, or if already in + * it account for the bytes that left the network. + */ + if (is_dack) { - partial_ack = seq_lt (tc->snd_una, tc->snd_congestion); - if (!partial_ack) + ASSERT (tc->snd_una != tc->snd_una_max + || tc->sack_sb.last_sacked_bytes); + tc->rcv_dupacks++; + + if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked) { - /* Clear retransmitted bytes. */ - tcp_cc_recover (tc); + ASSERT (tcp_in_fastrecovery (tc)); + /* Pure duplicate ack. If some data got acked, it's handled lower */ + tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK); + return; } - else + else if (tcp_should_fastrecover (tc)) { - TCP_EVT_DBG (TCP_EVT_CC_PACK, tc); + /* Things are already bad */ + if (tcp_in_cong_recovery (tc)) + { + tc->rcv_dupacks = 0; + goto partial_ack_test; + } - /* Clear retransmitted bytes. XXX should we clear all? */ - tc->rtx_bytes = 0; + /* If of of the two conditions lower hold, reset dupacks + * 1) Cumulative ack does not cover more than congestion threshold + * 2) RFC6582 heuristic to avoid multiple fast retransmits + */ + if (seq_leq (tc->snd_una, tc->snd_congestion) + || tc->rcv_opts.tsecr != tc->tsecr_last_ack) + { + tc->rcv_dupacks = 0; + return; + } + + tcp_cc_init_congestion (tc); + tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK); - tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK); + /* The first segment MUST be retransmitted */ + tcp_retransmit_first_unacked (tc); - /* In case snd_nxt is still in the past and output tries to - * shove some new bytes */ - tc->snd_nxt = tc->snd_una_max; + /* Post retransmit update cwnd to ssthresh and account for the + * three segments that have left the network and should've been + * buffered at the receiver XXX */ + tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss; - /* XXX need proper RFC6675 support */ - if (tc->sack_sb.last_sacked_bytes && !tcp_in_recovery (tc)) + /* If cwnd allows, send more data */ + if (tcp_opts_sack_permitted (&tc->rcv_opts) + && scoreboard_first_hole (&tc->sack_sb)) { - tcp_fast_retransmit (tc); + scoreboard_init_high_rxt (&tc->sack_sb); + tcp_fast_retransmit_sack (tc); } else { - /* Retransmit first unacked segment */ - tcp_retransmit_first_unacked (tc); + tcp_fast_retransmit_no_sack (tc); } + + return; } - } - else - { - tc->cc_algo->rcv_ack (tc); - tc->tsecr_last_ack = tc->opt.tsecr; - tc->rcv_dupacks = 0; - if (tcp_in_recovery (tc)) + else if (!tc->bytes_acked + || (tc->bytes_acked && !tcp_in_cong_recovery (tc))) { - bytes_advanced = tc->bytes_acked + tc->sack_sb.snd_una_adv; - tc->rtx_bytes -= clib_min (bytes_advanced, tc->rtx_bytes); - tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX); - if (seq_geq (tc->snd_una, tc->snd_congestion)) - { - tc->rtx_bytes = 0; - tcp_recovery_off (tc); - } + tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK); + return; } + else + goto partial_ack; } -} -static void -tcp_cc_rcv_dupack 
(tcp_connection_t * tc, u32 ack) -{ -// ASSERT (seq_geq(tc->snd_una, ack)); +partial_ack_test: + + if (!tc->bytes_acked) + return; + +partial_ack: + /* + * Legitimate ACK. 1) See if we can exit recovery + */ + /* XXX limit this only to first partial ack? */ + tcp_retransmit_timer_update (tc); - tc->rcv_dupacks++; - if (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD) + if (seq_geq (tc->snd_una, tc->snd_congestion)) { - /* RFC6582 NewReno heuristic to avoid multiple fast retransmits */ - if (tc->opt.tsecr != tc->tsecr_last_ack) - { - tc->rcv_dupacks = 0; - return; - } + /* If spurious return, we've already updated everything */ + if (tcp_cc_recover (tc)) + return; + + tc->snd_nxt = tc->snd_una_max; - tcp_fastrecovery_on (tc); + /* Treat as congestion avoidance ack */ + tc->cc_algo->rcv_ack (tc); + tc->tsecr_last_ack = tc->rcv_opts.tsecr; + return; + } + + /* + * Legitimate ACK. 2) If PARTIAL ACK try to retransmit + */ + TCP_EVT_DBG (TCP_EVT_CC_PACK, tc); + + /* RFC6675: If the incoming ACK is a cumulative acknowledgment, + * reset dupacks to 0 */ + tc->rcv_dupacks = 0; - /* Handle congestion and dupack */ - tcp_cc_congestion (tc); - tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK); + tcp_retransmit_first_unacked (tc); - tcp_fast_retransmit (tc); + /* Post RTO timeout don't try anything fancy */ + if (tcp_in_recovery (tc)) + return; - /* Post retransmit update cwnd to ssthresh and account for the - * three segments that have left the network and should've been - * buffered at the receiver */ - tc->cwnd = tc->ssthresh + TCP_DUPACK_THRESHOLD * tc->snd_mss; + /* Remove retransmitted bytes that have been delivered */ + if (tc->sack_sb.last_bytes_delivered + && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) + { + /* If we have sacks and we haven't gotten an ack beyond high_rxt, + * remove sacked bytes delivered */ + tc->snd_rxt_bytes -= tc->sack_sb.last_bytes_delivered; } - else if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD) + else { - ASSERT (tcp_in_fastrecovery (tc)); - - tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK); + /* Either all retransmitted holes have been acked, or we're + * "in the blind" and retransmitting segment by segment */ + tc->snd_rxt_bytes = 0; } + + tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK); + + /* + * Since this was a partial ack, try to retransmit some more data + */ + tcp_fast_retransmit (tc); } void @@ -862,14 +1144,18 @@ tcp_cc_init (tcp_connection_t * tc) tc->cc_algo->init (tc); } +/** + * Process incoming ACK + */ static int tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, tcp_header_t * th, u32 * next, u32 * error) { - u32 new_snd_wnd; + u32 prev_snd_wnd, prev_snd_una; + u8 is_dack; /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */ - if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)) + if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt))) { /* If we have outstanding data and this is within the window, accept it, * probably retransmit has timed out. 
Otherwise ACK segment and then @@ -892,7 +1178,7 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, } /* If old ACK, probably it's an old dupack */ - if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)) + if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))) { *error = TCP_ERROR_ACK_OLD; TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1, @@ -900,54 +1186,50 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD) { TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc); - tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number); + tcp_cc_handle_event (tc, 1); } /* Don't drop yet */ return 0; } - if (tcp_opts_sack_permitted (&tc->opt)) - tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number); - - new_snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale; - - if (tcp_ack_is_dupack (tc, b, new_snd_wnd)) - { - TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1); - tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number); - *error = TCP_ERROR_ACK_DUP; - return -1; - } - /* - * Valid ACK + * Looks okay, process feedback */ - tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una; - tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv; + TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc); + + if (tcp_opts_sack_permitted (&tc->rcv_opts)) + tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number); - /* Dequeue ACKed data and update RTT */ - tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number); + prev_snd_wnd = tc->snd_wnd; + prev_snd_una = tc->snd_una; tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number, - vnet_buffer (b)->tcp.ack_number, new_snd_wnd); + vnet_buffer (b)->tcp.ack_number, + clib_net_to_host_u16 (th->window) << tc->snd_wscale); + tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una; + tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv; + tcp_validate_txf_size (tc, tc->bytes_acked); - /* If some of our sent bytes have been acked, update cc and retransmit - * timer. */ if (tc->bytes_acked) - { - TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc); + tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number); - /* Updates congestion control (slow start/congestion avoidance) */ - tcp_cc_rcv_ack (tc, b); + /* + * Check if we have congestion event + */ - /* If everything has been acked, stop retransmit timer - * otherwise update. */ - if (tc->snd_una == tc->snd_una_max) - tcp_retransmit_timer_reset (tc); - else - tcp_retransmit_timer_update (tc); + if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack)) + { + tcp_cc_handle_event (tc, is_dack); + *error = TCP_ERROR_ACK_DUP; + TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1); + return vnet_buffer (b)->tcp.data_len ? 
0 : -1; } + /* + * Update congestion control (slow start/congestion avoidance) + */ + tcp_cc_update (tc, b); + return 0; } @@ -1059,7 +1341,7 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, } /* Update SACK list if need be */ - if (tcp_opts_sack_permitted (&tc->opt)) + if (tcp_opts_sack_permitted (&tc->rcv_opts)) { /* Remove SACK blocks that have been delivered */ tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt); @@ -1097,7 +1379,7 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len); /* Update SACK list if in use */ - if (tcp_opts_sack_permitted (&tc->opt)) + if (tcp_opts_sack_permitted (&tc->rcv_opts)) { ooo_segment_t *newest; u32 start, end; @@ -1294,7 +1576,6 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; @@ -1321,7 +1602,6 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } th0 = tcp_buffer_hdr (b0); - is_fin = (th0->flags & TCP_FLAG_FIN) != 0; /* SYNs, FINs and data consume sequence numbers */ @@ -1387,7 +1667,6 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, errors = session_manager_flush_enqueue_events (my_thread_index); tcp_established_inc_counter (vm, is_ip4, TCP_ERROR_EVENT_FIFO_FULL, errors); - return from_frame->n_vectors; } @@ -1582,17 +1861,17 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, new_tc0->irs = seq0; /* Parse options */ - if (tcp_options_parse (tcp0, &new_tc0->opt)) + if (tcp_options_parse (tcp0, &new_tc0->rcv_opts)) goto drop; - if (tcp_opts_tstamp (&new_tc0->opt)) + if (tcp_opts_tstamp (&new_tc0->rcv_opts)) { - new_tc0->tsval_recent = new_tc0->opt.tsval; + new_tc0->tsval_recent = new_tc0->rcv_opts.tsval; new_tc0->tsval_recent_age = tcp_time_now (); } - if (tcp_opts_wscale (&new_tc0->opt)) - new_tc0->snd_wscale = new_tc0->opt.wscale; + if (tcp_opts_wscale (&new_tc0->rcv_opts)) + new_tc0->snd_wscale = new_tc0->rcv_opts.wscale; /* No scaling */ new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window); @@ -1845,7 +2124,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Initialize session variables */ tc0->snd_una = vnet_buffer (b0)->tcp.ack_number; tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window) - << tc0->opt.wscale; + << tc0->rcv_opts.wscale; tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; @@ -1903,13 +2182,21 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, break; case TCP_STATE_LAST_ACK: - /* The only thing that can arrive in this state is an + /* The only thing that [should] arrive in this state is an * acknowledgment of our FIN. If our FIN is now acknowledged, * delete the TCB, enter the CLOSED state, and return. */ if (!tcp_rcv_ack_is_acceptable (tc0, b0)) goto drop; + /* Apparently our FIN was lost */ + if (tcp_fin (tcp0)) + { + /* Don't "make" fin since that increments snd_nxt */ + tcp_send_fin (tc0); + goto drop; + } + tc0->state = TCP_STATE_CLOSED; /* Don't delete the connection/session yet. Instead, wait a @@ -1929,8 +2216,15 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, * retransmission of the remote FIN. Acknowledge it, and restart * the 2 MSL timeout. 
*/ - /* TODO */ + if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) + goto drop; + + tcp_make_ack (tc0, b0); + tcp_timer_reset (tc0, TCP_TIMER_WAITCLOSE); + tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); + goto drop; + break; default: ASSERT (0); @@ -2194,7 +2488,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto drop; } - if (tcp_options_parse (th0, &child0->opt)) + if (tcp_options_parse (th0, &child0->rcv_opts)) { goto drop; } @@ -2205,14 +2499,14 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK} * segments are used to initialize PAWS. */ - if (tcp_opts_tstamp (&child0->opt)) + if (tcp_opts_tstamp (&child0->rcv_opts)) { - child0->tsval_recent = child0->opt.tsval; + child0->tsval_recent = child0->rcv_opts.tsval; child0->tsval_recent_age = tcp_time_now (); } - if (tcp_opts_wscale (&child0->opt)) - child0->snd_wscale = child0->opt.wscale; + if (tcp_opts_wscale (&child0->rcv_opts)) + child0->snd_wscale = child0->rcv_opts.wscale; /* No scaling */ child0->snd_wnd = clib_net_to_host_u16 (th0->window); @@ -2477,7 +2771,6 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_add_trace (vm, node, b0, sizeof (*t0)); tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4); } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } @@ -2600,7 +2893,13 @@ do { \ _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, + TCP_ERROR_NONE); _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, + TCP_ERROR_NONE); _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED); _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); #undef _ diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c index 3525f4e5..c66250e4 100644 --- a/src/vnet/tcp/tcp_newreno.c +++ b/src/vnet/tcp/tcp_newreno.c @@ -51,9 +51,23 @@ newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type) } else if (ack_type == TCP_CC_PARTIALACK) { - tc->cwnd -= tc->bytes_acked; - if (tc->bytes_acked > tc->snd_mss) - tc->bytes_acked += tc->snd_mss; + /* RFC 6582 Sec. 3.2 */ + if (!tcp_opts_sack_permitted (&tc->rcv_opts)) + { + /* Deflate the congestion window by the amount of new data + * acknowledged by the Cumulative Acknowledgment field. + * If the partial ACK acknowledges at least one SMSS of new data, + * then add back SMSS bytes to the congestion window. This + * artificially inflates the congestion window in order to reflect + * the additional segment that has left the network. This "partial + * window deflation" attempts to ensure that, when fast recovery + * eventually ends, approximately ssthresh amount of data will be + * outstanding in the network.*/ + tc->cwnd = (tc->cwnd > tc->bytes_acked) ? 
+ tc->cwnd - tc->bytes_acked : 0; + if (tc->bytes_acked > tc->snd_mss) + tc->cwnd += tc->snd_mss; + } } } diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 49fd6bef..47c94e6d 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -136,10 +136,10 @@ tcp_update_rcv_wnd (tcp_connection_t * tc) * Figure out how much space we have available */ available_space = stream_session_max_rx_enqueue (&tc->connection); - max_fifo = stream_session_fifo_size (&tc->connection); + max_fifo = stream_session_rx_fifo_size (&tc->connection); - ASSERT (tc->opt.mss < max_fifo); - if (available_space < tc->opt.mss && available_space < max_fifo >> 3) + ASSERT (tc->rcv_opts.mss < max_fifo); + if (available_space < tc->rcv_opts.mss && available_space < max_fifo >> 3) available_space = 0; /* @@ -276,8 +276,11 @@ tcp_make_syn_options (tcp_options_t * opts, u8 wnd_scale) opts->tsecr = 0; len += TCP_OPTION_LEN_TIMESTAMP; - opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED; - len += TCP_OPTION_LEN_SACK_PERMITTED; + if (TCP_USE_SACKS) + { + opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED; + len += TCP_OPTION_LEN_SACK_PERMITTED; + } /* Align to needed boundary */ len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN; @@ -293,14 +296,14 @@ tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts) opts->mss = tc->mss; len += TCP_OPTION_LEN_MSS; - if (tcp_opts_wscale (&tc->opt)) + if (tcp_opts_wscale (&tc->rcv_opts)) { opts->flags |= TCP_OPTS_FLAG_WSCALE; opts->wscale = tc->rcv_wscale; len += TCP_OPTION_LEN_WINDOW_SCALE; } - if (tcp_opts_tstamp (&tc->opt)) + if (tcp_opts_tstamp (&tc->rcv_opts)) { opts->flags |= TCP_OPTS_FLAG_TSTAMP; opts->tsval = tcp_time_now (); @@ -308,7 +311,7 @@ tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts) len += TCP_OPTION_LEN_TIMESTAMP; } - if (tcp_opts_sack_permitted (&tc->opt)) + if (tcp_opts_sack_permitted (&tc->rcv_opts)) { opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED; len += TCP_OPTION_LEN_SACK_PERMITTED; @@ -326,14 +329,14 @@ tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts) opts->flags = 0; - if (tcp_opts_tstamp (&tc->opt)) + if (tcp_opts_tstamp (&tc->rcv_opts)) { opts->flags |= TCP_OPTS_FLAG_TSTAMP; opts->tsval = tcp_time_now (); opts->tsecr = tc->tsval_recent; len += TCP_OPTION_LEN_TIMESTAMP; } - if (tcp_opts_sack_permitted (&tc->opt)) + if (tcp_opts_sack_permitted (&tc->rcv_opts)) { if (vec_len (tc->snd_sacks)) { @@ -395,7 +398,7 @@ tcp_update_snd_mss (tcp_connection_t * tc) tcp_make_options (tc, &tc->snd_opts, TCP_STATE_ESTABLISHED); /* XXX check if MTU has been updated */ - tc->snd_mss = clib_min (tc->mss, tc->opt.mss) - tc->snd_opts_len; + tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len; ASSERT (tc->snd_mss > 0); } @@ -406,21 +409,21 @@ tcp_init_mss (tcp_connection_t * tc) tcp_update_rcv_mss (tc); /* TODO cache mss and consider PMTU discovery */ - tc->snd_mss = clib_min (tc->opt.mss, tc->mss); + tc->snd_mss = clib_min (tc->rcv_opts.mss, tc->mss); if (tc->snd_mss < 45) { clib_warning ("snd mss is 0"); /* Assume that at least the min default mss works */ tc->snd_mss = default_min_mss; - tc->opt.mss = default_min_mss; + tc->rcv_opts.mss = default_min_mss; } /* We should have enough space for 40 bytes of options */ ASSERT (tc->snd_mss > 45); /* If we use timestamp option, account for it */ - if (tcp_opts_tstamp (&tc->opt)) + if (tcp_opts_tstamp (&tc->rcv_opts)) tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP; } @@ -879,6 +882,7 @@ tcp_send_fin (tcp_connection_t * tc) tcp_make_fin 
(tc, b); tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); tc->flags |= TCP_CONN_FINSNT; + tcp_retransmit_timer_force_update (tc); TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc); } @@ -919,10 +923,7 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, if (compute_opts) tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state); - /* Write pre-computed options */ tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t); - - /* Get rcv window to advertise */ advertise_wnd = tcp_window_to_advertise (tc, next_state); flags = tcp_make_state_flags (next_state); @@ -930,26 +931,25 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt, tc->rcv_nxt, tcp_hdr_opts_len, flags, advertise_wnd); - opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts); ASSERT (opts_write_len == tc->snd_opts_len); - - /* Tag the buffer with the connection index */ vnet_buffer (b)->tcp.connection_index = tc->c_c_index; + /* + * Update connection variables + */ + tc->snd_nxt += data_len; tc->rcv_las = tc->rcv_nxt; /* TODO this is updated in output as well ... */ - if (tc->snd_nxt > tc->snd_una_max) - tc->snd_una_max = tc->snd_nxt; - - if (tc->rtt_ts == 0) + if (seq_gt (tc->snd_nxt, tc->snd_una_max)) { - tc->rtt_ts = tcp_time_now (); - tc->rtt_seq = tc->snd_nxt; + tc->snd_una_max = tc->snd_nxt; + tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una); } + TCP_EVT_DBG (TCP_EVT_PKTIZE, tc); } @@ -987,13 +987,14 @@ tcp_timer_delack_handler (u32 index) * * @return the number of bytes in the segment or 0 if there's nothing to * retransmit - * */ + */ u32 tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, u32 offset, u32 max_bytes) { vlib_main_t *vm = vlib_get_main (); - u32 n_bytes = 0; + int n_bytes = 0; + u32 start; tcp_reuse_buffer (vm, b); @@ -1001,15 +1002,16 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, ASSERT (max_bytes != 0); max_bytes = clib_min (tc->snd_mss, max_bytes); + start = tc->snd_una + offset; /* Start is beyond snd_congestion */ - if (seq_geq (tc->snd_una + offset, tc->snd_congestion)) + if (seq_geq (start, tc->snd_congestion)) goto done; /* Don't overshoot snd_congestion */ - if (seq_gt (tc->snd_nxt + max_bytes, tc->snd_congestion)) + if (seq_gt (start + max_bytes, tc->snd_congestion)) { - max_bytes = tc->snd_congestion - tc->snd_nxt; + max_bytes = tc->snd_congestion - start; if (max_bytes == 0) goto done; } @@ -1021,15 +1023,12 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, n_bytes = stream_session_peek_bytes (&tc->connection, vlib_buffer_get_current (b), offset, max_bytes); - ASSERT (n_bytes != 0); + ASSERT (n_bytes > 0); b->current_length = n_bytes; tcp_push_hdr_i (tc, b, tc->state, 0); - /* Don't count multiple retransmits of the same segment */ - if (tc->rto_boff > 1) - goto done; - - tc->rtx_bytes += n_bytes; + if (tcp_in_fastrecovery (tc)) + tc->snd_rxt_bytes += n_bytes; done: TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes); @@ -1042,18 +1041,15 @@ done: static void tcp_rtx_timeout_cc (tcp_connection_t * tc) { + tc->prev_ssthresh = tc->ssthresh; + tc->prev_cwnd = tc->cwnd; + /* Cleanly recover cc (also clears up fast retransmit) */ if (tcp_in_fastrecovery (tc)) - { - tcp_cc_recover (tc); - } - else - { - tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss); - } + tcp_cc_fastrecovery_exit (tc); /* Start again from the beginning */ - + tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss); 
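+ /* Loss response as in RFC 5681: ssthresh = max (FlightSize / 2, 2 * SMSS),
+  * with cwnd dropped back to the loss window on the next line (assumed here
+  * to be one SMSS). Illustrative numbers: snd_mss = 1460 and ten segments
+  * in flight give ssthresh = 7300 and cwnd = 1460. */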
tc->cwnd = tcp_loss_wnd (tc); tc->snd_congestion = tc->snd_una_max; tcp_recovery_on (tc); @@ -1081,18 +1077,31 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) /* Make sure timer handle is set to invalid */ tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID; + if (!tcp_in_recovery (tc) && tc->rto_boff > 0 + && tc->state >= TCP_STATE_ESTABLISHED) + { + tc->rto_boff = 0; + tcp_update_rto (tc); + } + /* Increment RTO backoff (also equal to number of retries) */ tc->rto_boff += 1; /* Go back to first un-acked byte */ tc->snd_nxt = tc->snd_una; - /* Get buffer */ tcp_get_free_buffer_index (tm, &bi); b = vlib_get_buffer (vm, bi); if (tc->state >= TCP_STATE_ESTABLISHED) { + /* Lost FIN, retransmit and return */ + if (tc->flags & TCP_CONN_FINSNT) + { + tcp_send_fin (tc); + return; + } + /* First retransmit timeout */ if (tc->rto_boff == 1) tcp_rtx_timeout_cc (tc); @@ -1102,24 +1111,30 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1); - /* Send one segment. No fancy recovery for now! */ + /* Send one segment */ n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, tc->snd_mss); + /* TODO be less aggressive about this */ scoreboard_clear (&tc->sack_sb); if (n_bytes == 0) { clib_warning ("could not retransmit anything"); + clib_warning ("%U", format_tcp_connection, tc, 2); + /* Try again eventually */ tcp_retransmit_timer_set (tc); + ASSERT (0 || (tc->rto_boff > 1 + && tc->snd_una == tc->snd_congestion)); return; } + + /* For first retransmit, record timestamp (Eifel detection RFC3522) */ + if (tc->rto_boff == 1) + tc->snd_rxt_ts = tcp_time_now (); } - else + /* Retransmit for SYN/SYNACK */ + else if (tc->state == TCP_STATE_SYN_RCVD || tc->state == TCP_STATE_SYN_SENT) { - /* Retransmit for SYN/SYNACK */ - ASSERT (tc->state == TCP_STATE_SYN_RCVD - || tc->state == TCP_STATE_SYN_SENT); - /* Try without increasing RTO a number of times. 
If this fails, * start growing RTO exponentially */ if (tc->rto_boff > TCP_RTO_SYN_RETRIES) @@ -1132,6 +1147,12 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) /* Account for the SYN */ tc->snd_nxt += 1; } + else + { + ASSERT (tc->state == TCP_STATE_CLOSED); + clib_warning ("connection closed ..."); + return; + } if (!is_syn) { @@ -1180,7 +1201,8 @@ tcp_timer_persist_handler (u32 index) u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; vlib_buffer_t *b; - u32 bi, n_bytes; + u32 bi, old_snd_nxt; + int n_bytes = 0; tc = tcp_connection_get_if_valid (index, thread_index); @@ -1202,13 +1224,15 @@ tcp_timer_persist_handler (u32 index) /* Try to force the first unsent segment */ tcp_get_free_buffer_index (tm, &bi); b = vlib_get_buffer (vm, bi); + + tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una); tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state); n_bytes = stream_session_peek_bytes (&tc->connection, vlib_buffer_get_current (b), tc->snd_una_max - tc->snd_una, tc->snd_mss); /* Nothing to send */ - if (n_bytes == 0) + if (n_bytes <= 0) { clib_warning ("persist found nothing to send"); tcp_return_buffer (tm); @@ -1216,7 +1240,13 @@ tcp_timer_persist_handler (u32 index) } b->current_length = n_bytes; + ASSERT (tc->snd_nxt == tc->snd_una_max || tc->rto_boff > 1 + || tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)); + + /* Allow updating of snd_una_max but don't update snd_nxt */ + old_snd_nxt = tc->snd_nxt; tcp_push_hdr_i (tc, b, tc->state, 0); + tc->snd_nxt = old_snd_nxt; tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); /* Re-enable persist timer */ @@ -1232,8 +1262,9 @@ tcp_retransmit_first_unacked (tcp_connection_t * tc) tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = vlib_get_main (); vlib_buffer_t *b; - u32 bi, n_bytes; + u32 bi, n_bytes, old_snd_nxt; + old_snd_nxt = tc->snd_nxt; tc->snd_nxt = tc->snd_una; /* Get buffer */ @@ -1244,75 +1275,117 @@ tcp_retransmit_first_unacked (tcp_connection_t * tc) n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, tc->snd_mss); if (n_bytes == 0) - goto done; + { + tcp_return_buffer (tm); + goto done; + } tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); done: - tc->snd_nxt = tc->snd_una_max; + tc->snd_nxt = old_snd_nxt; } -sack_scoreboard_hole_t * -scoreboard_first_rtx_hole (sack_scoreboard_t * sb) +/** + * Do fast retransmit with SACKs + */ +void +tcp_fast_retransmit_sack (tcp_connection_t * tc) { - sack_scoreboard_hole_t *hole = 0; - -// hole = scoreboard_first_hole (&tc->sack_sb); -// if (hole) -// { -// -// offset = hole->start - tc->snd_una; -// hole_size = hole->end - hole->start; -// -// ASSERT(hole_size); -// -// if (hole_size < max_bytes) -// max_bytes = hole_size; -// } - return hole; + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_main_t *vm = vlib_get_main (); + u32 n_written = 0, offset = 0, max_bytes; + vlib_buffer_t *b; + sack_scoreboard_hole_t *hole; + sack_scoreboard_t *sb; + u32 bi, old_snd_nxt; + int snd_space; + u8 snd_limited = 0, can_rescue = 0; + + ASSERT (tcp_in_fastrecovery (tc)); + TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0); + + old_snd_nxt = tc->snd_nxt; + sb = &tc->sack_sb; + snd_space = tcp_available_snd_space (tc); + + hole = scoreboard_get_hole (sb, sb->cur_rxt_hole); + while (hole && snd_space > 0) + { + tcp_get_free_buffer_index (tm, &bi); + b = vlib_get_buffer (vm, bi); + + hole = scoreboard_next_rxt_hole (sb, hole, + tcp_fastrecovery_sent_1_smss (tc), + &can_rescue, &snd_limited); + if (!hole) + { + if (!can_rescue || !(seq_lt (sb->rescue_rxt, tc->snd_una) + || seq_gt 
(sb->rescue_rxt, + tc->snd_congestion))) + break; + + /* If rescue rxt undefined or less than snd_una then one segment of + * up to SMSS octets that MUST include the highest outstanding + * unSACKed sequence number SHOULD be returned, and RescueRxt set to + * RecoveryPoint. HighRxt MUST NOT be updated. + */ + max_bytes = clib_min (tc->snd_mss, snd_space); + offset = tc->snd_congestion - tc->snd_una - max_bytes; + sb->rescue_rxt = tc->snd_congestion; + tc->snd_nxt = tc->snd_una + offset; + tcp_prepare_retransmit_segment (tc, b, offset, max_bytes); + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + break; + } + + max_bytes = snd_limited ? tc->snd_mss : hole->end - sb->high_rxt; + offset = sb->high_rxt - tc->snd_una; + tc->snd_nxt = tc->snd_una + offset; + n_written = tcp_prepare_retransmit_segment (tc, b, offset, max_bytes); + + /* Nothing left to retransmit */ + if (n_written == 0) + { + tcp_return_buffer (tm); + break; + } + + sb->high_rxt += n_written; + tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + snd_space -= n_written; + } + + /* If window allows, send 1 SMSS of new data */ + tc->snd_nxt = old_snd_nxt; } /** - * Do fast retransmit. + * Fast retransmit without SACK info */ void -tcp_fast_retransmit (tcp_connection_t * tc) +tcp_fast_retransmit_no_sack (tcp_connection_t * tc) { tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = vlib_get_main (); - u32 bi; + u32 n_written = 0, offset = 0, bi, old_snd_nxt; int snd_space; - u32 n_written = 0, offset = 0; vlib_buffer_t *b; - u8 use_sacks = 0; ASSERT (tcp_in_fastrecovery (tc)); + TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0); /* Start resending from first un-acked segment */ + old_snd_nxt = tc->snd_nxt; tc->snd_nxt = tc->snd_una; - snd_space = tcp_available_snd_space (tc); - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0); - - /* If we have SACKs use them */ - if (tcp_opts_sack_permitted (&tc->opt) - && scoreboard_first_hole (&tc->sack_sb)) - use_sacks = 0; while (snd_space > 0) { tcp_get_free_buffer_index (tm, &bi); b = vlib_get_buffer (vm, bi); - if (use_sacks) - { - scoreboard_first_rtx_hole (&tc->sack_sb); - } - else - { - offset += n_written; - } - + offset += n_written; n_written = tcp_prepare_retransmit_segment (tc, b, offset, snd_space); /* Nothing left to retransmit */ @@ -1326,9 +1399,21 @@ tcp_fast_retransmit (tcp_connection_t * tc) snd_space -= n_written; } - /* If window allows, send 1 SMSS of new data */ - if (seq_lt (tc->snd_nxt, tc->snd_congestion)) - tc->snd_nxt = tc->snd_congestion; + /* Restore snd_nxt. 
If window allows, send 1 SMSS of new data */ + tc->snd_nxt = old_snd_nxt; +} + +/** + * Do fast retransmit + */ +void +tcp_fast_retransmit (tcp_connection_t * tc) +{ + if (tcp_opts_sack_permitted (&tc->rcv_opts) + && scoreboard_first_hole (&tc->sack_sb)) + tcp_fast_retransmit_sack (tc); + else + tcp_fast_retransmit_no_sack (tc); } always_inline u32 @@ -1544,6 +1629,12 @@ tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b) tc = (tcp_connection_t *) tconn; tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, 0); + + if (tc->rtt_ts == 0) + { + tc->rtt_ts = tcp_time_now (); + tc->rtt_seq = tc->snd_nxt; + } return 0; } diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 2af38484..3f8afa40 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -54,7 +54,7 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) tc->snd_una = 0; tc->snd_una_max = 1000; tc->snd_nxt = 1000; - tc->opt.flags |= TCP_OPTS_FLAG_SACK; + tc->rcv_opts.flags |= TCP_OPTS_FLAG_SACK; scoreboard_init (&tc->sack_sb); for (i = 0; i < 1000 / 100; i++) @@ -70,9 +70,9 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < 1000 / 200; i++) { - vec_add1 (tc->opt.sacks, sacks[i * 2]); + vec_add1 (tc->rcv_opts.sacks, sacks[i * 2]); } - tc->opt.n_sack_blocks = vec_len (tc->opt.sacks); + tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks); tcp_rcv_sacks (tc, 0); if (verbose) @@ -93,18 +93,17 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv %u", sb->snd_una_adv); TCP_TEST ((sb->last_sacked_bytes == 400), "last sacked bytes %d", sb->last_sacked_bytes); - TCP_TEST ((sb->max_byte_sacked == 900), - "max byte sacked %u", sb->max_byte_sacked); + TCP_TEST ((sb->high_sacked == 900), "max byte sacked %u", sb->high_sacked); /* * Inject odd blocks */ - vec_reset_length (tc->opt.sacks); + vec_reset_length (tc->rcv_opts.sacks); for (i = 0; i < 1000 / 200; i++) { - vec_add1 (tc->opt.sacks, sacks[i * 2 + 1]); + vec_add1 (tc->rcv_opts.sacks, sacks[i * 2 + 1]); } - tc->opt.n_sack_blocks = vec_len (tc->opt.sacks); + tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks); tcp_rcv_sacks (tc, 0); if (verbose) @@ -118,8 +117,7 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) "first hole start %u end %u", hole->start, hole->end); TCP_TEST ((sb->sacked_bytes == 900), "sacked bytes %d", sb->sacked_bytes); TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv %u", sb->snd_una_adv); - TCP_TEST ((sb->max_byte_sacked == 1000), - "max sacked byte %u", sb->max_byte_sacked); + TCP_TEST ((sb->high_sacked == 1000), "max sacked byte %u", sb->high_sacked); TCP_TEST ((sb->last_sacked_bytes == 500), "last sacked bytes %d", sb->last_sacked_bytes); @@ -135,8 +133,7 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) "scoreboard has %d elements", pool_elts (sb->holes)); TCP_TEST ((sb->snd_una_adv == 900), "snd_una_adv after ack %u", sb->snd_una_adv); - TCP_TEST ((sb->max_byte_sacked == 1000), - "max sacked byte %u", sb->max_byte_sacked); + TCP_TEST ((sb->high_sacked == 1000), "max sacked byte %u", sb->high_sacked); TCP_TEST ((sb->sacked_bytes == 0), "sacked bytes %d", sb->sacked_bytes); TCP_TEST ((sb->last_sacked_bytes == 0), "last sacked bytes %d", sb->last_sacked_bytes); @@ -145,11 +142,11 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) * Add new block */ - vec_reset_length (tc->opt.sacks); + vec_reset_length (tc->rcv_opts.sacks); block.start = 1200; block.end = 1300; - vec_add1 (tc->opt.sacks, 
block); + vec_add1 (tc->rcv_opts.sacks, block); if (verbose) vlib_cli_output (vm, "add [1200, 1300]:\n%U", format_tcp_scoreboard, sb); @@ -171,8 +168,7 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) "first hole start %u end %u", hole->start, hole->end); TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv after ack %u", sb->snd_una_adv); - TCP_TEST ((sb->max_byte_sacked == 1300), - "max sacked byte %u", sb->max_byte_sacked); + TCP_TEST ((sb->high_sacked == 1300), "max sacked byte %u", sb->high_sacked); hole = scoreboard_last_hole (sb); TCP_TEST ((hole->start == 1300 && hole->end == 1500), "last hole start %u end %u", hole->start, hole->end); @@ -182,7 +178,7 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) * Ack first hole */ - vec_reset_length (tc->opt.sacks); + vec_reset_length (tc->rcv_opts.sacks); tcp_rcv_sacks (tc, 1200); if (verbose) @@ -196,8 +192,16 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) "scoreboard has %d elements", pool_elts (sb->holes)); /* - * Remove all + * Add some more blocks and then remove all */ + vec_reset_length (tc->rcv_opts.sacks); + for (i = 0; i < 5; i++) + { + block.start = i * 100 + 1200; + block.end = (i + 1) * 100 + 1200; + vec_add1 (tc->rcv_opts.sacks, block); + } + tcp_rcv_sacks (tc, 1900); scoreboard_clear (sb); if (verbose) @@ -205,6 +209,9 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) TCP_TEST ((pool_elts (sb->holes) == 0), "number of holes %d", pool_elts (sb->holes)); + TCP_TEST ((sb->head == TCP_INVALID_SACK_HOLE_INDEX), "head %u", sb->head); + TCP_TEST ((sb->tail == TCP_INVALID_SACK_HOLE_INDEX), "tail %u", sb->tail); + /* * Re-inject odd blocks and ack them all */ @@ -214,9 +221,9 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) tc->snd_nxt = 1000; for (i = 0; i < 5; i++) { - vec_add1 (tc->opt.sacks, sacks[i * 2 + 1]); + vec_add1 (tc->rcv_opts.sacks, sacks[i * 2 + 1]); } - tc->opt.n_sack_blocks = vec_len (tc->opt.sacks); + tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks); tcp_rcv_sacks (tc, 0); if (verbose) vlib_cli_output (vm, "sb added odd blocks and ack [0, 950]:\n%U", @@ -740,6 +747,10 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]); } + /* Try to peek beyond the data */ + rv = svm_fifo_peek (f, svm_fifo_max_dequeue (f), vec_len (data), data_buf); + TCP_TEST ((rv == 0), "peeked %u expected 0", rv); + vec_free (data_buf); svm_fifo_free (f); vec_free (test_data); @@ -1239,7 +1250,7 @@ tcp_test_session (vlib_main_t * vm, unformat_input_t * input) tc0->c_thread_index = 0; tc0->c_lcl_ip4.as_u32 = local.as_u32; tc0->c_rmt_ip4.as_u32 = remote.as_u32; - tc0->opt.mss = 1450; + tc0->rcv_opts.mss = 1450; tcp_connection_init_vars (tc0); TCP_EVT_DBG (TCP_EVT_OPEN, tc0); -- cgit 1.2.3-korg From f03a59ab008908f98fd7d1b187a8c0fb78b01add Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Fri, 9 Jun 2017 21:07:32 -0700 Subject: Overall tcp performance improvements (VPP-846) - limit minimum rto per connection - cleanup sack scoreboard - switched svm fifo out-of-order data handling from absolute offsets to relative offsets. 
- improve cwnd handling when using sacks - add cc event debug stats - improved uri tcp test client/server: bugfixes and added half-duplex mode - expanded builtin client/server - updated uri socket client/server code to work in half-duplex - ensure session node unsets fifo event for empty fifo - fix session detach Change-Id: Ia446972340e32a65e0694ee2844355167d0c170d Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 152 +++++++++-------- src/svm/svm_fifo.h | 26 ++- src/svm/svm_fifo_segment.c | 7 +- src/uri/uri_socket_server.c | 25 ++- src/uri/uri_socket_test.c | 93 +++++++---- src/uri/uri_tcp_test.c | 120 ++++++++++---- src/vnet/session/application.c | 2 +- src/vnet/session/node.c | 7 +- src/vnet/session/segment_manager.c | 5 + src/vnet/session/session.c | 25 ++- src/vnet/session/session.h | 4 +- src/vnet/session/session_api.c | 2 +- src/vnet/session/transport.h | 2 + src/vnet/tcp/builtin_client.c | 325 +++++++++++++++++++------------------ src/vnet/tcp/builtin_client.h | 107 +++++------- src/vnet/tcp/builtin_server.c | 114 ++++++++++--- src/vnet/tcp/tcp.c | 22 ++- src/vnet/tcp/tcp.h | 12 +- src/vnet/tcp/tcp_debug.h | 186 +++++++++++++++------ src/vnet/tcp/tcp_input.c | 193 ++++++++++++---------- src/vnet/tcp/tcp_newreno.c | 4 +- src/vnet/tcp/tcp_output.c | 10 +- src/vnet/tcp/tcp_test.c | 70 ++++++-- 23 files changed, 945 insertions(+), 568 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 5c8f244a..6ca437cf 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -15,10 +15,39 @@ #include -#define offset_lt(_a, _b) ((i32)((_a)-(_b)) < 0) -#define offset_leq(_a, _b) ((i32)((_a)-(_b)) <= 0) -#define offset_gt(_a, _b) ((i32)((_a)-(_b)) > 0) -#define offset_geq(_a, _b) ((i32)((_a)-(_b)) >= 0) +static inline u8 +position_lt (svm_fifo_t * f, u32 a, u32 b) +{ + return (ooo_segment_distance_to_tail (f, a) + < ooo_segment_distance_to_tail (f, b)); +} + +static inline u8 +position_leq (svm_fifo_t * f, u32 a, u32 b) +{ + return (ooo_segment_distance_to_tail (f, a) + <= ooo_segment_distance_to_tail (f, b)); +} + +static inline u8 +position_gt (svm_fifo_t * f, u32 a, u32 b) +{ + return (ooo_segment_distance_to_tail (f, a) + > ooo_segment_distance_to_tail (f, b)); +} + +static inline u32 +position_diff (svm_fifo_t * f, u32 posa, u32 posb) +{ + return ooo_segment_distance_to_tail (f, posa) + - ooo_segment_distance_to_tail (f, posb); +} + +static inline u32 +ooo_segment_end_pos (svm_fifo_t * f, ooo_segment_t * s) +{ + return (s->start + s->length) % f->nitems; +} u8 * format_ooo_segment (u8 * s, va_list * args) @@ -145,13 +174,17 @@ static void ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) { ooo_segment_t *s, *new_s, *prev, *next, *it; - u32 new_index, end_offset, s_sof, s_eof, s_index; + u32 new_index, s_end_pos, s_index; + u32 normalized_position, normalized_end_position; + + normalized_position = (f->tail + offset) % f->nitems; + normalized_end_position = (f->tail + offset + length) % f->nitems; - end_offset = offset + length; + f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX) { - s = ooo_segment_new (f, offset, length); + s = ooo_segment_new (f, normalized_position, length); f->ooos_list_head = s - f->ooo_segments; f->ooos_newest = f->ooos_list_head; return; @@ -160,28 +193,26 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) /* Find first segment that starts after new segment */ s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); while (s->next != OOO_SEGMENT_INVALID_INDEX - 
&& offset_leq (ooo_segment_offset (f, s), offset)) + && position_lt (f, s->start, normalized_position)) s = pool_elt_at_index (f->ooo_segments, s->next); /* If we have a previous and we overlap it, use it as starting point */ prev = ooo_segment_get_prev (f, s); - if (prev && offset_leq (offset, ooo_segment_end_offset (f, prev))) + if (prev + && position_leq (f, normalized_position, ooo_segment_end_pos (f, prev))) { s = prev; - prev = ooo_segment_get_prev (f, s); - s_sof = ooo_segment_offset (f, s); - s_eof = ooo_segment_end_offset (f, s); + s_end_pos = ooo_segment_end_pos (f, s); goto merge; } s_index = s - f->ooo_segments; - s_sof = ooo_segment_offset (f, s); - s_eof = ooo_segment_end_offset (f, s); + s_end_pos = ooo_segment_end_pos (f, s); /* No overlap, add before current segment */ - if (offset_lt (end_offset, s_sof)) + if (position_lt (f, normalized_end_position, s->start)) { - new_s = ooo_segment_new (f, offset, length); + new_s = ooo_segment_new (f, normalized_position, length); new_index = new_s - f->ooo_segments; /* Pool might've moved, get segment again */ @@ -198,28 +229,23 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) f->ooos_list_head = new_index; } - new_s->next = s - f->ooo_segments; + new_s->next = s_index; s->prev = new_index; f->ooos_newest = new_index; return; } /* No overlap, add after current segment */ - else if (offset_gt (offset, s_eof)) + else if (position_gt (f, normalized_position, s_end_pos)) { - new_s = ooo_segment_new (f, offset, length); + new_s = ooo_segment_new (f, normalized_position, length); new_index = new_s - f->ooo_segments; /* Pool might've moved, get segment again */ s = pool_elt_at_index (f->ooo_segments, s_index); - if (s->next != OOO_SEGMENT_INVALID_INDEX) - { - new_s->next = s->next; - next = pool_elt_at_index (f->ooo_segments, new_s->next); - next->prev = new_index; - } + ASSERT (s->next == OOO_SEGMENT_INVALID_INDEX); - new_s->prev = s - f->ooo_segments; + new_s->prev = s_index; s->next = new_index; f->ooos_newest = new_index; @@ -233,30 +259,32 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) merge: /* Merge at head */ - if (offset_lt (offset, s_sof)) + if (position_lt (f, normalized_position, s->start)) { - s->start = offset; - s->length = s_eof - ooo_segment_offset (f, s); + s->start = normalized_position; + s->length = position_diff (f, s_end_pos, s->start); } - /* Last but overlapping previous */ - else if (offset_gt (end_offset, s_eof)) + /* Overlapping tail */ + else if (position_gt (f, normalized_end_position, s_end_pos)) { - s->length = end_offset - ooo_segment_offset (f, s); + s->length = position_diff (f, normalized_end_position, s->start); } /* New segment completely covered by current one */ else { /* Do Nothing */ + s = 0; goto done; } /* The new segment's tail may cover multiple smaller ones */ - if (offset_geq (end_offset, s_eof)) + if (position_gt (f, normalized_end_position, s_end_pos)) { /* Remove the completely overlapped segments */ it = (s->next != OOO_SEGMENT_INVALID_INDEX) ? pool_elt_at_index (f->ooo_segments, s->next) : 0; - while (it && offset_leq (ooo_segment_end_offset (f, it), end_offset)) + while (it && position_leq (f, ooo_segment_end_pos (f, it), + normalized_end_position)) { next = (it->next != OOO_SEGMENT_INVALID_INDEX) ? 
pool_elt_at_index (f->ooo_segments, it->next) : 0; @@ -265,17 +293,17 @@ merge: } /* If partial overlap with last, merge */ - if (it && offset_leq (ooo_segment_offset (f, it), end_offset)) + if (it && position_leq (f, it->start, normalized_end_position)) { - s->length = ooo_segment_end_offset (f, it) - - ooo_segment_offset (f, s); + s->length = ooo_segment_end_pos (f, it) - s->start; ooo_segment_del (f, it - f->ooo_segments); } } done: /* Most recently updated segment */ - f->ooos_newest = s - f->ooo_segments; + if (s) + f->ooos_newest = s - f->ooo_segments; } /** @@ -286,32 +314,28 @@ static int ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) { ooo_segment_t *s; - u32 index, bytes = 0, diff; - u32 cursize, norm_start, nitems; - - /* current size has not yet been updated */ - cursize = svm_fifo_max_dequeue (f) + n_bytes_enqueued; - nitems = f->nitems; + u32 index, bytes = 0; + i32 diff; s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); - norm_start = s->start % nitems; - diff = (f->nitems + (i32) (f->tail - norm_start)) % nitems; + diff = (f->tail >= s->start) ? + f->tail - s->start : f->nitems + f->tail - s->start; - if (diff > cursize) + if (diff > n_bytes_enqueued) return 0; /* If last tail update overlaps one/multiple ooo segments, remove them */ - while (0 < diff && diff < cursize) + while (0 <= diff && diff < n_bytes_enqueued) { index = s - f->ooo_segments; /* Segment end is beyond the tail. Advance tail and remove segment */ - if (diff < s->length) + if (s->length > diff) { - f->tail += s->length - diff; - f->tail %= f->nitems; bytes = s->length - diff; + f->tail += bytes; + f->tail %= f->nitems; ooo_segment_del (f, index); break; } @@ -320,8 +344,8 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) if (s->next != OOO_SEGMENT_INVALID_INDEX) { s = pool_elt_at_index (f->ooo_segments, s->next); - norm_start = s->start % nitems; - diff = (f->nitems + (i32) (f->tail - norm_start)) % nitems; + diff = (f->tail >= s->start) ? + f->tail - s->start : f->nitems + f->tail - s->start; ooo_segment_del (f, index); } /* End of search */ @@ -332,18 +356,6 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) } } - /* If tail is adjacent to an ooo segment, 'consume' it */ - if (diff == 0) - { - bytes = ((nitems - cursize) >= s->length) ? s->length : - nitems - cursize; - - f->tail += bytes; - f->tail %= nitems; - - ooo_segment_del (f, s - f->ooo_segments); - } - return bytes; } @@ -355,6 +367,7 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) /* read cursize, which can only increase while we're working */ cursize = svm_fifo_max_dequeue (f); + f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; if (PREDICT_FALSE (cursize == f->nitems)) return -2; /* fifo stuffed */ @@ -424,13 +437,16 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, u8 * copy_from_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; - u32 cursize, nitems; - u32 normalized_offset, offset_from_tail; + u32 cursize, nitems, normalized_offset; + u32 offset_from_tail; + + f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; /* read cursize, which can only increase while we're working */ cursize = svm_fifo_max_dequeue (f); nitems = f->nitems; - normalized_offset = offset % nitems; + + normalized_offset = (f->tail + offset) % nitems; /* Will this request fit? 
*/ offset_from_tail = (nitems + normalized_offset - f->tail) % nitems; diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 9cb93ff4..f32ef41d 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -127,21 +127,37 @@ format_function_t format_svm_fifo; always_inline ooo_segment_t * svm_fifo_newest_ooo_segment (svm_fifo_t * f) { - return f->ooo_segments + f->ooos_newest; + if (f->ooos_newest == OOO_SEGMENT_INVALID_INDEX) + return 0; + return pool_elt_at_index (f->ooo_segments, f->ooos_newest); +} + +always_inline u32 +ooo_segment_distance_to_tail (svm_fifo_t * f, u32 a) +{ + /* Ambiguous. Assumption is that ooo segments don't touch tail */ + if (a == f->tail && f->tail == f->head) + return f->nitems; + + return ((f->nitems + a - f->tail) % f->nitems); } always_inline u32 ooo_segment_offset (svm_fifo_t * f, ooo_segment_t * s) { -// return ((f->nitems + s->fifo_position - f->tail) % f->nitems); - return s->start; + return ooo_segment_distance_to_tail (f, s->start); } always_inline u32 ooo_segment_end_offset (svm_fifo_t * f, ooo_segment_t * s) { -// return ((f->nitems + s->fifo_position + s->length - f->tail) % f->nitems); - return s->start + s->length; + return ooo_segment_distance_to_tail (f, s->start) + s->length; +} + +always_inline u32 +ooo_segment_length (svm_fifo_t * f, ooo_segment_t * s) +{ + return s->length; } always_inline ooo_segment_t * diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index eef2168c..c4ac2352 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -305,14 +305,17 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, /* Remove from active list */ if (f->prev) f->prev->next = f->next; + else + fsh->fifos = f->next; if (f->next) f->next->prev = f->prev; - /* FALLTHROUGH */ + /* Fall through: we add only rx fifos to active pool */ case FIFO_SEGMENT_TX_FREELIST: /* Add to free list */ f->next = fsh->free_fifos[list_index]; + f->prev = 0; fsh->free_fifos[list_index] = f; - /* FALLTHROUGH */ + break; case FIFO_SEGMENT_FREELIST_NONE: break; diff --git a/src/uri/uri_socket_server.c b/src/uri/uri_socket_server.c index 2366f420..4f4c5f30 100644 --- a/src/uri/uri_socket_server.c +++ b/src/uri/uri_socket_server.c @@ -22,6 +22,7 @@ #include #include #include +#include volatile int signal_received; @@ -78,7 +79,10 @@ main (int argc, char *argv[]) struct sockaddr_in serv_addr; struct sockaddr_in client; struct hostent *server; - u8 *rx_buffer = 0; + u8 *rx_buffer = 0, no_echo = 0; + struct timeval start, end; + long rcvd = 0; + double deltat; if (argc > 1 && argc < 3) { @@ -86,8 +90,9 @@ main (int argc, char *argv[]) exit (0); } - if (argc >= 3) + if (argc >= 4) { + no_echo = atoi (argv[3]); portno = atoi (argv[2]); server = gethostbyname (argv[1]); if (server == NULL) @@ -137,7 +142,7 @@ main (int argc, char *argv[]) exit (1); } - vec_validate (rx_buffer, 8999 /* jumbo mtu */ ); + vec_validate (rx_buffer, 128 << 10); if (listen (sockfd, 5 /* backlog */ ) < 0) { @@ -160,6 +165,8 @@ main (int argc, char *argv[]) } fformat (stderr, "Accepted connection from: %s : %d\n", inet_ntoa (client.sin_addr), client.sin_port); + gettimeofday (&start, NULL); + while (1) { n = recv (accfd, rx_buffer, vec_len (rx_buffer), 0 /* flags */ ); @@ -167,6 +174,14 @@ main (int argc, char *argv[]) { /* Graceful exit */ close (accfd); + gettimeofday (&end, NULL); + deltat = (end.tv_sec - start.tv_sec); + deltat += (end.tv_usec - start.tv_usec) / 1000000.0; + clib_warning ("Finished in %.6f", deltat); + clib_warning ("%.4f 
Gbit/second %s", + (((f64) rcvd * 8.0) / deltat / 1e9), + no_echo ? "half" : "full"); + rcvd = 0; break; } if (n < 0) @@ -179,6 +194,10 @@ main (int argc, char *argv[]) if (signal_received) break; + rcvd += n; + if (no_echo) + continue; + sent = send (accfd, rx_buffer, n, 0 /* flags */ ); if (n < 0) { diff --git a/src/uri/uri_socket_test.c b/src/uri/uri_socket_test.c index 9f049bda..5f7084d5 100644 --- a/src/uri/uri_socket_test.c +++ b/src/uri/uri_socket_test.c @@ -19,6 +19,7 @@ #include #include #include +#include int main (int argc, char *argv[]) @@ -26,28 +27,44 @@ main (int argc, char *argv[]) int sockfd, portno, n; struct sockaddr_in serv_addr; struct hostent *server; - u8 *rx_buffer = 0, *tx_buffer = 0; + u8 *rx_buffer = 0, *tx_buffer = 0, no_echo = 0, test_bytes = 0; u32 offset; - int iter, i; - if (0 && argc < 3) + long bytes = 1 << 20, to_send; + int i; + struct timeval start, end; + double deltat; + + if (argc >= 3) { - fformat (stderr, "usage %s hostname port\n", argv[0]); - exit (0); + bytes = ((long) atoi (argv[4])) << 20; + no_echo = atoi (argv[3]); + portno = atoi (argv[2]); + server = gethostbyname (argv[1]); + if (server == NULL) + { + clib_unix_warning ("gethostbyname"); + exit (1); + } + } + else + { + portno = 1234; // atoi(argv[2]); + server = gethostbyname ("6.0.1.1" /* argv[1] */ ); + if (server == NULL) + { + clib_unix_warning ("gethostbyname"); + exit (1); + } } - portno = 1234; // atoi(argv[2]); + to_send = bytes; sockfd = socket (AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { clib_unix_error ("socket"); exit (1); } - server = gethostbyname ("6.0.1.1" /* argv[1] */ ); - if (server == NULL) - { - clib_unix_warning ("gethostbyname"); - exit (1); - } + bzero ((char *) &serv_addr, sizeof (serv_addr)); serv_addr.sin_family = AF_INET; bcopy ((char *) server->h_addr, @@ -59,8 +76,8 @@ main (int argc, char *argv[]) exit (1); } - vec_validate (rx_buffer, 1400); - vec_validate (tx_buffer, 1400); + vec_validate (rx_buffer, 128 << 10); + vec_validate (tx_buffer, 128 << 10); for (i = 0; i < vec_len (tx_buffer); i++) tx_buffer[i] = (i + 1) % 0xff; @@ -75,19 +92,28 @@ main (int argc, char *argv[]) exit (0); } - for (iter = 0; iter < 100000; iter++) + gettimeofday (&start, NULL); + while (bytes > 0) { - if (iter < 99999) + /* + * TX + */ + n = send (sockfd, tx_buffer, vec_len (tx_buffer), 0 /* flags */ ); + if (n != vec_len (tx_buffer)) { - n = send (sockfd, tx_buffer, vec_len (tx_buffer), 0 /* flags */ ); - if (n != vec_len (tx_buffer)) - { - clib_unix_warning ("write"); - exit (0); - } + clib_unix_warning ("write"); + exit (0); } - offset = 0; + bytes -= n; + if (no_echo) + continue; + + /* + * RX + */ + + offset = 0; do { n = recv (sockfd, rx_buffer + offset, @@ -101,18 +127,27 @@ main (int argc, char *argv[]) } while (offset < vec_len (rx_buffer)); - for (i = 0; i < vec_len (rx_buffer); i++) + if (test_bytes) { - if (rx_buffer[i] != tx_buffer[i]) + for (i = 0; i < vec_len (rx_buffer); i++) { - clib_warning ("[%d] read 0x%x not 0x%x", - rx_buffer[i], tx_buffer[i]); - exit (1); + if (rx_buffer[i] != tx_buffer[i]) + { + clib_warning ("[%d] read 0x%x not 0x%x", rx_buffer[i], + tx_buffer[i]); + exit (1); + } } } - } close (sockfd); + gettimeofday (&end, NULL); + + deltat = (end.tv_sec - start.tv_sec); + deltat += (end.tv_usec - start.tv_usec) / 1000000.0; // us to ms + clib_warning ("Finished in %.6f", deltat); + clib_warning ("%.4f Gbit/second %s", (((f64) to_send * 8.0) / deltat / 1e9), + no_echo ? 
"half" : "full"); return 0; } diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index e201a359..d1694cf4 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -46,6 +46,8 @@ typedef struct svm_fifo_t *server_tx_fifo; u64 vpp_session_handle; + u64 bytes_received; + f64 start; } session_t; typedef enum @@ -174,7 +176,7 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) if (utm->state == STATE_FAILED) return -1; if (utm->time_to_stop == 1) - return -1; + return 0; } clib_warning ("timeout waiting for STATE_READY"); return -1; @@ -184,7 +186,7 @@ void application_send_attach (uri_tcp_test_main_t * utm) { vl_api_application_attach_t *bmp; - u32 fifo_size = 3 << 20; + u32 fifo_size = 4 << 20; bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); @@ -343,11 +345,23 @@ vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) mp->segment_size); } +static void +session_print_stats (uri_tcp_test_main_t * utm, session_t * session) +{ + f64 deltat; + u64 bytes; + + deltat = clib_time_now (&utm->clib_time) - session->start; + bytes = utm->i_am_master ? session->bytes_received : utm->bytes_to_send; + fformat (stdout, "Finished in %.6f\n", deltat); + fformat (stdout, "%.4f Gbit/second\n", (bytes * 8.0) / deltat / 1e9); +} + static void vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - session_t *session; + session_t *session = 0; vl_api_disconnect_session_reply_t *rmp; uword *p; int rv = 0; @@ -366,7 +380,7 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rv = -11; } - utm->time_to_stop = 1; +// utm->time_to_stop = 1; rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); @@ -375,6 +389,9 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rmp->retval = rv; rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); + + if (session) + session_print_stats (utm, session); } static void @@ -431,14 +448,19 @@ client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, if (n_read > 0) { bytes -= n_read; - for (i = 0; i < n_read; i++) + if (utm->test_return_packets) { - if (utm->rx_buf[i] != ((utm->client_bytes_received + i) & 0xff)) + for (i = 0; i < n_read; i++) { - clib_warning ("error at byte %lld, 0x%x not 0x%x", - utm->client_bytes_received + i, - utm->rx_buf[i], - ((utm->client_bytes_received + i) & 0xff)); + if (utm->rx_buf[i] + != ((utm->client_bytes_received + i) & 0xff)) + { + clib_warning ("error at byte %lld, 0x%x not 0x%x", + utm->client_bytes_received + i, + utm->rx_buf[i], + ((utm->client_bytes_received + + i) & 0xff)); + } } } utm->client_bytes_received += n_read; @@ -545,6 +567,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session->server_rx_fifo = rx_fifo; session->server_tx_fifo = tx_fifo; session->vpp_session_handle = mp->handle; + session->start = clib_time_now (&utm->clib_time); /* Save handle */ utm->connected_session_index = session_index; @@ -571,7 +594,7 @@ send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, u64 bytes_sent = 0; int test_buf_offset = 0; u32 bytes_to_snd; - u32 queue_max_chunk = 64 << 10, actual_write; + u32 queue_max_chunk = 128 << 10, actual_write; session_fifo_event_t evt; static int serial_number = 0; int rv; @@ -582,8 +605,8 @@ send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, while (bytes_to_snd > 0) { - actual_write = - bytes_to_snd > 
queue_max_chunk ? queue_max_chunk : bytes_to_snd; + actual_write = (bytes_to_snd > queue_max_chunk) ? + queue_max_chunk : bytes_to_snd; rv = svm_fifo_enqueue_nowait (tx_fifo, actual_write, test_data + test_buf_offset); @@ -635,9 +658,9 @@ client_send_data (uri_tcp_test_main_t * utm) if (leftover) send_test_chunk (utm, tx_fifo, mypid, leftover); - if (utm->test_return_packets) + if (!utm->drop_packets) { - f64 timeout = clib_time_now (&utm->clib_time) + 2; + f64 timeout = clib_time_now (&utm->clib_time) + 10; /* Wait for the outstanding packets */ while (utm->client_bytes_received < @@ -698,6 +721,7 @@ int client_disconnect (uri_tcp_test_main_t * utm) { client_send_disconnect (utm); + clib_warning ("Sent disconnect"); if (wait_for_state_change (utm, STATE_START)) { clib_warning ("Disconnect failed"); @@ -721,7 +745,7 @@ client_test (uri_tcp_test_main_t * utm) } /* Init test data */ - vec_validate (utm->connect_test_data, 64 * 1024 - 1); + vec_validate (utm->connect_test_data, 128 * 1024 - 1); for (i = 0; i < vec_len (utm->connect_test_data); i++) utm->connect_test_data[i] = i & 0xff; @@ -899,6 +923,9 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); + + session->bytes_received = 0; + session->start = clib_time_now (&utm->clib_time); } void @@ -909,37 +936,50 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, int n_read; session_fifo_event_t evt; unix_shared_memory_queue_t *q; - int rv, bytes; + session_t *session; + int rv; + u32 max_dequeue, offset, max_transfer, rx_buf_len; + rx_buf_len = vec_len (utm->rx_buf); rx_fifo = e->fifo; - tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; + session = &utm->sessions[rx_fifo->client_session_index]; + tx_fifo = session->server_tx_fifo; - bytes = svm_fifo_max_dequeue (rx_fifo); + max_dequeue = svm_fifo_max_dequeue (rx_fifo); /* Allow enqueuing of a new event */ svm_fifo_unset_event (rx_fifo); - if (bytes == 0) - return; + if (PREDICT_FALSE (max_dequeue == 0)) + { + return; + } - /* Read the bytes */ + /* Read the max_dequeue */ do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), - utm->rx_buf); + max_transfer = clib_min (rx_buf_len, max_dequeue); + n_read = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, utm->rx_buf); if (n_read > 0) - bytes -= n_read; - - if (utm->drop_packets) - continue; + { + max_dequeue -= n_read; + session->bytes_received += n_read; + } /* Reflect if a non-drop session */ - if (n_read > 0) + if (!utm->drop_packets && n_read > 0) { + offset = 0; do { - rv = svm_fifo_enqueue_nowait (tx_fifo, n_read, utm->rx_buf); + rv = svm_fifo_enqueue_nowait (tx_fifo, n_read, + &utm->rx_buf[offset]); + if (rv > 0) + { + n_read -= rv; + offset += rv; + } } - while (rv <= 0 && !utm->time_to_stop); + while ((rv <= 0 || n_read > 0) && !utm->time_to_stop); /* If event wasn't set, add one */ if (svm_fifo_set_event (tx_fifo)) @@ -951,11 +991,11 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, q = utm->vpp_event_queue; unix_shared_memory_queue_add (q, (u8 *) & evt, - 0 /* do wait for mutex */ ); + 1 /* do wait for mutex */ ); } } } - while ((n_read < 0 || bytes > 0) && !utm->time_to_stop); + while ((n_read < 0 || max_dequeue > 0) && !utm->time_to_stop); } void @@ -1068,9 +1108,18 @@ vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; + session_t *session; + + if 
(mp->retval) + { + clib_warning ("vpp complained about disconnect: %d", + ntohl (mp->retval)); + } - clib_warning ("retval %d", ntohl (mp->retval)); utm->state = STATE_START; + session = pool_elt_at_index (utm->sessions, utm->connected_session_index); + if (session) + session_print_stats (utm, session); } #define foreach_uri_msg \ @@ -1123,7 +1172,7 @@ main (int argc, char **argv) /* make the main heap thread-safe */ h->flags |= MHEAP_FLAG_THREAD_SAFE; - vec_validate (utm->rx_buf, 65536); + vec_validate (utm->rx_buf, 128 << 10); utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); @@ -1186,6 +1235,7 @@ main (int argc, char **argv) utm->drop_packets = drop_packets; utm->test_return_packets = test_return_packets; utm->bytes_to_send = bytes_to_send; + utm->time_to_stop = 0; setup_signal_handlers (); uri_api_hookup (utm); diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index c679b1f5..4bdb1027 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -117,7 +117,7 @@ application_del (application_t * app) /* Actual listener cleanup */ for (i = 0; i < vec_len (handles); i++) { - a->app_index = app->api_client_index; + a->app_index = app->index; a->handle = handles[i]; /* seg manager is removed when unbind completes */ vnet_unbind (a); diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index 07eeae82..c0ab1bf0 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -171,7 +171,10 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* Nothing to read return */ if (max_dequeue0 == 0) - return 0; + { + svm_fifo_unset_event (s0->server_tx_fifo); + return 0; + } /* Ensure we're not writing more than transport window allows */ if (max_dequeue0 < snd_space0) @@ -393,7 +396,7 @@ session_event_get_session (session_fifo_event_t * e0, u8 thread_index) s0 = stream_session_get_if_valid (session_index0, thread_index); - ASSERT (s0->thread_index == thread_index); + ASSERT (s0 == 0 || s0->thread_index == thread_index); return s0; } diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index caf8eaa3..bf571963 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -306,11 +306,13 @@ again: if (added_a_segment) { clib_warning ("added a segment, still cant allocate a fifo"); + clib_spinlock_unlock (&sm->lockp); return SESSION_ERROR_NEW_SEG_NO_SPACE; } if (session_manager_add_segment (sm)) { + clib_spinlock_unlock (&sm->lockp); return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; } @@ -320,6 +322,7 @@ again: else { clib_warning ("No space to allocate fifos!"); + clib_spinlock_unlock (&sm->lockp); return SESSION_ERROR_NO_SPACE; } } @@ -361,8 +364,10 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, if (sm->segment_indices[0] != svm_segment_index && !svm_fifo_segment_has_fifos (fifo_segment)) { + clib_spinlock_lock (&sm->lockp); svm_fifo_segment_delete (fifo_segment); vec_del1 (sm->segment_indices, svm_segment_index); + clib_spinlock_unlock (&sm->lockp); } } diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 534598d6..fe198044 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -700,7 +700,7 @@ stream_session_init_fifos_pointers (transport_connection_t * tc, svm_fifo_init_pointers (s->server_tx_fifo, tx_pointer); } -void +int stream_session_connect_notify (transport_connection_t * tc, u8 sst, u8 is_fail) { @@ -709,6 +709,7 @@ stream_session_connect_notify 
(transport_connection_t * tc, u8 sst, stream_session_t *new_s = 0; u64 handle; u32 api_context = 0; + int error = 0; handle = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip, tc->lcl_port, tc->rmt_port, @@ -716,7 +717,7 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE) { clib_warning ("This can't be good!"); - return; + return -1; } /* Get the app's index from the handle we stored when opening connection */ @@ -730,9 +731,12 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, /* Create new session (svm segments are allocated if needed) */ if (stream_session_create_i (sm, tc, &new_s)) - return; - - new_s->app_index = app->index; + { + is_fail = 1; + error = -1; + } + else + new_s->app_index = app->index; } /* Notify client */ @@ -741,6 +745,8 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, /* Cleanup session lookup */ stream_session_half_open_table_del (smm, sst, tc); + + return error; } void @@ -981,8 +987,13 @@ session_send_session_evt_to_thread (u64 session_handle, /* Based on request block (or not) for lack of space */ if (PREDICT_TRUE (q->cursize < q->maxsize)) - unix_shared_memory_queue_add (q, (u8 *) & evt, - 0 /* do wait for mutex */ ); + { + if (unix_shared_memory_queue_add (q, (u8 *) & evt, + 1 /* do wait for mutex */ )) + { + clib_warning ("failed to enqueue evt"); + } + } else { clib_warning ("queue full"); diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index d9c38bd1..5fa4225c 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -368,8 +368,8 @@ stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes); u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes); -void stream_session_connect_notify (transport_connection_t * tc, u8 sst, - u8 is_fail); +int stream_session_connect_notify (transport_connection_t * tc, u8 sst, + u8 is_fail); void stream_session_init_fifos_pointers (transport_connection_t * tc, u32 rx_pointer, u32 tx_pointer); diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index f772cb9f..60f764af 100755 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -419,7 +419,7 @@ done: REPLY_MACRO (VL_API_UNBIND_URI_REPLY); } -void +static void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { vl_api_connect_uri_reply_t *rmp; diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index e5f788be..04bd5ca0 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -39,6 +39,7 @@ typedef struct _transport_connection #if TRANSPORT_DEBUG elog_track_t elog_track; /**< Event logging */ + u32 cc_stat_tstamp; /**< CC stats timestamp */ #endif /** Macros for 'derived classes' where base is named "connection" */ @@ -57,6 +58,7 @@ typedef struct _transport_connection #define c_is_ip4 connection.is_ip4 #define c_thread_index connection.thread_index #define c_elog_track connection.elog_track +#define c_cc_stat_tstamp connection.cc_stat_tstamp } transport_connection_t; /* diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 7238cda3..6f8be082 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -43,7 +43,7 @@ #include #undef vl_printfun -#define TCP_BUILTIN_CLIENT_DBG (1) +#define TCP_BUILTIN_CLIENT_DBG (0) static void send_test_chunk (tclient_main_t * tm, session_t * s) @@ -92,7 +92,7 @@ send_test_chunk 
(tclient_main_t * tm, session_t * s) ed->data[2] = s->bytes_to_send; } - /* Poke the TCP state machine */ + /* Poke the session layer */ if (svm_fifo_set_event (s->server_tx_fifo)) { /* Fabricate TX event, send to vpp */ @@ -100,8 +100,9 @@ send_test_chunk (tclient_main_t * tm, session_t * s) evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; - unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, - 0 /* do wait for mutex */ ); + if (unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, + 0 /* do wait for mutex */ )) + clib_warning ("could not enqueue event"); } } } @@ -188,13 +189,13 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, sp = pool_elt_at_index (tm->sessions, connection_indices[i]); - if (tx_quota < 60 && sp->bytes_to_send > 0) + if ((tm->no_return || tx_quota < 60) && sp->bytes_to_send > 0) { send_test_chunk (tm, sp); delete_session = 0; tx_quota++; } - if (sp->bytes_to_receive > 0) + if (!tm->no_return && sp->bytes_to_receive > 0) { prev_bytes_received_this_session = sp->bytes_received; receive_test_chunk (tm, sp); @@ -205,13 +206,14 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, } if (PREDICT_FALSE (delete_session == 1)) { + __sync_fetch_and_add (&tm->tx_total, tm->bytes_to_send); __sync_fetch_and_add (&tm->rx_total, sp->bytes_received); + dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); memset (dmp, 0, sizeof (*dmp)); dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); dmp->client_index = tm->my_client_index; dmp->handle = sp->vpp_session_handle; -// vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); if (!unix_shared_memory_queue_add (tm->vl_input_queue, (u8 *) & dmp, 1)) { @@ -247,7 +249,6 @@ VLIB_REGISTER_NODE (builtin_client_node) = }; /* *INDENT-ON* */ - /* So we don't get "no handler for... 
" msgs */ static void vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) @@ -255,76 +256,10 @@ vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) vlib_main_t *vm = vlib_get_main (); tclient_main_t *tm = &tclient_main; tm->my_client_index = mp->index; - vlib_process_signal_event (vm, tm->node_index, 1 /* evt */ , + vlib_process_signal_event (vm, tm->cli_node_index, 1 /* evt */ , 0 /* data */ ); } -static void -vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) -{ - tclient_main_t *tm = &tclient_main; - session_t *session; - u32 session_index; - i32 retval = /* clib_net_to_host_u32 ( */ mp->retval /*) */ ; - int i; - - if (retval < 0) - { - clib_warning ("connection failed: retval %d", retval); - return; - } - - tm->our_event_queue = - uword_to_pointer (mp->vpp_event_queue_address, - unix_shared_memory_queue_t *); - tm->vpp_event_queue = - uword_to_pointer (mp->vpp_event_queue_address, - unix_shared_memory_queue_t *); - - /* - * Setup session - */ - pool_get (tm->sessions, session); - memset (session, 0, sizeof (*session)); - session_index = session - tm->sessions; - session->bytes_to_receive = session->bytes_to_send = tm->bytes_to_send; - - session->server_rx_fifo = - uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); - session->server_rx_fifo->client_session_index = session_index; - session->server_tx_fifo = - uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); - session->server_tx_fifo->client_session_index = session_index; - session->vpp_session_handle = mp->handle; - - /* Add it to the session lookup table */ - hash_set (tm->session_index_by_vpp_handles, mp->handle, session_index); - - if (tm->ready_connections == tm->expected_connections - 1) - { - vlib_thread_main_t *thread_main = vlib_get_thread_main (); - int thread_index; - - thread_index = 0; - for (i = 0; i < pool_elts (tm->sessions); i++) - { - vec_add1 (tm->connection_index_by_thread[thread_index], i); - thread_index++; - if (thread_index == thread_main->n_vlib_mains) - thread_index = 0; - } - } - __sync_fetch_and_add (&tm->ready_connections, 1); - if (tm->ready_connections == tm->expected_connections) - { - tm->run_test = 1; - tm->test_start_time = vlib_time_now (tm->vlib_main); - /* Signal the CLI process that the action is starting... */ - vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, - 1, 0 /* data */ ); - } -} - static int create_api_loopback (tclient_main_t * tm) { @@ -347,12 +282,11 @@ create_api_loopback (tclient_main_t * tm) mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; mp->context = 0xFEEDFACE; mp->input_queue = pointer_to_uword (tm->vl_input_queue); - strncpy ((char *) mp->name, "tcp_tester", sizeof (mp->name) - 1); + strncpy ((char *) mp->name, "tcp_clients_tester", sizeof (mp->name) - 1); vl_api_memclnt_create_t_handler (mp); /* Wait for reply */ - tm->node_index = vlib_get_current_process (vm)->node_runtime.node_index; vlib_process_wait_for_event_or_clock (vm, 1.0); event_type = vlib_process_get_events (vm, &event_data); switch (event_type) @@ -373,7 +307,6 @@ create_api_loopback (tclient_main_t * tm) #define foreach_tclient_static_api_msg \ _(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ -_(CONNECT_URI_REPLY, connect_uri_reply) static clib_error_t * tclient_api_hookup (vlib_main_t * vm) @@ -411,8 +344,8 @@ tcp_test_clients_init (vlib_main_t * vm) if (create_api_loopback (tm)) return -1; - /* Init test data */ - vec_validate (tm->connect_test_data, 64 * 1024 - 1); + /* Init test data. 
Big buffer */ + vec_validate (tm->connect_test_data, 1024 * 1024 - 1); for (i = 0; i < vec_len (tm->connect_test_data); i++) tm->connect_test_data[i] = i & 0xff; @@ -430,37 +363,66 @@ static int builtin_session_connected_callback (u32 app_index, u32 api_context, stream_session_t * s, u8 is_fail) { - vl_api_connect_uri_reply_t _m, *mp = &_m; - unix_shared_memory_queue_t *q; - application_t *app; - unix_shared_memory_queue_t *vpp_queue; + tclient_main_t *tm = &tclient_main; + session_t *session; + u32 session_index; + int i; - app = application_get (app_index); - q = vl_api_client_index_to_input_queue (app->api_client_index); + if (is_fail) + { + clib_warning ("connection %d failed!", api_context); + vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, -1, + 0 /* data */ ); + return -1; + } - if (!q) - return -1; + /* Mark vpp session as connected */ + s->session_state = SESSION_STATE_READY; - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); - mp->context = api_context; - if (!is_fail) + tm->our_event_queue = session_manager_get_vpp_event_queue (s->thread_index); + tm->vpp_event_queue = session_manager_get_vpp_event_queue (s->thread_index); + + /* + * Setup session + */ + pool_get (tm->sessions, session); + memset (session, 0, sizeof (*session)); + session_index = session - tm->sessions; + session->bytes_to_receive = session->bytes_to_send = tm->bytes_to_send; + session->server_rx_fifo = s->server_rx_fifo; + session->server_rx_fifo->client_session_index = session_index; + session->server_tx_fifo = s->server_tx_fifo; + session->server_tx_fifo->client_session_index = session_index; + session->vpp_session_handle = stream_session_handle (s); + + /* Add it to the session lookup table */ + hash_set (tm->session_index_by_vpp_handles, session->vpp_session_handle, + session_index); + + if (tm->ready_connections == tm->expected_connections - 1) { - vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); - mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo); - mp->server_tx_fifo = pointer_to_uword (s->server_tx_fifo); - mp->handle = stream_session_handle (s); - mp->vpp_event_queue_address = pointer_to_uword (vpp_queue); - mp->retval = 0; - s->session_state = SESSION_STATE_READY; + vlib_thread_main_t *thread_main = vlib_get_thread_main (); + int thread_index; + + thread_index = 0; + for (i = 0; i < pool_elts (tm->sessions); i++) + { + vec_add1 (tm->connection_index_by_thread[thread_index], i); + thread_index++; + if (thread_index == thread_main->n_vlib_mains) + thread_index = 0; + } } - else + __sync_fetch_and_add (&tm->ready_connections, 1); + if (tm->ready_connections == tm->expected_connections) { - mp->retval = clib_host_to_net_u32 (VNET_API_ERROR_SESSION_CONNECT_FAIL); + tm->run_test = 1; + tm->test_start_time = vlib_time_now (tm->vlib_main); + /* Signal the CLI process that the action is starting... 
*/ + vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, 1, + 0 /* data */ ); } - vl_api_connect_uri_reply_t_handler (mp); - return 0; } @@ -489,23 +451,22 @@ builtin_server_rx_callback (stream_session_t * s) } /* *INDENT-OFF* */ -static session_cb_vft_t builtin_clients = - { - .session_reset_callback = builtin_session_reset_callback, - .session_connected_callback = builtin_session_connected_callback, - .session_accept_callback = builtin_session_create_callback, - .session_disconnect_callback = builtin_session_disconnect_callback, - .builtin_server_rx_callback = builtin_server_rx_callback - }; +static session_cb_vft_t builtin_clients = { + .session_reset_callback = builtin_session_reset_callback, + .session_connected_callback = builtin_session_connected_callback, + .session_accept_callback = builtin_session_create_callback, + .session_disconnect_callback = builtin_session_disconnect_callback, + .builtin_server_rx_callback = builtin_server_rx_callback +}; /* *INDENT-ON* */ static int -attach_builtin_test_clients () +attach_builtin_test_clients_app (void) { tclient_main_t *tm = &tclient_main; vnet_app_attach_args_t _a, *a = &_a; u8 segment_name[128]; - u32 segment_name_length; + u32 segment_name_length, prealloc_fifos; u64 options[16]; segment_name_length = ARRAY_LEN (segment_name); @@ -518,13 +479,68 @@ attach_builtin_test_clients () a->segment_name_length = segment_name_length; a->session_cb_vft = &builtin_clients; + prealloc_fifos = tm->prealloc_fifos ? tm->expected_connections : 1; + options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; - options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ + options[SESSION_OPTIONS_SEGMENT_SIZE] = (2ULL << 32); + options[SESSION_OPTIONS_RX_FIFO_SIZE] = tm->fifo_size; + options[SESSION_OPTIONS_TX_FIFO_SIZE] = tm->fifo_size / 2; + options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = prealloc_fifos; + options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; a->options = options; - return vnet_application_attach (a); + if (vnet_application_attach (a)) + return -1; + + tm->app_index = a->app_index; + return 0; +} + +static void * +tclient_thread_fn (void *arg) +{ + return 0; +} + +/** Start a transmit thread */ +int +start_tx_pthread (tclient_main_t * tm) +{ + if (tm->client_thread_handle == 0) + { + int rv = pthread_create (&tm->client_thread_handle, + NULL /*attr */ , + tclient_thread_fn, 0); + if (rv) + { + tm->client_thread_handle = 0; + return -1; + } + } + return 0; +} + +void +clients_connect (vlib_main_t * vm, u8 * uri, u32 n_clients) +{ + tclient_main_t *tm = &tclient_main; + vnet_connect_args_t _a, *a = &_a; + int i; + for (i = 0; i < n_clients; i++) + { + memset (a, 0, sizeof (*a)); + + a->uri = (char *) uri; + a->api_context = i; + a->app_index = tm->app_index; + a->mp = 0; + vnet_connect_uri (a); + + /* Crude pacing for call setups, 100k/sec */ + vlib_process_suspend (vm, 10e-6); + } } static clib_error_t * @@ -534,17 +550,18 @@ test_tcp_clients_command_fn (vlib_main_t * vm, { tclient_main_t *tm = &tclient_main; vlib_thread_main_t *thread_main = vlib_get_thread_main (); - uword *event_data = 0; - uword event_type; - u8 *connect_uri = (u8 *) "tcp://6.0.1.1/1234"; - u8 *uri; + uword *event_data = 0, event_type; + u8 *default_connect_uri = (u8 *) "tcp://6.0.1.1/1234", *uri; + u64 tmp, total_bytes; + f64 cli_timeout = 20.0, delta; u32 n_clients = 1; + char *transfer_type; int i; - u64 tmp; - f64 cli_timeout = 20.0; - f64 delta; tm->bytes_to_send = 8192; + tm->no_return = 0; + tm->fifo_size = 64 << 10; + vec_free 
(tm->connect_uri); while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) @@ -561,11 +578,18 @@ test_tcp_clients_command_fn (vlib_main_t * vm, ; else if (unformat (input, "cli-timeout %f", &cli_timeout)) ; + else if (unformat (input, "no-return")) + tm->no_return = 1; + else if (unformat (input, "fifo-size %d", &tm->fifo_size)) + tm->fifo_size <<= 10; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } + /* Store cli process node index for signalling */ + tm->cli_node_index = vlib_get_current_process (vm)->node_runtime.node_index; + if (tm->is_init == 0) { if (tcp_test_clients_init (vm)) @@ -575,28 +599,25 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->ready_connections = 0; tm->expected_connections = n_clients; tm->rx_total = 0; + tm->tx_total = 0; - uri = connect_uri; + uri = default_connect_uri; if (tm->connect_uri) uri = tm->connect_uri; #if TCP_BUILTIN_CLIENT_PTHREAD - /* Start a transmit thread */ - if (tm->client_thread_handle == 0) + start_tx_pthread (); +#endif + + vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); + + if (tm->test_client_attached == 0) { - int rv = pthread_create (&tm->client_thread_handle, - NULL /*attr */ , - tclient_thread_fn, 0); - if (rv) + if (attach_builtin_test_clients_app ()) { - tm->client_thread_handle = 0; - return clib_error_return (0, "pthread_create returned %d", rv); + return clib_error_return (0, "app attach failed"); } } -#endif - vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); - if (tm->test_client_attached == 0) - attach_builtin_test_clients (); tm->test_client_attached = 1; /* Turn on the builtin client input nodes */ @@ -604,25 +625,8 @@ test_tcp_clients_command_fn (vlib_main_t * vm, vlib_node_set_state (vlib_mains[i], builtin_client_node.index, VLIB_NODE_STATE_POLLING); - tm->cli_node_index = vlib_get_current_process (vm)->node_runtime.node_index; - /* Fire off connect requests */ - for (i = 0; i < n_clients; i++) - { - vl_api_connect_uri_t _cmp, *cmp = &_cmp; - void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * cmp); - - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = tm->my_client_index; - cmp->context = ntohl (0xfeedface); - memcpy (cmp->uri, uri, strlen ((char *) uri) + 1); - - vl_api_connect_uri_t_handler (cmp); - /* Crude pacing for call setups, 100k/sec */ - vlib_process_suspend (vm, 10e-6); - } + clients_connect (vm, uri, n_clients); /* Park until the sessions come up, or ten seconds elapse... */ vlib_process_wait_for_event_or_clock (vm, 10.0 /* timeout, seconds */ ); @@ -668,14 +672,17 @@ test_tcp_clients_command_fn (vlib_main_t * vm, if (delta != 0.0) { + total_bytes = (tm->no_return ? tm->tx_total : tm->rx_total); + transfer_type = tm->no_return ? 
"half-duplex" : "full-duplex"; vlib_cli_output (vm, "%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds", - tm->rx_total, tm->rx_total / (1ULL << 20), - tm->rx_total / (1ULL << 30), delta); - vlib_cli_output (vm, "%.2f bytes/second full-duplex", - ((f64) tm->rx_total) / (delta)); - vlib_cli_output (vm, "%.4f gbit/second full-duplex", - (((f64) tm->rx_total * 8.0) / delta / 1e9)); + total_bytes, total_bytes / (1ULL << 20), + total_bytes / (1ULL << 30), delta); + vlib_cli_output (vm, "%.2f bytes/second %s", + ((f64) total_bytes) / (delta), transfer_type); + vlib_cli_output (vm, "%.4f gbit/second %s", + (((f64) total_bytes * 8.0) / delta / 1e9), + transfer_type); } else vlib_cli_output (vm, "zero delta-t?"); diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index d5d79e53..3462e0ee 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -44,78 +44,59 @@ typedef struct typedef struct { - /* API message ID base */ - u16 msg_id_base; - - /* vpe input queue */ - unix_shared_memory_queue_t *vl_input_queue; - - /* API client handle */ - u32 my_client_index; - - /* The URI we're playing with */ - u8 *uri; - - /* Session pool */ - session_t *sessions; - - /* Hash table for disconnect processing */ - uword *session_index_by_vpp_handles; - - /* intermediate rx buffer */ - u8 *rx_buf; - - /* URI for slave's connect */ - u8 *connect_uri; - - u32 connected_session_index; - - int i_am_master; - - /* drop all packets */ - int drop_packets; - - /* Our event queue */ - unix_shared_memory_queue_t *our_event_queue; - - /* $$$ single thread only for the moment */ - unix_shared_memory_queue_t *vpp_event_queue; - - pid_t my_pid; - - f64 test_start_time; - f64 test_end_time; - - u32 expected_connections; + /* + * Application setup parameters + */ + unix_shared_memory_queue_t *vl_input_queue; /**< vpe input queue */ + unix_shared_memory_queue_t *our_event_queue; /**< Our event queue */ + unix_shared_memory_queue_t *vpp_event_queue; /**< $$$ single thread */ + + u32 cli_node_index; /**< cli process node index */ + u32 my_client_index; /**< loopback API client handle */ + u32 app_index; /**< app index after attach */ + + /* + * Configuration params + */ + u8 *connect_uri; /**< URI for slave's connect */ + u64 bytes_to_send; /**< Bytes to send */ + u32 configured_segment_size; + u32 fifo_size; + u32 expected_connections; /**< Number of clients/connections */ + + /* + * Test state variables + */ + session_t *sessions; /**< Sessions pool */ + u8 *rx_buf; /**< intermediate rx buffer */ + uword *session_index_by_vpp_handles; /**< Hash table for disconnecting */ + u8 *connect_test_data; /**< Pre-computed test data */ u32 **connection_index_by_thread; + pthread_t client_thread_handle; + volatile u32 ready_connections; volatile u32 finished_connections; - volatile u64 rx_total; - u32 cli_node_index; - - /* Signal variable */ - volatile int run_test; - - /* Bytes to send */ - u64 bytes_to_send; - - u32 configured_segment_size; + volatile u64 tx_total; + volatile int run_test; /**< Signal start of test */ - /* VNET_API_ERROR_FOO -> "Foo" hash table */ - uword *error_string_by_error_number; - - u8 *connect_test_data; - pthread_t client_thread_handle; - u64 client_bytes_received; - u8 test_return_packets; + f64 test_start_time; + f64 test_end_time; + /* + * Flags + */ u8 is_init; u8 test_client_attached; + u8 no_return; + u8 test_return_packets; + int i_am_master; + int drop_packets; /**< drop all packets */ + u8 prealloc_fifos; /**< Request fifo preallocation */ - u32 
node_index; - - /* convenience */ + /* + * Convenience + */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; ethernet_main_t *ethernet_main; diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 8bd2f360..775bfc26 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -39,21 +39,30 @@ typedef struct { - /* Per-thread RX buffer */ - u8 **rx_buf; + /* + * Server app parameters + */ unix_shared_memory_queue_t **vpp_queue; - u64 byte_index; + unix_shared_memory_queue_t *vl_input_queue; /**< Sever's event queue */ - /* Sever's event queue */ - unix_shared_memory_queue_t *vl_input_queue; + u32 app_index; /**< Server app index */ + u32 my_client_index; /**< API client handle */ + u32 node_index; /**< process node index for evnt scheduling */ - /* API client handle */ - u32 my_client_index; + /* + * Config params + */ + u8 no_echo; /**< Don't echo traffic */ + u32 fifo_size; /**< Fifo size */ + u32 rcv_buffer_size; /**< Rcv buffer size */ + u32 prealloc_fifos; /**< Preallocate fifos */ - u32 app_index; + /* + * Test state + */ + u8 **rx_buf; /**< Per-thread RX buffer */ + u64 byte_index; - /* process node index for evnt scheduling */ - u32 node_index; vlib_main_t *vlib_main; } builtin_server_main_t; @@ -132,6 +141,29 @@ test_bytes (builtin_server_main_t * bsm, int actual_transfer) bsm->byte_index += actual_transfer; } +/* + * If no-echo, just read the data and be done with it + */ +int +builtin_server_rx_callback_no_echo (stream_session_t * s) +{ + builtin_server_main_t *bsm = &builtin_server_main; + u32 my_thread_id = vlib_get_thread_index (); + int actual_transfer; + svm_fifo_t *rx_fifo; + + rx_fifo = s->server_rx_fifo; + + do + { + actual_transfer = + svm_fifo_dequeue_nowait (rx_fifo, bsm->rcv_buffer_size, + bsm->rx_buf[my_thread_id]); + } + while (actual_transfer > 0); + return 0; +} + int builtin_server_rx_callback (stream_session_t * s) { @@ -143,8 +175,8 @@ builtin_server_rx_callback (stream_session_t * s) static int serial_number = 0; u32 my_thread_id = vlib_get_thread_index (); - tx_fifo = s->server_tx_fifo; rx_fifo = s->server_rx_fifo; + tx_fifo = s->server_tx_fifo; max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo); @@ -164,19 +196,22 @@ builtin_server_rx_callback (stream_session_t * s) /* Program self-tap to retry */ if (svm_fifo_set_event (rx_fifo)) { + unix_shared_memory_queue_t *q; evt.fifo = rx_fifo; evt.event_type = FIFO_EVENT_BUILTIN_RX; evt.event_id = 0; - unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], - (u8 *) & evt, - 0 /* do wait for mutex */ ); + + q = bsm->vpp_queue[s->thread_index]; + if (PREDICT_FALSE (q->cursize == q->maxsize)) + clib_warning ("out of event queue space"); + else + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* don't wait for mutex */ ); } return 0; } - vec_validate (bsm->rx_buf, my_thread_id); - vec_validate (bsm->rx_buf[my_thread_id], max_transfer - 1); _vec_len (bsm->rx_buf[my_thread_id]) = max_transfer; actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, @@ -281,14 +316,21 @@ server_attach () memset (a, 0, sizeof (*a)); memset (options, 0, sizeof (options)); + if (bsm->no_echo) + builtin_session_cb_vft.builtin_server_rx_callback = + builtin_server_rx_callback_no_echo; + else + builtin_session_cb_vft.builtin_server_rx_callback = + builtin_server_rx_callback; a->api_client_index = bsm->my_client_index; a->session_cb_vft = &builtin_session_cb_vft; a->options = options; 
a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 512 << 20; - a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10; - a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10; + a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = bsm->fifo_size; + a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = bsm->fifo_size; a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; - a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 8192; + a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = + bsm->prealloc_fifos ? bsm->prealloc_fifos : 1; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); @@ -316,17 +358,24 @@ static int server_create (vlib_main_t * vm) { builtin_server_main_t *bsm = &builtin_server_main; - u32 num_threads; vlib_thread_main_t *vtm = vlib_get_thread_main (); + u32 num_threads; + int i; if (bsm->my_client_index == (u32) ~ 0) { if (create_api_loopback (vm)) - return -1; + { + clib_warning ("failed to create api loopback"); + return -1; + } } num_threads = 1 /* main thread */ + vtm->n_threads; vec_validate (builtin_server_main.vpp_queue, num_threads - 1); + vec_validate (bsm->rx_buf, num_threads - 1); + for (i = 0; i < num_threads; i++) + vec_validate (bsm->rx_buf[i], bsm->rcv_buffer_size); if (server_attach ()) { @@ -381,23 +430,35 @@ tcp_builtin_server_api_hookup (vlib_main_t * vm) } static clib_error_t * -server_create_command_fn (vlib_main_t * vm, - unformat_input_t * input, vlib_cli_command_t * cmd) +server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) { + builtin_server_main_t *bsm = &builtin_server_main; int rv; -#if 0 + + bsm->no_echo = 0; + bsm->fifo_size = 64 << 10; + bsm->rcv_buffer_size = 128 << 10; + bsm->prealloc_fifos = 0; + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { - if (unformat (input, "whatever %d", &whatever)) + if (unformat (input, "no-echo")) + bsm->no_echo = 1; + else if (unformat (input, "fifo-size %d", &bsm->fifo_size)) + bsm->fifo_size <<= 10; + else if (unformat (input, "rcv-buf-size %d", &bsm->rcv_buffer_size)) + ; + else if (unformat (input, "prealloc-fifos", &bsm->prealloc_fifos)) ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } -#endif tcp_builtin_server_api_hookup (vm); vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. 
*/ ); + rv = server_create (vm); switch (rv) { @@ -406,6 +467,7 @@ server_create_command_fn (vlib_main_t * vm, default: return clib_error_return (0, "server_create returned %d", rv); } + return 0; } diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index e0b67a8e..5c554bac 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -726,15 +726,25 @@ tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space) u32 tcp_snd_space (tcp_connection_t * tc) { - int snd_space; + int snd_space, snt_limited; - /* If we haven't gotten dupacks or if we did and have gotten sacked bytes - * then we can still send */ - if (PREDICT_TRUE (tcp_in_cong_recovery (tc) == 0 - && (tc->rcv_dupacks == 0 - || tc->sack_sb.last_sacked_bytes))) + if (PREDICT_TRUE (tcp_in_cong_recovery (tc) == 0)) { snd_space = tcp_available_snd_space (tc); + + /* If we haven't gotten dupacks or if we did and have gotten sacked + * bytes then we can still send as per Limited Transmit (RFC3042) */ + if (PREDICT_FALSE (tc->rcv_dupacks != 0 + && (tcp_opts_sack_permitted (tc) + && tc->sack_sb.last_sacked_bytes == 0))) + { + if (tc->rcv_dupacks == 1 && tc->limited_transmit != tc->snd_nxt) + tc->limited_transmit = tc->snd_nxt; + ASSERT (seq_leq (tc->limited_transmit, tc->snd_nxt)); + + snt_limited = tc->snd_nxt - tc->limited_transmit; + snd_space = clib_max (2 * tc->snd_mss - snt_limited, 0); + } return tcp_round_snd_space (tc, snd_space); } diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 071f1ab1..e8398718 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -31,9 +31,9 @@ #define TCP_MAX_OPTION_SPACE 40 #define TCP_DUPACK_THRESHOLD 3 -#define TCP_MAX_RX_FIFO_SIZE 2 << 20 +#define TCP_MAX_RX_FIFO_SIZE 4 << 20 #define TCP_IW_N_SEGMENTS 10 -#define TCP_ALWAYS_ACK 0 /**< If on, we always ack */ +#define TCP_ALWAYS_ACK 1 /**< On/off delayed acks */ #define TCP_USE_SACKS 1 /**< Disable only for testing */ /** TCP FSM state definitions as per RFC793. 
*/ @@ -100,6 +100,7 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler; #define TCP_TIMER_PERSIST_MIN 2 /* 0.2s */ #define TCP_RTO_MAX 60 * THZ /* Min max RTO (60s) as per RFC6298 */ +#define TCP_RTO_MIN 0.2 * THZ /* Min RTO (200ms) - lower than standard */ #define TCP_RTT_MAX 30 * THZ /* 30s (probably too much) */ #define TCP_RTO_SYN_RETRIES 3 /* SYN retries without doubling RTO */ #define TCP_RTO_INIT 1 * THZ /* Initial retransmit timer */ @@ -149,7 +150,7 @@ enum #undef _ }; -#define TCP_MAX_SACK_BLOCKS 5 /**< Max number of SACK blocks stored */ +#define TCP_MAX_SACK_BLOCKS 15 /**< Max number of SACK blocks stored */ #define TCP_INVALID_SACK_HOLE_INDEX ((u32)~0) typedef struct _sack_scoreboard_hole @@ -208,6 +209,7 @@ typedef struct _tcp_connection u32 snd_wl1; /**< seq number used for last snd.wnd update */ u32 snd_wl2; /**< ack number used for last snd.wnd update */ u32 snd_nxt; /**< next seq number to be sent */ + u16 snd_mss; /**< Effective send max seg (data) size */ /** Receive sequence variables RFC793 */ u32 rcv_nxt; /**< next sequence number expected */ @@ -252,8 +254,8 @@ typedef struct _tcp_connection u32 rtt_ts; /**< Timestamp for tracked ACK */ u32 rtt_seq; /**< Sequence number for tracked ACK */ - u16 snd_mss; /**< Effective send max seg (data) size */ u16 mss; /**< Our max seg size that includes options */ + u32 limited_transmit; /**< snd_nxt when limited transmit starts */ } tcp_connection_t; struct _tcp_cc_algorithm @@ -433,6 +435,7 @@ tcp_end_seq (tcp_header_t * th, u32 len) #define seq_leq(_s1, _s2) ((i32)((_s1)-(_s2)) <= 0) #define seq_gt(_s1, _s2) ((i32)((_s1)-(_s2)) > 0) #define seq_geq(_s1, _s2) ((i32)((_s1)-(_s2)) >= 0) +#define seq_max(_s1, _s2) (seq_gt((_s1), (_s2)) ? (_s1) : (_s2)) /* Modulo arithmetic for timestamps */ #define timestamp_lt(_t1, _t2) ((i32)((_t1)-(_t2)) < 0) @@ -719,6 +722,7 @@ scoreboard_clear (sack_scoreboard_t * sb) { scoreboard_remove_hole (sb, hole); } + ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX); sb->sacked_bytes = 0; sb->last_sacked_bytes = 0; sb->last_bytes_delivered = 0; diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index 3a16cf63..ae68ad1b 100755 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -19,8 +19,10 @@ #include #define TCP_DEBUG (1) +#define TCP_DEBUG_SM (0) #define TCP_DEBUG_CC (1) -#define TCP_DEBUG_VERBOSE (0) +#define TCP_DEBUG_CC_STAT (1) +#define TCP_DEBUG_SM_VERBOSE (0) #define foreach_tcp_dbg_evt \ _(INIT, "") \ @@ -49,6 +51,8 @@ _(CC_RTX, "retransmit") \ _(CC_EVT, "cc event") \ _(CC_PACK, "cc partial ack") \ + _(CC_STAT, "cc stats") \ + _(CC_RTO_STAT, "cc rto stats") \ _(SEG_INVALID, "invalid segment") \ _(PAWS_FAIL, "failed paws check") \ _(ACK_RCV_ERR, "invalid ack") \ @@ -72,6 +76,10 @@ typedef enum _tcp_dbg_evt #define TRANSPORT_DEBUG (1) +/* + * Infra and evt track setup + */ + #define TCP_DBG(_tc, _evt, _args...) \ { \ u8 *_tmp = 0; \ @@ -158,6 +166,30 @@ typedef enum _tcp_dbg_evt TCP_EVT_DEALLOC_HANDLER(_tc); \ } +#define TCP_EVT_SYN_RCVD_HANDLER(_tc, ...) \ +{ \ + TCP_EVT_INIT_HANDLER(_tc, "s%d%c"); \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "SYNrx: irs %u", \ + .format_args = "i4", \ + }; \ + DECLARE_ETD(_tc, _e, 1); \ + ed->data[0] = _tc->irs; \ +} + +#define CONCAT_HELPER(_a, _b) _a##_b +#define CC(_a, _b) CONCAT_HELPER(_a, _b) +#define TCP_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args) +#else +#define TCP_EVT_DBG(_evt, _args...) 
+#endif + +/* + * State machine + */ +#if TCP_DEBUG_SM + #define TCP_EVT_ACK_SENT_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ @@ -234,18 +266,6 @@ typedef enum _tcp_dbg_evt ed->data[1] = _tc->rcv_nxt - _tc->irs; \ } -#define TCP_EVT_SYN_RCVD_HANDLER(_tc, ...) \ -{ \ - TCP_EVT_INIT_HANDLER(_tc, "s%d%c"); \ - ELOG_TYPE_DECLARE (_e) = \ - { \ - .format = "SYNrx: irs %u", \ - .format_args = "i4", \ - }; \ - DECLARE_ETD(_tc, _e, 1); \ - ed->data[0] = _tc->irs; \ -} - #define TCP_EVT_FIN_RCVD_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ @@ -418,6 +438,74 @@ typedef enum _tcp_dbg_evt ed->data[4] = _tc->snd_una_max - _tc->iss; \ } +#define TCP_EVT_RCV_WND_SHRUNK_HANDLER(_tc, _obs, _av, ...) \ +{ \ +if (_av > 0) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "huh?: rcv_wnd %u obsd %u av %u rcv_nxt %u rcv_las %u", \ + .format_args = "i4i4i4i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 5); \ + ed->data[0] = _tc->rcv_wnd; \ + ed->data[1] = _obs; \ + ed->data[2] = _av; \ + ed->data[3] = _tc->rcv_nxt - _tc->irs; \ + ed->data[4] = _tc->rcv_las - _tc->irs; \ +} \ +} +#else +#define TCP_EVT_ACK_SENT_HANDLER(_tc, ...) +#define TCP_EVT_DUPACK_SENT_HANDLER(_tc, ...) +#define TCP_EVT_SYN_SENT_HANDLER(_tc, ...) +#define TCP_EVT_SYN_RTX_HANDLER(_tc, ...) +#define TCP_EVT_FIN_SENT_HANDLER(_tc, ...) +#define TCP_EVT_RST_SENT_HANDLER(_tc, ...) +#define TCP_EVT_FIN_RCVD_HANDLER(_tc, ...) +#define TCP_EVT_RST_RCVD_HANDLER(_tc, ...) +#define TCP_EVT_ACK_RCVD_HANDLER(_tc, ...) +#define TCP_EVT_DUPACK_RCVD_HANDLER(_tc, ...) +#define TCP_EVT_PKTIZE_HANDLER(_tc, ...) +#define TCP_EVT_INPUT_HANDLER(_tc, _type, _len, _written, ...) +#define TCP_EVT_TIMER_POP_HANDLER(_tc_index, _timer_id, ...) +#define TCP_EVT_SEG_INVALID_HANDLER(_tc, _seq, _end, ...) +#define TCP_EVT_PAWS_FAIL_HANDLER(_tc, _seq, _end, ...) +#define TCP_EVT_ACK_RCV_ERR_HANDLER(_tc, _type, _ack, ...) +#define TCP_EVT_RCV_WND_SHRUNK_HANDLER(_tc, _obs, _av, ...) +#endif + +/* + * State machine verbose + */ +#if TCP_DBG_SM_VERBOSE +#define TCP_EVT_SND_WND_HANDLER(_tc, ...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "snd_wnd update: %u ", \ + .format_args = "i4", \ + }; \ + DECLARE_ETD(_tc, _e, 1); \ + ed->data[0] = _tc->snd_wnd; \ +} + +#define TCP_EVT_OUTPUT_HANDLER(_tc, flags, n_bytes,...) \ +{ \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "out: flags %x, bytes %u", \ + .format_args = "i4i4", \ + }; \ + DECLARE_ETD(_tc, _e, 2); \ + ed->data[0] = flags; \ + ed->data[1] = n_bytes; \ +} +#else +#define TCP_EVT_SND_WND_HANDLER(_tc, ...) +#define TCP_EVT_OUTPUT_HANDLER(_tc, flags, n_bytes,...) +#endif + /* * Congestion Control */ @@ -471,67 +559,59 @@ typedef enum _tcp_dbg_evt ed->data[1] = _tc->snd_una_max - _tc->iss; \ } -#else -#define TCP_EVT_CC_RTX_HANDLER(_tc, offset, n_bytes, ...) -#define TCP_EVT_CC_EVT_HANDLER(_tc, _sub_evt, _snd_space, ...) -#define TCP_EVT_CC_PACK_HANDLER(_tc, ...) -#endif +/* + * Congestion control stats + */ +#if TCP_DEBUG_CC_STAT -#define TCP_EVT_RCV_WND_SHRUNK_HANDLER(_tc, _obs, _av, ...) \ +#define STATS_INTERVAL 1 + +#define TCP_EVT_CC_RTO_STAT_HANDLER(_tc, ...) 
\ { \ -if (_av > 0) \ +if (_tc->c_cc_stat_tstamp + STATS_INTERVAL < tcp_time_now()) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "huh?: rcv_wnd %u obsd %u av %u rcv_nxt %u rcv_las %u", \ - .format_args = "i4i4i4i4i4", \ + .format = "rto_stat: rto %u srtt %u rttvar %u ", \ + .format_args = "i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 5); \ - ed->data[0] = _tc->rcv_wnd; \ - ed->data[1] = _obs; \ - ed->data[2] = _av; \ - ed->data[3] = _tc->rcv_nxt - _tc->irs; \ - ed->data[4] = _tc->rcv_las - _tc->irs; \ + DECLARE_ETD(_tc, _e, 3); \ + ed->data[0] = _tc->rto; \ + ed->data[1] = _tc->srtt; \ + ed->data[2] = _tc->rttvar; \ } \ } -#if TCP_DBG_VERBOSE -#define TCP_EVT_SND_WND_HANDLER(_tc, ...) \ +#define TCP_EVT_CC_STAT_HANDLER(_tc, ...) \ { \ - ELOG_TYPE_DECLARE (_e) = \ - { \ - .format = "snd_wnd update: %u ", \ - .format_args = "i4", \ - }; \ - DECLARE_ETD(_tc, _e, 1); \ - ed->data[0] = _tc->snd_wnd; \ -} - -#define TCP_EVT_OUTPUT_HANDLER(_tc, flags, n_bytes,...) \ +if (_tc->c_cc_stat_tstamp + STATS_INTERVAL < tcp_time_now()) \ { \ ELOG_TYPE_DECLARE (_e) = \ { \ - .format = "out: flags %x, bytes %u", \ - .format_args = "i4i4", \ + .format = "cc_stat: cwnd %u flight %u space %u ssthresh %u snd_wnd %u",\ + .format_args = "i4i4i4i4i4", \ }; \ - DECLARE_ETD(_tc, _e, 2); \ - ed->data[0] = flags; \ - ed->data[1] = n_bytes; \ + DECLARE_ETD(_tc, _e, 5); \ + ed->data[0] = _tc->cwnd; \ + ed->data[1] = tcp_flight_size (_tc); \ + ed->data[2] = tcp_snd_space (_tc); \ + ed->data[3] = _tc->ssthresh; \ + ed->data[4] = _tc->snd_wnd; \ + TCP_EVT_CC_RTO_STAT_HANDLER (_tc); \ + _tc->c_cc_stat_tstamp = tcp_time_now(); \ +} \ } + #else -#define TCP_EVT_SND_WND_HANDLER(_tc, ...) -#define TCP_EVT_OUTPUT_HANDLER(_tc, flags, n_bytes,...) +#define TCP_EVT_CC_STAT_HANDLER(_tc, ...) #endif -#define CONCAT_HELPER(_a, _b) _a##_b -#define CC(_a, _b) CONCAT_HELPER(_a, _b) -#define TCP_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args) - #else -#define TCP_EVT_DBG(_evt, _args...) +#define TCP_EVT_CC_RTX_HANDLER(_tc, offset, n_bytes, ...) +#define TCP_EVT_CC_EVT_HANDLER(_tc, _sub_evt, ...) +#define TCP_EVT_CC_PACK_HANDLER(_tc, ...) #endif - #endif /* SRC_VNET_TCP_TCP_DEBUG_H_ */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index ff2229b3..a2e6dad1 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -378,16 +378,20 @@ tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0) static void tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt) { - int err; + int err, diff; if (tc->srtt != 0) { err = mrtt - tc->srtt; - tc->srtt += err >> 3; +// tc->srtt += err >> 3; /* XXX Drop in RTT results in RTTVAR increase and bigger RTO. * The increase should be bound */ - tc->rttvar += ((int) clib_abs (err) - (int) tc->rttvar) >> 2; +// tc->rttvar += ((int) clib_abs (err) - (int) tc->rttvar) >> 2; + + tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1); + diff = (clib_abs (err) - (int) tc->rttvar) >> 2; + tc->rttvar = clib_max ((int) tc->rttvar + diff, 1); } else { @@ -401,6 +405,7 @@ void tcp_update_rto (tcp_connection_t * tc) { tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX); + tc->rto = clib_max (tc->rto, TCP_RTO_MIN); } /** Update RTT estimate and RTO timer @@ -417,8 +422,8 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) u32 mrtt = 0; u8 rtx_acked; - /* Determine if only rtx bytes are acked. TODO XXX fast retransmit */ - rtx_acked = tc->rto_boff && (tc->bytes_acked <= tc->snd_mss); + /* Determine if only rtx bytes are acked. 
*/ + rtx_acked = tcp_in_cong_recovery (tc) || !tc->bytes_acked; /* Karn's rule, part 1. Don't use retransmitted segments to estimate * RTT because they're ambiguous. */ @@ -428,8 +433,7 @@ tcp_update_rtt (tcp_connection_t * tc, u32 ack) } /* As per RFC7323 TSecr can be used for RTTM only if the segment advances * snd_una, i.e., the left side of the send window: - * seq_lt (tc->snd_una, ack). Note: last condition could be dropped, we don't - * try to update rtt for dupacks */ + * seq_lt (tc->snd_una, ack). */ else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr && tc->bytes_acked) { @@ -550,11 +554,13 @@ scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index, prev = scoreboard_get_hole (sb, prev_index); if (prev) { - hole->prev = prev - sb->holes; + hole->prev = prev_index; hole->next = prev->next; if ((next = scoreboard_next_hole (sb, hole))) next->prev = hole_index; + else + sb->tail = hole_index; prev->next = hole_index; } @@ -569,12 +575,13 @@ scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index, } void -scoreboard_update_lost (tcp_connection_t * tc, sack_scoreboard_t * sb) +scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb) { sack_scoreboard_hole_t *hole, *prev; u32 bytes = 0, blks = 0; sb->lost_bytes = 0; + sb->sacked_bytes = 0; hole = scoreboard_last_hole (sb); if (!hole) return; @@ -594,13 +601,16 @@ scoreboard_update_lost (tcp_connection_t * tc, sack_scoreboard_t * sb) hole = prev; } - hole = prev; while (hole) { sb->lost_bytes += scoreboard_hole_bytes (hole); hole->is_lost = 1; + prev = hole; hole = scoreboard_prev_hole (sb, hole); + if (hole) + bytes += prev->start - hole->end; } + sb->sacked_bytes = bytes; } /** @@ -677,7 +687,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { sack_scoreboard_t *sb = &tc->sack_sb; sack_block_t *blk, tmp; - sack_scoreboard_hole_t *hole, *next_hole, *last_hole, *new_hole; + sack_scoreboard_hole_t *hole, *next_hole, *last_hole; u32 blk_index = 0, old_sacked_bytes, hole_index; int i, j; @@ -743,6 +753,10 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) if (seq_gt (tc->snd_una_max, sb->high_sacked) && seq_gt (tc->snd_una_max, last_hole->end)) last_hole->end = tc->snd_una_max; + /* keep track of max byte sacked for when the last hole + * is acked */ + if (seq_gt (tmp.end, sb->high_sacked)) + sb->high_sacked = tmp.end; } /* Walk the holes with the SACK blocks */ @@ -758,45 +772,20 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { next_hole = scoreboard_next_hole (sb, hole); - /* Byte accounting */ - if (seq_leq (hole->end, ack)) - { - /* Bytes lost because snd_wnd left edge advances */ - if (next_hole && seq_leq (next_hole->start, ack)) - sb->last_bytes_delivered += next_hole->start - hole->end; - else - sb->last_bytes_delivered += ack - hole->end; - } - else - { - sb->sacked_bytes += scoreboard_hole_bytes (hole); - } - - /* About to remove last hole */ - if (hole == last_hole) - { - sb->tail = hole->prev; - last_hole = scoreboard_last_hole (sb); - /* keep track of max byte sacked for when the last hole - * is acked */ - if (seq_gt (hole->end, sb->high_sacked)) - sb->high_sacked = hole->end; - } - - /* snd_una needs to be advanced */ - if (blk->end == ack && seq_geq (ack, hole->end)) + /* Byte accounting: snd_una needs to be advanced */ + if (blk->end == ack) { - if (next_hole && seq_lt (ack, next_hole->start)) + if (next_hole) { - sb->snd_una_adv = next_hole->start - ack; - - /* all these can be delivered */ - sb->last_bytes_delivered += sb->snd_una_adv; + if (seq_lt (ack, next_hole->start)) + 
sb->snd_una_adv = next_hole->start - ack; + sb->last_bytes_delivered += + next_hole->start - hole->end; } else if (!next_hole) { sb->snd_una_adv = sb->high_sacked - ack; - sb->last_bytes_delivered += sb->snd_una_adv; + sb->last_bytes_delivered += sb->high_sacked - hole->end; } } @@ -808,7 +797,6 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { if (seq_gt (blk->end, hole->start)) { - sb->sacked_bytes += blk->end - hole->start; hole->start = blk->end; } blk_index++; @@ -819,28 +807,16 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) /* Hole must be split */ if (seq_lt (blk->end, hole->end)) { - sb->sacked_bytes += blk->end - blk->start; hole_index = scoreboard_hole_index (sb, hole); - new_hole = scoreboard_insert_hole (sb, hole_index, blk->end, - hole->end); + scoreboard_insert_hole (sb, hole_index, blk->end, hole->end); /* Pool might've moved */ hole = scoreboard_get_hole (sb, hole_index); hole->end = blk->start; - - /* New or split of tail */ - if ((last_hole->end == new_hole->end) - || seq_lt (last_hole->end, new_hole->start)) - { - last_hole = new_hole; - sb->tail = scoreboard_hole_index (sb, new_hole); - } - blk_index++; } - else if (seq_leq (blk->start, hole->end)) + else if (seq_lt (blk->start, hole->end)) { - sb->sacked_bytes += hole->end - blk->start; hole->end = blk->start; } @@ -848,9 +824,13 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) } } - sb->last_sacked_bytes = sb->sacked_bytes - old_sacked_bytes; - sb->sacked_bytes -= sb->last_bytes_delivered; - scoreboard_update_lost (tc, sb); + scoreboard_update_bytes (tc, sb); + sb->last_sacked_bytes = sb->sacked_bytes + - (old_sacked_bytes - sb->last_bytes_delivered); + ASSERT (sb->sacked_bytes == 0 + || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack)); + ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max + - seq_max (tc->snd_una, ack)); } /** @@ -998,9 +978,14 @@ tcp_should_fastrecover (tcp_connection_t * tc) || tcp_should_fastrecover_sack (tc)); } +/** + * One function to rule them all ... and in the darkness bind them + */ static void tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) { + u32 rxt_delivered; + /* * Duplicate ACK. Check if we should enter fast recovery, or if already in * it account for the bytes that left the network. 
@@ -1028,10 +1013,15 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) } /* If of of the two conditions lower hold, reset dupacks - * 1) Cumulative ack does not cover more than congestion threshold + * 1) Cumulative ack does not cover more than congestion threshold, + * and the following doesn't hold: the congestion window is + * greater than SMSS bytes and the difference between highest_ack + * and prev_highest_ack is at most 4*SMSS bytes (XXX) * 2) RFC6582 heuristic to avoid multiple fast retransmits */ - if (seq_leq (tc->snd_una, tc->snd_congestion) + if ((seq_gt (tc->snd_una, tc->snd_congestion) + || !(tc->cwnd > tc->snd_mss + && tc->bytes_acked <= 4 * tc->snd_mss)) || tc->rcv_opts.tsecr != tc->tsecr_last_ack) { tc->rcv_dupacks = 0; @@ -1089,7 +1079,10 @@ partial_ack: { /* If spurious return, we've already updated everything */ if (tcp_cc_recover (tc)) - return; + { + tc->tsecr_last_ack = tc->rcv_opts.tsecr; + return; + } tc->snd_nxt = tc->snd_una_max; @@ -1115,12 +1108,16 @@ partial_ack: return; /* Remove retransmitted bytes that have been delivered */ - if (tc->sack_sb.last_bytes_delivered - && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) + ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv + >= tc->sack_sb.last_bytes_delivered); + rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv + - tc->sack_sb.last_bytes_delivered; + if (rxt_delivered && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) { /* If we have sacks and we haven't gotten an ack beyond high_rxt, * remove sacked bytes delivered */ - tc->snd_rxt_bytes -= tc->sack_sb.last_bytes_delivered; + ASSERT (tc->snd_rxt_bytes >= rxt_delivered); + tc->snd_rxt_bytes -= rxt_delivered; } else { @@ -1154,6 +1151,8 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd, prev_snd_una; u8 is_dack; + TCP_EVT_DBG (TCP_EVT_CC_STAT, tc); + /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */ if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt))) { @@ -1282,6 +1281,10 @@ tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end) { vec_add1 (new_list, tc->snd_sacks[i]); } + else + { + clib_warning ("sack discarded"); + } } ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS); @@ -1358,16 +1361,18 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, stream_session_t *s0; int rv; + ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt)); + /* Pure ACK. 
Do nothing */ if (PREDICT_FALSE (data_len == 0)) { return TCP_ERROR_PURE_ACK; } - /* Enqueue out-of-order data with absolute offset */ + /* Enqueue out-of-order data with relative offset */ rv = stream_session_enqueue_data (&tc->connection, b, - vnet_buffer (b)->tcp.seq_number, - 0 /* queue event */ , 0); + vnet_buffer (b)->tcp.seq_number - + tc->rcv_nxt, 0 /* queue event */ , 0); /* Nothing written */ if (rv) @@ -1388,10 +1393,15 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, /* Get the newest segment from the fifo */ newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo); - start = ooo_segment_offset (s0->server_rx_fifo, newest); - end = ooo_segment_end_offset (s0->server_rx_fifo, newest); + if (newest) + { + start = + tc->rcv_nxt + ooo_segment_offset (s0->server_rx_fifo, newest); + end = start + ooo_segment_length (s0->server_rx_fifo, newest); + tcp_update_sack_list (tc, start, end); - tcp_update_sack_list (tc, start, end); + ASSERT (seq_gt (start, tc->rcv_nxt)); + } } return TCP_ERROR_ENQUEUED; @@ -1411,7 +1421,7 @@ tcp_can_delack (tcp_connection_t * tc) /* constrained to send ack */ || (tc->flags & TCP_CONN_SNDACK) != 0 /* we're almost out of tx wnd */ - || tcp_available_snd_space (tc) < 2 * tc->snd_mss) + || tcp_available_snd_space (tc) < 4 * tc->snd_mss) return 0; return 1; @@ -1434,7 +1444,7 @@ tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, *next0 = TCP_NEXT_DROP; /* Completely in the past (possible retransmit) */ - if (seq_lt (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt)) + if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt)) goto done; /* Chop off the bytes in the past */ @@ -1873,8 +1883,8 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (tcp_opts_wscale (&new_tc0->rcv_opts)) new_tc0->snd_wscale = new_tc0->rcv_opts.wscale; - /* No scaling */ - new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window); + new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window) + << new_tc0->snd_wscale; new_tc0->snd_wl1 = seq0; new_tc0->snd_wl2 = ack0; @@ -1892,8 +1902,15 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Make sure las is initialized for the wnd computation */ new_tc0->rcv_las = new_tc0->rcv_nxt; - /* Notify app that we have connection */ - stream_session_connect_notify (&new_tc0->connection, sst, 0); + /* Notify app that we have connection. 
If session layer can't + * allocate session send reset */ + if (stream_session_connect_notify (&new_tc0->connection, sst, + 0)) + { + tcp_connection_cleanup (new_tc0); + tcp_send_reset (b0, is_ip4); + goto drop; + } stream_session_init_fifos_pointers (&new_tc0->connection, new_tc0->irs + 1, @@ -1907,7 +1924,14 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, new_tc0->state = TCP_STATE_SYN_RCVD; /* Notify app that we have connection */ - stream_session_connect_notify (&new_tc0->connection, sst, 0); + if (stream_session_connect_notify + (&new_tc0->connection, sst, 0)) + { + tcp_connection_cleanup (new_tc0); + tcp_send_reset (b0, is_ip4); + goto drop; + } + stream_session_init_fifos_pointers (&new_tc0->connection, new_tc0->irs + 1, new_tc0->iss + 1); @@ -2508,8 +2532,8 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (tcp_opts_wscale (&child0->rcv_opts)) child0->snd_wscale = child0->rcv_opts.wscale; - /* No scaling */ - child0->snd_wnd = clib_net_to_host_u16 (th0->window); + child0->snd_wnd = clib_net_to_host_u16 (th0->window) + << child0->snd_wscale; child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; @@ -2892,6 +2916,9 @@ do { \ _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, + TCP_ERROR_NONE); _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c index c66250e4..c825e952 100644 --- a/src/vnet/tcp/tcp_newreno.c +++ b/src/vnet/tcp/tcp_newreno.c @@ -18,7 +18,6 @@ void newreno_congestion (tcp_connection_t * tc) { - tc->prev_ssthresh = tc->ssthresh; tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss); } @@ -47,7 +46,8 @@ newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type) { if (ack_type == TCP_CC_DUPACK) { - tc->cwnd += tc->snd_mss; + if (!tcp_opts_sack_permitted (tc)) + tc->cwnd += tc->snd_mss; } else if (ack_type == TCP_CC_PARTIALACK) { diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 47c94e6d..554a981d 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -1052,6 +1052,7 @@ tcp_rtx_timeout_cc (tcp_connection_t * tc) tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss); tc->cwnd = tcp_loss_wnd (tc); tc->snd_congestion = tc->snd_una_max; + tcp_recovery_on (tc); } @@ -1213,7 +1214,7 @@ tcp_timer_persist_handler (u32 index) tc->timers[TCP_TIMER_PERSIST] = TCP_TIMER_HANDLE_INVALID; /* Problem already solved or worse */ - if (tc->state == TCP_STATE_CLOSED + if (tc->state == TCP_STATE_CLOSED || tc->state > TCP_STATE_ESTABLISHED || tc->snd_wnd > tc->snd_mss || tcp_in_recovery (tc)) return; @@ -1505,10 +1506,7 @@ tcp46_output_inline (vlib_main_t * vm, /* Stop DELACK timer and fix flags */ tc0->flags &= ~(TCP_CONN_SNDACK); - if (tcp_timer_is_active (tc0, TCP_TIMER_DELACK)) - { - tcp_timer_reset (tc0, TCP_TIMER_DELACK); - } + tcp_timer_reset (tc0, TCP_TIMER_DELACK); /* If not retransmitting * 1) update snd_una_max (SYN, SYNACK, FIN) @@ -1630,7 +1628,7 @@ tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b) tc = 
(tcp_connection_t *) tconn; tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, 0); - if (tc->rtt_ts == 0) + if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc)) { tc->rtt_ts = tcp_time_now (); tc->rtt_seq = tc->snd_nxt; diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 3f8afa40..a461e3b8 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -190,11 +190,18 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) TCP_TEST ((sb->sacked_bytes == 0), "sacked bytes %d", sb->sacked_bytes); TCP_TEST ((pool_elts (sb->holes) == 1), "scoreboard has %d elements", pool_elts (sb->holes)); + hole = scoreboard_first_hole (sb); + TCP_TEST ((hole->prev == TCP_INVALID_SACK_HOLE_INDEX + && hole->next == TCP_INVALID_SACK_HOLE_INDEX), "hole is valid"); + TCP_TEST ((sb->last_bytes_delivered == 100), "last bytes delivered %d", + sb->last_bytes_delivered); /* * Add some more blocks and then remove all */ vec_reset_length (tc->rcv_opts.sacks); + tc->snd_una += sb->snd_una_adv; + tc->snd_una_max = 1900; for (i = 0; i < 5; i++) { block.start = i * 100 + 1200; @@ -242,6 +249,39 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) TCP_TEST ((sb->last_sacked_bytes == 0), "last sacked bytes %d", sb->last_sacked_bytes); + /* + * Inject one block, ack it and overlap hole + */ + + tc->snd_una = 0; + tc->snd_una_max = 1000; + tc->snd_nxt = 1000; + + block.start = 100; + block.end = 500; + vec_add1 (tc->rcv_opts.sacks, block); + tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks); + + tcp_rcv_sacks (tc, 0); + + if (verbose) + vlib_cli_output (vm, "sb added [100, 500]:\n%U", + format_tcp_scoreboard, sb); + + tcp_rcv_sacks (tc, 800); + + if (verbose) + vlib_cli_output (vm, "sb ack [0, 800]:\n%U", format_tcp_scoreboard, sb); + + TCP_TEST ((pool_elts (sb->holes) == 1), + "scoreboard has %d elements", pool_elts (sb->holes)); + TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv %u", sb->snd_una_adv); + TCP_TEST ((sb->sacked_bytes == 0), "sacked bytes %d", sb->sacked_bytes); + TCP_TEST ((sb->last_sacked_bytes == 0), + "last sacked bytes %d", sb->last_sacked_bytes); + TCP_TEST ((sb->last_bytes_delivered == 400), + "last bytes delivered %d", sb->last_bytes_delivered); + return 0; } @@ -571,7 +611,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) */ for (i = 0; i < 3; i++) { - offset = (2 * i + 1) * sizeof (u32); + offset = (2 * i + 1) * sizeof (u32) - f->tail; data = (u8 *) (test_data + (2 * i + 1)); if (i == 0) { @@ -600,7 +640,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) /* * Try adding a completely overlapped segment */ - offset = 3 * sizeof (u32); + offset = 3 * sizeof (u32) - f->tail; data = (u8 *) (test_data + 3); rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (rv) @@ -626,7 +666,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) */ for (i = 3; i > 1; i--) { - offset = (2 * i + 0) * sizeof (u32); + offset = (2 * i + 0) * sizeof (u32) - f->tail; data = (u8 *) (test_data + (2 * i + 0)); rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) @@ -688,7 +728,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < 4; i++) { - offset = (2 * i + 1) * sizeof (u32); + offset = (2 * i + 1) * sizeof (u32) - f->tail; data = (u8 *) (test_data + (2 * i + 1)); rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) @@ -701,7 +741,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) } } - rv = svm_fifo_enqueue_with_offset (f, 8, 21, data); 
+ rv = svm_fifo_enqueue_with_offset (f, 8 - f->tail, 21, data); TCP_TEST ((rv == 0), "ooo enqueued %u", rv); TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); @@ -722,7 +762,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < 4; i++) { - offset = (2 * i + 1) * sizeof (u32); + offset = (2 * i + 1) * sizeof (u32) - f->tail; data = (u8 *) (test_data + (2 * i + 1)); rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) @@ -735,7 +775,13 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) } } + if (verbose) + vlib_cli_output (vm, "fifo after enqueue: %U", format_svm_fifo, f, 1); + rv = svm_fifo_enqueue_nowait (f, 29, data); + if (verbose) + vlib_cli_output (vm, "fifo after enqueueing 29: %U", format_svm_fifo, f, + 1); TCP_TEST ((rv == 32), "ooo enqueued %u", rv); TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); @@ -788,7 +834,8 @@ tcp_test_fifo2 (vlib_main_t * vm) { tp = vp + i; data64 = tp->offset; - svm_fifo_enqueue_with_offset (f, tp->offset, tp->len, (u8 *) & data64); + svm_fifo_enqueue_with_offset (f, tp->offset - f->tail, tp->len, + (u8 *) & data64); } /* Expected result: one big fat chunk at offset 4 */ @@ -817,7 +864,7 @@ tcp_test_fifo2 (vlib_main_t * vm) { tp = &test_data[i]; data64 = tp->offset; - rv = svm_fifo_enqueue_with_offset (f, tp->offset, tp->len, + rv = svm_fifo_enqueue_with_offset (f, tp->offset - f->tail, tp->len, (u8 *) & data64); if (rv) { @@ -991,8 +1038,9 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) for (i = !randomize; i < vec_len (generate); i++) { tp = generate + i; - svm_fifo_enqueue_with_offset (f, fifo_initial_offset + tp->offset, - tp->len, + svm_fifo_enqueue_with_offset (f, + fifo_initial_offset + tp->offset - + f->tail, tp->len, (u8 *) data_pattern + tp->offset); } @@ -1107,7 +1155,7 @@ tcp_test_fifo4 (vlib_main_t * vm, unformat_input_t * input) for (i = test_n_bytes - 1; i > 0; i--) { - rv = svm_fifo_enqueue_with_offset (f, fifo_initial_offset + i, + rv = svm_fifo_enqueue_with_offset (f, fifo_initial_offset + i - f->tail, sizeof (u8), &test_data[i]); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", i, i, i + sizeof (u8)); -- cgit 1.2.3-korg From f6359c8cace5b73a813e5f4e3d1bc28f7752fcdf Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Mon, 19 Jun 2017 12:26:09 -0400 Subject: Improve svm fifo and tcp tx path performance (VPP-846) - multiarch on svm fifo - avoid ip lookup on tx Change-Id: Iab0d85204a710979417bca1d692cc47877131203 Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/svm/svm_fifo.c | 95 +++++++++++++++++++++++++++++++++++++--- src/uri/uri_tcp_test.c | 10 ++--- src/vnet/session/node.c | 13 +++--- src/vnet/session/transport.h | 7 ++- src/vnet/tcp/tcp.c | 100 +++++++++++++++++++++++++++++++++++++++++-- src/vnet/tcp/tcp.h | 4 ++ src/vnet/tcp/tcp_output.c | 29 +++++++++---- src/vnet/tcp/tcp_packet.h | 9 ++-- 8 files changed, 232 insertions(+), 35 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 6ca437cf..aed5d6a7 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -14,6 +14,7 @@ */ #include +#include static inline u8 position_lt (svm_fifo_t * f, u32 a, u32 b) @@ -417,10 +418,38 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) return (total_copy_bytes); } +#define SVM_ENQUEUE_CLONE_TEMPLATE(arch, fn, tgt) \ + uword \ + __attribute__ 
((flatten)) \ + __attribute__ ((target (tgt))) \ + CLIB_CPU_OPTIMIZED \ + fn ## _ ## arch ( svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) \ + { return fn (f, max_bytes, copy_from_here);} + +static int +svm_fifo_enqueue_nowait_ma (svm_fifo_t * f, u32 max_bytes, + u8 * copy_from_here) +{ + return svm_fifo_enqueue_internal (f, max_bytes, copy_from_here); +} + +foreach_march_variant (SVM_ENQUEUE_CLONE_TEMPLATE, + svm_fifo_enqueue_nowait_ma); +CLIB_MULTIARCH_SELECT_FN (svm_fifo_enqueue_nowait_ma); + int svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) { - return svm_fifo_enqueue_internal (f, max_bytes, copy_from_here); +#if CLIB_DEBUG > 0 + return svm_fifo_enqueue_nowait_ma (f, max_bytes, copy_from_here); +#else + static int (*fp) (svm_fifo_t *, u32, u8 *); + + if (PREDICT_FALSE (fp == 0)) + fp = (void *) svm_fifo_enqueue_nowait_ma_multiarch_select (); + + return (*fp) (f, max_bytes, copy_from_here); +#endif } /** @@ -541,15 +570,43 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) return (total_copy_bytes); } -int -svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) +static int +svm_fifo_dequeue_nowait_ma (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) { return svm_fifo_dequeue_internal (f, max_bytes, copy_here); } +#define SVM_FIFO_DEQUEUE_CLONE_TEMPLATE(arch, fn, tgt) \ + uword \ + __attribute__ ((flatten)) \ + __attribute__ ((target (tgt))) \ + CLIB_CPU_OPTIMIZED \ + fn ## _ ## arch ( svm_fifo_t * f, u32 max_bytes, \ + u8 * copy_here) \ + { return fn (f, max_bytes, copy_here);} + +foreach_march_variant (SVM_FIFO_DEQUEUE_CLONE_TEMPLATE, + svm_fifo_dequeue_nowait_ma); +CLIB_MULTIARCH_SELECT_FN (svm_fifo_dequeue_nowait_ma); + int -svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, - u8 * copy_here) +svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) +{ +#if CLIB_DEBUG > 0 + return svm_fifo_dequeue_nowait_ma (f, max_bytes, copy_here); +#else + static int (*fp) (svm_fifo_t *, u32, u8 *); + + if (PREDICT_FALSE (fp == 0)) + fp = (void *) svm_fifo_dequeue_nowait_ma_multiarch_select (); + + return (*fp) (f, max_bytes, copy_here); +#endif +} + +static int +svm_fifo_peek_ma (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, + u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems, real_head; @@ -586,6 +643,34 @@ svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, return total_copy_bytes; } +#define SVM_FIFO_PEEK_CLONE_TEMPLATE(arch, fn, tgt) \ + uword \ + __attribute__ ((flatten)) \ + __attribute__ ((target (tgt))) \ + CLIB_CPU_OPTIMIZED \ + fn ## _ ## arch ( svm_fifo_t * f, u32 relative_offset, u32 max_bytes, \ + u8 * copy_here) \ + { return fn (f, relative_offset, max_bytes, copy_here);} + +foreach_march_variant (SVM_FIFO_PEEK_CLONE_TEMPLATE, svm_fifo_peek_ma); +CLIB_MULTIARCH_SELECT_FN (svm_fifo_peek_ma); + +int +svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, + u8 * copy_here) +{ +#if CLIB_DEBUG > 0 + return svm_fifo_peek_ma (f, relative_offset, max_bytes, copy_here); +#else + static int (*fp) (svm_fifo_t *, u32, u32, u8 *); + + if (PREDICT_FALSE (fp == 0)) + fp = (void *) svm_fifo_peek_ma_multiarch_select (); + + return (*fp) (f, relative_offset, max_bytes, copy_here); +#endif +} + int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes) { diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index d1694cf4..80aab183 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -398,7 
+398,6 @@ static void vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - session_t *session; vl_api_reset_session_reply_t *rmp; uword *p; int rv = 0; @@ -407,9 +406,8 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) if (p) { - session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset (utm->session_index_by_vpp_handles, mp->handle); - pool_put (utm->sessions, session); + clib_warning ("got reset"); + /* Cleanup later */ utm->time_to_stop = 1; } else @@ -603,7 +601,7 @@ send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, if (bytes_to_snd > vec_len (test_data)) bytes_to_snd = vec_len (test_data); - while (bytes_to_snd > 0) + while (bytes_to_snd > 0 && !utm->time_to_stop) { actual_write = (bytes_to_snd > queue_max_chunk) ? queue_max_chunk : bytes_to_snd; @@ -652,6 +650,8 @@ client_send_data (uri_tcp_test_main_t * utm) for (i = 0; i < n_iterations; i++) { send_test_chunk (utm, tx_fifo, mypid, 0); + if (utm->time_to_stop) + break; } leftover = utm->bytes_to_send % vec_len (test_data); diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index c0ab1bf0..b24f5fd9 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -248,6 +248,11 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, ASSERT (bi0); _vec_len (smm->tx_buffers[thread_index]) = n_bufs; + /* usual speculation, or the enqueue_x1 macro will barf */ + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + b0 = vlib_get_buffer (vm, bi0); b0->error = 0; b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID @@ -255,10 +260,6 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, b0->current_data = 0; b0->total_length_not_including_first_buffer = 0; - /* RX on the local interface. 
tx in default fib */ - vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; - vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; - len_to_deq0 = clib_min (left_to_snd0, deq_per_buf); data0 = vlib_buffer_make_headroom (b0, MAX_HDRS_LEN); @@ -307,10 +308,6 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, })); /* *INDENT-ON* */ - /* usual speculation, or the enqueue_x1 macro will barf */ - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); if (PREDICT_FALSE (n_trace > 0)) diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index 04bd5ca0..561a9257 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -30,13 +30,16 @@ typedef struct _transport_connection ip46_address_t lcl_ip; /**< Local IP */ u16 lcl_port; /**< Local port */ u16 rmt_port; /**< Remote port */ - u8 proto; /**< Transport protocol id (also session type) */ + u8 proto; /**< Protocol id (also session type) */ u32 s_index; /**< Parent session index */ u32 c_index; /**< Connection index in transport pool */ u8 is_ip4; /**< Flag if IP4 connection */ u32 thread_index; /**< Worker-thread index */ + fib_node_index_t rmt_fei; /**< FIB entry index for rmt */ + dpo_id_t rmt_dpo; /**< Forwarding DPO for rmt */ + #if TRANSPORT_DEBUG elog_track_t elog_track; /**< Event logging */ u32 cc_stat_tstamp; /**< CC stats timestamp */ @@ -59,6 +62,8 @@ typedef struct _transport_connection #define c_thread_index connection.thread_index #define c_elog_track connection.elog_track #define c_cc_stat_tstamp connection.cc_stat_tstamp +#define c_rmt_fei connection.rmt_fei +#define c_rmt_dpo connection.rmt_dpo } transport_connection_t; /* diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 5c554bac..4e85eb3f 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -16,6 +16,7 @@ #include #include #include +#include #include tcp_main_t tcp_main; @@ -342,6 +343,99 @@ tcp_connection_timers_reset (tcp_connection_t * tc) } } +typedef struct ip4_tcp_hdr +{ + ip4_header_t ip; + tcp_header_t tcp; +} ip4_tcp_hdr_t; + +typedef struct ip6_tcp_hdr +{ + ip6_header_t ip; + tcp_header_t tcp; +} ip6_tcp_hdr_t; + +static void +tcp_connection_select_lb_bucket (tcp_connection_t * tc, const dpo_id_t * dpo, + dpo_id_t * result) +{ + const dpo_id_t *choice; + load_balance_t *lb; + int hash; + + lb = load_balance_get (dpo->dpoi_index); + if (tc->c_is_ip4) + { + ip4_tcp_hdr_t hdr; + memset (&hdr, 0, sizeof (hdr)); + hdr.ip.protocol = IP_PROTOCOL_TCP; + hdr.ip.address_pair.src.as_u32 = tc->c_lcl_ip.ip4.as_u32; + hdr.ip.address_pair.dst.as_u32 = tc->c_rmt_ip.ip4.as_u32; + hdr.tcp.src_port = tc->c_lcl_port; + hdr.tcp.dst_port = tc->c_rmt_port; + hash = ip4_compute_flow_hash (&hdr.ip, lb->lb_hash_config); + } + else + { + ip6_tcp_hdr_t hdr; + memset (&hdr, 0, sizeof (hdr)); + hdr.ip.protocol = IP_PROTOCOL_TCP; + clib_memcpy (&hdr.ip.src_address, &tc->c_lcl_ip.ip6, + sizeof (ip6_address_t)); + clib_memcpy (&hdr.ip.dst_address, &tc->c_rmt_ip.ip6, + sizeof (ip6_address_t)); + hdr.tcp.src_port = tc->c_lcl_port; + hdr.tcp.dst_port = tc->c_rmt_port; + hash = ip6_compute_flow_hash (&hdr.ip, lb->lb_hash_config); + } + choice = load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1); + dpo_copy (result, choice); +} + +fib_node_index_t +tcp_lookup_rmt_in_fib (tcp_connection_t * tc) +{ + fib_prefix_t prefix; + + clib_memcpy (&prefix.fp_addr, &tc->c_rmt_ip, sizeof (prefix.fp_addr)); + prefix.fp_proto = tc->c_is_ip4 ? 
FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6; + prefix.fp_len = tc->c_is_ip4 ? 32 : 128; + return fib_table_lookup (0, &prefix); +} + +static int +tcp_connection_stack_on_fib_entry (tcp_connection_t * tc) +{ + dpo_id_t choice = DPO_INVALID; + u32 output_node_index; + fib_entry_t *fe; + + fe = fib_entry_get (tc->c_rmt_fei); + if (fe->fe_lb.dpoi_type != DPO_LOAD_BALANCE) + return -1; + + tcp_connection_select_lb_bucket (tc, &fe->fe_lb, &choice); + + output_node_index = + tc->c_is_ip4 ? tcp4_output_node.index : tcp6_output_node.index; + dpo_stack_from_node (output_node_index, &tc->c_rmt_dpo, &choice); + return 0; +} + +/** Stack tcp connection on peer's fib entry. + * + * This ultimately populates the dpo the connection will use to send packets. + */ +static void +tcp_connection_fib_attach (tcp_connection_t * tc) +{ + tc->c_rmt_fei = tcp_lookup_rmt_in_fib (tc); + + ASSERT (tc->c_rmt_fei != FIB_NODE_INDEX_INVALID); + + tcp_connection_stack_on_fib_entry (tc); +} + /** Initialize tcp connection variables * * Should be called after having received a msg from the peer, i.e., a SYN or @@ -353,6 +447,7 @@ tcp_connection_init_vars (tcp_connection_t * tc) tcp_init_mss (tc); scoreboard_init (&tc->sack_sb); tcp_cc_init (tc); + tcp_connection_fib_attach (tc); } int @@ -361,7 +456,8 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) tcp_main_t *tm = vnet_get_tcp_main (); tcp_connection_t *tc; fib_prefix_t prefix; - u32 fei, sw_if_index; + fib_node_index_t fei; + u32 sw_if_index; ip46_address_t lcl_addr; u16 lcl_port; @@ -985,8 +1081,6 @@ tcp_main_enable (vlib_main_t * vm) vec_validate (tm->timer_wheels, num_threads - 1); tcp_initialize_timer_wheels (tm); -// vec_validate (tm->delack_connections, num_threads - 1); - /* Initialize clocks per tick for TCP timestamp. Used to compute * monotonically increasing timestamps. 
*/ tm->tstamp_ticks_per_clock = vm->clib_time.seconds_per_clock diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index e8398718..12d804b8 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -28,6 +28,7 @@ #define THZ (u32) (1/TCP_TICK) /**< TCP tick frequency */ #define TCP_TSTAMP_RESOLUTION TCP_TICK /**< Time stamp resolution */ #define TCP_PAWS_IDLE 24 * 24 * 60 * 60 * THZ /**< 24 days */ +#define TCP_FIB_RECHECK_PERIOD 1 * THZ /**< Recheck every 1s */ #define TCP_MAX_OPTION_SPACE 40 #define TCP_DUPACK_THRESHOLD 3 @@ -256,6 +257,7 @@ typedef struct _tcp_connection u16 mss; /**< Our max seg size that includes options */ u32 limited_transmit; /**< snd_nxt when limited transmit starts */ + u32 last_fib_check; /**< Last time we checked fib route for peer */ } tcp_connection_t; struct _tcp_cc_algorithm @@ -528,6 +530,8 @@ void tcp_cc_init_congestion (tcp_connection_t * tc); int tcp_cc_recover (tcp_connection_t * tc); void tcp_cc_fastrecovery_exit (tcp_connection_t * tc); +fib_node_index_t tcp_lookup_rmt_in_fib (tcp_connection_t * tc); + /* Made public for unit testing only */ void tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end); diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 554a981d..41bebcb3 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -22,17 +22,14 @@ vlib_node_registration_t tcp6_output_node; typedef enum _tcp_output_nect { TCP_OUTPUT_NEXT_DROP, - TCP_OUTPUT_NEXT_IP_LOOKUP, TCP_OUTPUT_N_NEXT } tcp_output_next_t; #define foreach_tcp4_output_next \ _ (DROP, "error-drop") \ - _ (IP_LOOKUP, "ip4-lookup") #define foreach_tcp6_output_next \ _ (DROP, "error-drop") \ - _ (IP_LOOKUP, "ip6-lookup") static char *tcp_error_strings[] = { #define tcp_error(n,s) s, @@ -1451,7 +1448,7 @@ tcp46_output_inline (vlib_main_t * vm, tcp_connection_t *tc0; tcp_tx_trace_t *t0; tcp_header_t *th0 = 0; - u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_IP_LOOKUP; + u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_DROP; bi0 = from[0]; to_next[0] = bi0; @@ -1530,10 +1527,26 @@ tcp46_output_inline (vlib_main_t * vm, tc0->rto_boff = 0; } - /* set fib index to default and lookup node */ - /* XXX network virtualization (vrf/vni) */ - vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; - vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + /* Make sure we haven't lost route to our peer */ + if (PREDICT_FALSE (tc0->last_fib_check + < tc0->snd_opts.tsval + TCP_FIB_RECHECK_PERIOD)) + { + if (PREDICT_TRUE + (tc0->c_rmt_fei == tcp_lookup_rmt_in_fib (tc0))) + { + tc0->last_fib_check = tc0->snd_opts.tsval; + } + else + { + clib_warning ("lost connection to peer"); + tcp_connection_reset (tc0); + goto done; + } + } + + /* Use pre-computed dpo to set next node */ + next0 = tc0->c_rmt_dpo.dpoi_next_node; + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = tc0->c_rmt_dpo.dpoi_index; b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; done: diff --git a/src/vnet/tcp/tcp_packet.h b/src/vnet/tcp/tcp_packet.h index 4f28cf32..a6f62ee1 100644 --- a/src/vnet/tcp/tcp_packet.h +++ b/src/vnet/tcp/tcp_packet.h @@ -144,12 +144,11 @@ typedef struct { u8 flags; /** Option flags, see above */ - /* Received options */ - u16 mss; /**< Maximum segment size advertised by peer */ - u8 wscale; /**< Window scale advertised by peer */ - u32 tsval; /**< Peer's timestamp value */ + u16 mss; /**< Maximum segment size advertised */ + u8 wscale; /**< Window scale advertised */ + u32 tsval; /**< Timestamp value */ u32 tsecr; /**< Echoed/reflected time stamp */ - sack_block_t *sacks; 
/**< SACK blocks received */ + sack_block_t *sacks; /**< SACK blocks */ u8 n_sack_blocks; /**< Number of SACKs blocks */ } tcp_options_t; -- cgit 1.2.3-korg From 58eb866b15a45514dc356170f28640d6c9db8034 Mon Sep 17 00:00:00 2001 From: Klement Sekera Date: Fri, 9 Jun 2017 06:06:49 +0200 Subject: Refactor API message handling code This is preparation for new C API. Moving common stuff to separate headers reduces dependency issues. Change-Id: Ie7adb23398de72448e5eba6c1c1da4e1bc678725 Signed-off-by: Klement Sekera --- src/svm.am | 2 +- src/svm/svm.h | 102 +--------------- src/svm/svm_common.h | 133 +++++++++++++++++++++ src/vlib-api.am | 6 +- src/vlibapi/api.h | 280 ++++---------------------------------------- src/vlibapi/api_common.h | 268 ++++++++++++++++++++++++++++++++++++++++++ src/vlibmemory/api.h | 107 +---------------- src/vlibmemory/api_common.h | 138 ++++++++++++++++++++++ src/vppinfra.am | 1 + src/vppinfra/clib_error.h | 35 ++++++ src/vppinfra/error.h | 14 +-- 11 files changed, 609 insertions(+), 477 deletions(-) create mode 100644 src/svm/svm_common.h create mode 100644 src/vlibapi/api_common.h create mode 100644 src/vlibmemory/api_common.h create mode 100644 src/vppinfra/clib_error.h (limited to 'src/svm') diff --git a/src/svm.am b/src/svm.am index 442eba8e..7116eb37 100644 --- a/src/svm.am +++ b/src/svm.am @@ -13,7 +13,7 @@ bin_PROGRAMS += svmtool svmdbtool -nobase_include_HEADERS += svm/svm.h svm/ssvm.h svm/svmdb.h \ +nobase_include_HEADERS += svm/svm.h svm/svm_common.h svm/ssvm.h svm/svmdb.h \ svm/svm_fifo.h svm/svm_fifo_segment.h lib_LTLIBRARIES += libsvm.la libsvmdb.la diff --git a/src/svm/svm.h b/src/svm/svm.h index 06797fa1..894c3d95 100644 --- a/src/svm/svm.h +++ b/src/svm/svm.h @@ -24,106 +24,10 @@ #include #include #include +#include #define MMAP_PAGESIZE (clib_mem_get_page_size()) -#define SVM_VERSION ((1<<16) | 1) /* set to declare region ready. */ - -#define SVM_FLAGS_MHEAP (1<<0) /* region contains an mheap */ -#define SVM_FLAGS_FILE (1<<1) /* region backed by one or more files */ -#define SVM_FLAGS_NODATA (1<<2) /* region will be further subdivided */ -#define SVM_FLAGS_NEED_DATA_INIT (1<<3) - -#define SVM_PVT_MHEAP_SIZE (128<<10) /* region's private mheap (128k) */ - -typedef struct svm_region_ -{ - volatile uword version; - pthread_mutex_t mutex; - pthread_cond_t condvar; - int mutex_owner_pid; /* in case of trouble */ - int mutex_owner_tag; - uword flags; - uword virtual_base; /* base of the region object */ - uword virtual_size; - void *region_heap; - void *data_base; /* data portion base address */ - void *data_heap; /* data heap, if any */ - volatile void *user_ctx; /* user context pointer */ - /* stuff allocated in the region's heap */ - uword bitmap_size; /* nbits in virtual alloc bitmap */ - uword *bitmap; /* the bitmap */ - char *region_name; - char *backing_file; - char **filenames; - uword *client_pids; - /* pad */ - - /* next page: - * (64K) clib heap for the region itself - * - * data_base -> whatever is in this region - */ - -} svm_region_t; - -typedef struct svm_map_region_args_ -{ - const char *root_path; /* NULL means use the truly global arena */ - const char *name; - u64 baseva; - u64 size; - u64 pvt_heap_size; - uword flags; - char *backing_file; - uword backing_mmap_size; - /* uid, gid to own the svm region(s) */ - int uid; - int gid; -} svm_map_region_args_t; - - -/* - * Memory shared across all router instances. Packet buffers, etc - * Base should be "out of the way," and size should be big enough to - * cover everything we plan to put here. 
- */ -#define SVM_GLOBAL_REGION_BASEVA 0x30000000 -#define SVM_GLOBAL_REGION_SIZE (64<<20) -#define SVM_GLOBAL_REGION_NAME "/global_vm" - -/* - * Memory shared across individual router instances. - */ -#define SVM_OVERLAY_REGION_BASEVA \ - (SVM_GLOBAL_REGION_BASEVA + SVM_GLOBAL_REGION_SIZE) -#define SVM_OVERLAY_REGION_SIZE (1<<20) -#define SVM_OVERLAY_REGION_BASENAME "/overlay_vm" - -typedef struct -{ - u8 *subregion_name; -} svm_subregion_t; - -typedef struct -{ - svm_subregion_t *subregions; /* subregion pool */ - uword *name_hash; - u8 *root_path; -} svm_main_region_t; - - -void *svm_region_find_or_create (svm_map_region_args_t * a); -void svm_region_init (void); -void svm_region_init_chroot (const char *root_path); -void svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid); -void svm_region_init_args (svm_map_region_args_t * a); -void svm_region_exit (void); -void svm_region_unmap (void *rp_arg); -void svm_client_scan (const char *root_path); -void svm_client_scan_this_region_nolock (svm_region_t * rp); -u8 *shm_name_from_svm_map_region_args (svm_map_region_args_t * a); - static inline void * svm_mem_alloc (svm_region_t * rp, uword size) { @@ -192,10 +96,6 @@ svm_pop_heap (void *oldheap) clib_mem_set_heap (oldheap); } -u8 *format_svm_region (u8 * s, va_list * args); - -svm_region_t *svm_get_root_rp (void); - #endif /* __included_svm_h__ */ /* diff --git a/src/svm/svm_common.h b/src/svm/svm_common.h new file mode 100644 index 00000000..1f184432 --- /dev/null +++ b/src/svm/svm_common.h @@ -0,0 +1,133 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#ifndef __included_svm_common_h__ +#define __included_svm_common_h__ + +#include +#include +#include + +#define SVM_VERSION ((1<<16) | 1) /* set to declare region ready. 
*/ + +#define SVM_FLAGS_MHEAP (1<<0) /* region contains an mheap */ +#define SVM_FLAGS_FILE (1<<1) /* region backed by one or more files */ +#define SVM_FLAGS_NODATA (1<<2) /* region will be further subdivided */ +#define SVM_FLAGS_NEED_DATA_INIT (1<<3) + +#define SVM_PVT_MHEAP_SIZE (128<<10) /* region's private mheap (128k) */ + +typedef struct svm_region_ +{ + volatile uword version; + pthread_mutex_t mutex; + pthread_cond_t condvar; + int mutex_owner_pid; /* in case of trouble */ + int mutex_owner_tag; + uword flags; + uword virtual_base; /* base of the region object */ + uword virtual_size; + void *region_heap; + void *data_base; /* data portion base address */ + void *data_heap; /* data heap, if any */ + volatile void *user_ctx; /* user context pointer */ + /* stuff allocated in the region's heap */ + uword bitmap_size; /* nbits in virtual alloc bitmap */ + uword *bitmap; /* the bitmap */ + char *region_name; + char *backing_file; + char **filenames; + uword *client_pids; + /* pad */ + + /* next page: + * (64K) clib heap for the region itself + * + * data_base -> whatever is in this region + */ + +} svm_region_t; + +typedef struct svm_map_region_args_ +{ + const char *root_path; /* NULL means use the truly global arena */ + const char *name; + u64 baseva; + u64 size; + u64 pvt_heap_size; + uword flags; + char *backing_file; + uword backing_mmap_size; + /* uid, gid to own the svm region(s) */ + int uid; + int gid; +} svm_map_region_args_t; + + +/* + * Memory shared across all router instances. Packet buffers, etc + * Base should be "out of the way," and size should be big enough to + * cover everything we plan to put here. + */ +#define SVM_GLOBAL_REGION_BASEVA 0x30000000 +#define SVM_GLOBAL_REGION_SIZE (64<<20) +#define SVM_GLOBAL_REGION_NAME "/global_vm" + +/* + * Memory shared across individual router instances. 
+ */ +#define SVM_OVERLAY_REGION_BASEVA \ + (SVM_GLOBAL_REGION_BASEVA + SVM_GLOBAL_REGION_SIZE) +#define SVM_OVERLAY_REGION_SIZE (1<<20) +#define SVM_OVERLAY_REGION_BASENAME "/overlay_vm" + +typedef struct +{ + u8 *subregion_name; +} svm_subregion_t; + +typedef struct +{ + svm_subregion_t *subregions; /* subregion pool */ + uword *name_hash; + u8 *root_path; +} svm_main_region_t; + + +void *svm_region_find_or_create (svm_map_region_args_t * a); +void svm_region_init (void); +void svm_region_init_chroot (const char *root_path); +void svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid); +void svm_region_init_args (svm_map_region_args_t * a); +void svm_region_exit (void); +void svm_region_unmap (void *rp_arg); +void svm_client_scan (const char *root_path); +void svm_client_scan_this_region_nolock (svm_region_t * rp); +u8 *shm_name_from_svm_map_region_args (svm_map_region_args_t * a); +u8 *format_svm_region (u8 * s, va_list * args); + +svm_region_t *svm_get_root_rp (void); + +#endif /* __included_svm_common_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vlib-api.am b/src/vlib-api.am index eea52420..677811bc 100644 --- a/src/vlib-api.am +++ b/src/vlib-api.am @@ -30,7 +30,10 @@ libvlibmemory_la_SOURCES = \ vlibmemory/vl_memory_api_h.h \ vlibmemory/vl_memory_msg_enum.h -nobase_include_HEADERS += vlibapi/api.h vlibapi/api_helper_macros.h vlibapi/vat_helper_macros.h +nobase_include_HEADERS += vlibapi/api.h \ + vlibapi/api_common.h \ + vlibapi/api_helper_macros.h \ + vlibapi/vat_helper_macros.h libvlibmemoryclient_la_DEPENDENCIES = libvppinfra.la libsvm.la libvlibmemoryclient_la_LIBADD = $(libvlibmemoryclient_la_DEPENDENCIES) -lpthread @@ -49,6 +52,7 @@ libvlibmemoryclient_la_SOURCES = \ nobase_include_HEADERS += \ vlibmemory/api.h \ + vlibmemory/api_common.h \ vlibmemory/vl_memory_api_h.h \ vlibmemory/vl_memory_msg_enum.h \ vlibmemory/unix_shared_memory_queue.h \ diff --git a/src/vlibapi/api.h b/src/vlibapi/api.h index 0e2c2101..49f5d5fb 100644 --- a/src/vlibapi/api.h +++ b/src/vlibapi/api.h @@ -20,73 +20,13 @@ #ifndef included_api_h #define included_api_h +#include #include #include #include #include #include -#include - -typedef enum -{ - REGISTRATION_TYPE_FREE = 0, - REGISTRATION_TYPE_SHMEM, - REGISTRATION_TYPE_SOCKET_LISTEN, - REGISTRATION_TYPE_SOCKET_SERVER, - REGISTRATION_TYPE_SOCKET_CLIENT, -} vl_registration_type_t; - -typedef struct vl_api_registration_ -{ - vl_registration_type_t registration_type; - - /* Index in VLIB's brain (not shared memory). */ - u32 vl_api_registration_pool_index; - - u8 *name; - - /* - * The following groups of data could be unioned, but my fingers are - * going to be sore enough. 
- */ - - /* shared memory only */ - unix_shared_memory_queue_t *vl_input_queue; - - /* socket server and client */ - u32 unix_file_index; - i8 *unprocessed_input; - u32 unprocessed_msg_length; - u8 *output_vector; - - /* socket client only */ - u32 server_handle; - u32 server_index; - -} vl_api_registration_t; - - -/* Trace configuration for a single message */ -typedef struct -{ - int size; - int trace_enable; - int replay_enable; -} trace_cfg_t; - -/* - * API recording - */ -typedef struct -{ - u8 endian; - u8 enabled; - u8 wrapped; - u8 pad; - u32 nitems; - u32 curindex; - u8 **traces; -} vl_api_trace_t; +#include /* *INDENT-OFF* */ typedef CLIB_PACKED @@ -97,200 +37,8 @@ typedef CLIB_PACKED }) vl_api_trace_file_header_t; /* *INDENT-ON* */ -typedef enum -{ - VL_API_TRACE_TX, - VL_API_TRACE_RX, -} vl_api_trace_which_t; - -#define VL_API_LITTLE_ENDIAN 0x00 -#define VL_API_BIG_ENDIAN 0x01 - -typedef struct -{ - u8 *name; - u16 first_msg_id; - u16 last_msg_id; -} vl_api_msg_range_t; - -typedef clib_error_t *(vl_msg_api_init_function_t) (u32 client_index); - -typedef struct _vl_msg_api_init_function_list_elt -{ - struct _vl_msg_api_init_function_list_elt *next_init_function; - vl_msg_api_init_function_t *f; -} _vl_msg_api_function_list_elt_t; - -typedef struct -{ - void (**msg_handlers) (void *); - int (**pd_msg_handlers) (void *, int); - void (**msg_cleanup_handlers) (void *); - void (**msg_endian_handlers) (void *); - void (**msg_print_handlers) (void *, void *); - const char **msg_names; - u8 *message_bounce; - u8 *is_mp_safe; - struct ring_alloc_ *arings; - u32 ring_misses; - u32 garbage_collects; - u32 missing_clients; - vl_api_trace_t *rx_trace; - vl_api_trace_t *tx_trace; - int msg_print_flag; - trace_cfg_t *api_trace_cfg; - int our_pid; - svm_region_t *vlib_rp; - svm_region_t **mapped_shmem_regions; - struct vl_shmem_hdr_ *shmem_hdr; - vl_api_registration_t **vl_clients; - - u8 *serialized_message_table_in_shmem; - - /* For plugin msg allocator */ - u16 first_available_msg_id; - - /* message range by name hash */ - uword *msg_range_by_name; - - /* vector of message ranges */ - vl_api_msg_range_t *msg_ranges; - - /* uid for the api shared memory region */ - int api_uid; - /* gid for the api shared memory region */ - int api_gid; - - /* base virtual address for global VM region */ - u64 global_baseva; - - /* size of the global VM region */ - u64 global_size; - - /* size of the API region */ - u64 api_size; - - /* size of the global VM private mheap */ - u64 global_pvt_heap_size; - - /* size of the api private mheap */ - u64 api_pvt_heap_size; - - /* Client-only data structures */ - unix_shared_memory_queue_t *vl_input_queue; - - /* - * All VLIB-side message handlers use my_client_index to identify - * the queue / client. This works in sim replay. - */ - int my_client_index; - /* - * This is the (shared VM) address of the registration, - * don't use it to id the connection since it can't possibly - * work in simulator replay. - */ - vl_api_registration_t *my_registration; - - i32 vlib_signal; - - /* vlib input queue length */ - u32 vlib_input_queue_length; - - /* client side message index hash table */ - uword *msg_index_by_name_and_crc; - - const char *region_name; - const char *root_path; - - /* Replay in progress? 
*/ - int replay_in_progress; - - /* List of API client reaper functions */ - _vl_msg_api_function_list_elt_t *reaper_function_registrations; - -} api_main_t; - -extern api_main_t api_main; - -typedef struct -{ - int id; - char *name; - u32 crc; - void *handler; - void *cleanup; - void *endian; - void *print; - int size; - int traced; - int replay; - int message_bounce; - int is_mp_safe; -} vl_msg_api_msg_config_t; - -typedef struct msgbuf_ -{ - unix_shared_memory_queue_t *q; - u32 data_len; - u32 gc_mark_timestamp; - u8 data[0]; -} msgbuf_t; - -/* api_shared.c prototypes */ -int vl_msg_api_rx_trace_enabled (api_main_t * am); -int vl_msg_api_tx_trace_enabled (api_main_t * am); -void vl_msg_api_trace (api_main_t * am, vl_api_trace_t * tp, void *msg); -int vl_msg_api_trace_onoff (api_main_t * am, vl_api_trace_which_t which, - int onoff); -int vl_msg_api_trace_free (api_main_t * am, vl_api_trace_which_t which); int vl_msg_api_trace_save (api_main_t * am, vl_api_trace_which_t which, FILE * fp); -int vl_msg_api_trace_configure (api_main_t * am, vl_api_trace_which_t which, - u32 nitems); -void vl_msg_api_handler_with_vm_node (api_main_t * am, - void *the_msg, vlib_main_t * vm, - vlib_node_runtime_t * node); -void vl_msg_api_handler (void *the_msg); -void vl_msg_api_handler_no_free (void *the_msg); -void vl_msg_api_handler_no_trace_no_free (void *the_msg); -void vl_msg_api_trace_only (void *the_msg); -void vl_msg_api_cleanup_handler (void *the_msg); -void vl_msg_api_replay_handler (void *the_msg); -void vl_msg_api_socket_handler (void *the_msg); -void vl_msg_api_set_handlers (int msg_id, char *msg_name, - void *handler, - void *cleanup, - void *endian, - void *print, int msg_size, int traced); -void vl_msg_api_config (vl_msg_api_msg_config_t *); -void vl_msg_api_set_cleanup_handler (int msg_id, void *fp); -void vl_msg_api_queue_handler (unix_shared_memory_queue_t * q); -vl_api_trace_t *vl_msg_api_trace_get (api_main_t * am, - vl_api_trace_which_t which); - -void vl_msg_api_barrier_sync (void) __attribute__ ((weak)); -void vl_msg_api_barrier_release (void) __attribute__ ((weak)); -void vl_msg_api_free (void *); -void vl_noop_handler (void *mp); -void vl_msg_api_increment_missing_client_counter (void); -void vl_msg_api_post_mortem_dump (void); -void vl_msg_api_post_mortem_dump_enable_disable (int enable); -void vl_msg_api_register_pd_handler (void *handler, - u16 msg_id_host_byte_order); -int vl_msg_api_pd_handler (void *mp, int rv); - -void vl_msg_api_set_first_available_msg_id (u16 first_avail); -u16 vl_msg_api_get_msg_ids (const char *name, int n); -void vl_msg_api_add_msg_name_crc (api_main_t * am, const char *string, - u32 id); -u32 vl_api_get_msg_index (u8 * name_and_crc); -u32 vl_msg_api_get_msg_length (void *msg_arg); - -/* node_serialize.c prototypes */ -u8 *vlib_node_serialize (vlib_node_main_t * nm, u8 * vector, - u32 max_threads, int include_nexts, - int include_stats); -vlib_node_t **vlib_node_unserialize (u8 * vector); #define VLIB_API_INIT_FUNCTION(x) VLIB_DECLARE_INIT_FUNCTION(x,api_init) @@ -350,8 +98,30 @@ vl_msg_api_get_msg_length_inline (void *msg_arg) return clib_net_to_host_u32 (header->data_len); } -#endif /* included_api_h */ +int vl_msg_api_rx_trace_enabled (api_main_t * am); +int vl_msg_api_tx_trace_enabled (api_main_t * am); +void vl_msg_api_trace (api_main_t * am, vl_api_trace_t * tp, void *msg); +int vl_msg_api_trace_onoff (api_main_t * am, vl_api_trace_which_t which, + int onoff); +int vl_msg_api_trace_free (api_main_t * am, vl_api_trace_which_t which); +int 
vl_msg_api_trace_configure (api_main_t * am, vl_api_trace_which_t which, + u32 nitems); +void vl_msg_api_handler_with_vm_node (api_main_t * am, + void *the_msg, vlib_main_t * vm, + vlib_node_runtime_t * node); +vl_api_trace_t *vl_msg_api_trace_get (api_main_t * am, + vl_api_trace_which_t which); +void vl_msg_api_add_msg_name_crc (api_main_t * am, const char *string, + u32 id); +/* node_serialize.c prototypes */ +u8 *vlib_node_serialize (vlib_node_main_t * nm, u8 * vector, + u32 max_threads, int include_nexts, + int include_stats); +vlib_node_t **vlib_node_unserialize (u8 * vector); +u32 vl_msg_api_get_msg_length (void *msg_arg); + +#endif /* included_api_h */ /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vlibapi/api_common.h b/src/vlibapi/api_common.h new file mode 100644 index 00000000..b84d269e --- /dev/null +++ b/src/vlibapi/api_common.h @@ -0,0 +1,268 @@ +/* + *------------------------------------------------------------------ + * api_common.h + * + * Copyright (c) 2009-2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#ifndef included_api_common_h +#define included_api_common_h + +#include +#include +#include + +typedef enum +{ + REGISTRATION_TYPE_FREE = 0, + REGISTRATION_TYPE_SHMEM, + REGISTRATION_TYPE_SOCKET_LISTEN, + REGISTRATION_TYPE_SOCKET_SERVER, + REGISTRATION_TYPE_SOCKET_CLIENT, +} vl_registration_type_t; + +typedef struct vl_api_registration_ +{ + vl_registration_type_t registration_type; + + /* Index in VLIB's brain (not shared memory). */ + u32 vl_api_registration_pool_index; + + u8 *name; + + /* + * The following groups of data could be unioned, but my fingers are + * going to be sore enough. 
+ */ + + /* shared memory only */ + unix_shared_memory_queue_t *vl_input_queue; + + /* socket server and client */ + u32 unix_file_index; + i8 *unprocessed_input; + u32 unprocessed_msg_length; + u8 *output_vector; + + /* socket client only */ + u32 server_handle; + u32 server_index; + +} vl_api_registration_t; + + +/* Trace configuration for a single message */ +typedef struct +{ + int size; + int trace_enable; + int replay_enable; +} trace_cfg_t; + +/* + * API recording + */ +typedef struct +{ + u8 endian; + u8 enabled; + u8 wrapped; + u8 pad; + u32 nitems; + u32 curindex; + u8 **traces; +} vl_api_trace_t; + +typedef enum +{ + VL_API_TRACE_TX, + VL_API_TRACE_RX, +} vl_api_trace_which_t; + +#define VL_API_LITTLE_ENDIAN 0x00 +#define VL_API_BIG_ENDIAN 0x01 + +typedef struct +{ + u8 *name; + u16 first_msg_id; + u16 last_msg_id; +} vl_api_msg_range_t; + +typedef struct +{ + int id; + char *name; + u32 crc; + void *handler; + void *cleanup; + void *endian; + void *print; + int size; + int traced; + int replay; + int message_bounce; + int is_mp_safe; +} vl_msg_api_msg_config_t; + +typedef struct msgbuf_ +{ + unix_shared_memory_queue_t *q; + u32 data_len; + u32 gc_mark_timestamp; + u8 data[0]; +} msgbuf_t; + +/* api_shared.c prototypes */ +void vl_msg_api_handler (void *the_msg); +void vl_msg_api_handler_no_free (void *the_msg); +void vl_msg_api_handler_no_trace_no_free (void *the_msg); +void vl_msg_api_trace_only (void *the_msg); +void vl_msg_api_cleanup_handler (void *the_msg); +void vl_msg_api_replay_handler (void *the_msg); +void vl_msg_api_socket_handler (void *the_msg); +void vl_msg_api_set_handlers (int msg_id, char *msg_name, + void *handler, + void *cleanup, + void *endian, + void *print, int msg_size, int traced); +void vl_msg_api_config (vl_msg_api_msg_config_t *); +void vl_msg_api_set_cleanup_handler (int msg_id, void *fp); +void vl_msg_api_queue_handler (unix_shared_memory_queue_t * q); + +void vl_msg_api_barrier_sync (void) __attribute__ ((weak)); +void vl_msg_api_barrier_release (void) __attribute__ ((weak)); +void vl_msg_api_free (void *); +void vl_noop_handler (void *mp); +void vl_msg_api_increment_missing_client_counter (void); +void vl_msg_api_post_mortem_dump (void); +void vl_msg_api_post_mortem_dump_enable_disable (int enable); +void vl_msg_api_register_pd_handler (void *handler, + u16 msg_id_host_byte_order); +int vl_msg_api_pd_handler (void *mp, int rv); + +void vl_msg_api_set_first_available_msg_id (u16 first_avail); +u16 vl_msg_api_get_msg_ids (const char *name, int n); +u32 vl_api_get_msg_index (u8 * name_and_crc); + +typedef clib_error_t *(vl_msg_api_init_function_t) (u32 client_index); + +typedef struct _vl_msg_api_init_function_list_elt +{ + struct _vl_msg_api_init_function_list_elt *next_init_function; + vl_msg_api_init_function_t *f; +} _vl_msg_api_function_list_elt_t; + +typedef struct +{ + void (**msg_handlers) (void *); + int (**pd_msg_handlers) (void *, int); + void (**msg_cleanup_handlers) (void *); + void (**msg_endian_handlers) (void *); + void (**msg_print_handlers) (void *, void *); + const char **msg_names; + u8 *message_bounce; + u8 *is_mp_safe; + struct ring_alloc_ *arings; + u32 ring_misses; + u32 garbage_collects; + u32 missing_clients; + vl_api_trace_t *rx_trace; + vl_api_trace_t *tx_trace; + int msg_print_flag; + trace_cfg_t *api_trace_cfg; + int our_pid; + svm_region_t *vlib_rp; + svm_region_t **mapped_shmem_regions; + struct vl_shmem_hdr_ *shmem_hdr; + vl_api_registration_t **vl_clients; + + u8 *serialized_message_table_in_shmem; + + /* For plugin 
msg allocator */ + u16 first_available_msg_id; + + /* message range by name hash */ + uword *msg_range_by_name; + + /* vector of message ranges */ + vl_api_msg_range_t *msg_ranges; + + /* uid for the api shared memory region */ + int api_uid; + /* gid for the api shared memory region */ + int api_gid; + + /* base virtual address for global VM region */ + u64 global_baseva; + + /* size of the global VM region */ + u64 global_size; + + /* size of the API region */ + u64 api_size; + + /* size of the global VM private mheap */ + u64 global_pvt_heap_size; + + /* size of the api private mheap */ + u64 api_pvt_heap_size; + + /* Client-only data structures */ + unix_shared_memory_queue_t *vl_input_queue; + + /* + * All VLIB-side message handlers use my_client_index to identify + * the queue / client. This works in sim replay. + */ + int my_client_index; + /* + * This is the (shared VM) address of the registration, + * don't use it to id the connection since it can't possibly + * work in simulator replay. + */ + vl_api_registration_t *my_registration; + + i32 vlib_signal; + + /* vlib input queue length */ + u32 vlib_input_queue_length; + + /* client side message index hash table */ + uword *msg_index_by_name_and_crc; + + const char *region_name; + const char *root_path; + + /* Replay in progress? */ + int replay_in_progress; + + /* List of API client reaper functions */ + _vl_msg_api_function_list_elt_t *reaper_function_registrations; + +} api_main_t; + +extern api_main_t api_main; + + +#endif /* included_api_common_h */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vlibmemory/api.h b/src/vlibmemory/api.h index c195e181..2a1438fd 100644 --- a/src/vlibmemory/api.h +++ b/src/vlibmemory/api.h @@ -20,81 +20,12 @@ #ifndef included_vlibmemory_api_h #define included_vlibmemory_api_h -#include #include #include #include #include #include - -/* Allocated in shared memory */ - -/* - * Ring-allocation scheme for client API messages - * - * Only one proc/thread has control of a given message buffer. - * To free a buffer allocated from one of these rings, we clear - * a field in the buffer (header), and leave. - * - * No locks, no hits, no errors... - */ -typedef struct ring_alloc_ -{ - unix_shared_memory_queue_t *rp; - u16 size; - u16 nitems; - u32 hits; - u32 misses; -} ring_alloc_t; - -/* - * Initializers for the (shared-memory) rings - * _(size, n). Note: each msg has an 8 byte header. - * Might want to change that to an index sometime. - */ -#define foreach_vl_aring_size \ -_(64+8, 1024) \ -_(256+8, 128) \ -_(1024+8, 64) - -#define foreach_clnt_aring_size \ -_(1024+8, 1024) \ -_(2048+8, 128) \ -_(4096+8, 8) - -typedef struct vl_shmem_hdr_ -{ - int version; - - /* getpid () for the VLIB client process */ - volatile int vl_pid; - - /* Client sends VLIB msgs here. */ - unix_shared_memory_queue_t *vl_input_queue; - - /* Vector of rings; one for each size. */ - - /* VLIB allocates buffers to send msgs to clients here. */ - ring_alloc_t *vl_rings; - - /* Clients allocate buffer to send msgs to VLIB here. 
*/ - ring_alloc_t *client_rings; - - /* Number of detected application restarts */ - u32 application_restarts; - - /* Number of messages reclaimed during application restart */ - u32 restart_reclaims; - - /* Number of garbage-collected messages */ - u32 garbage_collects; - -} vl_shmem_hdr_t; - -#define VL_SHM_VERSION 2 - -#define VL_API_EPOCH_MASK 0xFF -#define VL_API_EPOCH_SHIFT 8 +#include static inline u32 vl_msg_api_handle_get_epoch (u32 index) @@ -118,43 +49,7 @@ vl_msg_api_handle_from_index_and_epoch (u32 index, u32 epoch) return handle; } -void *vl_msg_api_alloc (int nbytes); -void *vl_msg_api_alloc_or_null (int nbytes); -void *vl_msg_api_alloc_as_if_client (int nbytes); -void *vl_msg_api_alloc_as_if_client_or_null (int nbytes); -void vl_msg_api_free (void *a); -int vl_map_shmem (const char *region_name, int is_vlib); -void vl_register_mapped_shmem_region (svm_region_t * rp); -void vl_unmap_shmem (void); -void vl_msg_api_send_shmem (unix_shared_memory_queue_t * q, u8 * elem); -void vl_msg_api_send_shmem_nolock (unix_shared_memory_queue_t * q, u8 * elem); -void vl_msg_api_send (vl_api_registration_t * rp, u8 * elem); -int vl_client_connect (const char *name, int ctx_quota, int input_queue_size); -void vl_client_disconnect (void); -unix_shared_memory_queue_t *vl_api_client_index_to_input_queue (u32 index); -vl_api_registration_t *vl_api_client_index_to_registration (u32 index); -int vl_client_api_map (const char *region_name); -void vl_client_api_unmap (void); -void vl_set_memory_region_name (const char *name); -void vl_set_memory_root_path (const char *root_path); -void vl_set_memory_uid (int uid); -void vl_set_memory_gid (int gid); -void vl_set_global_memory_baseva (u64 baseva); -void vl_set_global_memory_size (u64 size); -void vl_set_api_memory_size (u64 size); -void vl_set_global_pvt_heap_size (u64 size); -void vl_set_api_pvt_heap_size (u64 size); void vl_enable_disable_memory_api (vlib_main_t * vm, int yesno); -void vl_client_disconnect_from_vlib (void); -int vl_client_connect_to_vlib (const char *svm_name, - const char *client_name, int rx_queue_size); -int vl_client_connect_to_vlib_no_rx_pthread (const char *svm_name, - const char *client_name, - int rx_queue_size); -u16 vl_client_get_first_plugin_msg_id (const char *plugin_name); - -void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length); - #endif /* included_vlibmemory_api_h */ /* diff --git a/src/vlibmemory/api_common.h b/src/vlibmemory/api_common.h new file mode 100644 index 00000000..53909cc4 --- /dev/null +++ b/src/vlibmemory/api_common.h @@ -0,0 +1,138 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2009 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#ifndef included_vlibmemory_api_common_h +#define included_vlibmemory_api_common_h + +#include +#include +#include + +/* Allocated in shared memory */ + +/* + * Ring-allocation scheme for client API messages + * + * Only one proc/thread has control of a given message buffer. + * To free a buffer allocated from one of these rings, we clear + * a field in the buffer (header), and leave. + * + * No locks, no hits, no errors... + */ +typedef struct ring_alloc_ +{ + unix_shared_memory_queue_t *rp; + u16 size; + u16 nitems; + u32 hits; + u32 misses; +} ring_alloc_t; + +/* + * Initializers for the (shared-memory) rings + * _(size, n). Note: each msg has an 8 byte header. + * Might want to change that to an index sometime. + */ +#define foreach_vl_aring_size \ +_(64+8, 1024) \ +_(256+8, 128) \ +_(1024+8, 64) + +#define foreach_clnt_aring_size \ +_(1024+8, 1024) \ +_(2048+8, 128) \ +_(4096+8, 8) + +typedef struct vl_shmem_hdr_ +{ + int version; + + /* getpid () for the VLIB client process */ + volatile int vl_pid; + + /* Client sends VLIB msgs here. */ + unix_shared_memory_queue_t *vl_input_queue; + + /* Vector of rings; one for each size. */ + + /* VLIB allocates buffers to send msgs to clients here. */ + ring_alloc_t *vl_rings; + + /* Clients allocate buffer to send msgs to VLIB here. */ + ring_alloc_t *client_rings; + + /* Number of detected application restarts */ + u32 application_restarts; + + /* Number of messages reclaimed during application restart */ + u32 restart_reclaims; + + /* Number of garbage-collected messages */ + u32 garbage_collects; + +} vl_shmem_hdr_t; + +#define VL_SHM_VERSION 2 + +#define VL_API_EPOCH_MASK 0xFF +#define VL_API_EPOCH_SHIFT 8 + +void *vl_msg_api_alloc (int nbytes); +void *vl_msg_api_alloc_or_null (int nbytes); +void *vl_msg_api_alloc_as_if_client (int nbytes); +void *vl_msg_api_alloc_as_if_client_or_null (int nbytes); +void vl_msg_api_free (void *a); +int vl_map_shmem (const char *region_name, int is_vlib); +void vl_register_mapped_shmem_region (svm_region_t * rp); +void vl_unmap_shmem (void); +void vl_msg_api_send_shmem (unix_shared_memory_queue_t * q, u8 * elem); +void vl_msg_api_send_shmem_nolock (unix_shared_memory_queue_t * q, u8 * elem); +void vl_msg_api_send (vl_api_registration_t * rp, u8 * elem); +int vl_client_connect (const char *name, int ctx_quota, int input_queue_size); +void vl_client_disconnect (void); +unix_shared_memory_queue_t *vl_api_client_index_to_input_queue (u32 index); +vl_api_registration_t *vl_api_client_index_to_registration (u32 index); +int vl_client_api_map (const char *region_name); +void vl_client_api_unmap (void); +void vl_set_memory_region_name (const char *name); +void vl_set_memory_root_path (const char *root_path); +void vl_set_memory_uid (int uid); +void vl_set_memory_gid (int gid); +void vl_set_global_memory_baseva (u64 baseva); +void vl_set_global_memory_size (u64 size); +void vl_set_api_memory_size (u64 size); +void vl_set_global_pvt_heap_size (u64 size); +void vl_set_api_pvt_heap_size (u64 size); +void vl_client_disconnect_from_vlib (void); +int vl_client_connect_to_vlib (const char *svm_name, const char *client_name, + int rx_queue_size); +int vl_client_connect_to_vlib_no_rx_pthread (const char *svm_name, + const char *client_name, + int rx_queue_size); +u16 vl_client_get_first_plugin_msg_id (const char *plugin_name); + +void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length); + +#endif /* included_vlibmemory_api_common_h */ 
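The declarations above are essentially the whole client-side contract for the shared-memory API: connect to the region, allocate a message, enqueue it on the vl_input_queue published in vl_shmem_hdr_t, then disconnect. Below is a minimal sketch of that flow built on these prototypes (plus the api_main declared in vlibapi/api_common.h); the "/vpe-api" region name, the my_msg_t type and MY_MSG_ID are illustrative assumptions, not definitions from this file.

static int
example_send_one_message (void)
{
  api_main_t *am = &api_main;
  my_msg_t *mp;			/* hypothetical request type */

  /* Map the API shared-memory region and start the client rx thread */
  if (vl_client_connect_to_vlib ("/vpe-api", "example-client",
				 32 /* rx queue length */ ) < 0)
    return -1;

  /* Allocate a message buffer the way a client does */
  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp));
  memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = ntohs (MY_MSG_ID);	/* hypothetical message id */

  /* Hand the message to VLIB via the input queue in the shared header */
  vl_msg_api_send_shmem (am->shmem_hdr->vl_input_queue, (u8 *) & mp);

  vl_client_disconnect_from_vlib ();
  return 0;
}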
+ +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra.am b/src/vppinfra.am index ff2b8ea4..785445a6 100644 --- a/src/vppinfra.am +++ b/src/vppinfra.am @@ -167,6 +167,7 @@ nobase_include_HEADERS = \ vppinfra/byte_order.h \ vppinfra/cache.h \ vppinfra/clib.h \ + vppinfra/clib_error.h \ vppinfra/cpu.h \ vppinfra/crc32.h \ vppinfra/dlist.h \ diff --git a/src/vppinfra/clib_error.h b/src/vppinfra/clib_error.h new file mode 100644 index 00000000..45f18eb1 --- /dev/null +++ b/src/vppinfra/clib_error.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef included_clib_error_h +#define included_clib_error_h + +#include + +typedef struct +{ + /* Error message. */ + u8 *what; + + /* Where error occurred (e.g. __FUNCTION__ __LINE__) */ + const u8 *where; + + uword flags; + + /* Error code (e.g. errno for Unix errors). */ + any code; +} clib_error_t; + +#endif diff --git a/src/vppinfra/error.h b/src/vppinfra/error.h index e51f938e..e0e2d472 100644 --- a/src/vppinfra/error.h +++ b/src/vppinfra/error.h @@ -72,19 +72,7 @@ void clib_error_register_handler (clib_error_handler_func_t func, void *arg); #define clib_panic(format,args...) \ _clib_error (CLIB_ERROR_ABORT, (char *) clib_error_function, __LINE__, format, ## args) -typedef struct -{ - /* Error message. */ - u8 *what; - - /* Where error occurred (e.g. __FUNCTION__ __LINE__) */ - const u8 *where; - - uword flags; - - /* Error code (e.g. errno for Unix errors). */ - any code; -} clib_error_t; +#include #define clib_error_get_code(err) ((err) ? (err)->code : 0) #define clib_error_set_code(err, c) \ -- cgit 1.2.3-korg From 2c25a62cc1cc4937165de740a3b32d78429c72d6 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Mon, 26 Jun 2017 11:35:07 -0400 Subject: Horizontal (nSessions) scaling draft - Data structure preallocation. - Input state machine fixes for mid-stream 3-way handshake retries. - Batch connections in the builtin_client - Multiple private fifo segment support - Fix elog simultaneous event type registration - Fix sacks when segment hole is added after highest sacked - Add "accepting" session state for sessions pending accept - Add ssvm non-recursive locking - Estimate RTT for syn-ack - Don't init fifo pointers. 
We're using relative offsets for ooo segments - CLI to dump individual session Change-Id: Ie0598563fd246537bafba4feed7985478ea1d415 Signed-off-by: Dave Barach Signed-off-by: Florin Coras --- src/svm/ssvm.h | 17 +++ src/svm/svm_fifo.c | 56 +++++--- src/svm/svm_fifo.h | 16 ++- src/svm/svm_fifo_segment.c | 114 +++++++++++----- src/svm/svm_fifo_segment.h | 4 +- src/svm/test_svm_fifo1.c | 10 +- src/uri/uri_udp_test.c | 2 +- src/vnet/session/application.c | 2 + src/vnet/session/application_interface.c | 21 --- src/vnet/session/application_interface.h | 12 +- src/vnet/session/node.c | 23 +--- src/vnet/session/segment_manager.c | 26 ++-- src/vnet/session/segment_manager.h | 4 + src/vnet/session/session.c | 72 +++++++--- src/vnet/session/session.h | 30 ++++- src/vnet/session/session_cli.c | 99 +++++++++++--- src/vnet/session/transport.h | 6 + src/vnet/tcp/builtin_client.c | 118 +++++++++++----- src/vnet/tcp/builtin_client.h | 7 +- src/vnet/tcp/builtin_server.c | 66 +++++++-- src/vnet/tcp/tcp.c | 225 ++++++++++++++++++++++++++++--- src/vnet/tcp/tcp.h | 13 ++ src/vnet/tcp/tcp_debug.h | 13 +- src/vnet/tcp/tcp_input.c | 97 ++++++++----- src/vnet/tcp/tcp_newreno.c | 4 +- src/vnet/tcp/tcp_output.c | 53 +++++--- src/vnet/tcp/tcp_packet.h | 1 + src/vnet/tcp/tcp_test.c | 10 +- src/vnet/udp/udp_input.c | 2 +- 29 files changed, 838 insertions(+), 285 deletions(-) (limited to 'src/svm') diff --git a/src/svm/ssvm.h b/src/svm/ssvm.h index bccfc164..8466e155 100644 --- a/src/svm/ssvm.h +++ b/src/svm/ssvm.h @@ -101,6 +101,15 @@ ssvm_lock (ssvm_shared_header_t * h, u32 my_pid, u32 tag) h->tag = tag; } +always_inline void +ssvm_lock_non_recursive (ssvm_shared_header_t * h, u32 tag) +{ + while (__sync_lock_test_and_set (&h->lock, 1)) + ; + + h->tag = tag; +} + always_inline void ssvm_unlock (ssvm_shared_header_t * h) { @@ -113,6 +122,14 @@ ssvm_unlock (ssvm_shared_header_t * h) } } +always_inline void +ssvm_unlock_non_recursive (ssvm_shared_header_t * h) +{ + h->tag = 0; + CLIB_MEMORY_BARRIER (); + h->lock = 0; +} + static inline void * ssvm_push_heap (ssvm_shared_header_t * sh) { diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index aed5d6a7..da60fee5 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -19,29 +19,29 @@ static inline u8 position_lt (svm_fifo_t * f, u32 a, u32 b) { - return (ooo_segment_distance_to_tail (f, a) - < ooo_segment_distance_to_tail (f, b)); + return (ooo_segment_distance_from_tail (f, a) + < ooo_segment_distance_from_tail (f, b)); } static inline u8 position_leq (svm_fifo_t * f, u32 a, u32 b) { - return (ooo_segment_distance_to_tail (f, a) - <= ooo_segment_distance_to_tail (f, b)); + return (ooo_segment_distance_from_tail (f, a) + <= ooo_segment_distance_from_tail (f, b)); } static inline u8 position_gt (svm_fifo_t * f, u32 a, u32 b) { - return (ooo_segment_distance_to_tail (f, a) - > ooo_segment_distance_to_tail (f, b)); + return (ooo_segment_distance_from_tail (f, a) + > ooo_segment_distance_from_tail (f, b)); } static inline u32 position_diff (svm_fifo_t * f, u32 posa, u32 posb) { - return ooo_segment_distance_to_tail (f, posa) - - ooo_segment_distance_to_tail (f, posb); + return ooo_segment_distance_from_tail (f, posa) + - ooo_segment_distance_from_tail (f, posb); } static inline u32 @@ -113,7 +113,7 @@ svm_fifo_create (u32 data_size_in_bytes) if (f == 0) return 0; - memset (f, 0, sizeof (*f) + data_size_in_bytes); + memset (f, 0, sizeof (*f)); f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; @@ -204,7 +204,19 @@ ooo_segment_add (svm_fifo_t * 
f, u32 offset, u32 length) { s = prev; s_end_pos = ooo_segment_end_pos (f, s); - goto merge; + + /* Check head and tail now since segment may be wider at both ends so + * merge tests lower won't work */ + if (position_lt (f, normalized_position, s->start)) + { + s->start = normalized_position; + s->length = position_diff (f, s_end_pos, s->start); + } + if (position_gt (f, normalized_end_position, s_end_pos)) + { + s->length = position_diff (f, normalized_end_position, s->start); + } + goto check_tail; } s_index = s - f->ooo_segments; @@ -257,8 +269,6 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) * Merge needed */ -merge: - /* Merge at head */ if (position_lt (f, normalized_position, s->start)) { @@ -278,6 +288,7 @@ merge: goto done; } +check_tail: /* The new segment's tail may cover multiple smaller ones */ if (position_gt (f, normalized_end_position, s_end_pos)) { @@ -296,7 +307,8 @@ merge: /* If partial overlap with last, merge */ if (it && position_leq (f, it->start, normalized_end_position)) { - s->length = ooo_segment_end_pos (f, it) - s->start; + s->length = + position_diff (f, ooo_segment_end_pos (f, it), s->start); ooo_segment_del (f, it - f->ooo_segments); } } @@ -319,9 +331,9 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) i32 diff; s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); + diff = ooo_segment_distance_to_tail (f, s->start); - diff = (f->tail >= s->start) ? - f->tail - s->start : f->nitems + f->tail - s->start; + ASSERT (diff != n_bytes_enqueued); if (diff > n_bytes_enqueued) return 0; @@ -345,8 +357,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) if (s->next != OOO_SEGMENT_INVALID_INDEX) { s = pool_elt_at_index (f->ooo_segments, s->next); - diff = (f->tail >= s->start) ? - f->tail - s->start : f->nitems + f->tail - s->start; + diff = ooo_segment_distance_to_tail (f, s->start); ooo_segment_del (f, index); } /* End of search */ @@ -357,6 +368,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) } } + ASSERT (bytes >= 0 && bytes <= f->nitems); return bytes; } @@ -401,6 +413,8 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) } else { + ASSERT (0); + /* Account for a zero-copy enqueue done elsewhere */ ASSERT (max_bytes <= (nitems - cursize)); f->tail += max_bytes; @@ -413,6 +427,7 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) total_copy_bytes += ooo_segment_try_collect (f, total_copy_bytes); /* Atomically increase the queue length */ + ASSERT (cursize + total_copy_bytes <= nitems); __sync_fetch_and_add (&f->cursize, total_copy_bytes); return (total_copy_bytes); @@ -475,6 +490,8 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, cursize = svm_fifo_max_dequeue (f); nitems = f->nitems; + ASSERT (required_bytes < nitems); + normalized_offset = (f->tail + offset) % nitems; /* Will this request fit? */ @@ -557,6 +574,7 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) } else { + ASSERT (0); /* Account for a zero-copy dequeue done elsewhere */ ASSERT (max_bytes <= cursize); f->head += max_bytes; @@ -565,6 +583,8 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) total_copy_bytes = max_bytes; } + ASSERT (f->head <= nitems); + ASSERT (cursize >= total_copy_bytes); __sync_fetch_and_sub (&f->cursize, total_copy_bytes); return (total_copy_bytes); @@ -702,6 +722,8 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes) f->head = (f->head == nitems) ? 
0 : f->head; } + ASSERT (f->head <= nitems); + ASSERT (cursize >= total_drop_bytes); __sync_fetch_and_sub (&f->cursize, total_drop_bytes); return total_drop_bytes; diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index f32ef41d..fe21de47 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -133,25 +133,31 @@ svm_fifo_newest_ooo_segment (svm_fifo_t * f) } always_inline u32 -ooo_segment_distance_to_tail (svm_fifo_t * f, u32 a) +ooo_segment_distance_from_tail (svm_fifo_t * f, u32 pos) { /* Ambiguous. Assumption is that ooo segments don't touch tail */ - if (a == f->tail && f->tail == f->head) + if (PREDICT_FALSE (pos == f->tail && f->tail == f->head)) return f->nitems; - return ((f->nitems + a - f->tail) % f->nitems); + return (((f->nitems + pos) - f->tail) % f->nitems); +} + +always_inline u32 +ooo_segment_distance_to_tail (svm_fifo_t * f, u32 pos) +{ + return (((f->nitems + f->tail) - pos) % f->nitems); } always_inline u32 ooo_segment_offset (svm_fifo_t * f, ooo_segment_t * s) { - return ooo_segment_distance_to_tail (f, s->start); + return ooo_segment_distance_from_tail (f, s->start); } always_inline u32 ooo_segment_end_offset (svm_fifo_t * f, ooo_segment_t * s) { - return ooo_segment_distance_to_tail (f, s->start) + s->length; + return ooo_segment_distance_from_tail (f, s->start) + s->length; } always_inline u32 diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index c4ac2352..69d4ecb9 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -35,6 +35,11 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, rx_fifo_size = (sizeof (*f) + a->rx_fifo_size) * a->preallocated_fifo_pairs; tx_fifo_size = (sizeof (*f) + a->tx_fifo_size) * a->preallocated_fifo_pairs; + if (0) + clib_warning ("rx_fifo_size %u (%d mb), tx_fifo_size %u (%d mb)", + rx_fifo_size, rx_fifo_size >> 20, + tx_fifo_size, tx_fifo_size >> 20); + /* Allocate rx fifo space. May fail. */ rx_fifo_space = clib_mem_alloc_aligned_at_offset (rx_fifo_size, CLIB_CACHE_LINE_BYTES, 0 /* align_offset */ , @@ -129,7 +134,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) ssvm_pop_heap (oldheap); sh->ready = 1; - a->new_segment_index = s - sm->segments; + vec_add1 (a->new_segment_indices, s - sm->segments); return (0); } @@ -141,35 +146,81 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; ssvm_shared_header_t *sh; svm_fifo_segment_header_t *fsh; + void *oldheap; + u8 **heaps = 0; + mheap_t *heap_header; + int segment_count = 1; + int i; - /* Allocate a fresh segment */ - pool_get (sm->segments, s); - memset (s, 0, sizeof (*s)); - - s->ssvm.ssvm_size = ~0; - s->ssvm.i_am_master = 1; - s->ssvm.my_pid = getpid (); - s->ssvm.name = (u8 *) a->segment_name; - s->ssvm.requested_va = ~0; - - /* Allocate a [sic] shared memory header, in process memory... 
*/ - sh = clib_mem_alloc_aligned (sizeof (*sh), CLIB_CACHE_LINE_BYTES); - s->ssvm.sh = sh; + if (a->private_segment_count && a->private_segment_size) + { + void *mem; + u8 *heap; + u32 pagesize = clib_mem_get_page_size (); + u32 rnd_size; - memset (sh, 0, sizeof (*sh)); - sh->heap = clib_mem_get_heap (); + for (i = 0; i < a->private_segment_count; i++) + { + rnd_size = (a->private_segment_size + (pagesize - 1)) & ~pagesize; + + mem = mmap (0, rnd_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + -1 /* fd */ , 0 /* offset */ ); + + if (mem == MAP_FAILED) + { + clib_unix_warning ("mmap"); + return -1; + } + heap = mheap_alloc (mem, rnd_size); + heap_header = mheap_header (heap); + heap_header->flags |= MHEAP_FLAG_THREAD_SAFE; + vec_add1 (heaps, heap); + } + segment_count = a->private_segment_count; + } - /* Set up svm_fifo_segment shared header */ - fsh = clib_mem_alloc (sizeof (*fsh)); - memset (fsh, 0, sizeof (*fsh)); - sh->opaque[0] = fsh; - s->h = fsh; - fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + /* Spread preallocated fifo pairs across segments */ + a->preallocated_fifo_pairs /= segment_count; - preallocate_fifo_pairs (fsh, a); + /* Allocate segments */ + for (i = 0; i < segment_count; i++) + { + pool_get (sm->segments, s); + memset (s, 0, sizeof (*s)); + + s->ssvm.ssvm_size = ~0; + s->ssvm.i_am_master = 1; + s->ssvm.my_pid = getpid (); + s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.requested_va = ~0; + + /* Allocate a [sic] shared memory header, in process memory... */ + sh = clib_mem_alloc_aligned (sizeof (*sh), CLIB_CACHE_LINE_BYTES); + s->ssvm.sh = sh; + + memset (sh, 0, sizeof (*sh)); + sh->heap = a->private_segment_count ? heaps[i] : clib_mem_get_heap (); + + /* Set up svm_fifo_segment shared header */ + fsh = clib_mem_alloc (sizeof (*fsh)); + memset (fsh, 0, sizeof (*fsh)); + sh->opaque[0] = fsh; + s->h = fsh; + fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + + if (a->private_segment_count) + { + oldheap = clib_mem_get_heap (); + clib_mem_set_heap (sh->heap); + preallocate_fifo_pairs (fsh, a); + clib_mem_set_heap (oldheap); + } - sh->ready = 1; - a->new_segment_index = s - sm->segments; + sh->ready = 1; + vec_add1 (a->new_segment_indices, s - sm->segments); + } + vec_free (heaps); return (0); } @@ -205,7 +256,7 @@ svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; s->h = fsh; - a->new_segment_index = s - sm->segments; + vec_add1 (a->new_segment_indices, s - sm->segments); return (0); } @@ -230,7 +281,7 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - ssvm_lock (sh, 1, 0); + ssvm_lock_non_recursive (sh, 1); oldheap = ssvm_push_heap (sh); switch (list_index) @@ -261,7 +312,7 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, if (PREDICT_FALSE (f == 0)) { ssvm_pop_heap (oldheap); - ssvm_unlock (sh); + ssvm_unlock_non_recursive (sh); return (0); } @@ -281,7 +332,7 @@ found: } ssvm_pop_heap (oldheap); - ssvm_unlock (sh); + ssvm_unlock_non_recursive (sh); return (f); } @@ -293,10 +344,11 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, svm_fifo_segment_header_t *fsh; void *oldheap; + sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - ssvm_lock (sh, 1, 0); + ssvm_lock_non_recursive (sh, 2); oldheap = ssvm_push_heap (sh); switch (list_index) @@ -325,7 +377,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * 
f, } ssvm_pop_heap (oldheap); - ssvm_unlock (sh); + ssvm_unlock_non_recursive (sh); } void diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 31e14db5..a7a3f469 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -57,10 +57,12 @@ typedef struct { char *segment_name; u32 segment_size; - u32 new_segment_index; + u32 *new_segment_indices; u32 rx_fifo_size; u32 tx_fifo_size; u32 preallocated_fifo_pairs; + u32 private_segment_count; + u32 private_segment_size; } svm_fifo_segment_create_args_t; static inline svm_fifo_segment_private_t * diff --git a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c index 63b4a9b7..63d75845 100644 --- a/src/svm/test_svm_fifo1.c +++ b/src/svm/test_svm_fifo1.c @@ -39,7 +39,7 @@ hello_world (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); @@ -92,7 +92,7 @@ master (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); @@ -128,7 +128,7 @@ mempig (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); for (i = 0; i < 1000; i++) { @@ -186,7 +186,7 @@ offset (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 200 << 10, FIFO_SEGMENT_RX_FREELIST); @@ -246,7 +246,7 @@ slave (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_attach returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); sh = sp->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 45ad35a4..a8e39eaa 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -707,7 +707,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - segment_index = a->new_segment_index; + segment_index = a->new_segment_indices[0]; vec_add2 (utm->seg, seg, 1); memcpy (seg, sm->segments + segment_index, sizeof (*seg)); sleep (1); diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 3cc56f37..8a953719 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -174,6 +174,8 @@ application_init (application_t * app, u32 api_client_index, u64 * options, props->preallocated_fifo_pairs = options[APP_OPTIONS_PREALLOC_FIFO_PAIRS]; props->use_private_segment = options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_BUILTIN_APP; + props->private_segment_count = options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT]; + props->private_segment_size = options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE]; first_seg_size = options[SESSION_OPTIONS_SEGMENT_SIZE]; if ((rv = segment_manager_init (sm, props, first_seg_size))) diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 338ae857..566a52d7 100644 --- a/src/vnet/session/application_interface.c +++ 
b/src/vnet/session/application_interface.c @@ -275,27 +275,6 @@ vnet_application_detach (vnet_app_detach_args_t * a) return 0; } -session_type_t -session_type_from_proto_and_ip (session_api_proto_t proto, u8 is_ip4) -{ - if (proto == SESSION_PROTO_TCP) - { - if (is_ip4) - return SESSION_TYPE_IP4_TCP; - else - return SESSION_TYPE_IP6_TCP; - } - else - { - if (is_ip4) - return SESSION_TYPE_IP4_UDP; - else - return SESSION_TYPE_IP6_UDP; - } - - return SESSION_N_TYPES; -} - int vnet_bind_uri (vnet_bind_args_t * a) { diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 4d6f9def..ed9f89b3 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -22,12 +22,6 @@ #include #include -typedef enum _session_api_proto -{ - SESSION_PROTO_TCP, - SESSION_PROTO_UDP -} session_api_proto_t; - typedef struct _vnet_app_attach_args_t { /** Binary API client index */ @@ -65,7 +59,7 @@ typedef struct _vnet_bind_args_t struct { transport_endpoint_t tep; - session_api_proto_t proto; + transport_proto_t proto; }; }; @@ -98,7 +92,7 @@ typedef struct _vnet_connect_args struct { transport_endpoint_t tep; - session_api_proto_t proto; + transport_proto_t proto; }; }; u32 app_index; @@ -120,6 +114,8 @@ typedef enum APP_EVT_QUEUE_SIZE, APP_OPTIONS_FLAGS, APP_OPTIONS_PREALLOC_FIFO_PAIRS, + APP_OPTIONS_PRIVATE_SEGMENT_COUNT, + APP_OPTIONS_PRIVATE_SEGMENT_SIZE, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, SESSION_OPTIONS_RX_FIFO_SIZE, diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index b24f5fd9..56e62637 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -378,24 +378,12 @@ session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, n_tx_pkts, 0); } -stream_session_t * -session_event_get_session (session_fifo_event_t * e0, u8 thread_index) +always_inline stream_session_t * +session_event_get_session (session_fifo_event_t * e, u8 thread_index) { - svm_fifo_t *f0; - stream_session_t *s0; - u32 session_index0; - - f0 = e0->fifo; - session_index0 = f0->master_session_index; - - /* $$$ add multiple event queues, per vpp worker thread */ - ASSERT (f0->master_thread_index == thread_index); - - s0 = stream_session_get_if_valid (session_index0, thread_index); - - ASSERT (s0 == 0 || s0->thread_index == thread_index); - - return s0; + ASSERT (e->fifo->master_thread_index == thread_index); + return stream_session_get_if_valid (e->fifo->master_session_index, + thread_index); } void @@ -569,7 +557,6 @@ skip_dequeue: case FIFO_EVENT_BUILTIN_RX: s0 = session_event_get_session (e0, my_thread_index); svm_fifo_unset_event (s0->server_rx_fifo); - /* Get session's server */ app = application_get (s0->app_index); app->cb_fns.builtin_server_rx_callback (s0); break; diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index dcef6261..262b7faa 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -30,7 +30,7 @@ segment_manager_t *segment_managers = 0; /** * Process private segment index */ -u32 private_segment_index = ~0; +u32 *private_segment_indices; /** * Default fifo and segment size. TODO config. 
@@ -70,7 +70,8 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, return VNET_API_ERROR_SVM_SEGMENT_CREATE_FAIL; } - vec_add1 (sm->segment_indices, ca->new_segment_index); + vec_append (sm->segment_indices, ca->new_segment_indices); + vec_free (ca->new_segment_indices); return 0; } @@ -111,22 +112,23 @@ static void { svm_fifo_segment_create_args_t _a, *a = &_a; - if (private_segment_index != ~0) + if (private_segment_indices) return; memset (a, 0, sizeof (*a)); a->segment_name = "process-private-segment"; a->segment_size = ~0; - a->new_segment_index = ~0; a->rx_fifo_size = props->rx_fifo_size; a->tx_fifo_size = props->tx_fifo_size; a->preallocated_fifo_pairs = props->preallocated_fifo_pairs; + a->private_segment_count = props->private_segment_count; + a->private_segment_size = props->private_segment_size; if (svm_fifo_segment_create_process_private (a)) clib_warning ("Failed to create process private segment"); - private_segment_index = a->new_segment_index; - ASSERT (private_segment_index != ~0); + private_segment_indices = a->new_segment_indices; + ASSERT (vec_len (private_segment_indices)); } /** @@ -156,10 +158,10 @@ segment_manager_init (segment_manager_t * sm, } else { - if (private_segment_index == ~0) + if (vec_len (private_segment_indices) == 0) segment_manager_alloc_process_private_segment (properties); - ASSERT (private_segment_index != ~0); - vec_add1 (sm->segment_indices, private_segment_index); + ASSERT (vec_len (private_segment_indices)); + vec_append (sm->segment_indices, private_segment_indices); } clib_spinlock_init (&sm->lockp); @@ -320,7 +322,7 @@ again: /* See if we're supposed to create another segment */ if (*server_rx_fifo == 0) { - if (sm->properties->add_segment) + if (sm->properties->add_segment && !sm->properties->use_private_segment) { if (added_a_segment) { @@ -379,6 +381,10 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, svm_fifo_segment_free_fifo (fifo_segment, tx_fifo, FIFO_SEGMENT_TX_FREELIST); + /* Don't try to delete process-private segments */ + if (sm->properties->private_segment_count > 0) + return; + /* Remove segment only if it holds no fifos and not the first */ if (sm->segment_indices[0] != svm_segment_index && !svm_fifo_segment_has_fifos (fifo_segment)) diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h index df38d2b3..41abeb22 100644 --- a/src/vnet/session/segment_manager.h +++ b/src/vnet/session/segment_manager.h @@ -39,6 +39,10 @@ typedef struct _segment_manager_properties /** Use private memory segment instead of shared memory */ u8 use_private_segment; + + /** Use one or more private mheaps, instead of the global heap */ + u32 private_segment_count; + u32 private_segment_size; } segment_manager_properties_t; typedef struct _segment_manager diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index fe198044..0a86d563 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -198,21 +198,28 @@ stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto) */ stream_session_t * stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) + u16 lcl_port, u16 rmt_port, u8 proto) { session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; + stream_session_t *s; int rv; /* Lookup session amongst established ones */ make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto); rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, 
&kv4); if (rv == 0) - return stream_session_get_tsi (kv4.value, my_thread_index); + return stream_session_get_from_handle (kv4.value); /* If nothing is found, check if any listener is available */ - return stream_session_lookup_listener4 (lcl, lcl_port, proto); + if ((s = stream_session_lookup_listener4 (lcl, lcl_port, proto))) + return s; + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); + if (rv == 0) + return stream_session_get_from_handle (kv4.value); + return 0; } stream_session_t * @@ -242,20 +249,27 @@ stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto) * wildcarded local source (listener bound to all interfaces) */ stream_session_t * stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) + u16 lcl_port, u16 rmt_port, u8 proto) { session_manager_main_t *smm = vnet_get_session_manager_main (); session_kv6_t kv6; + stream_session_t *s; int rv; make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto); rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); if (rv == 0) - return stream_session_get_tsi (kv6.value, my_thread_index); + return stream_session_get_from_handle (kv6.value); /* If nothing is found, check if any listener is available */ - return stream_session_lookup_listener6 (lcl, lcl_port, proto); + if ((s = stream_session_lookup_listener6 (lcl, lcl_port, proto))) + return s; + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); + if (rv == 0) + return stream_session_get_from_handle (kv6.value); + return 0; } stream_session_t * @@ -340,7 +354,6 @@ stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); if (rv == 0) return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF); - return 0; } @@ -390,6 +403,8 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, u32 thread_index = tc->thread_index; int rv; + ASSERT (thread_index == vlib_get_thread_index ()); + if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo, &server_tx_fifo, &fifo_segment_index))) @@ -854,6 +869,7 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, s->app_index = server->index; s->listener_index = listener_index; + s->session_state = SESSION_STATE_ACCEPTING; /* Shoulder-tap the server */ if (notify) @@ -1088,6 +1104,27 @@ session_vpp_event_queue_allocate (session_manager_main_t * smm, } } +session_type_t +session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4) +{ + if (proto == TRANSPORT_PROTO_TCP) + { + if (is_ip4) + return SESSION_TYPE_IP4_TCP; + else + return SESSION_TYPE_IP6_TCP; + } + else + { + if (is_ip4) + return SESSION_TYPE_IP4_UDP; + else + return SESSION_TYPE_IP6_UDP; + } + + return SESSION_N_TYPES; +} + static clib_error_t * session_manager_main_enable (vlib_main_t * vm) { @@ -1131,14 +1168,13 @@ session_manager_main_enable (vlib_main_t * vm) session_vpp_event_queue_allocate (smm, i); /* $$$$ preallocate hack config parameter */ - for (i = 0; i < 200000; i++) + for (i = 0; i < smm->preallocated_sessions; i++) { - stream_session_t *ss; + stream_session_t *ss __attribute__ ((unused)); pool_get_aligned (smm->sessions[0], ss, CLIB_CACHE_LINE_BYTES); - memset (ss, 0, sizeof (*ss)); } - for (i = 0; i < 200000; i++) + for (i = 0; i < smm->preallocated_sessions; i++) pool_put_index (smm->sessions[0], i); 
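  /*
   * The two loops above pre-warm the main-thread session pool:
   * pool_get_aligned () grows smm->sessions[0] to
   * "preallocated_sessions" elements, and pool_put_index () then
   * returns every index to the pool free list, so sessions created on
   * the data path reuse these slots instead of growing the pool while
   * traffic is flowing.
   */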
clib_bihash_init_16_8 (&smm->v4_session_hash, "v4 session table", @@ -1208,9 +1244,10 @@ session_manager_main_init (vlib_main_t * vm) return 0; } -VLIB_INIT_FUNCTION (session_manager_main_init) - static clib_error_t *session_config_fn (vlib_main_t * vm, - unformat_input_t * input) +VLIB_INIT_FUNCTION (session_manager_main_init); + +static clib_error_t * +session_config_fn (vlib_main_t * vm, unformat_input_t * input) { session_manager_main_t *smm = &session_manager_main; u32 nitems; @@ -1224,6 +1261,9 @@ VLIB_INIT_FUNCTION (session_manager_main_init) else clib_warning ("event queue length %d too small, ignored", nitems); } + if (unformat (input, "preallocated-sessions %d", + &smm->preallocated_sessions)) + ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 5fa4225c..b4507d4e 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -80,6 +80,10 @@ typedef enum SESSION_N_TYPES, } session_type_t; + +session_type_t +session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4); + /* * Application session state */ @@ -87,6 +91,7 @@ typedef enum { SESSION_STATE_LISTENING, SESSION_STATE_CONNECTING, + SESSION_STATE_ACCEPTING, SESSION_STATE_READY, SESSION_STATE_CLOSED, SESSION_STATE_N_STATES, @@ -211,8 +216,12 @@ struct _session_manager_main /** Per transport rx function that can either dequeue or peek */ session_fifo_rx_fn *session_tx_fns[SESSION_N_TYPES]; + /** Session manager is enabled */ u8 is_enabled; + /** Preallocate session config parameter */ + u32 preallocated_sessions; + /* Convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; @@ -247,13 +256,12 @@ stream_session_t *stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto); stream_session_t *stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, - u16 rmt_port, u8 proto, - u32 thread_index); + u16 rmt_port, u8 proto); stream_session_t *stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto); stream_session_t *stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, - u16 rmt_port, u8, u32 thread_index); + u16 rmt_port, u8 proto); transport_connection_t * stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, @@ -277,9 +285,24 @@ stream_session_get_tsi (u64 ti_and_si, u32 thread_index) ti_and_si & 0xFFFFFFFFULL); } +always_inline u8 +stream_session_is_valid (u32 si, u8 thread_index) +{ + stream_session_t *s; + s = pool_elt_at_index (session_manager_main.sessions[thread_index], si); + if (s->thread_index != thread_index || s->session_index != si + || s->server_rx_fifo->master_session_index != si + || s->server_tx_fifo->master_session_index != si + || s->server_rx_fifo->master_thread_index != thread_index + || s->server_tx_fifo->master_thread_index != thread_index) + return 0; + return 1; +} + always_inline stream_session_t * stream_session_get (u32 si, u32 thread_index) { + ASSERT (stream_session_is_valid (si, thread_index)); return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } @@ -292,6 +315,7 @@ stream_session_get_if_valid (u64 si, u32 thread_index) if (pool_is_free_index (session_manager_main.sessions[thread_index], si)) return 0; + ASSERT (stream_session_is_valid (si, thread_index)); return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index 
6b8341aa..e06bc586 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -47,7 +47,8 @@ format_stream_session (u8 * s, va_list * args) svm_fifo_max_enqueue (ss->server_tx_fifo), stream_session_get_index (ss)); - if (ss->session_state == SESSION_STATE_READY) + if (ss->session_state == SESSION_STATE_READY + || ss->session_state == SESSION_STATE_ACCEPTING) { s = format (s, "%U", tp_vft->format_connection, ss->connection_index, ss->thread_index, verbose); @@ -68,8 +69,9 @@ format_stream_session (u8 * s, va_list * args) } else if (ss->session_state == SESSION_STATE_CLOSED) { - s = format (s, "[CL] %-40U", tp_vft->format_connection, - ss->connection_index, ss->thread_index, verbose); + s = + format (s, "[CL] %U", tp_vft->format_connection, ss->connection_index, + ss->thread_index, verbose); if (verbose == 1) s = format (s, "%v", str); if (verbose > 1) @@ -93,7 +95,13 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, int verbose = 0, i; stream_session_t *pool; stream_session_t *s; - u8 *str = 0; + u8 *str = 0, one_session = 0, proto_set = 0, proto = 0; + u8 is_ip4 = 0, s_type = 0; + ip4_address_t lcl_ip4, rmt_ip4; + u32 lcl_port = 0, rmt_port = 0; + + memset (&lcl_ip4, 0, sizeof (lcl_ip4)); + memset (&rmt_ip4, 0, sizeof (rmt_ip4)); if (!smm->is_enabled) { @@ -106,10 +114,43 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, ; else if (unformat (input, "verbose")) verbose = 1; + else if (unformat (input, "tcp")) + { + proto_set = 1; + proto = TRANSPORT_PROTO_TCP; + } + else if (unformat (input, "%U:%d->%U:%d", + unformat_ip4_address, &lcl_ip4, &lcl_port, + unformat_ip4_address, &rmt_ip4, &rmt_port)) + { + one_session = 1; + is_ip4 = 1; + } + else break; } + if (one_session) + { + if (!proto_set) + { + vlib_cli_output (vm, "proto not set"); + return clib_error_return (0, "proto not set"); + } + + s_type = session_type_from_proto_and_ip (proto, is_ip4); + s = stream_session_lookup4 (&lcl_ip4, &rmt_ip4, + clib_host_to_net_u16 (lcl_port), + clib_host_to_net_u16 (rmt_port), s_type); + if (s) + vlib_cli_output (vm, "%U", format_stream_session, s, 2); + else + vlib_cli_output (vm, "session does not exist"); + + return 0; + } + for (i = 0; i < vec_len (smm->sessions); i++) { u32 once_per_pool; @@ -146,6 +187,7 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, } else vlib_cli_output (vm, "Thread %d: no active sessions", i); + vec_reset_length (str); } vec_free (str); @@ -161,15 +203,22 @@ VLIB_CLI_COMMAND (show_session_command, static) = }; /* *INDENT-ON* */ +static int +clear_session (stream_session_t * s) +{ + application_t *server = application_get (s->app_index); + server->cb_fns.session_disconnect_callback (s); + return 0; +} + static clib_error_t * clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { session_manager_main_t *smm = &session_manager_main; - u32 thread_index = 0; + u32 thread_index = 0, clear_all = 0; u32 session_index = ~0; - stream_session_t *pool, *session; - application_t *server; + stream_session_t **pool, *session; if (!smm->is_enabled) { @@ -182,28 +231,36 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, ; else if (unformat (input, "session %d", &session_index)) ; + else if (unformat (input, "all")) + clear_all = 1; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } - if (session_index == ~0) + if (!clear_all && session_index == ~0) return clib_error_return (0, "session required, 
but not set."); - if (thread_index > vec_len (smm->sessions)) - return clib_error_return (0, "thread %d out of range [0-%d]", - thread_index, vec_len (smm->sessions)); - - pool = smm->sessions[thread_index]; - - if (pool_is_free_index (pool, session_index)) - return clib_error_return (0, "session %d not active", session_index); - - session = pool_elt_at_index (pool, session_index); - server = application_get (session->app_index); + if (session_index != ~0) + { + session = stream_session_get_if_valid (session_index, thread_index); + if (!session) + return clib_error_return (0, "no session %d on thread %d", + session_index, thread_index); + clear_session (session); + } - /* Disconnect both app and transport */ - server->cb_fns.session_disconnect_callback (session); + if (clear_all) + { + /* *INDENT-OFF* */ + vec_foreach (pool, smm->sessions) + { + pool_foreach(session, *pool, ({ + clear_session (session); + })); + }; + /* *INDENT-ON* */ + } return 0; } diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index 561a9257..9c38bab9 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -225,6 +225,12 @@ make_v6_ss_kv_from_tc (session_kv6_t * kv, transport_connection_t * t) t->rmt_port, t->proto); } +typedef enum _transport_proto +{ + TRANSPORT_PROTO_TCP, + TRANSPORT_PROTO_UDP +} transport_proto_t; + typedef struct _transport_endpoint { ip46_address_t ip; /** ip address */ diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 6f8be082..a6c8a235 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -170,62 +170,90 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { tclient_main_t *tm = &tclient_main; int my_thread_index = vlib_get_thread_index (); - vl_api_disconnect_session_t *dmp; session_t *sp; int i; int delete_session; u32 *connection_indices; - u32 tx_quota = 0; - u32 delta, prev_bytes_received_this_session; + u32 *connections_this_batch; + u32 nconnections_this_batch; connection_indices = tm->connection_index_by_thread[my_thread_index]; + connections_this_batch = + tm->connections_this_batch_by_thread[my_thread_index]; - if (tm->run_test == 0 || vec_len (connection_indices) == 0) + if ((tm->run_test == 0) || + ((vec_len (connection_indices) == 0) + && vec_len (connections_this_batch) == 0)) return 0; - for (i = 0; i < vec_len (connection_indices); i++) + /* Grab another pile of connections */ + if (PREDICT_FALSE (vec_len (connections_this_batch) == 0)) + { + nconnections_this_batch = + clib_min (tm->connections_per_batch, vec_len (connection_indices)); + + ASSERT (nconnections_this_batch > 0); + vec_validate (connections_this_batch, nconnections_this_batch - 1); + clib_memcpy (connections_this_batch, + connection_indices + vec_len (connection_indices) + - nconnections_this_batch, + nconnections_this_batch * sizeof (u32)); + _vec_len (connection_indices) -= nconnections_this_batch; + } + + if (PREDICT_FALSE (tm->prev_conns != tm->connections_per_batch + && tm->prev_conns == vec_len (connections_this_batch))) + { + tm->repeats++; + tm->prev_conns = vec_len (connections_this_batch); + if (tm->repeats == 500000) + { + clib_warning ("stuck clients"); + } + } + else + { + tm->prev_conns = vec_len (connections_this_batch); + tm->repeats = 0; + } + + for (i = 0; i < vec_len (connections_this_batch); i++) { delete_session = 1; - sp = pool_elt_at_index (tm->sessions, connection_indices[i]); + sp = pool_elt_at_index (tm->sessions, connections_this_batch[i]); - if ((tm->no_return 
|| tx_quota < 60) && sp->bytes_to_send > 0) + if (sp->bytes_to_send > 0) { send_test_chunk (tm, sp); delete_session = 0; - tx_quota++; } - if (!tm->no_return && sp->bytes_to_receive > 0) + if (sp->bytes_to_receive > 0) { - prev_bytes_received_this_session = sp->bytes_received; receive_test_chunk (tm, sp); - delta = sp->bytes_received - prev_bytes_received_this_session; - if (delta > 0) - tx_quota--; delete_session = 0; } if (PREDICT_FALSE (delete_session == 1)) { - __sync_fetch_and_add (&tm->tx_total, tm->bytes_to_send); + u32 index, thread_index; + stream_session_t *s; + + __sync_fetch_and_add (&tm->tx_total, sp->bytes_sent); __sync_fetch_and_add (&tm->rx_total, sp->bytes_received); - dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); - memset (dmp, 0, sizeof (*dmp)); - dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); - dmp->client_index = tm->my_client_index; - dmp->handle = sp->vpp_session_handle; - if (!unix_shared_memory_queue_add (tm->vl_input_queue, (u8 *) & dmp, - 1)) + stream_session_parse_handle (sp->vpp_session_handle, + &index, &thread_index); + s = stream_session_get_if_valid (index, thread_index); + + if (s) { - vec_delete (connection_indices, 1, i); - tm->connection_index_by_thread[my_thread_index] = - connection_indices; + stream_session_disconnect (s); + vec_delete (connections_this_batch, 1, i); + i--; __sync_fetch_and_add (&tm->ready_connections, -1); } else - { - vl_msg_api_free (dmp); - } + clib_warning ("session AWOL?"); /* Kick the debug CLI process */ if (tm->ready_connections == 0) @@ -236,6 +264,10 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, } } } + + tm->connection_index_by_thread[my_thread_index] = connection_indices; + tm->connections_this_batch_by_thread[my_thread_index] = + connections_this_batch; return 0; } @@ -356,6 +388,8 @@ tcp_test_clients_init (vlib_main_t * vm) tm->vlib_main = vm; vec_validate (tm->connection_index_by_thread, thread_main->n_vlib_mains); + vec_validate (tm->connections_this_batch_by_thread, + thread_main->n_vlib_mains); return 0; } @@ -388,7 +422,8 @@ builtin_session_connected_callback (u32 app_index, u32 api_context, pool_get (tm->sessions, session); memset (session, 0, sizeof (*session)); session_index = session - tm->sessions; - session->bytes_to_receive = session->bytes_to_send = tm->bytes_to_send; + session->bytes_to_send = tm->bytes_to_send; + session->bytes_to_receive = tm->no_return ? 
0ULL : tm->bytes_to_send; session->server_rx_fifo = s->server_rx_fifo; session->server_rx_fifo->client_session_index = session_index; session->server_tx_fifo = s->server_tx_fifo; @@ -485,6 +520,8 @@ attach_builtin_test_clients_app (void) options[SESSION_OPTIONS_SEGMENT_SIZE] = (2ULL << 32); options[SESSION_OPTIONS_RX_FIFO_SIZE] = tm->fifo_size; options[SESSION_OPTIONS_TX_FIFO_SIZE] = tm->fifo_size / 2; + options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = tm->private_segment_count; + options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE] = tm->private_segment_size; options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = prealloc_fifos; options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; @@ -561,6 +598,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->bytes_to_send = 8192; tm->no_return = 0; tm->fifo_size = 64 << 10; + tm->connections_per_batch = 1000; + tm->private_segment_count = 0; + tm->private_segment_size = 0; vec_free (tm->connect_uri); @@ -582,6 +622,20 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->no_return = 1; else if (unformat (input, "fifo-size %d", &tm->fifo_size)) tm->fifo_size <<= 10; + else if (unformat (input, "private-segment-count %d", + &tm->private_segment_count)) + ; + else if (unformat (input, "private-segment-size %dm", &tmp)) + tm->private_segment_size = tmp << 20; + else if (unformat (input, "private-segment-size %dg", &tmp)) + tm->private_segment_size = tmp << 30; + else if (unformat (input, "private-segment-size %d", &tmp)) + tm->private_segment_size = tmp; + else if (unformat (input, "preallocate-fifos")) + tm->prealloc_fifos = 1; + else + if (unformat (input, "client-batch %d", &tm->connections_per_batch)) + ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); @@ -688,9 +742,13 @@ test_tcp_clients_command_fn (vlib_main_t * vm, vlib_cli_output (vm, "zero delta-t?"); cleanup: - pool_free (tm->sessions); + tm->run_test = 0; for (i = 0; i < vec_len (tm->connection_index_by_thread); i++) - vec_reset_length (tm->connection_index_by_thread[i]); + { + vec_reset_length (tm->connection_index_by_thread[i]); + vec_reset_length (tm->connections_this_batch_by_thread[i]); + } + pool_free (tm->sessions); return 0; } diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 3462e0ee..38af231d 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -63,6 +63,9 @@ typedef struct u32 configured_segment_size; u32 fifo_size; u32 expected_connections; /**< Number of clients/connections */ + u32 connections_per_batch; /**< Connections to rx/tx at once */ + u32 private_segment_count; /**< Number of private fifo segs */ + u32 private_segment_size; /**< size of private fifo segs */ /* * Test state variables @@ -72,6 +75,7 @@ typedef struct uword *session_index_by_vpp_handles; /**< Hash table for disconnecting */ u8 *connect_test_data; /**< Pre-computed test data */ u32 **connection_index_by_thread; + u32 **connections_this_batch_by_thread; /**< active connection batch */ pthread_t client_thread_handle; volatile u32 ready_connections; @@ -82,7 +86,8 @@ typedef struct f64 test_start_time; f64 test_end_time; - + u32 prev_conns; + u32 repeats; /* * Flags */ diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 775bfc26..8e958ac0 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -56,12 +56,15 @@ typedef struct u32 fifo_size; /**< Fifo size */ u32 rcv_buffer_size; /**< Rcv buffer size */ u32 prealloc_fifos; /**< Preallocate fifos */ + u32 private_segment_count; 
/**< Number of private segments */ + u32 private_segment_size; /**< Size of private segments */ /* * Test state */ u8 **rx_buf; /**< Per-thread RX buffer */ u64 byte_index; + u32 **rx_retries; vlib_main_t *vlib_main; } builtin_server_main_t; @@ -77,6 +80,8 @@ builtin_session_accept_callback (stream_session_t * s) session_manager_get_vpp_event_queue (s->thread_index); s->session_state = SESSION_STATE_READY; bsm->byte_index = 0; + vec_validate (bsm->rx_retries[s->thread_index], s->session_index); + bsm->rx_retries[s->thread_index][s->session_index] = 0; return 0; } @@ -173,11 +178,16 @@ builtin_server_rx_callback (stream_session_t * s) builtin_server_main_t *bsm = &builtin_server_main; session_fifo_event_t evt; static int serial_number = 0; - u32 my_thread_id = vlib_get_thread_index (); + u32 thread_index = vlib_get_thread_index (); + + ASSERT (s->thread_index == thread_index); rx_fifo = s->server_rx_fifo; tx_fifo = s->server_tx_fifo; + ASSERT (rx_fifo->master_thread_index == thread_index); + ASSERT (tx_fifo->master_thread_index == thread_index); + max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo); @@ -201,21 +211,31 @@ builtin_server_rx_callback (stream_session_t * s) evt.event_type = FIFO_EVENT_BUILTIN_RX; evt.event_id = 0; - q = bsm->vpp_queue[s->thread_index]; + q = bsm->vpp_queue[thread_index]; if (PREDICT_FALSE (q->cursize == q->maxsize)) clib_warning ("out of event queue space"); - else - unix_shared_memory_queue_add (q, (u8 *) & evt, - 0 /* don't wait for mutex */ ); + else if (unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* don't wait for mutex */ + )) + clib_warning ("failed to enqueue self-tap"); + + bsm->rx_retries[thread_index][s->session_index]++; + if (bsm->rx_retries[thread_index][s->session_index] == 500000) + { + clib_warning ("session stuck: %U", format_stream_session, s, 2); + } + } + else + { + bsm->rx_retries[thread_index][s->session_index] = 0; } return 0; } - _vec_len (bsm->rx_buf[my_thread_id]) = max_transfer; + _vec_len (bsm->rx_buf[thread_index]) = max_transfer; actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, - bsm->rx_buf[my_thread_id]); + bsm->rx_buf[thread_index]); ASSERT (actual_transfer == max_transfer); // test_bytes (bsm, actual_transfer); @@ -225,7 +245,7 @@ builtin_server_rx_callback (stream_session_t * s) */ n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, - bsm->rx_buf[my_thread_id]); + bsm->rx_buf[thread_index]); if (n_written != max_transfer) clib_warning ("short trout!"); @@ -237,11 +257,13 @@ builtin_server_rx_callback (stream_session_t * s) evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; - unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], - (u8 *) & evt, 0 /* do wait for mutex */ ); + if (unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], + (u8 *) & evt, + 0 /* do wait for mutex */ )) + clib_warning ("failed to enqueue tx evt"); } - if (PREDICT_FALSE (max_enqueue < max_dequeue)) + if (PREDICT_FALSE (n_written < max_dequeue)) goto rx_event; return 0; @@ -328,9 +350,13 @@ server_attach () a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 512 << 20; a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = bsm->fifo_size; a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = bsm->fifo_size; - a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = bsm->private_segment_count; + a->options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE] = bsm->private_segment_size; 
a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = bsm->prealloc_fifos ? bsm->prealloc_fifos : 1; + + a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); @@ -374,6 +400,8 @@ server_create (vlib_main_t * vm) num_threads = 1 /* main thread */ + vtm->n_threads; vec_validate (builtin_server_main.vpp_queue, num_threads - 1); vec_validate (bsm->rx_buf, num_threads - 1); + vec_validate (bsm->rx_retries, num_threads - 1); + for (i = 0; i < num_threads; i++) vec_validate (bsm->rx_buf[i], bsm->rcv_buffer_size); @@ -435,11 +463,14 @@ server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, { builtin_server_main_t *bsm = &builtin_server_main; int rv; + u32 tmp; bsm->no_echo = 0; bsm->fifo_size = 64 << 10; bsm->rcv_buffer_size = 128 << 10; bsm->prealloc_fifos = 0; + bsm->private_segment_count = 0; + bsm->private_segment_size = 0; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { @@ -449,8 +480,17 @@ server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, bsm->fifo_size <<= 10; else if (unformat (input, "rcv-buf-size %d", &bsm->rcv_buffer_size)) ; - else if (unformat (input, "prealloc-fifos", &bsm->prealloc_fifos)) + else if (unformat (input, "prealloc-fifos %d", &bsm->prealloc_fifos)) + ; + else if (unformat (input, "private-segment-count %d", + &bsm->private_segment_count)) ; + else if (unformat (input, "private-segment-size %dm", &tmp)) + bsm->private_segment_size = tmp << 20; + else if (unformat (input, "private-segment-size %dg", &tmp)) + bsm->private_segment_size = tmp << 30; + else if (unformat (input, "private-segment-size %d", &tmp)) + bsm->private_segment_size = tmp; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 4e85eb3f..f379e699 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -74,8 +74,16 @@ static void tcp_connection_unbind (u32 listener_index) { tcp_main_t *tm = vnet_get_tcp_main (); - TCP_EVT_DBG (TCP_EVT_UNBIND, - pool_elt_at_index (tm->listener_pool, listener_index)); + tcp_connection_t *tc; + + tc = pool_elt_at_index (tm->listener_pool, listener_index); + + TCP_EVT_DBG (TCP_EVT_UNBIND, tc); + + /* Poison the entry */ + if (CLIB_DEBUG > 0) + memset (tc, 0xFA, sizeof (*tc)); + pool_put_index (tm->listener_pool, listener_index); } @@ -124,9 +132,20 @@ tcp_connection_cleanup (tcp_connection_t * tc) /* Check if half-open */ if (tc->state == TCP_STATE_SYN_SENT) - pool_put (tm->half_open_connections, tc); + { + /* Poison the entry */ + if (CLIB_DEBUG > 0) + memset (tc, 0xFA, sizeof (*tc)); + pool_put (tm->half_open_connections, tc); + } else - pool_put (tm->connections[tc->c_thread_index], tc); + { + int thread_index = tc->c_thread_index; + /* Poison the entry */ + if (CLIB_DEBUG > 0) + memset (tc, 0xFA, sizeof (*tc)); + pool_put (tm->connections[thread_index], tc); + } } /** @@ -168,13 +187,14 @@ tcp_connection_reset (tcp_connection_t * tc) /* Make sure all timers are cleared */ tcp_connection_timers_reset (tc); - stream_session_reset_notify (&tc->connection); + + /* Wait for cleanup from session layer but not forever */ + tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME); break; case TCP_STATE_CLOSED: return; } - } /** @@ -278,6 +298,9 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) tries = max - min; time_now = tcp_time_now (); + /* Only support active opens from thread 0 */ + ASSERT (vlib_get_thread_index () == 0); + /* 
Start at random point or max */ pool_get (tm->local_endpoints, tep); clib_memcpy (&tep->ip, ip, sizeof (*ip)); @@ -343,6 +366,7 @@ tcp_connection_timers_reset (tcp_connection_t * tc) } } +#if 0 typedef struct ip4_tcp_hdr { ip4_header_t ip; @@ -435,6 +459,7 @@ tcp_connection_fib_attach (tcp_connection_t * tc) tcp_connection_stack_on_fib_entry (tc); } +#endif /* 0 */ /** Initialize tcp connection variables * @@ -447,7 +472,7 @@ tcp_connection_init_vars (tcp_connection_t * tc) tcp_init_mss (tc); scoreboard_init (&tc->sack_sb); tcp_cc_init (tc); - tcp_connection_fib_attach (tc); + // tcp_connection_fib_attach (tc); } int @@ -485,14 +510,38 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) if (is_ip4) { ip4_address_t *ip4; - ip4 = ip_interface_get_first_ip (sw_if_index, 1); - lcl_addr.ip4.as_u32 = ip4->as_u32; + int index; + if (vec_len (tm->ip4_src_addresses)) + { + index = tm->last_v4_address_rotor++; + if (tm->last_v4_address_rotor >= vec_len (tm->ip4_src_addresses)) + tm->last_v4_address_rotor = 0; + lcl_addr.ip4.as_u32 = tm->ip4_src_addresses[index].as_u32; + } + else + { + ip4 = ip_interface_get_first_ip (sw_if_index, 1); + lcl_addr.ip4.as_u32 = ip4->as_u32; + } } else { ip6_address_t *ip6; - ip6 = ip_interface_get_first_ip (sw_if_index, 0); - clib_memcpy (&lcl_addr.ip6, ip6, sizeof (*ip6)); + int index; + + if (vec_len (tm->ip6_src_addresses)) + { + index = tm->last_v6_address_rotor++; + if (tm->last_v6_address_rotor >= vec_len (tm->ip6_src_addresses)) + tm->last_v6_address_rotor = 0; + clib_memcpy (&lcl_addr.ip6, &tm->ip6_src_addresses[index], + sizeof (*ip6)); + } + else + { + ip6 = ip_interface_get_first_ip (sw_if_index, 0); + clib_memcpy (&lcl_addr.ip6, ip6, sizeof (*ip6)); + } } /* Allocate source port */ @@ -614,7 +663,7 @@ u8 * format_tcp_vars (u8 * s, va_list * args) { tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); - s = format (s, " snd_una %u snd_nxt %u snd_una_max %u\n", + s = format (s, " snd_una %u snd_nxt %u snd_una_max %u", tc->snd_una - tc->iss, tc->snd_nxt - tc->iss, tc->snd_una_max - tc->iss); s = format (s, " rcv_nxt %u rcv_las %u\n", @@ -628,12 +677,17 @@ format_tcp_vars (u8 * s, va_list * args) s = format (s, " cong %U ", format_tcp_congestion_status, tc); s = format (s, "cwnd %u ssthresh %u rtx_bytes %u bytes_acked %u\n", tc->cwnd, tc->ssthresh, tc->snd_rxt_bytes, tc->bytes_acked); - s = format (s, " prev_ssthresh %u snd_congestion %u dupack %u\n", + s = format (s, " prev_ssthresh %u snd_congestion %u dupack %u", tc->prev_ssthresh, tc->snd_congestion - tc->iss, tc->rcv_dupacks); + s = format (s, " limited_transmit %u\n", tc->limited_transmit - tc->iss); + s = format (s, " tsecr %u tsecr_last_ack %u\n", tc->rcv_opts.tsecr, + tc->tsecr_last_ack); s = format (s, " rto %u rto_boff %u srtt %u rttvar %u rtt_ts %u ", tc->rto, tc->rto_boff, tc->srtt, tc->rttvar, tc->rtt_ts); s = format (s, "rtt_seq %u\n", tc->rtt_seq); + s = format (s, " tsval_recent %u tsval_recent_age %u\n", tc->tsval_recent, + tcp_time_now () - tc->tsval_recent_age); s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb); if (vec_len (tc->snd_sacks)) s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc); @@ -719,11 +773,21 @@ format_tcp_sacks (u8 * s, va_list * args) tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); sack_block_t *sacks = tc->snd_sacks; sack_block_t *block; - vec_foreach (block, sacks) - { - s = format (s, " start %u end %u\n", block->start - tc->irs, - block->end - tc->irs); - } + int i, len = 0; + + len = vec_len 
(sacks); + for (i = 0; i < len - 1; i++) + { + block = &sacks[i]; + s = format (s, " start %u end %u\n", block->start - tc->irs, + block->end - tc->irs); + } + if (len) + { + block = &sacks[len - 1]; + s = format (s, " start %u end %u", block->start - tc->irs, + block->end - tc->irs); + } return s; } @@ -796,14 +860,18 @@ tcp_session_send_mss (transport_connection_t * trans_conn) always_inline u32 tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space) { - if (tc->snd_wnd < tc->snd_mss) + if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss)) { return tc->snd_wnd <= snd_space ? tc->snd_wnd : 0; } /* If we can't write at least a segment, don't try at all */ - if (snd_space < tc->snd_mss) - return 0; + if (PREDICT_FALSE (snd_space < tc->snd_mss)) + { + if (snd_space > clib_min (tc->mss, tc->rcv_opts.mss) - TCP_HDR_LEN_MAX) + return snd_space; + return 0; + } /* round down to mss multiple */ return snd_space - (snd_space % tc->snd_mss); @@ -1042,6 +1110,8 @@ tcp_main_enable (vlib_main_t * vm) vlib_thread_main_t *vtm = vlib_get_thread_main (); clib_error_t *error = 0; u32 num_threads; + int thread, i; + tcp_connection_t *tc __attribute__ ((unused)); if ((error = vlib_call_init_function (vm, ip_main_init))) return error; @@ -1074,6 +1144,27 @@ tcp_main_enable (vlib_main_t * vm) num_threads = 1 /* main thread */ + vtm->n_threads; vec_validate (tm->connections, num_threads - 1); + /* + * Preallocate connections + */ + for (thread = 0; thread < num_threads; thread++) + { + for (i = 0; i < tm->preallocated_connections; i++) + pool_get (tm->connections[thread], tc); + + for (i = 0; i < tm->preallocated_connections; i++) + pool_put_index (tm->connections[thread], i); + } + + /* + * Preallocate half-open connections + */ + for (i = 0; i < tm->preallocated_half_open_connections; i++) + pool_get (tm->half_open_connections, tc); + + for (i = 0; i < tm->preallocated_half_open_connections; i++) + pool_put_index (tm->half_open_connections, i); + /* Initialize per worker thread tx buffers (used for control messages) */ vec_validate (tm->tx_buffers, num_threads - 1); @@ -1116,7 +1207,6 @@ tcp_init (vlib_main_t * vm) { tcp_main_t *tm = vnet_get_tcp_main (); - tm->vlib_main = vm; tm->vnet_main = vnet_get_main (); tm->is_enabled = 0; @@ -1125,6 +1215,97 @@ tcp_init (vlib_main_t * vm) VLIB_INIT_FUNCTION (tcp_init); + +static clib_error_t * +tcp_config_fn (vlib_main_t * vm, unformat_input_t * input) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat + (input, "preallocated-connections %d", + &tm->preallocated_connections)) + ; + else if (unformat (input, "preallocated-half-open-connections %d", + &tm->preallocated_half_open_connections)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_CONFIG_FUNCTION (tcp_config_fn, "tcp"); + +static clib_error_t * +tcp_src_address (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd_arg) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + ip4_address_t v4start, v4end; + ip6_address_t v6start, v6end; + int v4set = 0; + int v6set = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%U - %U", unformat_ip4_address, &v4start, + unformat_ip4_address, &v4end)) + v4set = 1; + else if (unformat (input, "%U", unformat_ip4_address, &v4start)) + { + memcpy (&v4end, &v4start, sizeof (v4start)); + v4set = 1; + } + else if (unformat (input, "%U - %U", unformat_ip6_address, &v6start, + 
unformat_ip4_address, &v6end)) + v6set = 1; + else if (unformat (input, "%U", unformat_ip6_address, &v6start)) + { + memcpy (&v6end, &v6start, sizeof (v4start)); + v6set = 1; + } + else + break; + } + + if (!v4set && !v6set) + return clib_error_return (0, "at least one v4 or v6 address required"); + + if (v4set) + { + u32 tmp; + + do + { + vec_add1 (tm->ip4_src_addresses, v4start); + tmp = clib_net_to_host_u32 (v4start.as_u32); + tmp++; + v4start.as_u32 = clib_host_to_net_u32 (tmp); + } + while (clib_host_to_net_u32 (v4start.as_u32) <= + clib_host_to_net_u32 (v4end.as_u32)); + } + if (v6set) + { + clib_warning ("v6 src address list unimplemented..."); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (tcp_src_address_command, static) = +{ + .path = "tcp src-address", + .short_help = "tcp src-address [- ] add src address range", + .function = tcp_src_address, +}; +/* *INDENT-ON* */ + + + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 12d804b8..37b10fd4 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -348,6 +348,16 @@ typedef struct _tcp_main /* Flag that indicates if stack is on or off */ u8 is_enabled; + /** Number of preallocated connections */ + u32 preallocated_connections; + u32 preallocated_half_open_connections; + + /** Vectors of src addresses. Optional unless one needs > 63K active-opens */ + ip4_address_t *ip4_src_addresses; + u32 last_v4_address_rotor; + u32 last_v6_address_rotor; + ip6_address_t *ip6_src_addresses; + /* convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; @@ -569,6 +579,7 @@ tcp_connection_force_ack (tcp_connection_t * tc, vlib_buffer_t * b) always_inline void tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval) { + ASSERT (tc->c_thread_index == vlib_get_thread_index ()); tc->timers[timer_id] = tw_timer_start_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], tc->c_c_index, timer_id, interval); @@ -577,6 +588,7 @@ tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval) always_inline void tcp_timer_reset (tcp_connection_t * tc, u8 timer_id) { + ASSERT (tc->c_thread_index == vlib_get_thread_index ()); if (tc->timers[timer_id] == TCP_TIMER_HANDLE_INVALID) return; @@ -588,6 +600,7 @@ tcp_timer_reset (tcp_connection_t * tc, u8 timer_id) always_inline void tcp_timer_update (tcp_connection_t * tc, u8 timer_id, u32 interval) { + ASSERT (tc->c_thread_index == vlib_get_thread_index ()); if (tc->timers[timer_id] != TCP_TIMER_HANDLE_INVALID) tw_timer_stop_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], tc->timers[timer_id]); diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index ae68ad1b..be51bca2 100755 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -383,9 +383,16 @@ typedef enum _tcp_dbg_evt "establish", \ }, \ }; \ - DECLARE_ETD(_tc, _e, 2); \ - ed->data[0] = _timer_id; \ - ed->data[1] = _timer_id; \ + if (_tc) \ + { \ + DECLARE_ETD(_tc, _e, 2); \ + ed->data[0] = _timer_id; \ + ed->data[1] = _timer_id; \ + } \ + else \ + { \ + clib_warning ("pop for unexisting connection %d", _tc_index); \ + } \ } #define TCP_EVT_SEG_INVALID_HANDLER(_tc, _seq, _end, ...) 
\ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index a2e6dad1..45db0da6 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -251,6 +251,7 @@ tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end) if (tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent && seq_leq (seq, tc->rcv_las) && seq_leq (tc->rcv_las, seq_end)) { + ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval)); tc->tsval_recent = tc->rcv_opts.tsval; tc->tsval_recent_age = tcp_time_now (); } @@ -383,12 +384,9 @@ tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt) if (tc->srtt != 0) { err = mrtt - tc->srtt; -// tc->srtt += err >> 3; /* XXX Drop in RTT results in RTTVAR increase and bigger RTO. * The increase should be bound */ -// tc->rttvar += ((int) clib_abs (err) - (int) tc->rttvar) >> 2; - tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1); diff = (clib_abs (err) - (int) tc->rttvar) >> 2; tc->rttvar = clib_max ((int) tc->rttvar + diff, 1); @@ -491,6 +489,14 @@ tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd, && (prev_snd_wnd == tc->snd_wnd)); } +static u8 +tcp_is_lost_fin (tcp_connection_t * tc) +{ + if ((tc->flags & TCP_CONN_FINSNT) && tc->snd_una_max - tc->snd_una == 1) + return 1; + return 0; +} + /** * Checks if ack is a congestion control event. */ @@ -503,7 +509,7 @@ tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b, *is_dack = tc->sack_sb.last_sacked_bytes || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una); - return (*is_dack || tcp_in_cong_recovery (tc)); + return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc)); } void @@ -750,10 +756,20 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) * last hole end */ tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1]; last_hole = scoreboard_last_hole (sb); - if (seq_gt (tc->snd_una_max, sb->high_sacked) - && seq_gt (tc->snd_una_max, last_hole->end)) - last_hole->end = tc->snd_una_max; - /* keep track of max byte sacked for when the last hole + if (seq_gt (tc->snd_una_max, last_hole->end)) + { + if (seq_geq (last_hole->start, sb->high_sacked)) + { + last_hole->end = tc->snd_una_max; + } + /* New hole after high sacked block */ + else if (seq_lt (sb->high_sacked, tc->snd_una_max)) + { + scoreboard_insert_hole (sb, sb->tail, sb->high_sacked, + tc->snd_una_max); + } + } + /* Keep track of max byte sacked for when the last hole * is acked */ if (seq_gt (tmp.end, sb->high_sacked)) sb->high_sacked = tmp.end; @@ -764,7 +780,6 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) while (hole && blk_index < vec_len (tc->rcv_opts.sacks)) { blk = &tc->rcv_opts.sacks[blk_index]; - if (seq_leq (blk->start, hole->start)) { /* Block covers hole. 
Remove hole */ @@ -784,6 +799,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) } else if (!next_hole) { + ASSERT (seq_geq (sb->high_sacked, ack)); sb->snd_una_adv = sb->high_sacked - ack; sb->last_bytes_delivered += sb->high_sacked - hole->end; } @@ -819,7 +835,6 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { hole->end = blk->start; } - hole = scoreboard_next_hole (sb, hole); } } @@ -827,10 +842,13 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) scoreboard_update_bytes (tc, sb); sb->last_sacked_bytes = sb->sacked_bytes - (old_sacked_bytes - sb->last_bytes_delivered); + ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes); ASSERT (sb->sacked_bytes == 0 || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack)); ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max - seq_max (tc->snd_una, ack)); + ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc) + || sb->holes[sb->head].start == ack + sb->snd_una_adv); } /** @@ -916,7 +934,8 @@ tcp_cc_congestion_undo (tcp_connection_t * tc) static u8 tcp_cc_is_spurious_retransmit (tcp_connection_t * tc) { - return (tc->snd_rxt_ts + return (tcp_in_recovery (tc) + && tc->snd_rxt_ts && tcp_opts_tstamp (&tc->rcv_opts) && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts)); } @@ -994,6 +1013,7 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) { ASSERT (tc->snd_una != tc->snd_una_max || tc->sack_sb.last_sacked_bytes); + tc->rcv_dupacks++; if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked) @@ -1012,17 +1032,20 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) goto partial_ack_test; } - /* If of of the two conditions lower hold, reset dupacks - * 1) Cumulative ack does not cover more than congestion threshold, - * and the following doesn't hold: the congestion window is - * greater than SMSS bytes and the difference between highest_ack - * and prev_highest_ack is at most 4*SMSS bytes (XXX) - * 2) RFC6582 heuristic to avoid multiple fast retransmits + /* If of of the two conditions lower hold, reset dupacks because + * we're probably after timeout (RFC6582 heuristics). 
+ * If Cumulative ack does not cover more than congestion threshold, + * and: + * 1) The following doesn't hold: The congestion window is greater + * than SMSS bytes and the difference between highest_ack + * and prev_highest_ack is at most 4*SMSS bytes + * 2) Echoed timestamp in the last non-dup ack does not equal the + * stored timestamp */ - if ((seq_gt (tc->snd_una, tc->snd_congestion) - || !(tc->cwnd > tc->snd_mss - && tc->bytes_acked <= 4 * tc->snd_mss)) - || tc->rcv_opts.tsecr != tc->tsecr_last_ack) + if (seq_leq (tc->snd_una, tc->snd_congestion) + && ((!(tc->cwnd > tc->snd_mss + && tc->bytes_acked <= 4 * tc->snd_mss)) + || (tc->rcv_opts.tsecr != tc->tsecr_last_ack))) { tc->rcv_dupacks = 0; return; @@ -1038,6 +1061,7 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) * three segments that have left the network and should've been * buffered at the receiver XXX */ tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss; + ASSERT (tc->cwnd >= tc->snd_mss); /* If cwnd allows, send more data */ if (tcp_opts_sack_permitted (&tc->rcv_opts) @@ -1112,7 +1136,7 @@ partial_ack: >= tc->sack_sb.last_bytes_delivered); rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv - tc->sack_sb.last_bytes_delivered; - if (rxt_delivered && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) + if (0 && rxt_delivered && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) { /* If we have sacks and we haven't gotten an ack beyond high_rxt, * remove sacked bytes delivered */ @@ -1301,6 +1325,8 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, { int written; + ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt)); + /* Pure ACK. Update rcv_nxt and be done. */ if (PREDICT_FALSE (data_len == 0)) { @@ -1450,6 +1476,7 @@ tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, /* Chop off the bytes in the past */ n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number; n_data_bytes -= n_bytes_to_drop; + vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt; vlib_buffer_advance (b, n_bytes_to_drop); goto in_order; @@ -1912,11 +1939,12 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto drop; } - stream_session_init_fifos_pointers (&new_tc0->connection, - new_tc0->irs + 1, - new_tc0->iss + 1); /* Make sure after data segment processing ACK is sent */ new_tc0->flags |= TCP_CONN_SNDACK; + + /* Update rtt with the syn-ack sample */ + new_tc0->bytes_acked = 1; + tcp_update_rtt (new_tc0, vnet_buffer (b0)->tcp.ack_number); } /* SYN: Simultaneous open. 
Change state to SYN-RCVD and send SYN-ACK */ else @@ -1932,9 +1960,8 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto drop; } - stream_session_init_fifos_pointers (&new_tc0->connection, - new_tc0->irs + 1, - new_tc0->iss + 1); + tc0->rtt_ts = 0; + tcp_make_synack (new_tc0, b0); next0 = tcp_next_output (is_ip4); @@ -2151,8 +2178,6 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, << tc0->rcv_opts.wscale; tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; - - /* Shoulder tap the server */ stream_session_accept_notify (&tc0->connection); /* Reset SYN-ACK retransmit timer */ @@ -2175,6 +2200,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* If FIN is ACKed */ if (tc0->snd_una == tc0->snd_una_max) { + ASSERT (tcp_fin (tcp0)); tc0->state = TCP_STATE_FIN_WAIT_2; /* Stop all timers, 2MSL will be set lower */ tcp_connection_timers_reset (tc0); @@ -2545,10 +2571,6 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_make_synack (child0, b0); next0 = tcp_next_output (is_ip4); - /* Init fifo pointers after we have iss */ - stream_session_init_fifos_pointers (&child0->connection, - child0->irs + 1, - child0->iss + 1); drop: if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -2886,9 +2908,12 @@ do { \ _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE); _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE); _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_NONE); + _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, + TCP_ERROR_NONE); /* ACK for for a SYN-ACK -> tcp-rcv-process. */ _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); /* SYN-ACK for a SYN */ _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE); @@ -2905,12 +2930,14 @@ do { \ _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); + _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); /* ACK or FIN-ACK to our FIN */ _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); /* FIN in reply to our FIN from the other side */ _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); /* FIN confirming that the peer (app) has closed */ _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); @@ -2929,6 +2956,8 @@ do { \ TCP_ERROR_NONE); _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED); _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); + _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, + TCP_ERROR_CONNECTION_CLOSED); #undef _ } diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c index c825e952..103fea4c 100644 --- a/src/vnet/tcp/tcp_newreno.c +++ b/src/vnet/tcp/tcp_newreno.c @@ -63,8 +63,8 @@ newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type) * window deflation" attempts to ensure that, when fast recovery * eventually ends, approximately ssthresh amount of data will be * 
outstanding in the network.*/ - tc->cwnd = (tc->cwnd > tc->bytes_acked) ? - tc->cwnd - tc->bytes_acked : 0; + tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ? + tc->cwnd - tc->bytes_acked : tc->snd_mss; if (tc->bytes_acked > tc->snd_mss) tc->cwnd += tc->snd_mss; } diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 41bebcb3..b418e8ba 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -19,17 +19,20 @@ vlib_node_registration_t tcp4_output_node; vlib_node_registration_t tcp6_output_node; -typedef enum _tcp_output_nect +typedef enum _tcp_output_next { TCP_OUTPUT_NEXT_DROP, + TCP_OUTPUT_NEXT_IP_LOOKUP, TCP_OUTPUT_N_NEXT } tcp_output_next_t; #define foreach_tcp4_output_next \ _ (DROP, "error-drop") \ + _ (IP_LOOKUP, "ip4-lookup") #define foreach_tcp6_output_next \ _ (DROP, "error-drop") \ + _ (IP_LOOKUP, "ip6-lookup") static char *tcp_error_strings[] = { #define tcp_error(n,s) s, @@ -427,16 +430,16 @@ tcp_init_mss (tcp_connection_t * tc) #define tcp_get_free_buffer_index(tm, bidx) \ do { \ u32 *my_tx_buffers, n_free_buffers; \ - u32 thread_index = vlib_get_thread_index(); \ - my_tx_buffers = tm->tx_buffers[thread_index]; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0)) \ { \ n_free_buffers = 32; /* TODO config or macro */ \ vec_validate (my_tx_buffers, n_free_buffers - 1); \ _vec_len(my_tx_buffers) = vlib_buffer_alloc_from_free_list ( \ - tm->vlib_main, my_tx_buffers, n_free_buffers, \ + vlib_get_main(), my_tx_buffers, n_free_buffers, \ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); \ - tm->tx_buffers[thread_index] = my_tx_buffers; \ + tm->tx_buffers[thread_index] = my_tx_buffers; \ } \ /* buffer shortage */ \ if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0)) \ @@ -445,12 +448,12 @@ do { \ _vec_len (my_tx_buffers) -= 1; \ } while (0) -#define tcp_return_buffer(tm) \ -do { \ - u32 *my_tx_buffers; \ - u32 thread_index = vlib_get_thread_index(); \ - my_tx_buffers = tm->tx_buffers[thread_index]; \ - _vec_len (my_tx_buffers) +=1; \ +#define tcp_return_buffer(tm) \ +do { \ + u32 *my_tx_buffers; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ + _vec_len (my_tx_buffers) +=1; \ } while (0) always_inline void @@ -757,23 +760,22 @@ void tcp_push_ip_hdr (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b) { tcp_header_t *th = vlib_buffer_get_current (b); - + vlib_main_t *vm = vlib_get_main (); if (tc->c_is_ip4) { ip4_header_t *ih; - ih = vlib_buffer_push_ip4 (tm->vlib_main, b, &tc->c_lcl_ip4, + ih = vlib_buffer_push_ip4 (vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4, IP_PROTOCOL_TCP); - th->checksum = ip4_tcp_udp_compute_checksum (tm->vlib_main, b, ih); + th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih); } else { ip6_header_t *ih; int bogus = ~0; - ih = vlib_buffer_push_ip6 (tm->vlib_main, b, &tc->c_lcl_ip6, + ih = vlib_buffer_push_ip6 (vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6, IP_PROTOCOL_TCP); - th->checksum = ip6_tcp_udp_icmp_compute_checksum (tm->vlib_main, b, ih, - &bogus); + th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih, &bogus); ASSERT (!bogus); } } @@ -851,6 +853,13 @@ tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4) /* Decide where to send the packet */ next_index = is_ip4 ? 
tcp4_output_node.index : tcp6_output_node.index; + /* Initialize the trajectory trace, if configured */ + if (VLIB_BUFFER_TRACE_TRAJECTORY > 0) + { + b->pre_data[0] = 1; + b->pre_data[1] = next_index; + } + /* Enqueue the packet */ f = vlib_get_frame_to_node (vm, next_index); to_next = vlib_frame_vector_args (f); @@ -1144,6 +1153,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) /* Account for the SYN */ tc->snd_nxt += 1; + tc->rtt_ts = 0; } else { @@ -1232,7 +1242,7 @@ tcp_timer_persist_handler (u32 index) /* Nothing to send */ if (n_bytes <= 0) { - clib_warning ("persist found nothing to send"); + // clib_warning ("persist found nothing to send"); tcp_return_buffer (tm); return; } @@ -1448,7 +1458,7 @@ tcp46_output_inline (vlib_main_t * vm, tcp_connection_t *tc0; tcp_tx_trace_t *t0; tcp_header_t *th0 = 0; - u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_DROP; + u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_IP_LOOKUP; bi0 = from[0]; to_next[0] = bi0; @@ -1527,6 +1537,7 @@ tcp46_output_inline (vlib_main_t * vm, tc0->rto_boff = 0; } +#if 0 /* Make sure we haven't lost route to our peer */ if (PREDICT_FALSE (tc0->last_fib_check < tc0->snd_opts.tsval + TCP_FIB_RECHECK_PERIOD)) @@ -1547,6 +1558,10 @@ tcp46_output_inline (vlib_main_t * vm, /* Use pre-computed dpo to set next node */ next0 = tc0->c_rmt_dpo.dpoi_next_node; vnet_buffer (b0)->ip.adj_index[VLIB_TX] = tc0->c_rmt_dpo.dpoi_index; +#endif + + vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0; b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; done: diff --git a/src/vnet/tcp/tcp_packet.h b/src/vnet/tcp/tcp_packet.h index a6f62ee1..9ccfe655 100644 --- a/src/vnet/tcp/tcp_packet.h +++ b/src/vnet/tcp/tcp_packet.h @@ -168,6 +168,7 @@ typedef struct #define TCP_OPTION_LEN_TIMESTAMP 10 #define TCP_OPTION_LEN_SACK_BLOCK 8 +#define TCP_HDR_LEN_MAX 60 #define TCP_WND_MAX 65535U #define TCP_MAX_WND_SCALE 14 /* See RFC 1323 */ #define TCP_OPTS_ALIGN 4 diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index a461e3b8..510deb4f 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -290,7 +290,7 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) { tcp_connection_t _tc, *tc = &_tc; sack_block_t *sacks; - int i, verbose = 0; + int i, verbose = 0, expected; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { @@ -326,8 +326,12 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) sacks = vec_dup (tc->snd_sacks); tcp_update_sack_list (tc, 1100, 1200); - TCP_TEST ((vec_len (tc->snd_sacks) == 5), "sack blocks %d expected %d", - vec_len (tc->snd_sacks), 5); + if (verbose) + vlib_cli_output (vm, "add new segment [1100, 1200]\n%U", + format_tcp_sacks, tc); + expected = 5 < TCP_MAX_SACK_BLOCKS ? 
6 : 5; + TCP_TEST ((vec_len (tc->snd_sacks) == expected), + "sack blocks %d expected %d", vec_len (tc->snd_sacks), expected); TCP_TEST ((tc->snd_sacks[0].start == 1100), "first sack block start %u expected %u", tc->snd_sacks[0].start, 1100); diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index e6b4f8fc..9a8ff076 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -123,7 +123,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm, /* lookup session */ s0 = stream_session_lookup4 (&ip0->dst_address, &ip0->src_address, udp0->dst_port, udp0->src_port, - SESSION_TYPE_IP4_UDP, my_thread_index); + SESSION_TYPE_IP4_UDP); /* no listener */ if (PREDICT_FALSE (s0 == 0)) -- cgit 1.2.3-korg From 3eb5062b40feb3002de09a3caff86232d6e1adea Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Thu, 13 Jul 2017 01:24:57 -0400 Subject: Fixes and improved tcp/session debugging - Fix rx sack option parsing - Add session sack scoreboard tracing and replaying - Add svm fifo tracing and replaying - Scoreboard/svm fifo ooo segment reception fixes - Improved overall debugging Change-Id: Ieae07eba355e66f5935253232bb00f2dfb7ece00 Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 150 +++++++++++++++------ src/svm/svm_fifo.h | 41 ++++++ src/vnet/session/session.c | 74 ++++++++++- src/vnet/session/session.h | 38 ++++-- src/vnet/session/session_cli.c | 266 ++++++++++++++++++++++++++++++++----- src/vnet/tcp/builtin_client.c | 2 + src/vnet/tcp/builtin_server.c | 7 +- src/vnet/tcp/tcp.c | 208 +++++++++++++++++++++++++++++ src/vnet/tcp/tcp.h | 53 +++++++- src/vnet/tcp/tcp_input.c | 114 +++++++++++----- src/vnet/tcp/tcp_test.c | 289 +++++++++++++++++++++++++++++++++++++++++ 11 files changed, 1111 insertions(+), 131 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index da60fee5..fc2189c5 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -60,6 +60,90 @@ format_ooo_segment (u8 * s, va_list * args) return s; } +u8 * +svm_fifo_dump_trace (u8 * s, svm_fifo_t * f) +{ +#if SVM_FIFO_TRACE + svm_fifo_trace_elem_t *seg = 0; + int i = 0; + + if (f->trace) + { + vec_foreach (seg, f->trace) + { + s = format (s, "{%u, %u, %u}, ", seg->offset, seg->len, seg->action); + i++; + if (i % 5 == 0) + s = format (s, "\n"); + } + s = format (s, "\n"); + } + return s; +#else + return 0; +#endif +} + +u8 * +svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose) +{ + int i, trace_len; + u8 *data = 0; + svm_fifo_trace_elem_t *trace; + u32 offset; + svm_fifo_t *dummy_fifo; + + if (!f) + return s; + +#if SVM_FIFO_TRACE + trace = f->trace; + trace_len = vec_len (trace); +#else + trace = 0; + trace_len = 0; +#endif + + dummy_fifo = svm_fifo_create (f->nitems); + memset (f->data, 0xFF, f->nitems); + + vec_validate (data, f->nitems); + for (i = 0; i < vec_len (data); i++) + data[i] = i; + + for (i = 0; i < trace_len; i++) + { + offset = trace[i].offset; + if (trace[i].action == 1) + { + if (verbose) + s = format (s, "adding [%u, %u]:", trace[i].offset, + (trace[i].offset + + trace[i].len) % dummy_fifo->nitems); + svm_fifo_enqueue_with_offset (dummy_fifo, trace[i].offset, + trace[i].len, &data[offset]); + } + else if (trace[i].action == 2) + { + if (verbose) + s = format (s, "adding [%u, %u]:", 0, trace[i].len); + svm_fifo_enqueue_nowait (dummy_fifo, trace[i].len, &data[offset]); + } + else if (!no_read) + { + if (verbose) + s = format (s, "read: %u", trace[i].len); + svm_fifo_dequeue_drop (dummy_fifo, trace[i].len); + } + if (verbose) + s = format (s, "%U", format_svm_fifo, 
dummy_fifo, 1); + } + + s = format (s, "result: %U", format_svm_fifo, dummy_fifo, 1); + + return s; +} + u8 * format_ooo_list (u8 * s, va_list * args) { @@ -73,6 +157,7 @@ format_ooo_list (u8 * s, va_list * args) s = format (s, " %U\n", format_ooo_segment, seg); ooo_segment_index = seg->next; } + return s; } @@ -94,10 +179,10 @@ format_svm_fifo (u8 * s, va_list * args) if (verbose) { - s = format (s, " ooo pool %d active elts\n", - pool_elts (f->ooo_segments)); + s = format (s, " ooo pool %d active elts newest %u\n", + pool_elts (f->ooo_segments), f->ooos_newest); if (svm_fifo_has_ooo_data (f)) - s = format (s, " %U", format_ooo_list, f); + s = format (s, " %U", format_ooo_list, f, verbose); } return s; } @@ -116,7 +201,6 @@ svm_fifo_create (u32 data_size_in_bytes) memset (f, 0, sizeof (*f)); f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; - return (f); } @@ -178,6 +262,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) u32 new_index, s_end_pos, s_index; u32 normalized_position, normalized_end_position; + ASSERT (offset + length <= ooo_segment_distance_from_tail (f, f->head)); normalized_position = (f->tail + offset) % f->nitems; normalized_end_position = (f->tail + offset + length) % f->nitems; @@ -205,17 +290,9 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) s = prev; s_end_pos = ooo_segment_end_pos (f, s); - /* Check head and tail now since segment may be wider at both ends so - * merge tests lower won't work */ - if (position_lt (f, normalized_position, s->start)) - { - s->start = normalized_position; - s->length = position_diff (f, s_end_pos, s->start); - } - if (position_gt (f, normalized_end_position, s_end_pos)) - { - s->length = position_diff (f, normalized_end_position, s->start); - } + /* Since we have previous, normalized start position cannot be smaller + * than prev->start. Check tail */ + ASSERT (position_lt (f, s->start, normalized_position)); goto check_tail; } @@ -256,6 +333,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) /* Pool might've moved, get segment again */ s = pool_elt_at_index (f->ooo_segments, s_index); + /* Needs to be last */ ASSERT (s->next == OOO_SEGMENT_INVALID_INDEX); new_s->prev = s_index; @@ -274,32 +352,22 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) { s->start = normalized_position; s->length = position_diff (f, s_end_pos, s->start); - } - /* Overlapping tail */ - else if (position_gt (f, normalized_end_position, s_end_pos)) - { - s->length = position_diff (f, normalized_end_position, s->start); - } - /* New segment completely covered by current one */ - else - { - /* Do Nothing */ - s = 0; - goto done; + f->ooos_newest = s - f->ooo_segments; } check_tail: - /* The new segment's tail may cover multiple smaller ones */ + + /* Overlapping tail */ if (position_gt (f, normalized_end_position, s_end_pos)) { - /* Remove the completely overlapped segments */ - it = (s->next != OOO_SEGMENT_INVALID_INDEX) ? - pool_elt_at_index (f->ooo_segments, s->next) : 0; + s->length = position_diff (f, normalized_end_position, s->start); + + /* Remove the completely overlapped segments in the tail */ + it = ooo_segment_next (f, s); while (it && position_leq (f, ooo_segment_end_pos (f, it), normalized_end_position)) { - next = (it->next != OOO_SEGMENT_INVALID_INDEX) ? 
- pool_elt_at_index (f->ooo_segments, it->next) : 0; + next = ooo_segment_next (f, it); ooo_segment_del (f, it - f->ooo_segments); it = next; } @@ -307,16 +375,12 @@ check_tail: /* If partial overlap with last, merge */ if (it && position_leq (f, it->start, normalized_end_position)) { - s->length = - position_diff (f, ooo_segment_end_pos (f, it), s->start); + s->length = position_diff (f, ooo_segment_end_pos (f, it), + s->start); ooo_segment_del (f, it - f->ooo_segments); } + f->ooos_newest = s - f->ooo_segments; } - -done: - /* Most recently updated segment */ - if (s) - f->ooos_newest = s - f->ooo_segments; } /** @@ -422,6 +486,8 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) total_copy_bytes = max_bytes; } + svm_fifo_trace_add (f, f->head, total_copy_bytes, 2); + /* Any out-of-order segments to collect? */ if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX)) total_copy_bytes += ooo_segment_try_collect (f, total_copy_bytes); @@ -499,6 +565,8 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, if ((required_bytes + offset_from_tail) > (nitems - cursize)) return -1; + svm_fifo_trace_add (f, offset, required_bytes, 1); + ooo_segment_add (f, offset, required_bytes); /* Number of bytes we're going to copy */ @@ -707,6 +775,8 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes) /* Number of bytes we're going to drop */ total_drop_bytes = (cursize < max_bytes) ? cursize : max_bytes; + svm_fifo_trace_add (f, f->tail, total_drop_bytes, 3); + /* Number of bytes in first copy segment */ first_drop_bytes = ((nitems - f->head) < total_drop_bytes) ? diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index fe21de47..a83cd858 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -36,8 +36,16 @@ typedef struct format_function_t format_ooo_segment; format_function_t format_ooo_list; +#define SVM_FIFO_TRACE (0) #define OOO_SEGMENT_INVALID_INDEX ((u32)~0) +typedef struct +{ + u32 offset; + u32 len; + u32 action; +} svm_fifo_trace_elem_t; + typedef struct _svm_fifo { volatile u32 cursize; /**< current fifo size */ @@ -64,9 +72,28 @@ typedef struct _svm_fifo u32 ooos_newest; /**< Last segment to have been updated */ struct _svm_fifo *next; /**< next in freelist/active chain */ struct _svm_fifo *prev; /**< prev in active chain */ +#if SVM_FIFO_TRACE + svm_fifo_trace_elem_t *trace; +#endif CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; +#if SVM_FIFO_TRACE +#define svm_fifo_trace_add(_f, _s, _l, _t) \ +{ \ + svm_fifo_trace_elem_t *trace_elt; \ + vec_add2(_f->trace, trace_elt, 1); \ + trace_elt->offset = _s; \ + trace_elt->len = _l; \ + trace_elt->action = _t; \ +} +#else +#define svm_fifo_trace_add(_f, _s, _l, _t) +#endif + +u8 *svm_fifo_dump_trace (u8 * s, svm_fifo_t * f); +u8 *svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose); + static inline u32 svm_fifo_max_dequeue (svm_fifo_t * f) { @@ -132,6 +159,12 @@ svm_fifo_newest_ooo_segment (svm_fifo_t * f) return pool_elt_at_index (f->ooo_segments, f->ooos_newest); } +always_inline void +svm_fifo_newest_ooo_segment_reset (svm_fifo_t * f) +{ + f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; +} + always_inline u32 ooo_segment_distance_from_tail (svm_fifo_t * f, u32 pos) { @@ -174,6 +207,14 @@ ooo_segment_get_prev (svm_fifo_t * f, ooo_segment_t * s) return pool_elt_at_index (f->ooo_segments, s->prev); } +always_inline ooo_segment_t * +ooo_segment_next (svm_fifo_t * f, ooo_segment_t * s) +{ + if (s->next == OOO_SEGMENT_INVALID_INDEX) + return 0; + return pool_elt_at_index (f->ooo_segments, 
s->next); +} + #endif /* __included_ssvm_fifo_h__ */ /* diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 0a86d563..2c2a27c1 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -325,9 +325,9 @@ stream_session_half_open_lookup (session_manager_main_t * smm, } transport_connection_t * -stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) +stream_session_lookup_transport_wt4 (ip4_address_t * lcl, ip4_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto, + u32 my_thread_index) { session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; @@ -358,9 +358,40 @@ stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, } transport_connection_t * -stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) +stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto) +{ + session_manager_main_t *smm = &session_manager_main; + session_kv4_t kv4; + stream_session_t *s; + int rv; + + /* Lookup session amongst established ones */ + make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); + if (rv == 0) + { + s = stream_session_get_from_handle (kv4.value); + return tp_vfts[s->session_type].get_connection (s->connection_index, + s->thread_index); + } + + /* If nothing is found, check if any listener is available */ + s = stream_session_lookup_listener4 (lcl, lcl_port, proto); + if (s) + return tp_vfts[s->session_type].get_listener (s->connection_index); + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); + if (rv == 0) + return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF); + return 0; +} + +transport_connection_t * +stream_session_lookup_transport_wt6 (ip6_address_t * lcl, ip6_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto, + u32 my_thread_index) { session_manager_main_t *smm = &session_manager_main; stream_session_t *s; @@ -390,6 +421,37 @@ stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, return 0; } +transport_connection_t * +stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, + u16 lcl_port, u16 rmt_port, u8 proto) +{ + session_manager_main_t *smm = &session_manager_main; + stream_session_t *s; + session_kv6_t kv6; + int rv; + + make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto); + rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); + if (rv == 0) + { + s = stream_session_get_from_handle (kv6.value); + return tp_vfts[s->session_type].get_connection (s->connection_index, + s->thread_index); + } + + /* If nothing is found, check if any listener is available */ + s = stream_session_lookup_listener6 (lcl, lcl_port, proto); + if (s) + return tp_vfts[s->session_type].get_listener (s->connection_index); + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); + if (rv == 0) + return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF); + + return 0; +} + int stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, stream_session_t ** ret_s) diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index b4507d4e..6069c574 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ 
-263,15 +263,30 @@ stream_session_t *stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto); transport_connection_t - * stream_session_lookup_transport4 (ip4_address_t * lcl, - ip4_address_t * rmt, u16 lcl_port, - u16 rmt_port, u8 proto, - u32 thread_index); -transport_connection_t - * stream_session_lookup_transport6 (ip6_address_t * lcl, - ip6_address_t * rmt, u16 lcl_port, - u16 rmt_port, u8 proto, - u32 thread_index); + * stream_session_lookup_transport_wt4 (ip4_address_t * lcl, + ip4_address_t * rmt, u16 lcl_port, + u16 rmt_port, u8 proto, + u32 thread_index); +transport_connection_t *stream_session_lookup_transport4 (ip4_address_t * lcl, + ip4_address_t * rmt, + u16 lcl_port, + u16 rmt_port, + u8 proto); +transport_connection_t *stream_session_lookup_transport_wt6 (ip6_address_t * + lcl, + ip6_address_t * + rmt, + u16 lcl_port, + u16 rmt_port, + u8 proto, + u32 + thread_index); +transport_connection_t *stream_session_lookup_transport6 (ip6_address_t * lcl, + ip6_address_t * rmt, + u16 lcl_port, + u16 rmt_port, + u8 proto); + stream_session_t *stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto); void stream_session_table_add_for_tc (transport_connection_t * tc, u64 value); @@ -415,7 +430,12 @@ void stream_session_cleanup (stream_session_t * s); void session_send_session_evt_to_thread (u64 session_handle, fifo_event_type_t evt_type, u32 thread_index); + u8 *format_stream_session (u8 * s, va_list * args); +uword unformat_stream_session (unformat_input_t * input, va_list * args); +uword unformat_transport_connection (unformat_input_t * input, + va_list * args); + int send_session_connected_callback (u32 app_index, u32 api_context, stream_session_t * s, u8 is_fail); diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index e06bc586..e8e6f99c 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -81,12 +81,141 @@ format_stream_session (u8 * s, va_list * args) { clib_warning ("Session in state: %d!", ss->session_state); } - vec_free (str); return s; } +uword +unformat_stream_session_id (unformat_input_t * input, va_list * args) +{ + u8 *proto = va_arg (*args, u8 *); + ip46_address_t *lcl = va_arg (*args, ip46_address_t *); + ip46_address_t *rmt = va_arg (*args, ip46_address_t *); + u16 *lcl_port = va_arg (*args, u16 *); + u16 *rmt_port = va_arg (*args, u16 *); + u8 *is_ip4 = va_arg (*args, u8 *); + u8 tuple_is_set = 0; + + memset (lcl, 0, sizeof (*lcl)); + memset (rmt, 0, sizeof (*rmt)); + + if (unformat (input, "tcp")) + { + *proto = TRANSPORT_PROTO_TCP; + } + if (unformat (input, "udp")) + { + *proto = TRANSPORT_PROTO_UDP; + } + else if (unformat (input, "%U:%d->%U:%d", unformat_ip4_address, &lcl->ip4, + lcl_port, unformat_ip4_address, &rmt->ip4, rmt_port)) + { + *is_ip4 = 1; + tuple_is_set = 1; + } + else if (unformat (input, "%U:%d->%U:%d", unformat_ip6_address, &lcl->ip6, + lcl_port, unformat_ip6_address, &rmt->ip6, rmt_port)) + { + *is_ip4 = 0; + tuple_is_set = 1; + } + else + return 0; + + if (tuple_is_set) + return 1; + + return 0; +} + +uword +unformat_stream_session (unformat_input_t * input, va_list * args) +{ + stream_session_t **result = va_arg (*args, stream_session_t **); + stream_session_t *s; + u8 proto = ~0; + ip46_address_t lcl, rmt; + u32 lcl_port = 0, rmt_port = 0; + u8 is_ip4 = 0, s_type = ~0, id_is_set = 0; + + if (unformat (input, "%U", unformat_stream_session_id, &proto, &lcl, &rmt, + &lcl_port, &rmt_port, &is_ip4)) + { + id_is_set = 1; + 
} + else + return 0; + + if (!id_is_set) + { + return 0; + } + + s_type = session_type_from_proto_and_ip (proto, is_ip4); + if (is_ip4) + s = stream_session_lookup4 (&lcl.ip4, &rmt.ip4, + clib_host_to_net_u16 (lcl_port), + clib_host_to_net_u16 (rmt_port), s_type); + else + s = stream_session_lookup6 (&lcl.ip6, &rmt.ip6, + clib_host_to_net_u16 (lcl_port), + clib_host_to_net_u16 (rmt_port), s_type); + if (s) + { + *result = s; + return 1; + } + return 0; +} + +uword +unformat_transport_connection (unformat_input_t * input, va_list * args) +{ + transport_connection_t **result = va_arg (*args, transport_connection_t **); + u32 suggested_proto = va_arg (*args, u32); + transport_connection_t *tc; + u8 proto = ~0; + ip46_address_t lcl, rmt; + u32 lcl_port = 0, rmt_port = 0; + u8 is_ip4 = 0, s_type = ~0, id_is_set = 0; + + if (unformat (input, "%U", unformat_stream_session_id, &proto, &lcl, &rmt, + &lcl_port, &rmt_port, &is_ip4)) + { + id_is_set = 1; + } + else + return 0; + + if (!id_is_set) + { + return 0; + } + + proto = (proto == (u8) ~ 0) ? suggested_proto : proto; + if (proto == (u8) ~ 0) + return 0; + s_type = session_type_from_proto_and_ip (proto, is_ip4); + if (is_ip4) + tc = stream_session_lookup_transport4 (&lcl.ip4, &rmt.ip4, + clib_host_to_net_u16 (lcl_port), + clib_host_to_net_u16 (rmt_port), + s_type); + else + tc = stream_session_lookup_transport6 (&lcl.ip6, &rmt.ip6, + clib_host_to_net_u16 (lcl_port), + clib_host_to_net_u16 (rmt_port), + s_type); + + if (tc) + { + *result = tc; + return 1; + } + return 0; +} + static clib_error_t * show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) @@ -95,13 +224,7 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, int verbose = 0, i; stream_session_t *pool; stream_session_t *s; - u8 *str = 0, one_session = 0, proto_set = 0, proto = 0; - u8 is_ip4 = 0, s_type = 0; - ip4_address_t lcl_ip4, rmt_ip4; - u32 lcl_port = 0, rmt_port = 0; - - memset (&lcl_ip4, 0, sizeof (lcl_ip4)); - memset (&rmt_ip4, 0, sizeof (rmt_ip4)); + u8 *str = 0, one_session = 0; if (!smm->is_enabled) { @@ -114,40 +237,18 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, ; else if (unformat (input, "verbose")) verbose = 1; - else if (unformat (input, "tcp")) - { - proto_set = 1; - proto = TRANSPORT_PROTO_TCP; - } - else if (unformat (input, "%U:%d->%U:%d", - unformat_ip4_address, &lcl_ip4, &lcl_port, - unformat_ip4_address, &rmt_ip4, &rmt_port)) + else if (unformat (input, "%U", unformat_stream_session, &s)) { one_session = 1; - is_ip4 = 1; } - else - break; + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); } if (one_session) { - if (!proto_set) - { - vlib_cli_output (vm, "proto not set"); - return clib_error_return (0, "proto not set"); - } - - s_type = session_type_from_proto_and_ip (proto, is_ip4); - s = stream_session_lookup4 (&lcl_ip4, &rmt_ip4, - clib_host_to_net_u16 (lcl_port), - clib_host_to_net_u16 (rmt_port), s_type); - if (s) - vlib_cli_output (vm, "%U", format_stream_session, s, 2); - else - vlib_cli_output (vm, "session does not exist"); - + vlib_cli_output (vm, "%U", format_stream_session, s, 2); return 0; } @@ -274,6 +375,103 @@ VLIB_CLI_COMMAND (clear_session_command, static) = }; /* *INDENT-ON* */ +static clib_error_t * +show_session_fifo_trace_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + stream_session_t *s = 0; + u8 is_rx = 0, *str = 0; + + while (unformat_check_input (input) != 
UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%U", unformat_stream_session, &s)) + ; + else if (unformat (input, "rx")) + is_rx = 1; + else if (unformat (input, "tx")) + is_rx = 0; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + if (!SVM_FIFO_TRACE) + { + vlib_cli_output (vm, "fifo tracing not enabled"); + return 0; + } + + if (!s) + { + vlib_cli_output (vm, "could not find session"); + return 0; + } + + str = is_rx ? + svm_fifo_dump_trace (str, s->server_rx_fifo) : + svm_fifo_dump_trace (str, s->server_tx_fifo); + + vlib_cli_output (vm, "%v", str); + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_session_fifo_trace_command, static) = +{ + .path = "show session fifo trace", + .short_help = "show session fifo trace ", + .function = show_session_fifo_trace_command_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +session_replay_fifo_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + stream_session_t *s = 0; + u8 is_rx = 0, *str = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%U", unformat_stream_session, &s)) + ; + else if (unformat (input, "rx")) + is_rx = 1; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + if (!SVM_FIFO_TRACE) + { + vlib_cli_output (vm, "fifo tracing not enabled"); + return 0; + } + + if (!s) + { + vlib_cli_output (vm, "could not find session"); + return 0; + } + + str = is_rx ? + svm_fifo_replay (str, s->server_rx_fifo, 0, 1) : + svm_fifo_replay (str, s->server_tx_fifo, 0, 1); + + vlib_cli_output (vm, "%v", str); + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (session_replay_fifo_trace_command, static) = +{ + .path = "session replay fifo", + .short_help = "session replay fifo ", + .function = session_replay_fifo_command_fn, +}; +/* *INDENT-ON* */ + static clib_error_t * session_enable_disable_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index a6c8a235..a92bacaa 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -464,6 +464,8 @@ builtin_session_connected_callback (u32 app_index, u32 api_context, static void builtin_session_reset_callback (stream_session_t * s) { + if (s->session_state == SESSION_STATE_READY) + clib_warning ("Reset active connection %U", format_stream_session, s, 2); return; } diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 8e958ac0..4ecaf56a 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -99,8 +99,7 @@ builtin_session_disconnect_callback (stream_session_t * s) void builtin_session_reset_callback (stream_session_t * s) { - clib_warning ("called.. 
"); - + clib_warning ("Reset session %U", format_stream_session, s, 2); stream_session_cleanup (s); } @@ -224,10 +223,6 @@ builtin_server_rx_callback (stream_session_t * s) clib_warning ("session stuck: %U", format_stream_session, s, 2); } } - else - { - bsm->rx_retries[thread_index][s->session_index] = 0; - } return 0; } diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index f379e699..8ed325d2 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -732,6 +732,7 @@ format_tcp_connection (u8 * s, va_list * args) if (verbose > 1) s = format (s, " %U\n%U", format_tcp_timers, tc, format_tcp_vars, tc); } + return s; } @@ -791,6 +792,30 @@ format_tcp_sacks (u8 * s, va_list * args) return s; } +u8 * +format_tcp_rcv_sacks (u8 * s, va_list * args) +{ + tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); + sack_block_t *sacks = tc->rcv_opts.sacks; + sack_block_t *block; + int i, len = 0; + + len = vec_len (sacks); + for (i = 0; i < len - 1; i++) + { + block = &sacks[i]; + s = format (s, " start %u end %u\n", block->start - tc->iss, + block->end - tc->iss); + } + if (len) + { + block = &sacks[len - 1]; + s = format (s, " start %u end %u", block->start - tc->iss, + block->end - tc->iss); + } + return s; +} + u8 * format_tcp_sack_hole (u8 * s, va_list * args) { @@ -820,6 +845,7 @@ format_tcp_scoreboard (u8 * s, va_list * args) s = format (s, "%U", format_tcp_sack_hole, hole); hole = scoreboard_next_hole (sb, hole); } + return s; } @@ -1304,7 +1330,189 @@ VLIB_CLI_COMMAND (tcp_src_address_command, static) = }; /* *INDENT-ON* */ +static u8 * +tcp_scoreboard_dump_trace (u8 * s, sack_scoreboard_t * sb) +{ +#if TCP_SCOREBOARD_TRACE + + scoreboard_trace_elt_t *block; + int i = 0; + + if (!sb->trace) + return s; + + s = format (s, "scoreboard trace:"); + vec_foreach (block, sb->trace) + { + s = format (s, "{%u, %u, %u, %u, %u}, ", block->start, block->end, + block->ack, block->snd_una_max, block->group); + if ((++i % 3) == 0) + s = format (s, "\n"); + } + return s; +#else + return 0; +#endif +} + +static clib_error_t * +tcp_show_scoreboard_trace_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd_arg) +{ + transport_connection_t *tconn = 0; + tcp_connection_t *tc; + u8 *s = 0; + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%U", unformat_transport_connection, &tconn, + TRANSPORT_PROTO_TCP)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + if (!TCP_SCOREBOARD_TRACE) + { + vlib_cli_output (vm, "scoreboard tracing not enabled"); + return 0; + } + + tc = tcp_get_connection_from_transport (tconn); + s = tcp_scoreboard_dump_trace (s, &tc->sack_sb); + vlib_cli_output (vm, "%v", s); + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (tcp_show_scoreboard_trace_command, static) = +{ + .path = "show tcp scoreboard trace", + .short_help = "show tcp scoreboard trace ", + .function = tcp_show_scoreboard_trace_fn, +}; +/* *INDENT-ON* */ +u8 * +tcp_scoreboard_replay (u8 * s, tcp_connection_t * tc, u8 verbose) +{ + int i, trace_len; + scoreboard_trace_elt_t *trace; + u32 next_ack, left, group, has_new_ack = 0; + tcp_connection_t _dummy_tc, *dummy_tc = &_dummy_tc; + sack_block_t *block; + + if (!tc) + return s; + + memset (dummy_tc, 0, sizeof (*dummy_tc)); + tcp_connection_timers_init (dummy_tc); + scoreboard_init (&dummy_tc->sack_sb); + dummy_tc->rcv_opts.flags |= TCP_OPTS_FLAG_SACK; + +#if TCP_SCOREBOARD_TRACE + trace = tc->sack_sb.trace; + trace_len = vec_len (tc->sack_sb.trace); 
+#else + trace = 0; + trace_len = 0; +#endif + + for (i = 0; i < trace_len; i++) + { + if (trace[i].ack != 0) + { + dummy_tc->snd_una = trace[i].ack - 1448; + dummy_tc->snd_una_max = trace[i].ack; + } + } + + left = 0; + while (left < trace_len) + { + group = trace[left].group; + vec_reset_length (dummy_tc->rcv_opts.sacks); + has_new_ack = 0; + while (trace[left].group == group) + { + if (trace[left].ack != 0) + { + if (verbose) + s = format (s, "Adding ack %u, snd_una_max %u, segs: ", + trace[left].ack, trace[left].snd_una_max); + dummy_tc->snd_una_max = trace[left].snd_una_max; + next_ack = trace[left].ack; + has_new_ack = 1; + } + else + { + if (verbose) + s = format (s, "[%u, %u], ", trace[left].start, + trace[left].end); + vec_add2 (dummy_tc->rcv_opts.sacks, block, 1); + block->start = trace[left].start; + block->end = trace[left].end; + } + left++; + } + + /* Push segments */ + tcp_rcv_sacks (dummy_tc, next_ack); + if (has_new_ack) + dummy_tc->snd_una = next_ack + dummy_tc->sack_sb.snd_una_adv; + + if (verbose) + s = format (s, "result: %U", format_tcp_scoreboard, + &dummy_tc->sack_sb); + + } + s = format (s, "result: %U", format_tcp_scoreboard, &dummy_tc->sack_sb); + + return s; +} + +static clib_error_t * +tcp_scoreboard_trace_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd_arg) +{ + transport_connection_t *tconn = 0; + tcp_connection_t *tc = 0; + u8 *str = 0; + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%U", unformat_transport_connection, &tconn, + TRANSPORT_PROTO_TCP)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + if (!TCP_SCOREBOARD_TRACE) + { + vlib_cli_output (vm, "scoreboard tracing not enabled"); + return 0; + } + + tc = tcp_get_connection_from_transport (tconn); + if (!tc) + { + vlib_cli_output (vm, "connection not found"); + return 0; + } + str = tcp_scoreboard_replay (str, tc, 1); + vlib_cli_output (vm, "%v", str); + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (tcp_replay_scoreboard_command, static) = +{ + .path = "tcp replay scoreboard", + .short_help = "tcp replay scoreboard ", + .function = tcp_scoreboard_trace_fn, +}; +/* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 37b10fd4..fd0d02b9 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -62,6 +62,7 @@ typedef enum _tcp_state format_function_t format_tcp_state; format_function_t format_tcp_flags; format_function_t format_tcp_sacks; +format_function_t format_tcp_rcv_sacks; /** TCP timers */ #define foreach_tcp_timer \ @@ -151,9 +152,19 @@ enum #undef _ }; +#define TCP_SCOREBOARD_TRACE (0) #define TCP_MAX_SACK_BLOCKS 15 /**< Max number of SACK blocks stored */ #define TCP_INVALID_SACK_HOLE_INDEX ((u32)~0) +typedef struct _scoreboard_trace_elt +{ + u32 start; + u32 end; + u32 ack; + u32 snd_una_max; + u32 group; +} scoreboard_trace_elt_t; + typedef struct _sack_scoreboard_hole { u32 next; /**< Index for next entry in linked list */ @@ -177,8 +188,38 @@ typedef struct _sack_scoreboard u32 rescue_rxt; /**< Rescue sequence number */ u32 lost_bytes; /**< Bytes lost as per RFC6675 */ u32 cur_rxt_hole; /**< Retransmitting from this hole */ + +#if TCP_SCOREBOARD_TRACE + scoreboard_trace_elt_t *trace; +#endif + } sack_scoreboard_t; +#if TCP_SCOREBOARD_TRACE +#define tcp_scoreboard_trace_add(_tc, _ack) \ +{ \ + static u64 _group = 0; \ + sack_scoreboard_t *_sb = &_tc->sack_sb; \ + sack_block_t *_sack, *_sacks; 
\ + scoreboard_trace_elt_t *_elt; \ + int i; \ + _group++; \ + _sacks = _tc->rcv_opts.sacks; \ + for (i = 0; i < vec_len (_sacks); i++) \ + { \ + _sack = &_sacks[i]; \ + vec_add2 (_sb->trace, _elt, 1); \ + _elt->start = _sack->start; \ + _elt->end = _sack->end; \ + _elt->ack = _elt->end == _ack ? _ack : 0; \ + _elt->snd_una_max = _elt->end == _ack ? _tc->snd_una_max : 0; \ + _elt->group = _group; \ + } \ +} +#else +#define tcp_scoreboard_trace_add(_tc, _ack) +#endif + typedef enum _tcp_cc_algorithm_type { TCP_CC_NEWRENO, @@ -405,6 +446,12 @@ tcp_connection_get_if_valid (u32 conn_index, u32 thread_index) return pool_elt_at_index (tcp_main.connections[thread_index], conn_index); } +always_inline tcp_connection_t * +tcp_get_connection_from_transport (transport_connection_t * tconn) +{ + return (tcp_connection_t *) tconn; +} + void tcp_connection_close (tcp_connection_t * tc); void tcp_connection_cleanup (tcp_connection_t * tc); void tcp_connection_del (tcp_connection_t * tc); @@ -414,6 +461,8 @@ u8 *format_tcp_connection_id (u8 * s, va_list * args); u8 *format_tcp_connection (u8 * s, va_list * args); u8 *format_tcp_scoreboard (u8 * s, va_list * args); +u8 *tcp_scoreboard_replay (u8 * s, tcp_connection_t * tc, u8 verbose); + always_inline tcp_connection_t * tcp_listener_get (u32 tli) { @@ -689,7 +738,7 @@ sack_scoreboard_hole_t *scoreboard_next_rxt_hole (sack_scoreboard_t * sb, start, u8 have_sent_1_smss, u8 * can_rescue, u8 * snd_limited); -void scoreboard_init_high_rxt (sack_scoreboard_t * sb); +void scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 seq); always_inline sack_scoreboard_hole_t * scoreboard_get_hole (sack_scoreboard_t * sb, u32 index) @@ -740,6 +789,7 @@ scoreboard_clear (sack_scoreboard_t * sb) scoreboard_remove_hole (sb, hole); } ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX); + ASSERT (pool_elts (sb->holes) == 0); sb->sacked_bytes = 0; sb->last_sacked_bytes = 0; sb->last_bytes_delivered = 0; @@ -759,6 +809,7 @@ scoreboard_hole_bytes (sack_scoreboard_hole_t * hole) always_inline u32 scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) { + ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes)); return hole - sb->holes; } diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 45db0da6..bc7d9015 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -206,8 +206,8 @@ tcp_options_parse (tcp_header_t * th, tcp_options_t * to) vec_reset_length (to->sacks); for (j = 0; j < to->n_sack_blocks; j++) { - b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 4 * j)); - b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 4 * j)); + b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j)); + b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j)); vec_add1 (to->sacks, b); } break; @@ -540,6 +540,10 @@ scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole) if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole) sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX; + /* Poison the entry */ + if (CLIB_DEBUG > 0) + memset (hole, 0xfe, sizeof (*hole)); + pool_put (sb->holes, hole); } @@ -555,7 +559,7 @@ scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index, hole->start = start; hole->end = end; - hole_index = hole - sb->holes; + hole_index = scoreboard_hole_index (sb, hole); prev = scoreboard_get_hole (sb, prev_index); if (prev) @@ -680,12 +684,30 @@ scoreboard_next_rxt_hole (sack_scoreboard_t * sb, } void -scoreboard_init_high_rxt (sack_scoreboard_t * sb) 
+scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 seq) { sack_scoreboard_hole_t *hole; hole = scoreboard_first_hole (sb); - sb->high_rxt = hole->start; - sb->cur_rxt_hole = sb->head; + if (hole) + { + seq = seq_gt (seq, hole->start) ? seq : hole->start; + sb->cur_rxt_hole = sb->head; + } + sb->high_rxt = seq; +} + +/** + * Test that scoreboard is sane after recovery + * + * Returns 1 if scoreboard is empty or if first hole beyond + * snd_una. + */ +u8 +tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc) +{ + sack_scoreboard_hole_t *hole; + hole = scoreboard_first_hole (&tc->sack_sb); + return (!hole || seq_geq (hole->start, tc->snd_una)); } void @@ -712,7 +734,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { if (seq_lt (blk->start, blk->end) && seq_gt (blk->start, tc->snd_una) - && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_nxt)) + && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_una_max)) { blk++; continue; @@ -731,6 +753,8 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) if (vec_len (tc->rcv_opts.sacks) == 0) return; + tcp_scoreboard_trace_add (tc, ack); + /* Make sure blocks are ordered */ for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++) for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++) @@ -797,7 +821,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) sb->last_bytes_delivered += next_hole->start - hole->end; } - else if (!next_hole) + else { ASSERT (seq_geq (sb->high_sacked, ack)); sb->snd_una_adv = sb->high_sacked - ack; @@ -824,12 +848,14 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) if (seq_lt (blk->end, hole->end)) { hole_index = scoreboard_hole_index (sb, hole); - scoreboard_insert_hole (sb, hole_index, blk->end, hole->end); + next_hole = scoreboard_insert_hole (sb, hole_index, blk->end, + hole->end); /* Pool might've moved */ hole = scoreboard_get_hole (sb, hole_index); hole->end = blk->start; blk_index++; + ASSERT (hole->next == scoreboard_hole_index (sb, next_hole)); } else if (seq_lt (blk->start, hole->end)) { @@ -957,7 +983,7 @@ tcp_cc_recover (tcp_connection_t * tc) ASSERT (tc->rto_boff == 0); ASSERT (!tcp_in_cong_recovery (tc)); - + ASSERT (tcp_scoreboard_is_sane_post_recovery (tc)); TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3); return 0; } @@ -965,7 +991,7 @@ tcp_cc_recover (tcp_connection_t * tc) static void tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b) { - ASSERT (!tcp_in_cong_recovery (tc)); + ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc)); /* Congestion avoidance */ tc->cc_algo->rcv_ack (tc); @@ -1064,10 +1090,10 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) ASSERT (tc->cwnd >= tc->snd_mss); /* If cwnd allows, send more data */ - if (tcp_opts_sack_permitted (&tc->rcv_opts) - && scoreboard_first_hole (&tc->sack_sb)) + if (tcp_opts_sack_permitted (&tc->rcv_opts)) { - scoreboard_init_high_rxt (&tc->sack_sb); + scoreboard_init_high_rxt (&tc->sack_sb, + tc->snd_una + tc->snd_mss); tcp_fast_retransmit_sack (tc); } else @@ -1134,12 +1160,13 @@ partial_ack: /* Remove retransmitted bytes that have been delivered */ ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv >= tc->sack_sb.last_bytes_delivered); - rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv - - tc->sack_sb.last_bytes_delivered; - if (0 && rxt_delivered && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) + + if (seq_lt (tc->snd_una, tc->sack_sb.high_rxt)) { /* If we have sacks and we haven't gotten an ack beyond high_rxt, * remove sacked bytes delivered */ + rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv + - 
tc->sack_sb.last_bytes_delivered; ASSERT (tc->snd_rxt_bytes >= rxt_delivered); tc->snd_rxt_bytes -= rxt_delivered; } @@ -1256,6 +1283,18 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, return 0; } +static u8 +tcp_sack_vector_is_sane (sack_block_t * sacks) +{ + int i; + for (i = 1; i < vec_len (sacks); i++) + { + if (sacks[i - 1].end == sacks[i].start) + return 0; + } + return 1; +} + /** * Build SACK list as per RFC2018. * @@ -1316,6 +1355,9 @@ tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end) /* Replace old vector with new one */ vec_free (tc->snd_sacks); tc->snd_sacks = new_list; + + /* Segments should not 'touch' */ + ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks)); } /** Enqueue data for delivery to application */ @@ -1330,7 +1372,6 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, /* Pure ACK. Update rcv_nxt and be done. */ if (PREDICT_FALSE (data_len == 0)) { - tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end; return TCP_ERROR_PURE_ACK; } @@ -1385,7 +1426,7 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, u16 data_len) { stream_session_t *s0; - int rv; + int rv, offset; ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt)); @@ -1421,12 +1462,12 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo); if (newest) { - start = - tc->rcv_nxt + ooo_segment_offset (s0->server_rx_fifo, newest); + offset = ooo_segment_offset (s0->server_rx_fifo, newest); + ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt); + start = tc->rcv_nxt + offset; end = start + ooo_segment_length (s0->server_rx_fifo, newest); tcp_update_sack_list (tc, start, end); - - ASSERT (seq_gt (start, tc->rcv_nxt)); + svm_fifo_newest_ooo_segment_reset (s0->server_rx_fifo); } } @@ -2736,12 +2777,12 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* lookup session */ tc0 = (tcp_connection_t *) - stream_session_lookup_transport4 (&ip40->dst_address, - &ip40->src_address, - tcp0->dst_port, - tcp0->src_port, - SESSION_TYPE_IP4_TCP, - my_thread_index); + stream_session_lookup_transport_wt4 (&ip40->dst_address, + &ip40->src_address, + tcp0->dst_port, + tcp0->src_port, + SESSION_TYPE_IP4_TCP, + my_thread_index); } else { @@ -2754,12 +2795,12 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tc0 = (tcp_connection_t *) - stream_session_lookup_transport6 (&ip60->src_address, - &ip60->dst_address, - tcp0->src_port, - tcp0->dst_port, - SESSION_TYPE_IP6_TCP, - my_thread_index); + stream_session_lookup_transport_wt6 (&ip60->src_address, + &ip60->dst_address, + tcp0->src_port, + tcp0->dst_port, + SESSION_TYPE_IP6_TCP, + my_thread_index); } /* Length check */ @@ -2931,6 +2972,8 @@ do { \ _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); + _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, + TCP_ERROR_NONE); /* ACK or FIN-ACK to our FIN */ _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, @@ -2954,6 +2997,7 @@ do { \ _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED); 
_(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 510deb4f..f37ba96d 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -34,6 +34,38 @@ } \ } +/* *INDENT-OFF* */ +scoreboard_trace_elt_t sb_trace[] = {}; +/* *INDENT-ON* */ + +static int +tcp_test_scoreboard_replay (vlib_main_t * vm, unformat_input_t * input) +{ + int verbose = 0; + tcp_connection_t _tc, *tc = &_tc; + u8 *s = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "detail")) + verbose = 1; + else + { + clib_error_t *e = clib_error_return + (0, "unknown input `%U'", format_unformat_error, input); + clib_error_report (e); + return -1; + } + } + +#if TCP_SCOREBOARD_TRACE + tc->sack_sb.trace = sb_trace; +#endif + s = tcp_scoreboard_replay (s, tc, verbose); + vlib_cli_output (vm, "%v", s); + return 0; +} + static int tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) { @@ -47,6 +79,8 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) { if (unformat (input, "verbose")) verbose = 1; + else if (unformat (input, "replay")) + return tcp_test_scoreboard_replay (vm, input); } memset (tc, 0, sizeof (*tc)); @@ -282,6 +316,44 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input) TCP_TEST ((sb->last_bytes_delivered == 400), "last bytes delivered %d", sb->last_bytes_delivered); + /* + * One hole close to head, patch head, split in two and start acking + * the lowest part + */ + scoreboard_clear (sb); + tc->snd_una = 0; + tc->snd_una_max = 1000; + tc->snd_nxt = 1000; + + block.start = 500; + block.end = 1000; + vec_add1 (tc->rcv_opts.sacks, block); + tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks); + + tcp_rcv_sacks (tc, 0); + if (verbose) + vlib_cli_output (vm, "sb added [500, 1000]:\n%U", + format_tcp_scoreboard, sb); + + vec_reset_length (tc->rcv_opts.sacks); + block.start = 300; + block.end = 400; + vec_add1 (tc->rcv_opts.sacks, block); + tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks); + tcp_rcv_sacks (tc, 100); + if (verbose) + vlib_cli_output (vm, "sb added [0, 100] [300, 400]:\n%U", + format_tcp_scoreboard, sb); + TCP_TEST ((pool_elts (sb->holes) == 2), + "scoreboard has %d elements", pool_elts (sb->holes)); + + tc->snd_una = 100; + tcp_rcv_sacks (tc, 200); + tcp_rcv_sacks (tc, 300); + if (verbose) + vlib_cli_output (vm, "sb added [0, 300]:\n%U", format_tcp_scoreboard, sb); + TCP_TEST ((sb->sacked_bytes == 500), "sacked bytes %d", sb->sacked_bytes); + return 0; } @@ -390,6 +462,37 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) vlib_cli_output (vm, "advance rcv_nxt to 1200\n%U", format_tcp_sacks, tc); TCP_TEST ((vec_len (tc->snd_sacks) == 0), "sack blocks %d expected %d", vec_len (tc->snd_sacks), 0); + + + /* + * Add 2 blocks, overwrite first and update rcv_nxt to also remove it + */ + + vec_reset_length (tc->snd_sacks); + tc->rcv_nxt = 0; + + tcp_update_sack_list (tc, 100, 200); + tcp_update_sack_list (tc, 300, 400); + + if (verbose) + vlib_cli_output (vm, "add [100, 200] [300, 400]\n%U", + format_tcp_sacks, tc); + TCP_TEST ((vec_len (tc->snd_sacks) == 2), + "sack blocks %d expected %d", vec_len (tc->snd_sacks), 2); + TCP_TEST ((tc->snd_sacks[0].start == 300), + "first sack block start %u expected %u", tc->snd_sacks[0].start, + 300); + + tc->rcv_nxt = 100; + tcp_update_sack_list (tc, 100, 100); + if (verbose) + vlib_cli_output (vm, "add [100, 200] 
rcv_nxt = 100\n%U", + format_tcp_sacks, tc); + TCP_TEST ((vec_len (tc->snd_sacks) == 1), + "sack blocks %d expected %d", vec_len (tc->snd_sacks), 1); + TCP_TEST ((tc->snd_sacks[0].start == 300), + "first sack block start %u expected %u", tc->snd_sacks[0].start, + 300); return 0; } @@ -1188,6 +1291,176 @@ tcp_test_fifo4 (vlib_main_t * vm, unformat_input_t * input) return 0; } +static u32 +fifo_pos (svm_fifo_t * f, u32 pos) +{ + return pos % f->nitems; +} + +static int +tcp_test_fifo5 (vlib_main_t * vm, unformat_input_t * input) +{ + svm_fifo_t *f; + u32 fifo_size = 400, j = 0, offset = 200; + int i, rv, verbose = 0; + u8 *test_data = 0, *data_buf = 0; + ooo_segment_t *ooo_seg; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "verbose")) + verbose = 1; + else + { + clib_error_t *e = clib_error_return + (0, "unknown input `%U'", format_unformat_error, input); + clib_error_report (e); + return -1; + } + } + + f = fifo_prepare (fifo_size); + svm_fifo_init_pointers (f, offset); + + vec_validate (test_data, 399); + for (i = 0; i < vec_len (test_data); i++) + test_data[i] = i % 0xff; + + /* + * Start with [100, 200] and [300, 400] + */ + svm_fifo_enqueue_with_offset (f, 100, 100, &test_data[100]); + svm_fifo_enqueue_with_offset (f, 300, 100, &test_data[300]); + + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 2), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + TCP_TEST ((f->ooos_newest == 1), "newest %u", f->ooos_newest); + if (verbose) + vlib_cli_output (vm, "fifo after [100, 200] and [300, 400] : %U", + format_svm_fifo, f, 2 /* verbose */ ); + + /* + * Add [225, 275] + */ + + rv = svm_fifo_enqueue_with_offset (f, 225, 50, &test_data[200]); + if (verbose) + vlib_cli_output (vm, "fifo after [225, 275] : %U", + format_svm_fifo, f, 2 /* verbose */ ); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 3), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + ooo_seg = svm_fifo_first_ooo_segment (f); + TCP_TEST ((ooo_seg->start == fifo_pos (f, 100 + offset)), + "first seg start %u expected %u", ooo_seg->start, + fifo_pos (f, 100 + offset)); + TCP_TEST ((ooo_seg->length == 100), "first seg length %u expected %u", + ooo_seg->length, 100); + ooo_seg = ooo_segment_next (f, ooo_seg); + TCP_TEST ((ooo_seg->start == fifo_pos (f, 225 + offset)), + "second seg start %u expected %u", + ooo_seg->start, fifo_pos (f, 225 + offset)); + TCP_TEST ((ooo_seg->length == 50), "second seg length %u expected %u", + ooo_seg->length, 50); + ooo_seg = ooo_segment_next (f, ooo_seg); + TCP_TEST ((ooo_seg->start == fifo_pos (f, 300 + offset)), + "third seg start %u expected %u", + ooo_seg->start, fifo_pos (f, 300 + offset)); + TCP_TEST ((ooo_seg->length == 100), "third seg length %u expected %u", + ooo_seg->length, 100); + TCP_TEST ((f->ooos_newest == 2), "newest %u", f->ooos_newest); + /* + * Add [190, 310] + */ + rv = svm_fifo_enqueue_with_offset (f, 190, 120, &test_data[190]); + if (verbose) + vlib_cli_output (vm, "fifo after [190, 310] : %U", + format_svm_fifo, f, 1 /* verbose */ ); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + ooo_seg = svm_fifo_first_ooo_segment (f); + TCP_TEST ((ooo_seg->start == fifo_pos (f, offset + 100)), + "first seg start %u expected %u", + ooo_seg->start, fifo_pos (f, offset + 100)); + TCP_TEST ((ooo_seg->length == 300), "first seg length %u expected %u", + ooo_seg->length, 300); + + /* + * Add [0, 150] + */ + rv = svm_fifo_enqueue_nowait (f, 150, 
test_data); + + if (verbose) + vlib_cli_output (vm, "fifo after [0 150] : %U", format_svm_fifo, f, + 2 /* verbose */ ); + + TCP_TEST ((rv == 400), "managed to enqueue %u expected %u", rv, 400); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + + vec_validate (data_buf, 399); + svm_fifo_peek (f, 0, 400, data_buf); + if (compare_data (data_buf, test_data, 0, 400, &j)) + { + TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], + test_data[j]); + } + + /* + * Add [100 200] and overlap it with [50 250] + */ + svm_fifo_free (f); + f = fifo_prepare (fifo_size); + + svm_fifo_enqueue_with_offset (f, 100, 100, &test_data[100]); + svm_fifo_enqueue_with_offset (f, 50, 200, &test_data[50]); + TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), + "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); + ooo_seg = svm_fifo_first_ooo_segment (f); + TCP_TEST ((ooo_seg->start == 50), "first seg start %u expected %u", + ooo_seg->start, 50); + TCP_TEST ((ooo_seg->length == 200), "first seg length %u expected %u", + ooo_seg->length, 200); + + svm_fifo_free (f); + vec_free (test_data); + return 0; +} + +/* *INDENT-OFF* */ +svm_fifo_trace_elem_t fifo_trace[] = {}; +/* *INDENT-ON* */ + +static int +tcp_test_fifo_replay (vlib_main_t * vm, unformat_input_t * input) +{ + svm_fifo_t f; + int verbose = 0; + u8 no_read = 0, *str = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "verbose")) + verbose = 1; + else if (unformat (input, "no-read")) + no_read = 1; + else + { + clib_error_t *e = clib_error_return + (0, "unknown input `%U'", format_unformat_error, input); + clib_error_report (e); + return -1; + } + } + +#if SVMF_FIFO_TRACE + f.trace = fifo_trace; +#endif + + str = svm_fifo_replay (str, &f, no_read, verbose); + vlib_cli_output (vm, "%v", str); + return 0; +} + static int tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) { @@ -1237,6 +1510,14 @@ tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) if (tcp_test_fifo3 (vm, input)) return -1; unformat_free (input); + + res = tcp_test_fifo4 (vm, input); + if (res) + return res; + + res = tcp_test_fifo5 (vm, input); + if (res) + return res; } else { @@ -1256,6 +1537,14 @@ tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) { res = tcp_test_fifo4 (vm, input); } + else if (unformat (input, "fifo5")) + { + res = tcp_test_fifo5 (vm, input); + } + else if (unformat (input, "replay")) + { + res = tcp_test_fifo_replay (vm, input); + } } return res; -- cgit 1.2.3-korg From d756b35032cdf7fdaaf0d6611388a54d32d72e92 Mon Sep 17 00:00:00 2001 From: Dave Wallace Date: Mon, 3 Jul 2017 13:11:38 -0400 Subject: Fix unlinking of /dev/shm files. 
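The notes that follow summarize the cleanup. As a rough illustration of the prefix problem only (a minimal sketch: the function name and format strings below are assumptions for illustration, not the actual svm helpers or naming scheme):

    #include <stdio.h>
    #include <sys/mman.h>

    /* Illustrative only: unlink a region's POSIX shm backing file, taking an
     * optional root-path prefix into account, so that "/dev/shm/<prefix>-<name>"
     * is removed rather than a non-existent "/dev/shm/<name>". */
    static void
    demo_region_unlink (const char *root_path, const char *region_name)
    {
      char shm_name[256];

      if (root_path && root_path[0])
        snprintf (shm_name, sizeof (shm_name), "/%s-%s", root_path, region_name);
      else
        snprintf (shm_name, sizeof (shm_name), "/%s", region_name);

      /* shm_open()/shm_unlink() names begin with '/'; on Linux the backing
       * file appears under /dev/shm. */
      shm_unlink (shm_name);
    }
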
- api-segment prefix not used when unlinking shm files - unlink root region on exit if no clients referenced - stale reference to freed segment name - don't add fake client to /db unless CLIB_DEBUG > 2 - turn off the gmond plugin - clean up unused vars in vpp/api Change-Id: I66451fcfd6ee64a12466c2d6c209050e3cdb74b7 Signed-off-by: Dave Wallace Signed-off-by: Dave Barach --- Makefile | 9 +++++++-- build-data/platforms/vpp.mk | 2 +- src/svm/ssvm.c | 11 ++++++++++- src/svm/svm.c | 38 +++++++++++++++++++++++++++++++------- src/svm/svm_fifo_segment.c | 2 +- src/svm/svmdb.c | 15 ++++++++++----- src/vlib/buffer.h | 18 +++++++++--------- src/vpp.am | 6 +++--- src/vpp/api/api.c | 1 - src/vpp/api/api_main.c | 2 -- src/vpp/api/custom_dump.c | 8 +------- src/vpp/stats/stats.c | 1 - 12 files changed, 73 insertions(+), 40 deletions(-) (limited to 'src/svm') diff --git a/Makefile b/Makefile index 0d21f335..46c51dd8 100644 --- a/Makefile +++ b/Makefile @@ -55,8 +55,10 @@ else ifeq ($(filter rhel centos fedora opensuse,$(OS_ID)),$(OS_ID)) PKG=rpm endif +# +libganglia1-dev if building the gmond plugin + DEB_DEPENDS = curl build-essential autoconf automake bison libssl-dev ccache -DEB_DEPENDS += debhelper dkms git libtool libganglia1-dev libapr1-dev dh-systemd +DEB_DEPENDS += debhelper dkms git libtool libapr1-dev dh-systemd DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope pkg-config DEB_DEPENDS += lcov chrpath autoconf nasm indent DEB_DEPENDS += python-all python-dev python-virtualenv python-pip libffi6 @@ -79,9 +81,12 @@ else RPM_DEPENDS += python-virtualenv RPM_DEPENDS_GROUPS = 'Development Tools' endif + +# +ganglia-devel if building the ganglia plugin + RPM_DEPENDS += chrpath libffi-devel rpm-build RPM_DEPENDS += https://kojipkgs.fedoraproject.org//packages/nasm/2.12.02/2.fc26/x86_64/nasm-2.12.02-2.fc26.x86_64.rpm -EPEL_DEPENDS = libconfuse-devel ganglia-devel epel-rpm-macros +EPEL_DEPENDS = libconfuse-devel epel-rpm-macros ifeq ($(filter rhel centos,$(OS_ID)),$(OS_ID)) EPEL_DEPENDS += lcov else diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index 4577fa2e..acbe0e7f 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -36,7 +36,7 @@ vpp_uses_dpdk = yes # Uncoment to enable building unit tests # vpp_enable_tests = yes -vpp_root_packages = vpp gmod +vpp_root_packages = vpp # DPDK configuration parameters # vpp_uses_dpdk_cryptodev_sw = yes diff --git a/src/svm/ssvm.c b/src/svm/ssvm.c index 6cda1f27..23e3cf44 100644 --- a/src/svm/ssvm.c +++ b/src/svm/ssvm.c @@ -29,6 +29,9 @@ ssvm_master_init (ssvm_private_t * ssvm, u32 master_index) if (ssvm->ssvm_size == 0) return SSVM_API_ERROR_NO_SIZE; + if (CLIB_DEBUG > 1) + clib_warning ("[%d] creating segment '%s'", getpid (), ssvm->name); + ssvm_filename = format (0, "/dev/shm/%s%c", ssvm->name, 0); unlink ((char *) ssvm_filename); @@ -176,12 +179,18 @@ ssvm_delete (ssvm_private_t * ssvm) fn = format (0, "/dev/shm/%s%c", ssvm->name, 0); + if (CLIB_DEBUG > 1) + clib_warning ("[%d] unlinking ssvm (%s) backing file '%s'", getpid (), + ssvm->name, fn); + /* Throw away the backing file */ if (unlink ((char *) fn) < 0) clib_unix_warning ("unlink segment '%s'", ssvm->name); - munmap ((void *) ssvm->requested_va, ssvm->ssvm_size); vec_free (fn); + vec_free (ssvm->name); + + munmap ((void *) ssvm->requested_va, ssvm->ssvm_size); } diff --git a/src/svm/svm.c b/src/svm/svm.c index c96135cf..600fa744 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -458,14 +458,15 @@ svm_map_region (svm_map_region_args_t * a) struct 
stat stat; struct timespec ts, tsrem; - if (CLIB_DEBUG > 1) - clib_warning ("[%d] map region %s", getpid (), a->name); - ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size); ASSERT (a->name); shm_name = shm_name_from_svm_map_region_args (a); + if (CLIB_DEBUG > 1) + clib_warning ("[%d] map region %s: shm_open (%s)", + getpid (), a->name, shm_name); + svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777); if (svm_fd >= 0) @@ -947,6 +948,29 @@ svm_region_find_or_create (svm_map_region_args_t * a) return (rp); } +void +svm_region_unlink (svm_region_t * rp) +{ + svm_map_region_args_t _a, *a = &_a; + svm_main_region_t *mp; + u8 *shm_name; + + ASSERT (root_rp); + ASSERT (rp); + ASSERT (vec_c_string_is_terminated (rp->region_name)); + + mp = root_rp->data_base; + ASSERT (mp); + + a->root_path = (char *) mp->root_path; + a->name = rp->region_name; + shm_name = shm_name_from_svm_map_region_args (a); + if (CLIB_DEBUG > 1) + clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name); + shm_unlink ((const char *) shm_name); + vec_free (shm_name); +} + /* * svm_region_unmap * @@ -1056,7 +1080,7 @@ found: vec_free (name); region_unlock (rp); - shm_unlink (rp->region_name); + svm_region_unlink (rp); munmap ((void *) virtual_base, virtual_size); region_unlock (root_rp); svm_pop_heap (oldheap); @@ -1071,9 +1095,6 @@ found: /* * svm_region_exit - * There is no clean way to unlink the - * root region when all clients go away, - * so remove the pid entry and call it a day. */ void svm_region_exit () @@ -1116,6 +1137,9 @@ svm_region_exit () found: + if (vec_len (root_rp->client_pids) == 0) + svm_region_unlink (root_rp); + region_unlock (root_rp); svm_pop_heap (oldheap); diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 69d4ecb9..c80374a7 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -105,7 +105,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) s->ssvm.ssvm_size = a->segment_size; s->ssvm.i_am_master = 1; s->ssvm.my_pid = getpid (); - s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.name = format (0, "%s", a->segment_name); s->ssvm.requested_va = sm->next_baseva; rv = ssvm_master_init (&s->ssvm, s - sm->segments); diff --git a/src/svm/svmdb.c b/src/svm/svmdb.c index 03dfe7c3..043b0924 100644 --- a/src/svm/svmdb.c +++ b/src/svm/svmdb.c @@ -106,11 +106,16 @@ svmdb_map (svmdb_map_args_t * dba) } /* Nope, it's our problem... */ - /* Add a bogus client (pid=0) so the svm won't be deallocated */ - oldheap = svm_push_pvt_heap (db_rp); - vec_add1 (client->db_rp->client_pids, 0); - svm_pop_heap (oldheap); - + if (CLIB_DEBUG > 2) + { + /* Add a bogus client (pid=0) so the svm won't be deallocated */ + clib_warning + ("[%d] adding fake client (pid=0) so '%s' won't be unlinked", + getpid (), db_rp->region_name); + oldheap = svm_push_pvt_heap (db_rp); + vec_add1 (client->db_rp->client_pids, 0); + svm_pop_heap (oldheap); + } oldheap = svm_push_data_heap (db_rp); vec_validate (hp, 0); diff --git a/src/vlib/buffer.h b/src/vlib/buffer.h index c810db4e..77528e77 100644 --- a/src/vlib/buffer.h +++ b/src/vlib/buffer.h @@ -87,17 +87,17 @@ typedef struct /* any change to the following line requres update of * vlib_buffer_get_free_list_index(...) and * vlib_buffer_set_free_list_index(...) 
functions */ -#define VLIB_BUFFER_FREE_LIST_INDEX_MASK ((1 << 4) - 1) +#define VLIB_BUFFER_FREE_LIST_INDEX_MASK ((1 << 5) - 1) -#define VLIB_BUFFER_IS_TRACED (1 << 4) -#define VLIB_BUFFER_LOG2_NEXT_PRESENT (5) +#define VLIB_BUFFER_IS_TRACED (1 << 5) +#define VLIB_BUFFER_LOG2_NEXT_PRESENT (6) #define VLIB_BUFFER_NEXT_PRESENT (1 << VLIB_BUFFER_LOG2_NEXT_PRESENT) -#define VLIB_BUFFER_IS_RECYCLED (1 << 6) -#define VLIB_BUFFER_TOTAL_LENGTH_VALID (1 << 7) -#define VLIB_BUFFER_REPL_FAIL (1 << 8) -#define VLIB_BUFFER_RECYCLE (1 << 9) -#define VLIB_BUFFER_FLOW_REPORT (1 << 10) -#define VLIB_BUFFER_EXT_HDR_VALID (1 << 11) +#define VLIB_BUFFER_IS_RECYCLED (1 << 7) +#define VLIB_BUFFER_TOTAL_LENGTH_VALID (1 << 8) +#define VLIB_BUFFER_REPL_FAIL (1 << 9) +#define VLIB_BUFFER_RECYCLE (1 << 10) +#define VLIB_BUFFER_FLOW_REPORT (1 << 11) +#define VLIB_BUFFER_EXT_HDR_VALID (1 << 12) /* User defined buffer flags. */ #define LOG2_VLIB_BUFFER_FLAG_USER(n) (32 - (n)) diff --git a/src/vpp.am b/src/vpp.am index 614bd26a..10a4e311 100644 --- a/src/vpp.am +++ b/src/vpp.am @@ -33,11 +33,11 @@ if WITH_APICLI vpp/api/plugin.h endif -# comment out to disable stats upload to gmond +# uncomment to enable stats upload to gmond +# bin_vpp_SOURCES += \ +# vpp/api/gmon.c bin_vpp_CFLAGS = @APICLI@ -bin_vpp_SOURCES += \ - vpp/api/gmon.c nobase_include_HEADERS += \ vpp/api/vpe_all_api_h.h \ diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index de9a2477..4e892431 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -686,7 +686,6 @@ static void BAD_SW_IF_INDEX_LABEL; -out: REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY); } diff --git a/src/vpp/api/api_main.c b/src/vpp/api/api_main.c index ac09cd15..c355a5fd 100644 --- a/src/vpp/api/api_main.c +++ b/src/vpp/api/api_main.c @@ -232,8 +232,6 @@ unformat_sw_if_index (unformat_input_t * input, va_list * args) u32 *result = va_arg (*args, u32 *); vnet_main_t *vnm = vnet_get_main (); u32 sw_if_index = ~0; - u8 *if_name; - uword *p; if (unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index)) { diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c index 3ac8874e..7f3a58d9 100644 --- a/src/vpp/api/custom_dump.c +++ b/src/vpp/api/custom_dump.c @@ -1174,7 +1174,6 @@ static void *vl_api_sr_policy_mod_t_print (vl_api_sr_policy_mod_t * mp, void *handle) { u8 *s; - u32 weight; ip6_address_t *segments = 0, *seg; ip6_address_t *this_address = (ip6_address_t *) mp->segments; @@ -1216,8 +1215,6 @@ static void *vl_api_sr_policy_del_t_print u8 *s; s = format (0, "SCRIPT: sr_policy_del "); - u8 bsid_addr[16]; - u32 sr_policy_index; s = format (s, "To be delivered. 
Good luck."); FINISH; } @@ -2432,7 +2429,7 @@ static void *vl_api_lisp_add_del_remote_mapping_t_print (vl_api_lisp_add_del_remote_mapping_t * mp, void *handle) { u8 *s; - u32 i, rloc_num = 0; + u32 rloc_num = 0; s = format (0, "SCRIPT: lisp_add_del_remote_mapping "); @@ -2574,7 +2571,6 @@ static void *vl_api_lisp_add_del_locator_set_t_print (vl_api_lisp_add_del_locator_set_t * mp, void *handle) { u8 *s; - u32 loc_num = 0, i; s = format (0, "SCRIPT: lisp_add_del_locator_set "); @@ -2583,8 +2579,6 @@ static void *vl_api_lisp_add_del_locator_set_t_print s = format (s, "locator-set %s ", mp->locator_set_name); - loc_num = clib_net_to_host_u32 (mp->locator_num); - FINISH; } diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 38821da7..422b7b3b 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -577,7 +577,6 @@ do_ip4_fibs (stats_main_t * sm) ip4_route_t *r; fib_table_t *fib; ip4_fib_t *v4_fib; - ip_lookup_main_t *lm = &im4->lookup_main; static uword *results; vl_api_vnet_ip4_fib_counters_t *mp = 0; u32 items_this_message; -- cgit 1.2.3-korg From 68810624f84467503482b82662c980e8f0e36deb Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Mon, 24 Jul 2017 17:40:28 -0700 Subject: Make tcp active open data structures thread safe - Cleanup half-open connections and timers on the right thread - Ensure half-open connection and transport endpoint pools are thread safe - Enqueue TX events to the correct vpp thread in the builtin client - Use transport proto in transport connections instead of session type Change-Id: Id13239a206afbff6f34a38afa510fe014e4b2049 Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/svm/svm_fifo_segment.c | 6 ++ src/vnet/session/session.c | 14 +-- src/vnet/session/session.h | 3 +- src/vnet/session/session_cli.c | 2 +- src/vnet/session/session_lookup.c | 103 ++++++++------------ src/vnet/session/session_lookup.h | 5 +- src/vnet/session/transport.h | 6 +- src/vnet/session/transport_interface.c | 5 +- src/vnet/session/transport_interface.h | 2 +- src/vnet/tcp/builtin_client.c | 125 +++++++++++++++---------- src/vnet/tcp/builtin_client.h | 9 +- src/vnet/tcp/tcp.c | 166 ++++++++++++++++++++------------- src/vnet/tcp/tcp.h | 18 ++-- src/vnet/tcp/tcp_debug.h | 32 +++---- src/vnet/tcp/tcp_input.c | 36 ++++--- src/vnet/tcp/tcp_output.c | 19 +++- src/vnet/tcp/tcp_test.c | 14 +-- src/vnet/udp/udp.c | 8 +- src/vnet/udp/udp_input.c | 2 +- 19 files changed, 321 insertions(+), 254 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index c80374a7..a01e26e4 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -376,6 +376,12 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, break; } + if (CLIB_DEBUG) + { + f->master_session_index = ~0; + f->master_thread_index = ~0; + } + ssvm_pop_heap (oldheap); ssvm_unlock_non_recursive (sh); } diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 48000a6f..004c7193 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -64,7 +64,8 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, s->server_tx_fifo = server_tx_fifo; /* Initialize state machine, such as it is... 
*/ - s->session_type = tc->proto; + s->session_type = session_type_from_proto_and_ip (tc->transport_proto, + tc->is_ip4); s->session_state = SESSION_STATE_CONNECTING; s->svm_segment_index = fifo_segment_index; s->thread_index = thread_index; @@ -354,8 +355,7 @@ stream_session_init_fifos_pointers (transport_connection_t * tc, } int -stream_session_connect_notify (transport_connection_t * tc, u8 sst, - u8 is_fail) +stream_session_connect_notify (transport_connection_t * tc, u8 is_fail) { application_t *app; stream_session_t *new_s = 0; @@ -365,7 +365,7 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, handle = stream_session_half_open_lookup_handle (&tc->lcl_ip, &tc->rmt_ip, tc->lcl_port, tc->rmt_port, - tc->proto); + tc->transport_proto); if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE) { clib_warning ("This can't be good!"); @@ -391,7 +391,7 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, new_s->app_index = app->index; } - /* Notify client */ + /* Notify client application */ if (app->cb_fns.session_connected_callback (app->index, api_context, new_s, is_fail)) { @@ -406,7 +406,7 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, } /* Cleanup session lookup */ - stream_session_half_open_table_del (sst, tc); + stream_session_half_open_table_del (tc); return error; } @@ -567,7 +567,7 @@ stream_session_open (u32 app_index, session_type_t st, handle = (((u64) app_index) << 32) | (u64) tc->c_index; /* Add to the half-open lookup table */ - stream_session_half_open_table_add (st, tc, handle); + stream_session_half_open_table_add (tc, handle); *res = tc; diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index bb22f100..180b9f8a 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -274,8 +274,7 @@ stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes); u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes); -int stream_session_connect_notify (transport_connection_t * tc, u8 sst, - u8 is_fail); +int stream_session_connect_notify (transport_connection_t * tc, u8 is_fail); void stream_session_init_fifos_pointers (transport_connection_t * tc, u32 rx_pointer, u32 tx_pointer); diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index 4d432977..de564ea7 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -57,7 +57,7 @@ format_stream_session (u8 * s, va_list * args) u8 *str = 0; tp_vft = session_get_transport_vft (ss->session_type); - if (verbose == 1) + if (verbose == 1 && ss->session_state >= SESSION_STATE_ACCEPTING) str = format (0, "%-10u%-10u%-10lld", svm_fifo_max_dequeue (ss->server_rx_fifo), svm_fifo_max_enqueue (ss->server_tx_fifo), diff --git a/src/vnet/session/session_lookup.c b/src/vnet/session/session_lookup.c index b3862ee3..1ce22f80 100644 --- a/src/vnet/session/session_lookup.c +++ b/src/vnet/session/session_lookup.c @@ -107,7 +107,7 @@ always_inline void make_v4_ss_kv_from_tc (session_kv4_t * kv, transport_connection_t * t) { return make_v4_ss_kv (kv, &t->lcl_ip.ip4, &t->rmt_ip.ip4, t->lcl_port, - t->rmt_port, t->proto); + t->rmt_port, t->transport_proto); } always_inline void @@ -150,7 +150,7 @@ always_inline void make_v6_ss_kv_from_tc (session_kv6_t * kv, transport_connection_t * t) { make_v6_ss_kv (kv, &t->lcl_ip.ip6, &t->rmt_ip.ip6, t->lcl_port, - t->rmt_port, t->proto); + t->rmt_port, t->transport_proto); } /* @@ -164,23 +164,17 @@ stream_session_table_add_for_tc 
(transport_connection_t * tc, u64 value) session_kv4_t kv4; session_kv6_t kv6; - switch (tc->proto) + if (tc->is_ip4) { - case SESSION_TYPE_IP4_UDP: - case SESSION_TYPE_IP4_TCP: make_v4_ss_kv_from_tc (&kv4, tc); kv4.value = value; clib_bihash_add_del_16_8 (&sl->v4_session_hash, &kv4, 1 /* is_add */ ); - break; - case SESSION_TYPE_IP6_UDP: - case SESSION_TYPE_IP6_TCP: + } + else + { make_v6_ss_kv_from_tc (&kv6, tc); kv6.value = value; clib_bihash_add_del_48_8 (&sl->v6_session_hash, &kv6, 1 /* is_add */ ); - break; - default: - clib_warning ("Session type not supported"); - ASSERT (0); } } @@ -195,59 +189,24 @@ stream_session_table_add (session_manager_main_t * smm, stream_session_t * s, stream_session_table_add_for_tc (tc, value); } -void -stream_session_half_open_table_add (session_type_t sst, - transport_connection_t * tc, u64 value) -{ - session_lookup_t *sl = &session_lookup; - session_kv4_t kv4; - session_kv6_t kv6; - - switch (sst) - { - case SESSION_TYPE_IP4_UDP: - case SESSION_TYPE_IP4_TCP: - make_v4_ss_kv_from_tc (&kv4, tc); - kv4.value = value; - clib_bihash_add_del_16_8 (&sl->v4_half_open_hash, &kv4, - 1 /* is_add */ ); - break; - case SESSION_TYPE_IP6_UDP: - case SESSION_TYPE_IP6_TCP: - make_v6_ss_kv_from_tc (&kv6, tc); - kv6.value = value; - clib_bihash_add_del_48_8 (&sl->v6_half_open_hash, &kv6, - 1 /* is_add */ ); - break; - default: - clib_warning ("Session type not supported"); - ASSERT (0); - } -} - int stream_session_table_del_for_tc (transport_connection_t * tc) { session_lookup_t *sl = &session_lookup; session_kv4_t kv4; session_kv6_t kv6; - switch (tc->proto) + + if (tc->is_ip4) { - case SESSION_TYPE_IP4_UDP: - case SESSION_TYPE_IP4_TCP: make_v4_ss_kv_from_tc (&kv4, tc); return clib_bihash_add_del_16_8 (&sl->v4_session_hash, &kv4, 0 /* is_add */ ); - break; - case SESSION_TYPE_IP6_UDP: - case SESSION_TYPE_IP6_TCP: + } + else + { make_v6_ss_kv_from_tc (&kv6, tc); return clib_bihash_add_del_48_8 (&sl->v6_session_hash, &kv6, 0 /* is_add */ ); - break; - default: - clib_warning ("Session type not supported"); - ASSERT (0); } return 0; @@ -262,30 +221,48 @@ stream_session_table_del (stream_session_t * s) return stream_session_table_del_for_tc (ts); } + void -stream_session_half_open_table_del (u8 sst, transport_connection_t * tc) +stream_session_half_open_table_add (transport_connection_t * tc, u64 value) { session_lookup_t *sl = &session_lookup; session_kv4_t kv4; session_kv6_t kv6; - switch (sst) + if (tc->is_ip4) + { + make_v4_ss_kv_from_tc (&kv4, tc); + kv4.value = value; + clib_bihash_add_del_16_8 (&sl->v4_half_open_hash, &kv4, + 1 /* is_add */ ); + } + else + { + make_v6_ss_kv_from_tc (&kv6, tc); + kv6.value = value; + clib_bihash_add_del_48_8 (&sl->v6_half_open_hash, &kv6, + 1 /* is_add */ ); + } +} + +void +stream_session_half_open_table_del (transport_connection_t * tc) +{ + session_lookup_t *sl = &session_lookup; + session_kv4_t kv4; + session_kv6_t kv6; + + if (tc->is_ip4) { - case SESSION_TYPE_IP4_UDP: - case SESSION_TYPE_IP4_TCP: make_v4_ss_kv_from_tc (&kv4, tc); clib_bihash_add_del_16_8 (&sl->v4_half_open_hash, &kv4, 0 /* is_add */ ); - break; - case SESSION_TYPE_IP6_UDP: - case SESSION_TYPE_IP6_TCP: + } + else + { make_v6_ss_kv_from_tc (&kv6, tc); clib_bihash_add_del_48_8 (&sl->v6_half_open_hash, &kv6, 0 /* is_add */ ); - break; - default: - clib_warning ("Session type not supported"); - ASSERT (0); } } diff --git a/src/vnet/session/session_lookup.h b/src/vnet/session/session_lookup.h index 9e92dab1..cf1dc013 100644 --- a/src/vnet/session/session_lookup.h +++ 
b/src/vnet/session/session_lookup.h @@ -83,9 +83,8 @@ transport_connection_t *stream_session_half_open_lookup (ip46_address_t * lcl, void stream_session_table_add_for_tc (transport_connection_t * tc, u64 value); int stream_session_table_del_for_tc (transport_connection_t * tc); int stream_session_table_del (stream_session_t * s); -void stream_session_half_open_table_del (u8 sst, transport_connection_t * tc); -void stream_session_half_open_table_add (session_type_t sst, - transport_connection_t * tc, +void stream_session_half_open_table_del (transport_connection_t * tc); +void stream_session_half_open_table_add (transport_connection_t * tc, u64 value); void session_lookup_init (void); diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index 3895a60a..e56be338 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -31,12 +31,12 @@ typedef struct _transport_connection ip46_address_t lcl_ip; /**< Local IP */ u16 lcl_port; /**< Local port */ u16 rmt_port; /**< Remote port */ - u8 proto; /**< Protocol id (also session type) */ + u8 transport_proto; /**< Protocol id */ + u8 is_ip4; /**< Flag if IP4 connection */ u32 vrf; /**< FIB table id */ u32 s_index; /**< Parent session index */ u32 c_index; /**< Connection index in transport pool */ - u8 is_ip4; /**< Flag if IP4 connection */ u32 thread_index; /**< Worker-thread index */ fib_node_index_t rmt_fei; /**< FIB entry index for rmt */ @@ -56,7 +56,7 @@ typedef struct _transport_connection #define c_rmt_ip6 connection.rmt_ip.ip6 #define c_lcl_port connection.lcl_port #define c_rmt_port connection.rmt_port -#define c_proto connection.proto +#define c_transport_proto connection.transport_proto #define c_vrf connection.vrf #define c_state connection.state #define c_s_index connection.s_index diff --git a/src/vnet/session/transport_interface.c b/src/vnet/session/transport_interface.c index eb12aa69..ef8d1e49 100644 --- a/src/vnet/session/transport_interface.c +++ b/src/vnet/session/transport_interface.c @@ -73,9 +73,12 @@ transport_endpoint_table_del (transport_endpoint_table_t * ht, * @param vft - virtual function table */ void -session_register_transport (u8 session_type, +session_register_transport (transport_proto_t transport_proto, u8 is_ip4, const transport_proto_vft_t * vft) { + u8 session_type; + session_type = session_type_from_proto_and_ip (transport_proto, is_ip4); + vec_validate (tp_vfts, session_type); tp_vfts[session_type] = *vft; diff --git a/src/vnet/session/transport_interface.h b/src/vnet/session/transport_interface.h index b7e86ee7..661221c4 100644 --- a/src/vnet/session/transport_interface.h +++ b/src/vnet/session/transport_interface.h @@ -67,7 +67,7 @@ void transport_endpoint_table_add (transport_endpoint_table_t * ht, void transport_endpoint_table_del (transport_endpoint_table_t * ht, transport_endpoint_t * te); -void session_register_transport (u8 session_type, +void session_register_transport (transport_proto_t transport_proto, u8 is_ip4, const transport_proto_vft_t * vft); transport_proto_vft_t *session_get_transport_vft (u8 session_type); diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 744f50e7..27e20f8e 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -45,6 +45,24 @@ #define TCP_BUILTIN_CLIENT_DBG (0) +static void +signal_evt_to_cli_i (int *code) +{ + tclient_main_t *tm = &tclient_main; + ASSERT (vlib_get_thread_index () == 0); + vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, *code, 0); +} + +static 
void +signal_evt_to_cli (int code) +{ + if (vlib_get_thread_index () != 0) + vl_api_rpc_call_main_thread (signal_evt_to_cli_i, (u8 *) & code, + sizeof (code)); + else + signal_evt_to_cli_i (&code); +} + static void send_test_chunk (tclient_main_t * tm, session_t * s) { @@ -53,6 +71,7 @@ send_test_chunk (tclient_main_t * tm, session_t * s) u32 bytes_this_chunk; session_fifo_event_t evt; static int serial_number = 0; + svm_fifo_t *txf; int rv; ASSERT (vec_len (test_data) > 0); @@ -63,7 +82,8 @@ send_test_chunk (tclient_main_t * tm, session_t * s) bytes_this_chunk = bytes_this_chunk < s->bytes_to_send ? bytes_this_chunk : s->bytes_to_send; - rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, bytes_this_chunk, + txf = s->server_tx_fifo; + rv = svm_fifo_enqueue_nowait (txf, bytes_this_chunk, test_data + test_buf_offset); /* If we managed to enqueue data... */ @@ -93,15 +113,16 @@ send_test_chunk (tclient_main_t * tm, session_t * s) } /* Poke the session layer */ - if (svm_fifo_set_event (s->server_tx_fifo)) + if (svm_fifo_set_event (txf)) { /* Fabricate TX event, send to vpp */ - evt.fifo = s->server_tx_fifo; + evt.fifo = txf; evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; - if (unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, - 0 /* do wait for mutex */ )) + if (unix_shared_memory_queue_add + (tm->vpp_event_queue[txf->master_thread_index], (u8 *) & evt, + 0 /* do wait for mutex */ )) clib_warning ("could not enqueue event"); } } @@ -112,14 +133,16 @@ receive_test_chunk (tclient_main_t * tm, session_t * s) { svm_fifo_t *rx_fifo = s->server_rx_fifo; int n_read, test_bytes = 0; + u32 my_thread_index = vlib_get_thread_index (); /* Allow enqueuing of new event */ // svm_fifo_unset_event (rx_fifo); if (test_bytes) { - n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf), - tm->rx_buf); + n_read = svm_fifo_dequeue_nowait (rx_fifo, + vec_len (tm->rx_buf[my_thread_index]), + tm->rx_buf[my_thread_index]); } else { @@ -151,10 +174,12 @@ receive_test_chunk (tclient_main_t * tm, session_t * s) int i; for (i = 0; i < n_read; i++) { - if (tm->rx_buf[i] != ((s->bytes_received + i) & 0xff)) + if (tm->rx_buf[my_thread_index][i] + != ((s->bytes_received + i) & 0xff)) { clib_warning ("read %d error at byte %lld, 0x%x not 0x%x", - n_read, s->bytes_received + i, tm->rx_buf[i], + n_read, s->bytes_received + i, + tm->rx_buf[my_thread_index][i], ((s->bytes_received + i) & 0xff)); } } @@ -247,7 +272,11 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, if (s) { - stream_session_disconnect (s); + vnet_disconnect_args_t _a, *a = &_a; + a->handle = stream_session_handle (s); + a->app_index = tm->app_index; + vnet_disconnect_session (a); + vec_delete (connections_this_batch, 1, i); i--; __sync_fetch_and_add (&tm->ready_connections, -1); @@ -258,9 +287,7 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, /* Kick the debug CLI process */ if (tm->ready_connections == 0) { - tm->test_end_time = vlib_time_now (vm); - vlib_process_signal_event (vm, tm->cli_node_index, - 2, 0 /* data */ ); + signal_evt_to_cli (2); } } } @@ -369,27 +396,31 @@ static int tcp_test_clients_init (vlib_main_t * vm) { tclient_main_t *tm = &tclient_main; - vlib_thread_main_t *thread_main = vlib_get_thread_main (); + vlib_thread_main_t *vtm = vlib_get_thread_main (); + u32 num_threads; int i; tclient_api_hookup (vm); if (create_api_loopback (tm)) return -1; + num_threads = 1 /* main thread */ + vtm->n_threads; + /* Init test data. 
Big buffer */ vec_validate (tm->connect_test_data, 1024 * 1024 - 1); for (i = 0; i < vec_len (tm->connect_test_data); i++) tm->connect_test_data[i] = i & 0xff; - tm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); + vec_validate (tm->rx_buf, num_threads - 1); + for (i = 0; i < num_threads; i++) + vec_validate (tm->rx_buf[i], vec_len (tm->connect_test_data) - 1); tm->is_init = 1; - tm->vlib_main = vm; - vec_validate (tm->connection_index_by_thread, thread_main->n_vlib_mains); - vec_validate (tm->connections_this_batch_by_thread, - thread_main->n_vlib_mains); + vec_validate (tm->connection_index_by_thread, vtm->n_vlib_mains); + vec_validate (tm->connections_this_batch_by_thread, vtm->n_vlib_mains); + vec_validate (tm->vpp_event_queue, vtm->n_vlib_mains); + return 0; } @@ -400,23 +431,28 @@ builtin_session_connected_callback (u32 app_index, u32 api_context, tclient_main_t *tm = &tclient_main; session_t *session; u32 session_index; - int i; + u8 thread_index = vlib_get_thread_index (); + + ASSERT (s->thread_index == thread_index); if (is_fail) { clib_warning ("connection %d failed!", api_context); - vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, -1, - 0 /* data */ ); - return -1; + signal_evt_to_cli (-1); + return 0; } - tm->our_event_queue = session_manager_get_vpp_event_queue (s->thread_index); - tm->vpp_event_queue = session_manager_get_vpp_event_queue (s->thread_index); + if (!tm->vpp_event_queue[thread_index]) + tm->vpp_event_queue[thread_index] = + session_manager_get_vpp_event_queue (thread_index); /* * Setup session */ + clib_spinlock_lock_if_init (&tm->sessions_lock); pool_get (tm->sessions, session); + clib_spinlock_unlock_if_init (&tm->sessions_lock); + memset (session, 0, sizeof (*session)); session_index = session - tm->sessions; session->bytes_to_send = tm->bytes_to_send; @@ -427,32 +463,13 @@ builtin_session_connected_callback (u32 app_index, u32 api_context, session->server_tx_fifo->client_session_index = session_index; session->vpp_session_handle = stream_session_handle (s); - /* Add it to the session lookup table */ - hash_set (tm->session_index_by_vpp_handles, session->vpp_session_handle, - session_index); - - if (tm->ready_connections == tm->expected_connections - 1) - { - vlib_thread_main_t *thread_main = vlib_get_thread_main (); - int thread_index; - - thread_index = 0; - for (i = 0; i < pool_elts (tm->sessions); i++) - { - vec_add1 (tm->connection_index_by_thread[thread_index], i); - thread_index++; - if (thread_index == thread_main->n_vlib_mains) - thread_index = 0; - } - } + vec_add1 (tm->connection_index_by_thread[thread_index], session_index); __sync_fetch_and_add (&tm->ready_connections, 1); if (tm->ready_connections == tm->expected_connections) { tm->run_test = 1; - tm->test_start_time = vlib_time_now (tm->vlib_main); /* Signal the CLI process that the action is starting... 
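Because connected callbacks can now run on any worker, the shared session pool above is grabbed under a spinlock and initialized outside of it. A minimal standalone model of that locking pattern, with a plain pthread spinlock and a fixed-size array standing in for the vppinfra pool (all names invented for the example):

#include <pthread.h>
#include <stdio.h>

#define MAX_SESSIONS 1024

typedef struct { int in_use; unsigned bytes_to_send; } demo_session_t;

static demo_session_t sessions[MAX_SESSIONS];
static pthread_spinlock_t sessions_lock;

static demo_session_t *
session_alloc (void)
{
  demo_session_t *s = 0;
  int i;

  /* Hold the lock only while claiming a free slot... */
  pthread_spin_lock (&sessions_lock);
  for (i = 0; i < MAX_SESSIONS; i++)
    if (!sessions[i].in_use)
      {
        sessions[i].in_use = 1;
        s = &sessions[i];
        break;
      }
  pthread_spin_unlock (&sessions_lock);

  /* ...and initialize outside of it, as the callback above does. */
  if (s)
    s->bytes_to_send = 0;
  return s;
}

int
main (void)
{
  demo_session_t *s;

  pthread_spin_init (&sessions_lock, PTHREAD_PROCESS_PRIVATE);
  s = session_alloc ();
  if (s)
    printf ("got slot %ld\n", (long) (s - sessions));
  return 0;
}
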
*/ - vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, 1, - 0 /* data */ ); + signal_evt_to_cli (1); } return 0; @@ -606,7 +623,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->connections_per_batch = 1000; tm->private_segment_count = 0; tm->private_segment_size = 0; - + tm->vlib_main = vm; + if (thread_main->n_vlib_mains > 1) + clib_spinlock_init (&tm->sessions_lock); vec_free (tm->connect_uri); while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) @@ -668,7 +687,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm, start_tx_pthread (); #endif + vlib_worker_thread_barrier_sync (vm); vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); + vlib_worker_thread_barrier_release (vm); if (tm->test_client_attached == 0) { @@ -688,9 +709,8 @@ test_tcp_clients_command_fn (vlib_main_t * vm, clients_connect (vm, uri, n_clients); /* Park until the sessions come up, or ten seconds elapse... */ - vlib_process_wait_for_event_or_clock (vm, 10.0 /* timeout, seconds */ ); + vlib_process_wait_for_event_or_clock (vm, 10 /* timeout, seconds */ ); event_type = vlib_process_get_events (vm, &event_data); - switch (event_type) { case ~0: @@ -699,6 +719,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, goto cleanup; case 1: + tm->test_start_time = vlib_time_now (tm->vlib_main); vlib_cli_output (vm, "Test started at %.6f", tm->test_start_time); break; @@ -710,7 +731,6 @@ test_tcp_clients_command_fn (vlib_main_t * vm, /* Now wait for the sessions to finish... */ vlib_process_wait_for_event_or_clock (vm, cli_timeout); event_type = vlib_process_get_events (vm, &event_data); - switch (event_type) { case ~0: @@ -719,6 +739,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, goto cleanup; case 2: + tm->test_end_time = vlib_time_now (vm); vlib_cli_output (vm, "Test finished at %.6f", tm->test_end_time); break; @@ -753,6 +774,7 @@ cleanup: vec_reset_length (tm->connection_index_by_thread[i]); vec_reset_length (tm->connections_this_batch_by_thread[i]); } + pool_free (tm->sessions); return 0; @@ -765,6 +787,7 @@ VLIB_CLI_COMMAND (test_clients_command, static) = .short_help = "test tcp clients [nclients %d]" "[iterations %d] [bytes %d] [uri tcp://6.0.1.1/1234]", .function = test_tcp_clients_command_fn, + .is_mp_safe = 1, }; /* *INDENT-ON* */ diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 38af231d..06d239ef 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -48,8 +48,7 @@ typedef struct * Application setup parameters */ unix_shared_memory_queue_t *vl_input_queue; /**< vpe input queue */ - unix_shared_memory_queue_t *our_event_queue; /**< Our event queue */ - unix_shared_memory_queue_t *vpp_event_queue; /**< $$$ single thread */ + unix_shared_memory_queue_t **vpp_event_queue; u32 cli_node_index; /**< cli process node index */ u32 my_client_index; /**< loopback API client handle */ @@ -70,9 +69,9 @@ typedef struct /* * Test state variables */ - session_t *sessions; /**< Sessions pool */ - u8 *rx_buf; /**< intermediate rx buffer */ - uword *session_index_by_vpp_handles; /**< Hash table for disconnecting */ + session_t *sessions; /**< Session pool, shared */ + clib_spinlock_t sessions_lock; + u8 **rx_buf; /**< intermediate rx buffers */ u8 *connect_test_data; /**< Pre-computed test data */ u32 **connection_index_by_thread; u32 **connections_this_batch_by_thread; /**< active connection batch */ diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 6d1cfa07..59b20747 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ 
-37,15 +37,14 @@ tcp_connection_bind (u32 session_index, transport_endpoint_t * lcl) { listener->c_lcl_ip4.as_u32 = lcl->ip.ip4.as_u32; listener->c_is_ip4 = 1; - listener->c_proto = SESSION_TYPE_IP4_TCP; } else { clib_memcpy (&listener->c_lcl_ip6, &lcl->ip.ip6, sizeof (ip6_address_t)); - listener->c_proto = SESSION_TYPE_IP6_TCP; - } + } + listener->c_transport_proto = TRANSPORT_PROTO_TCP; listener->c_s_index = session_index; listener->state = TCP_STATE_LISTEN; @@ -95,6 +94,71 @@ tcp_session_get_listener (u32 listener_index) return &tc->connection; } +always_inline void +transport_endpoint_del (u32 tepi) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + clib_spinlock_lock_if_init (&tm->local_endpoints_lock); + pool_put_index (tm->local_endpoints, tepi); + clib_spinlock_unlock_if_init (&tm->local_endpoints_lock); +} + +always_inline transport_endpoint_t * +transport_endpoint_new (void) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + transport_endpoint_t *tep; + pool_get (tm->local_endpoints, tep); + return tep; +} + +/** + * Cleanup half-open connection + * + */ +void +tcp_half_open_connection_del (tcp_connection_t * tc) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + clib_spinlock_lock_if_init (&tm->half_open_lock); + pool_put_index (tm->half_open_connections, tc->c_c_index); + if (CLIB_DEBUG) + memset (tc, 0xFA, sizeof (*tc)); + clib_spinlock_unlock_if_init (&tm->half_open_lock); +} + +/** + * Try to cleanup half-open connection + * + * If called from a thread that doesn't own tc, the call won't have any + * effect. + * + * @param tc - connection to be cleaned up + * @return non-zero if cleanup failed. + */ +int +tcp_half_open_connection_cleanup (tcp_connection_t * tc) +{ + /* Make sure this is the owning thread */ + if (tc->c_thread_index != vlib_get_thread_index ()) + return 1; + tcp_timer_reset (tc, TCP_TIMER_ESTABLISH); + tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT_SYN); + tcp_half_open_connection_del (tc); + return 0; +} + +tcp_connection_t * +tcp_half_open_connection_new (void) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + tcp_connection_t *tc = 0; + pool_get (tm->half_open_connections, tc); + memset (tc, 0, sizeof (*tc)); + tc->c_c_index = tc - tm->half_open_connections; + return tc; +} + /** * Cleans up connection state. * @@ -110,26 +174,28 @@ tcp_connection_cleanup (tcp_connection_t * tc) /* Cleanup local endpoint if this was an active connect */ tepi = transport_endpoint_lookup (&tm->local_endpoints_table, &tc->c_lcl_ip, tc->c_lcl_port); - - /*XXX lock */ if (tepi != TRANSPORT_ENDPOINT_INVALID_INDEX) { tep = pool_elt_at_index (tm->local_endpoints, tepi); transport_endpoint_table_del (&tm->local_endpoints_table, tep); - pool_put (tm->local_endpoints, tep); + transport_endpoint_del (tepi); } - /* Make sure all timers are cleared */ - tcp_connection_timers_reset (tc); - - /* Check if half-open */ + /* Check if connection is not yet fully established */ if (tc->state == TCP_STATE_SYN_SENT) { - tcp_half_open_connection_del (tc); + /* Try to remove the half-open connection. If this is not the owning + * thread, tc won't be removed. Retransmit or establish timers will + * eventually expire and call again cleanup on the right thread. 
*/ + tcp_half_open_connection_cleanup (tc); } else { int thread_index = tc->c_thread_index; + + /* Make sure all timers are cleared */ + tcp_connection_timers_reset (tc); + /* Poison the entry */ if (CLIB_DEBUG > 0) memset (tc, 0xFA, sizeof (*tc)); @@ -152,32 +218,6 @@ tcp_connection_del (tcp_connection_t * tc) tcp_connection_cleanup (tc); } -/** - * Cleanup half-open connection - */ -void -tcp_half_open_connection_del (tcp_connection_t * tc) -{ - tcp_main_t *tm = vnet_get_tcp_main (); - if (CLIB_DEBUG) - memset (tc, 0xFA, sizeof (*tc)); - clib_spinlock_lock_if_init (&tm->half_open_lock); - pool_put (tm->half_open_connections, tc); - clib_spinlock_unlock_if_init (&tm->half_open_lock); -} - -tcp_connection_t * -tcp_half_open_connection_new () -{ - tcp_main_t *tm = vnet_get_tcp_main (); - tcp_connection_t *tc = 0; - clib_spinlock_lock_if_init (&tm->half_open_lock); - pool_get (tm->half_open_connections, tc); - clib_spinlock_unlock_if_init (&tm->half_open_lock); - memset (tc, 0, sizeof (*tc)); - return tc; -} - tcp_connection_t * tcp_connection_new (u8 thread_index) { @@ -207,9 +247,7 @@ tcp_connection_reset (tcp_connection_t * tc) tcp_connection_cleanup (tc); break; case TCP_STATE_SYN_SENT: - /* XXX remove sst from call */ - stream_session_connect_notify (&tc->connection, tc->connection.proto, - 1 /* fail */ ); + stream_session_connect_notify (&tc->connection, 1 /* fail */ ); tcp_connection_cleanup (tc); break; case TCP_STATE_ESTABLISHED: @@ -225,7 +263,7 @@ tcp_connection_reset (tcp_connection_t * tc) stream_session_reset_notify (&tc->connection); /* Wait for cleanup from session layer but not forever */ - tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME); + tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME); break; case TCP_STATE_CLOSED: return; @@ -325,8 +363,9 @@ ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4) * table to mark the pair as used. */ int -tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) +tcp_allocate_local_port (ip46_address_t * ip) { + tcp_main_t *tm = vnet_get_tcp_main (); transport_endpoint_t *tep; u32 time_now, tei; u16 min = 1024, max = 65535; /* XXX configurable ? 
*/ @@ -338,10 +377,6 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) /* Only support active opens from thread 0 */ ASSERT (vlib_get_thread_index () == 0); - /* Start at random point or max */ - pool_get (tm->local_endpoints, tep); - clib_memcpy (&tep->ip, ip, sizeof (*ip)); - /* Search for first free slot */ for (; tries >= 0; tries--) { @@ -355,21 +390,22 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) break; } - tep->port = port; - /* Look it up */ - tei = transport_endpoint_lookup (&tm->local_endpoints_table, &tep->ip, - tep->port); + tei = transport_endpoint_lookup (&tm->local_endpoints_table, ip, port); /* If not found, we're done */ if (tei == TRANSPORT_ENDPOINT_INVALID_INDEX) { + clib_spinlock_lock_if_init (&tm->local_endpoints_lock); + tep = transport_endpoint_new (); + clib_memcpy (&tep->ip, ip, sizeof (*ip)); + tep->port = port; transport_endpoint_table_add (&tm->local_endpoints_table, tep, tep - tm->local_endpoints); + clib_spinlock_unlock_if_init (&tm->local_endpoints_lock); + return tep->port; } } - /* No free ports */ - pool_put (tm->local_endpoints, tep); return -1; } @@ -592,7 +628,7 @@ tcp_connection_open (transport_endpoint_t * rmt) } /* Allocate source port */ - lcl_port = tcp_allocate_local_port (tm, &lcl_addr); + lcl_port = tcp_allocate_local_port (&lcl_addr); if (lcl_port < 1) { clib_warning ("Failed to allocate src port"); @@ -602,16 +638,14 @@ tcp_connection_open (transport_endpoint_t * rmt) /* * Create connection and send SYN */ - + clib_spinlock_lock_if_init (&tm->half_open_lock); tc = tcp_half_open_connection_new (); - clib_memcpy (&tc->c_rmt_ip, &rmt->ip, sizeof (ip46_address_t)); clib_memcpy (&tc->c_lcl_ip, &lcl_addr, sizeof (ip46_address_t)); tc->c_rmt_port = clib_host_to_net_u16 (rmt->port); tc->c_lcl_port = clib_host_to_net_u16 (lcl_port); - tc->c_c_index = tc - tm->half_open_connections; tc->c_is_ip4 = rmt->is_ip4; - tc->c_proto = rmt->is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; + tc->c_transport_proto = TRANSPORT_PROTO_TCP; tc->c_vrf = rmt->vrf; /* The other connection vars will be initialized after SYN ACK */ tcp_connection_timers_init (tc); @@ -619,6 +653,7 @@ tcp_connection_open (transport_endpoint_t * rmt) TCP_EVT_DBG (TCP_EVT_OPEN, tc); tc->state = TCP_STATE_SYN_SENT; tcp_send_syn (tc); + clib_spinlock_unlock_if_init (&tm->half_open_lock); return tc->c_c_index; } @@ -1057,16 +1092,12 @@ void tcp_timer_establish_handler (u32 conn_index) { tcp_connection_t *tc; - u8 sst; tc = tcp_half_open_connection_get (conn_index); tc->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID; ASSERT (tc->state == TCP_STATE_SYN_SENT); - - sst = tc->c_is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; - stream_session_connect_notify (&tc->connection, sst, 1 /* fail */ ); - + stream_session_connect_notify (&tc->connection, 1 /* fail */ ); tcp_connection_cleanup (tc); } @@ -1077,6 +1108,8 @@ tcp_timer_waitclose_handler (u32 conn_index) tcp_connection_t *tc; tc = tcp_connection_get (conn_index, thread_index); + if (!tc) + return; tc->timers[TCP_TIMER_WAITCLOSE] = TCP_TIMER_HANDLE_INVALID; /* Session didn't come back with a close(). 
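tcp_allocate_local_port() above now creates the endpoint and inserts it into the table only while holding local_endpoints_lock. The sketch below is a simplified standalone model that does the check and the claim under one mutex; the vpp code can leave the lookup itself unlocked because active opens are restricted to the main thread. Names here are invented for the example:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PORT_MIN 1024
#define PORT_MAX 65535

static unsigned char port_in_use[PORT_MAX + 1];
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns an allocated port, or -1 if the random search gives up. */
static int
allocate_local_port (void)
{
  int tries, port;

  for (tries = 0; tries < 10; tries++)
    {
      port = PORT_MIN + rand () % (PORT_MAX - PORT_MIN + 1);

      pthread_mutex_lock (&port_lock);
      if (!port_in_use[port])
        {
          port_in_use[port] = 1;   /* claim it while still holding the lock */
          pthread_mutex_unlock (&port_lock);
          return port;
        }
      pthread_mutex_unlock (&port_lock);
    }
  return -1;
}

int
main (void)
{
  srand ((unsigned) time (0));
  printf ("allocated port %d\n", allocate_local_port ());
  return 0;
}
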
Send FIN either way @@ -1180,8 +1213,8 @@ tcp_main_enable (vlib_main_t * vm) ip4_register_protocol (IP_PROTOCOL_TCP, tcp4_input_node.index); /* Register as transport with session layer */ - session_register_transport (SESSION_TYPE_IP4_TCP, &tcp_proto); - session_register_transport (SESSION_TYPE_IP6_TCP, &tcp_proto); + session_register_transport (TRANSPORT_PROTO_TCP, 1, &tcp_proto); + session_register_transport (TRANSPORT_PROTO_TCP, 0, &tcp_proto); /* * Initialize data structures @@ -1227,7 +1260,10 @@ tcp_main_enable (vlib_main_t * vm) 200000 /* $$$$ config parameter nbuckets */ , (64 << 20) /*$$$ config parameter table size */ ); if (num_threads > 1) - clib_spinlock_init (&tm->half_open_lock); + { + clib_spinlock_init (&tm->half_open_lock); + clib_spinlock_init (&tm->local_endpoints_lock); + } return error; } diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 89c30616..4fa681f8 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -115,7 +115,8 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler; _(SENT_RCV_WND0, "Sent 0 receive window") \ _(RECOVERY, "Recovery on") \ _(FAST_RECOVERY, "Fast Recovery on") \ - _(FR_1_SMSS, "Sent 1 SMSS") + _(FR_1_SMSS, "Sent 1 SMSS") \ + _(HALF_OPEN_DONE, "Half-open completed") typedef enum _tcp_connection_flag_bits { @@ -381,6 +382,7 @@ typedef struct _tcp_main /* Local endpoints lookup table */ transport_endpoint_table_t local_endpoints_table; + clib_spinlock_t local_endpoints_lock; /* Congestion control algorithms registered */ tcp_cc_algorithm_t *cc_algos; @@ -430,7 +432,8 @@ clib_error_t *vnet_tcp_enable_disable (vlib_main_t * vm, u8 is_en); always_inline tcp_connection_t * tcp_connection_get (u32 conn_index, u32 thread_index) { - if (pool_is_free_index (tcp_main.connections[thread_index], conn_index)) + if (PREDICT_FALSE + (pool_is_free_index (tcp_main.connections[thread_index], conn_index))) return 0; return pool_elt_at_index (tcp_main.connections[thread_index], conn_index); } @@ -454,7 +457,7 @@ tcp_get_connection_from_transport (transport_connection_t * tconn) void tcp_connection_close (tcp_connection_t * tc); void tcp_connection_cleanup (tcp_connection_t * tc); void tcp_connection_del (tcp_connection_t * tc); -void tcp_half_open_connection_del (tcp_connection_t * tc); +int tcp_half_open_connection_cleanup (tcp_connection_t * tc); tcp_connection_t *tcp_connection_new (u8 thread_index); void tcp_connection_reset (tcp_connection_t * tc); @@ -473,9 +476,12 @@ tcp_listener_get (u32 tli) always_inline tcp_connection_t * tcp_half_open_connection_get (u32 conn_index) { - if (pool_is_free_index (tcp_main.half_open_connections, conn_index)) - return 0; - return pool_elt_at_index (tcp_main.half_open_connections, conn_index); + tcp_connection_t *tc = 0; + clib_spinlock_lock_if_init (&tcp_main.half_open_lock); + if (!pool_is_free_index (tcp_main.half_open_connections, conn_index)) + tc = pool_elt_at_index (tcp_main.half_open_connections, conn_index); + clib_spinlock_unlock_if_init (&tcp_main.half_open_lock); + return tc; } void tcp_make_ack (tcp_connection_t * ts, vlib_buffer_t * b); diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index e3da56f4..fc36eb29 100755 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -19,9 +19,9 @@ #include #define TCP_DEBUG (1) -#define TCP_DEBUG_SM (2) -#define TCP_DEBUG_CC (0) -#define TCP_DEBUG_CC_STAT (0) +#define TCP_DEBUG_SM (0) +#define TCP_DEBUG_CC (1) +#define TCP_DEBUG_CC_STAT (1) #define foreach_tcp_dbg_evt \ _(INIT, "") \ @@ -197,6 +197,19 @@ typedef enum 
_tcp_dbg_evt ed->data[0] = _tc->c_c_index; \ } +#define TCP_EVT_SYN_RCVD_HANDLER(_tc, ...) \ +{ \ + TCP_EVT_INIT_HANDLER(_tc, 0); \ + ELOG_TYPE_DECLARE (_e) = \ + { \ + .format = "syn-rx: irs %u", \ + .format_args = "i4", \ + }; \ + DECLARE_ETD(_tc, _e, 1); \ + ed->data[0] = _tc->irs; \ + TCP_EVT_STATE_CHANGE_HANDLER(_tc); \ +} + #define TCP_EVT_UNBIND_HANDLER(_tc, ...) \ { \ TCP_EVT_DEALLOC_HANDLER(_tc); \ @@ -258,19 +271,6 @@ typedef enum _tcp_dbg_evt ed->data[0] = _tc->state; \ } -#define TCP_EVT_SYN_RCVD_HANDLER(_tc, ...) \ -{ \ - TCP_EVT_INIT_HANDLER(_tc, 0); \ - ELOG_TYPE_DECLARE (_e) = \ - { \ - .format = "syn-rx: irs %u", \ - .format_args = "i4", \ - }; \ - DECLARE_ETD(_tc, _e, 1); \ - ed->data[0] = _tc->irs; \ - TCP_EVT_STATE_CHANGE_HANDLER(_tc); \ -} - #define TCP_EVT_SYN_SENT_HANDLER(_tc, ...) \ { \ ELOG_TYPE_DECLARE (_e) = \ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index d32b4fc8..6c59d70f 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1724,9 +1724,13 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */ tc0->state = TCP_STATE_CLOSE_WAIT; TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0); - tc0->rcv_nxt += (vnet_buffer (b0)->tcp.data_len == 0); + if (vnet_buffer (b0)->tcp.data_len == 0) + { + tc0->rcv_nxt += 1; + next0 = TCP_ESTABLISHED_NEXT_DROP; + } stream_session_disconnect_notify (&tc0->connection); - tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); } done: @@ -1819,7 +1823,6 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_main_t *tm = vnet_get_tcp_main (); u32 n_left_from, next_index, *from, *to_next; u32 my_thread_index = vm->thread_index, errors = 0; - u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -1936,10 +1939,6 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (tcp_options_parse (tcp0, &tc0->rcv_opts)) goto drop; - /* Stop connection establishment and retransmit timers */ - tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH); - tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT_SYN); - /* Valid SYN or SYN-ACK. Move connection from half-open pool to * current thread pool. */ pool_get (tm->connections[my_thread_index], new_tc0); @@ -1948,7 +1947,14 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, new_tc0->c_thread_index = my_thread_index; new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end; new_tc0->irs = seq0; - tcp_half_open_connection_del (tc0); + new_tc0->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID; + new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = + TCP_TIMER_HANDLE_INVALID; + + /* If this is not the owning thread, wait for syn retransmit to + * expire and cleanup then */ + if (tcp_half_open_connection_cleanup (tc0)) + tc0->flags |= TCP_CONN_HALF_OPEN_DONE; if (tcp_opts_tstamp (&new_tc0->rcv_opts)) { @@ -1980,11 +1986,10 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Notify app that we have connection. 
If session layer can't * allocate session send reset */ - if (stream_session_connect_notify (&new_tc0->connection, sst, - 0)) + if (stream_session_connect_notify (&new_tc0->connection, 0)) { + tcp_send_reset (new_tc0, b0, is_ip4); tcp_connection_cleanup (new_tc0); - tcp_send_reset (tc0, b0, is_ip4); goto drop; } @@ -2002,8 +2007,7 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, new_tc0->state = TCP_STATE_SYN_RCVD; /* Notify app that we have connection */ - if (stream_session_connect_notify - (&new_tc0->connection, sst, 0)) + if (stream_session_connect_notify (&new_tc0->connection, 0)) { tcp_connection_cleanup (new_tc0); tcp_send_reset (tc0, b0, is_ip4); @@ -2250,6 +2254,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (tc0->snd_una == tc0->snd_una_max) { ASSERT (tcp_fin (tcp0)); + tc0->rcv_nxt += 1; tc0->state = TCP_STATE_FIN_WAIT_2; TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0); @@ -2263,6 +2268,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, * acknowledged ("ok") but do not delete the TCB. */ if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) goto drop; + /* check if rtx queue is empty and ack CLOSE TODO */ break; case TCP_STATE_CLOSE_WAIT: @@ -2384,7 +2390,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Got FIN, send ACK! */ tc0->state = TCP_STATE_TIME_WAIT; tcp_connection_timers_reset (tc0); - tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); tcp_make_ack (tc0, b0); next0 = tcp_next_output (is_ip4); TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0); @@ -2745,7 +2751,7 @@ tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr) if ((tmp = stream_session_half_open_lookup (&tc->c_lcl_ip, &tc->c_rmt_ip, tc->c_lcl_port, tc->c_rmt_port, - tc->c_proto))) + tc->c_transport_proto))) { if (tmp->lcl_port == hdr->dst_port && tmp->rmt_port == hdr->src_port) diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 5e9ecf11..1ecb6ce6 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -1087,15 +1087,14 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) if (is_syn) { tc = tcp_half_open_connection_get (index); + tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID; } else { tc = tcp_connection_get (index, thread_index); + tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID; } - /* Make sure timer handle is set to invalid */ - tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID; - if (!tcp_in_recovery (tc) && tc->rto_boff > 0 && tc->state >= TCP_STATE_ESTABLISHED) { @@ -1154,6 +1153,20 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) /* Retransmit for SYN/SYNACK */ else if (tc->state == TCP_STATE_SYN_RCVD || tc->state == TCP_STATE_SYN_SENT) { + /* Half-open connection actually moved to established but we were + * waiting for syn retransmit to pop to call cleanup from the right + * thread. */ + if (tc->flags & TCP_CONN_HALF_OPEN_DONE) + { + ASSERT (tc->state == TCP_STATE_SYN_SENT); + if (tcp_half_open_connection_cleanup (tc)) + { + clib_warning ("could not remove half-open connection"); + ASSERT (0); + } + return; + } + /* Try without increasing RTO a number of times. 
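The TCP_CONN_HALF_OPEN_DONE handling above encodes a simple ownership rule: any thread may ask for a half-open connection to be cleaned up, but only the owning thread actually frees it, and a non-owner just flags the connection so the owner's retransmit timer does the work when it next fires. A standalone model of that rule, with invented names (conn_t, owner_timer_pop):

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
  int owner_thread;        /* thread that allocated the connection */
  int half_open_done;      /* set by a non-owner that wanted it freed */
} conn_t;

/* Returns 0 if freed, nonzero if the caller must defer to the owner. */
static int
conn_cleanup (conn_t **cp, int my_thread)
{
  if ((*cp)->owner_thread != my_thread)
    return 1;              /* not ours to free */
  free (*cp);
  *cp = 0;
  return 0;
}

/* What the owner's timer handler does when it eventually pops. */
static void
owner_timer_pop (conn_t **cp, int my_thread)
{
  if (*cp && (*cp)->half_open_done)
    conn_cleanup (cp, my_thread);
}

int
main (void)
{
  conn_t *c = calloc (1, sizeof (*c));
  c->owner_thread = 0;

  /* Worker thread 1 finishes the handshake but cannot free thread 0's
   * connection, so it only marks it done... */
  if (conn_cleanup (&c, 1))
    c->half_open_done = 1;

  /* ...and thread 0 frees it the next time its timer fires. */
  owner_timer_pop (&c, 0);
  printf ("connection %s\n", c ? "still allocated" : "freed");
  return 0;
}
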
If this fails, * start growing RTO exponentially */ if (tc->rto_boff > TCP_RTO_SYN_RETRIES) diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 5c40ddf9..37640cc6 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -1574,7 +1574,7 @@ tcp_test_lookup (vlib_main_t * vm, unformat_input_t * input) tc->connection.rmt_ip.ip4.as_u32 = clib_host_to_net_u32 (0x06000103); tc->connection.lcl_port = 35051; tc->connection.rmt_port = 53764; - tc->connection.proto = 0; + tc->connection.transport_proto = 0; clib_memcpy (tc1, &tc->connection, sizeof (*tc1)); pool_get (session_manager_main.sessions[0], s); @@ -1590,7 +1590,7 @@ tcp_test_lookup (vlib_main_t * vm, unformat_input_t * input) tc->connection.rmt_ip.ip4.as_u32 = clib_host_to_net_u32 (0x06000102); tc->connection.lcl_port = 38225; tc->connection.rmt_port = 53764; - tc->connection.proto = 0; + tc->connection.transport_proto = 0; clib_memcpy (tc2, &tc->connection, sizeof (*tc2)); /* @@ -1601,7 +1601,7 @@ tcp_test_lookup (vlib_main_t * vm, unformat_input_t * input) tconn = stream_session_lookup_transport_wt4 (&tc1->lcl_ip.ip4, &tc1->rmt_ip.ip4, tc1->lcl_port, tc1->rmt_port, - tc1->proto, 0); + tc1->transport_proto, 0); cmp = (memcmp (&tconn->rmt_ip, &tc1->rmt_ip, sizeof (tc1->rmt_ip)) == 0); TCP_TEST ((cmp), "rmt ip is identical %d", cmp); TCP_TEST ((tconn->lcl_port == tc1->lcl_port), @@ -1614,7 +1614,7 @@ tcp_test_lookup (vlib_main_t * vm, unformat_input_t * input) tconn = stream_session_lookup_transport_wt4 (&tc2->lcl_ip.ip4, &tc2->rmt_ip.ip4, tc2->lcl_port, tc2->rmt_port, - tc2->proto, 0); + tc2->transport_proto, 0); TCP_TEST ((tconn == 0), "lookup result should be null"); /* @@ -1624,12 +1624,12 @@ tcp_test_lookup (vlib_main_t * vm, unformat_input_t * input) tconn = stream_session_lookup_transport_wt4 (&tc1->lcl_ip.ip4, &tc1->rmt_ip.ip4, tc1->lcl_port, tc1->rmt_port, - tc1->proto, 0); + tc1->transport_proto, 0); TCP_TEST ((tconn == 0), "lookup result should be null"); tconn = stream_session_lookup_transport_wt4 (&tc2->lcl_ip.ip4, &tc2->rmt_ip.ip4, tc2->lcl_port, tc2->rmt_port, - tc2->proto, 0); + tc2->transport_proto, 0); TCP_TEST ((tconn == 0), "lookup result should be null"); /* @@ -1639,7 +1639,7 @@ tcp_test_lookup (vlib_main_t * vm, unformat_input_t * input) tconn = stream_session_lookup_transport_wt4 (&tc2->lcl_ip.ip4, &tc2->rmt_ip.ip4, tc2->lcl_port, tc2->rmt_port, - tc2->proto, 0); + tc2->transport_proto, 0); TCP_TEST ((tconn == 0), "lookup result should be null"); return 0; diff --git a/src/vnet/udp/udp.c b/src/vnet/udp/udp.c index ff76a82e..fedf2cc0 100644 --- a/src/vnet/udp/udp.c +++ b/src/vnet/udp/udp.c @@ -34,7 +34,7 @@ udp_session_bind_ip4 (u32 session_index, transport_endpoint_t * lcl) memset (listener, 0, sizeof (udp_connection_t)); listener->c_lcl_port = clib_host_to_net_u16 (lcl->port); listener->c_lcl_ip4.as_u32 = lcl->ip.ip4.as_u32; - listener->c_proto = SESSION_TYPE_IP4_UDP; + listener->c_transport_proto = TRANSPORT_PROTO_UDP; udp_register_dst_port (um->vlib_main, lcl->port, udp4_uri_input_node.index, 1 /* is_ipv4 */ ); return 0; @@ -49,7 +49,7 @@ udp_session_bind_ip6 (u32 session_index, transport_endpoint_t * lcl) pool_get (um->udp_listeners, listener); listener->c_lcl_port = clib_host_to_net_u16 (lcl->port); clib_memcpy (&listener->c_lcl_ip6, &lcl->ip.ip6, sizeof (ip6_address_t)); - listener->c_proto = SESSION_TYPE_IP6_UDP; + listener->c_transport_proto = TRANSPORT_PROTO_UDP; udp_register_dst_port (um->vlib_main, lcl->port, udp4_uri_input_node.index, 0 /* is_ipv4 */ ); return 0; @@ -318,8 
+318,8 @@ udp_init (vlib_main_t * vm) /* Register as transport with URI */ - session_register_transport (SESSION_TYPE_IP4_UDP, &udp4_proto); - session_register_transport (SESSION_TYPE_IP6_UDP, &udp6_proto); + session_register_transport (TRANSPORT_PROTO_UDP, 1, &udp4_proto); + session_register_transport (TRANSPORT_PROTO_UDP, 0, &udp6_proto); /* * Initialize data structures diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index 9a8ff076..6ccb1e52 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -179,7 +179,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm, us->c_rmt_ip4.as_u32 = ip0->src_address.as_u32; us->c_lcl_port = udp0->dst_port; us->c_rmt_port = udp0->src_port; - us->c_proto = SESSION_TYPE_IP4_UDP; + us->c_transport_proto = TRANSPORT_PROTO_UDP; us->c_c_index = us - um->udp_sessions[my_thread_index]; /* -- cgit 1.2.3-korg From b88566498a83209f8646e4a6b1d247e69dae2aeb Mon Sep 17 00:00:00 2001 From: Dave Wallace Date: Mon, 31 Jul 2017 13:33:11 -0400 Subject: ssvm->name must be a vector containing a c-string. Change-Id: I14a97a7fdd000da62d2ed4bea71f55ee34a21311 Signed-off-by: Dave Wallace --- src/svm/ssvm.c | 2 ++ src/svm/svm_fifo_segment.c | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'src/svm') diff --git a/src/svm/ssvm.c b/src/svm/ssvm.c index 23e3cf44..e56e6b45 100644 --- a/src/svm/ssvm.c +++ b/src/svm/ssvm.c @@ -32,6 +32,7 @@ ssvm_master_init (ssvm_private_t * ssvm, u32 master_index) if (CLIB_DEBUG > 1) clib_warning ("[%d] creating segment '%s'", getpid (), ssvm->name); + ASSERT (vec_c_string_is_terminated (ssvm->name)); ssvm_filename = format (0, "/dev/shm/%s%c", ssvm->name, 0); unlink ((char *) ssvm_filename); @@ -110,6 +111,7 @@ ssvm_slave_init (ssvm_private_t * ssvm, int timeout_in_seconds) int ssvm_fd = -1; ssvm_shared_header_t *sh; + ASSERT (vec_c_string_is_terminated (ssvm->name)); ssvm->i_am_master = 0; while (timeout_in_seconds-- > 0) diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index a01e26e4..86661e51 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -105,7 +105,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) s->ssvm.ssvm_size = a->segment_size; s->ssvm.i_am_master = 1; s->ssvm.my_pid = getpid (); - s->ssvm.name = format (0, "%s", a->segment_name); + s->ssvm.name = format (0, "%s%c", a->segment_name, 0); s->ssvm.requested_va = sm->next_baseva; rv = ssvm_master_init (&s->ssvm, s - sm->segments); @@ -192,7 +192,7 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) s->ssvm.ssvm_size = ~0; s->ssvm.i_am_master = 1; s->ssvm.my_pid = getpid (); - s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.name = format (0, "%s%c", a->segment_name, 0); s->ssvm.requested_va = ~0; /* Allocate a [sic] shared memory header, in process memory... 
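The commit above makes ssvm->name a vector that carries its own NUL terminator (format (0, "%s%c", ..., 0)), so the bytes can safely be handed to C-string consumers such as unlink() and "%s" formats. A standalone illustration of the same invariant, using an invented length-counted buffer type (buf_t) rather than vppinfra vectors and vec_c_string_is_terminated():

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { size_t len; char *data; } buf_t;

static buf_t
buf_from_cstring (const char *s)
{
  buf_t b;
  b.len = strlen (s) + 1;          /* + 1: keep the terminator in the buffer */
  b.data = malloc (b.len);
  memcpy (b.data, s, b.len);
  return b;
}

static int
buf_is_terminated (const buf_t *b)
{
  return b->len > 0 && b->data[b->len - 1] == 0;
}

int
main (void)
{
  buf_t name = buf_from_cstring ("fifo-segment-0");

  /* Check before treating the bytes as a C string, the way
   * ssvm_master_init() now asserts before building "/dev/shm/<name>". */
  assert (buf_is_terminated (&name));
  printf ("segment name: %s\n", name.data);
  free (name.data);
  return 0;
}
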
*/ @@ -240,7 +240,7 @@ svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) s->ssvm.ssvm_size = a->segment_size; s->ssvm.my_pid = getpid (); - s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.name = format (0, "%s%c", a->segment_name, 0); s->ssvm.requested_va = sm->next_baseva; rv = ssvm_slave_init (&s->ssvm, sm->timeout_in_seconds); -- cgit 1.2.3-korg From 52851e6aa9304054fd1059c8dd284abf8e532bf2 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Mon, 7 Aug 2017 09:35:25 -0400 Subject: TCP proxy prototype - Clean up internal API client registration - Add proxy server - Add a reference count to the svm fifo Change-Id: I5ace1c85497062ed412d26ae76a9e6741af1e984 Signed-off-by: Dave Barach Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 10 +- src/svm/svm_fifo.h | 1 + src/svm/svm_fifo_segment.c | 5 + src/vlibmemory/api_common.h | 1 + src/vlibmemory/memory_vlib.c | 38 ++ src/vnet.am | 1 + src/vnet/session/application.c | 6 + src/vnet/session/application.h | 1 + src/vnet/session/application_interface.h | 4 + src/vnet/session/session.c | 43 ++- src/vnet/session/session.h | 8 +- src/vnet/session/stream_session.h | 5 +- src/vnet/tcp/builtin_client.c | 95 +---- src/vnet/tcp/builtin_proxy.c | 599 +++++++++++++++++++++++++++++++ src/vnet/tcp/builtin_proxy.h | 100 ++++++ src/vnet/tcp/builtin_server.c | 96 +---- 16 files changed, 801 insertions(+), 212 deletions(-) create mode 100644 src/vnet/tcp/builtin_proxy.c create mode 100644 src/vnet/tcp/builtin_proxy.h (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index fc2189c5..e478c06e 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -201,14 +201,20 @@ svm_fifo_create (u32 data_size_in_bytes) memset (f, 0, sizeof (*f)); f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; + f->refcnt = 1; return (f); } void svm_fifo_free (svm_fifo_t * f) { - pool_free (f->ooo_segments); - clib_mem_free (f); + ASSERT (f->refcnt > 0); + + if (--f->refcnt == 0) + { + pool_free (f->ooo_segments); + clib_mem_free (f); + } } always_inline ooo_segment_t * diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index a83cd858..f10b4d91 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -75,6 +75,7 @@ typedef struct _svm_fifo #if SVM_FIFO_TRACE svm_fifo_trace_elem_t *trace; #endif + i8 refcnt; CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 86661e51..c04b9d8c 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -296,6 +296,7 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, memset (f, 0, sizeof (*f)); f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; + f->refcnt = 1; goto found; } /* FALLTHROUGH */ @@ -344,6 +345,10 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, svm_fifo_segment_header_t *fsh; void *oldheap; + ASSERT (f->refcnt > 0); + + if (--f->refcnt > 0) + return; sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; diff --git a/src/vlibmemory/api_common.h b/src/vlibmemory/api_common.h index 53909cc4..19daecdf 100644 --- a/src/vlibmemory/api_common.h +++ b/src/vlibmemory/api_common.h @@ -126,6 +126,7 @@ int vl_client_connect_to_vlib_no_rx_pthread (const char *svm_name, u16 vl_client_get_first_plugin_msg_id (const char *plugin_name); void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length); +u32 vl_api_memclnt_create_internal (char *, unix_shared_memory_queue_t *); #endif /* 
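The new refcnt field gives the fifo shared ownership: creation hands out one reference, every additional user takes another, and svm_fifo_free() only releases storage when the last reference is dropped. A standalone sketch of that rule, with invented demo_* names:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
  int refcnt;
  unsigned nitems;
  unsigned char data[];
} demo_fifo_t;

static demo_fifo_t *
demo_fifo_create (unsigned nitems)
{
  demo_fifo_t *f = calloc (1, sizeof (*f) + nitems);
  f->nitems = nitems;
  f->refcnt = 1;                /* creator holds the first reference */
  return f;
}

static void
demo_fifo_free (demo_fifo_t *f)
{
  assert (f->refcnt > 0);
  if (--f->refcnt == 0)         /* last user releases the storage */
    free (f);
}

int
main (void)
{
  demo_fifo_t *f = demo_fifo_create (4096);

  f->refcnt++;                  /* a second session starts using the fifo */
  demo_fifo_free (f);           /* first drop: fifo survives */
  demo_fifo_free (f);           /* second drop: fifo is actually freed */
  printf ("done\n");
  return 0;
}
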
included_vlibmemory_api_common_h */ diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index 004a9974..688ce604 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -136,6 +136,44 @@ vl_api_serialize_message_table (api_main_t * am, u8 * vector) return serialize_close_vector (sm); } +/* + * vl_api_memclnt_create_internal + */ + +u32 +vl_api_memclnt_create_internal (char *name, unix_shared_memory_queue_t * q) +{ + vl_api_registration_t **regpp; + vl_api_registration_t *regp; + svm_region_t *svm; + void *oldheap; + api_main_t *am = &api_main; + + ASSERT (vlib_get_thread_index () == 0); + pool_get (am->vl_clients, regpp); + + svm = am->vlib_rp; + + pthread_mutex_lock (&svm->mutex); + oldheap = svm_push_data_heap (svm); + *regpp = clib_mem_alloc (sizeof (vl_api_registration_t)); + + regp = *regpp; + memset (regp, 0, sizeof (*regp)); + regp->registration_type = REGISTRATION_TYPE_SHMEM; + regp->vl_api_registration_pool_index = regpp - am->vl_clients; + + regp->vl_input_queue = q; + regp->name = format (0, "%s%c", name, 0); + + pthread_mutex_unlock (&svm->mutex); + svm_pop_heap (oldheap); + return vl_msg_api_handle_from_index_and_epoch + (regp->vl_api_registration_pool_index, + am->shmem_hdr->application_restarts); +} + + /* * vl_api_memclnt_create_t_handler */ diff --git a/src/vnet.am b/src/vnet.am index ad84c028..ede0376d 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -475,6 +475,7 @@ libvnet_la_SOURCES += \ vnet/tcp/builtin_client.c \ vnet/tcp/builtin_server.c \ vnet/tcp/builtin_http_server.c \ + vnet/tcp/builtin_proxy.c \ vnet/tcp/tcp_test.c \ vnet/tcp/tcp.c diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 076c66f1..78c41b93 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -367,6 +367,12 @@ app_get_name_from_reg_index (application_t * app) return app_name; } +int +application_is_proxy (application_t * app) +{ + return !(app->flags & APP_OPTIONS_FLAGS_IS_PROXY); +} + u8 * format_application_listener (u8 * s, va_list * args) { diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 35caae85..29d37a06 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -116,6 +116,7 @@ segment_manager_t *application_get_listen_segment_manager (application_t * s); segment_manager_t *application_get_connect_segment_manager (application_t * app); +int application_is_proxy (application_t * app); #endif /* SRC_VNET_SESSION_APPLICATION_H_ */ diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index ed9f89b3..1d63f6cc 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -100,6 +100,9 @@ typedef struct _vnet_connect_args /* Used for redirects */ void *mp; + + /* used for proxy connections */ + u64 server_handle; } vnet_connect_args_t; typedef struct _vnet_disconnect_args_t @@ -129,6 +132,7 @@ typedef enum _(USE_FIFO, "Use FIFO with redirects") \ _(ADD_SEGMENT, "Add segment and signal app if needed") \ _(BUILTIN_APP, "Application is builtin") \ + _(IS_PROXY, "Application is proxying") typedef enum _app_options { diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 4ba15291..991bcd5a 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -30,7 +30,7 @@ extern transport_proto_vft_t *tp_vfts; int stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, - stream_session_t ** ret_s) + u8 
alloc_fifos, stream_session_t ** ret_s) { session_manager_main_t *smm = &session_manager_main; svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0; @@ -43,31 +43,37 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, ASSERT (thread_index == vlib_get_thread_index ()); - if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo, - &server_tx_fifo, - &fifo_segment_index))) - return rv; - /* Create the session */ pool_get_aligned (smm->sessions[thread_index], s, CLIB_CACHE_LINE_BYTES); memset (s, 0, sizeof (*s)); - - /* Initialize backpointers */ pool_index = s - smm->sessions[thread_index]; - server_rx_fifo->master_session_index = pool_index; - server_rx_fifo->master_thread_index = thread_index; - server_tx_fifo->master_session_index = pool_index; - server_tx_fifo->master_thread_index = thread_index; + /* Allocate fifos */ + if (alloc_fifos) + { + if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo, + &server_tx_fifo, + &fifo_segment_index))) + { + pool_put (smm->sessions[thread_index], s); + return rv; + } + /* Initialize backpointers */ + server_rx_fifo->master_session_index = pool_index; + server_rx_fifo->master_thread_index = thread_index; + + server_tx_fifo->master_session_index = pool_index; + server_tx_fifo->master_thread_index = thread_index; - s->server_rx_fifo = server_rx_fifo; - s->server_tx_fifo = server_tx_fifo; + s->server_rx_fifo = server_rx_fifo; + s->server_tx_fifo = server_tx_fifo; + s->svm_segment_index = fifo_segment_index; + } /* Initialize state machine, such as it is... */ s->session_type = session_type_from_proto_and_ip (tc->transport_proto, tc->is_ip4); s->session_state = SESSION_STATE_CONNECTING; - s->svm_segment_index = fifo_segment_index; s->thread_index = thread_index; s->session_index = pool_index; @@ -379,10 +385,11 @@ stream_session_connect_notify (transport_connection_t * tc, u8 is_fail) if (!is_fail) { segment_manager_t *sm; + u8 alloc_fifos; sm = application_get_connect_segment_manager (app); - + alloc_fifos = application_is_proxy (app); /* Create new session (svm segments are allocated if needed) */ - if (stream_session_create_i (sm, tc, &new_s)) + if (stream_session_create_i (sm, tc, alloc_fifos, &new_s)) { is_fail = 1; error = -1; @@ -515,7 +522,7 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, server = application_get (listener->app_index); sm = application_get_listen_segment_manager (server, listener); - if ((rv = stream_session_create_i (sm, tc, &s))) + if ((rv = stream_session_create_i (sm, tc, 1, &s))) return rv; s->app_index = server->index; diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 538433da..74d82a40 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -183,10 +183,10 @@ stream_session_is_valid (u32 si, u8 thread_index) stream_session_t *s; s = pool_elt_at_index (session_manager_main.sessions[thread_index], si); if (s->thread_index != thread_index || s->session_index != si - || s->server_rx_fifo->master_session_index != si - || s->server_tx_fifo->master_session_index != si - || s->server_rx_fifo->master_thread_index != thread_index - || s->server_tx_fifo->master_thread_index != thread_index) + /* || s->server_rx_fifo->master_session_index != si + || s->server_tx_fifo->master_session_index != si + || s->server_rx_fifo->master_thread_index != thread_index + || s->server_tx_fifo->master_thread_index != thread_index */ ) return 0; return 1; } diff --git a/src/vnet/session/stream_session.h 
b/src/vnet/session/stream_session.h index 82bbf521..4c263211 100644 --- a/src/vnet/session/stream_session.h +++ b/src/vnet/session/stream_session.h @@ -83,8 +83,11 @@ typedef struct _stream_session_t u32 opaque2; + /** connected (server) session handle */ + u64 server_session_handle; + /** Opaque, pad to a 64-octet boundary */ - u64 opaque[2]; + u64 opaque[1]; } stream_session_t; #endif /* SRC_VNET_SESSION_STREAM_SESSION_H_ */ diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 0cb9faa8..938e07ba 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -24,25 +24,6 @@ #include #include -/* define message IDs */ -#include - -/* define message structures */ -#define vl_typedefs -#include -#undef vl_typedefs - -/* define generated endian-swappers */ -#define vl_endianfun -#include -#undef vl_endianfun - -/* instantiate all the print functions we know about */ -#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) -#define vl_printfun -#include -#undef vl_printfun - #define TCP_BUILTIN_CLIENT_DBG (0) static void @@ -308,87 +289,16 @@ VLIB_REGISTER_NODE (builtin_client_node) = }; /* *INDENT-ON* */ -/* So we don't get "no handler for... " msgs */ -static void -vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - tclient_main_t *tm = &tclient_main; - tm->my_client_index = mp->index; - vlib_process_signal_event (vm, tm->cli_node_index, 1 /* evt */ , - 0 /* data */ ); -} - static int create_api_loopback (tclient_main_t * tm) { - vlib_main_t *vm = vlib_get_main (); - vl_api_memclnt_create_t _m, *mp = &_m; - extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *); api_main_t *am = &api_main; vl_shmem_hdr_t *shmem_hdr; - uword *event_data = 0, event_type; - int resolved = 0; - - /* - * Create a "loopback" API client connection - * Don't do things like this unless you know what you're doing... 
- */ shmem_hdr = am->shmem_hdr; tm->vl_input_queue = shmem_hdr->vl_input_queue; - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; - mp->context = 0xFEEDFACE; - mp->input_queue = pointer_to_uword (tm->vl_input_queue); - strncpy ((char *) mp->name, "tcp_clients_tester", sizeof (mp->name) - 1); - - vl_api_memclnt_create_t_handler (mp); - - /* Wait for reply */ - vlib_process_wait_for_event_or_clock (vm, 1.0); - event_type = vlib_process_get_events (vm, &event_data); - switch (event_type) - { - case 1: - resolved = 1; - break; - case ~0: - /* timed out */ - break; - default: - clib_warning ("unknown event_type %d", event_type); - } - if (!resolved) - return -1; - return 0; -} - -#define foreach_tclient_static_api_msg \ -_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ - -static clib_error_t * -tclient_api_hookup (vlib_main_t * vm) -{ - vl_msg_api_msg_config_t _c, *c = &_c; - - /* Hook up client-side static APIs to our handlers */ -#define _(N,n) do { \ - c->id = VL_API_##N; \ - c->name = #n; \ - c->handler = vl_api_##n##_t_handler; \ - c->cleanup = vl_noop_handler; \ - c->endian = vl_api_##n##_t_endian; \ - c->print = vl_api_##n##_t_print; \ - c->size = sizeof(vl_api_##n##_t); \ - c->traced = 1; /* trace, so these msgs print */ \ - c->replay = 0; /* don't replay client create/delete msgs */ \ - c->message_bounce = 0; /* don't bounce this message */ \ - vl_msg_api_config(c);} while (0); - - foreach_tclient_static_api_msg; -#undef _ - + tm->my_client_index = + vl_api_memclnt_create_internal ("tcp_test_client", tm->vl_input_queue); return 0; } @@ -400,7 +310,6 @@ tcp_test_clients_init (vlib_main_t * vm) u32 num_threads; int i; - tclient_api_hookup (vm); if (create_api_loopback (tm)) return -1; diff --git a/src/vnet/tcp/builtin_proxy.c b/src/vnet/tcp/builtin_proxy.c new file mode 100644 index 00000000..d8cfb11d --- /dev/null +++ b/src/vnet/tcp/builtin_proxy.c @@ -0,0 +1,599 @@ +/* +* Copyright (c) 2015-2017 Cisco and/or its affiliates. +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at: +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#include +#include +#include +#include +#include + +builtin_proxy_main_t builtin_proxy_main; + +static void +delete_proxy_session (stream_session_t * s, int is_active_open) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + proxy_session_t *ps = 0; + vnet_disconnect_args_t _a, *a = &_a; + stream_session_t *active_open_session = 0; + stream_session_t *server_session = 0; + uword *p; + u64 handle; + + handle = stream_session_handle (s); + + clib_spinlock_lock_if_init (&bpm->sessions_lock); + if (is_active_open) + { + active_open_session = s; + + p = hash_get (bpm->proxy_session_by_active_open_handle, handle); + if (p == 0) + { + clib_warning ("proxy session for %s handle %lld (%llx) AWOL", + is_active_open ? 
"active open" : "server", + handle, handle); + } + else + { + ps = pool_elt_at_index (bpm->sessions, p[0]); + if (ps->vpp_server_handle != ~0) + server_session = stream_session_get_from_handle + (ps->vpp_server_handle); + else + server_session = 0; + } + } + else + { + server_session = s; + + p = hash_get (bpm->proxy_session_by_server_handle, handle); + if (p == 0) + { + clib_warning ("proxy session for %s handle %lld (%llx) AWOL", + is_active_open ? "active open" : "server", + handle, handle); + } + else + { + ps = pool_elt_at_index (bpm->sessions, p[0]); + if (ps->vpp_server_handle != ~0) + active_open_session = stream_session_get_from_handle + (ps->vpp_server_handle); + else + active_open_session = 0; + } + } + + if (ps) + { + if (CLIB_DEBUG > 0) + memset (ps, 0xFE, sizeof (*ps)); + pool_put (bpm->sessions, ps); + } + + clib_spinlock_unlock_if_init (&bpm->sessions_lock); + + if (active_open_session) + { + a->handle = stream_session_handle (active_open_session); + a->app_index = bpm->active_open_app_index; + hash_unset (bpm->proxy_session_by_active_open_handle, + stream_session_handle (active_open_session)); + vnet_disconnect_session (a); + } + + if (server_session) + { + a->handle = stream_session_handle (server_session); + a->app_index = bpm->server_app_index; + hash_unset (bpm->proxy_session_by_server_handle, + stream_session_handle (server_session)); + vnet_disconnect_session (a); + } +} + +static int +server_accept_callback (stream_session_t * s) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + + s->session_state = SESSION_STATE_READY; + + clib_spinlock_lock_if_init (&bpm->sessions_lock); + + return 0; +} + +static void +server_disconnect_callback (stream_session_t * s) +{ + delete_proxy_session (s, 0 /* is_active_open */ ); +} + +static void +server_reset_callback (stream_session_t * s) +{ + clib_warning ("Reset session %U", format_stream_session, s, 2); + delete_proxy_session (s, 0 /* is_active_open */ ); +} + +static int +server_connected_callback (u32 app_index, u32 api_context, + stream_session_t * s, u8 is_fail) +{ + clib_warning ("called..."); + return -1; +} + +static int +server_add_segment_callback (u32 client_index, + const u8 * seg_name, u32 seg_size) +{ + clib_warning ("called..."); + return -1; +} + +static int +server_redirect_connect_callback (u32 client_index, void *mp) +{ + clib_warning ("called..."); + return -1; +} + +static int +server_rx_callback (stream_session_t * s) +{ + u32 max_dequeue; + int actual_transfer __attribute__ ((unused)); + svm_fifo_t *tx_fifo, *rx_fifo; + builtin_proxy_main_t *bpm = &builtin_proxy_main; + u32 thread_index = vlib_get_thread_index (); + vnet_connect_args_t _a, *a = &_a; + proxy_session_t *ps; + int proxy_index; + uword *p; + svm_fifo_t *active_open_tx_fifo; + session_fifo_event_t evt; + + ASSERT (s->thread_index == thread_index); + + clib_spinlock_lock_if_init (&bpm->sessions_lock); + p = + hash_get (bpm->proxy_session_by_server_handle, stream_session_handle (s)); + + if (PREDICT_TRUE (p != 0)) + { + clib_spinlock_unlock_if_init (&bpm->sessions_lock); + active_open_tx_fifo = s->server_rx_fifo; + + /* + * Send event for active open tx fifo + */ + if (svm_fifo_set_event (active_open_tx_fifo)) + { + evt.fifo = active_open_tx_fifo; + evt.event_type = FIFO_EVENT_APP_TX; + if (unix_shared_memory_queue_add + (bpm->active_open_event_queue[thread_index], (u8 *) & evt, + 0 /* do wait for mutex */ )) + clib_warning ("failed to enqueue tx evt"); + } + } + else + { + rx_fifo = s->server_rx_fifo; + tx_fifo = s->server_tx_fifo; + + 
ASSERT (rx_fifo->master_thread_index == thread_index); + ASSERT (tx_fifo->master_thread_index == thread_index); + + max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); + + if (PREDICT_FALSE (max_dequeue == 0)) + return 0; + + actual_transfer = svm_fifo_peek (rx_fifo, 0 /* relative_offset */ , + max_dequeue, + bpm->rx_buf[thread_index]); + + /* $$$ your message in this space: parse url, etc. */ + + memset (a, 0, sizeof (*a)); + + clib_spinlock_lock_if_init (&bpm->sessions_lock); + pool_get (bpm->sessions, ps); + memset (ps, 0, sizeof (*ps)); + ps->server_rx_fifo = rx_fifo; + ps->server_tx_fifo = tx_fifo; + ps->vpp_server_handle = stream_session_handle (s); + + proxy_index = ps - bpm->sessions; + + hash_set (bpm->proxy_session_by_server_handle, ps->vpp_server_handle, + proxy_index); + + clib_spinlock_unlock_if_init (&bpm->sessions_lock); + + a->uri = "tcp://6.0.2.2/23"; + a->api_context = proxy_index; + a->app_index = bpm->active_open_app_index; + a->mp = 0; + vnet_connect_uri (a); + } + + return 0; +} + +static session_cb_vft_t builtin_session_cb_vft = { + .session_accept_callback = server_accept_callback, + .session_disconnect_callback = server_disconnect_callback, + .session_connected_callback = server_connected_callback, + .add_segment_callback = server_add_segment_callback, + .redirect_connect_callback = server_redirect_connect_callback, + .builtin_server_rx_callback = server_rx_callback, + .session_reset_callback = server_reset_callback +}; + +static int +active_open_connected_callback (u32 app_index, u32 opaque, + stream_session_t * s, u8 is_fail) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + proxy_session_t *ps; + u8 thread_index = vlib_get_thread_index (); + session_fifo_event_t evt; + + if (is_fail) + { + clib_warning ("connection %d failed!", opaque); + return 0; + } + + /* + * Setup proxy session handle. + */ + clib_spinlock_lock_if_init (&bpm->sessions_lock); + + ps = pool_elt_at_index (bpm->sessions, opaque); + ps->vpp_active_open_handle = stream_session_handle (s); + + s->server_tx_fifo = ps->server_rx_fifo; + s->server_rx_fifo = ps->server_tx_fifo; + + /* + * Reset the active-open tx-fifo master indices so the active-open session + * will receive data, etc. 
+ */ + s->server_tx_fifo->master_session_index = s->session_index; + s->server_tx_fifo->master_thread_index = s->thread_index; + + /* + * Account for the active-open session's use of the fifos + * so they won't disappear until the last session which uses + * them disappears + */ + s->server_tx_fifo->refcnt++; + s->server_rx_fifo->refcnt++; + + hash_set (bpm->proxy_session_by_active_open_handle, + ps->vpp_active_open_handle, opaque); + + clib_spinlock_unlock_if_init (&bpm->sessions_lock); + + /* + * Send event for active open tx fifo + */ + if (svm_fifo_set_event (s->server_tx_fifo)) + { + evt.fifo = s->server_tx_fifo; + evt.event_type = FIFO_EVENT_APP_TX; + if (unix_shared_memory_queue_add + (bpm->active_open_event_queue[thread_index], (u8 *) & evt, + 0 /* do wait for mutex */ )) + clib_warning ("failed to enqueue tx evt"); + } + + return 0; +} + +static void +active_open_reset_callback (stream_session_t * s) +{ + delete_proxy_session (s, 1 /* is_active_open */ ); +} + +static int +active_open_create_callback (stream_session_t * s) +{ + return 0; +} + +static void +active_open_disconnect_callback (stream_session_t * s) +{ + delete_proxy_session (s, 1 /* is_active_open */ ); +} + +static int +active_open_rx_callback (stream_session_t * s) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + session_fifo_event_t evt; + svm_fifo_t *server_rx_fifo; + u32 thread_index = vlib_get_thread_index (); + + server_rx_fifo = s->server_rx_fifo; + + /* + * Send event for server tx fifo + */ + if (svm_fifo_set_event (server_rx_fifo)) + { + evt.fifo = server_rx_fifo; + evt.event_type = FIFO_EVENT_APP_TX; + if (unix_shared_memory_queue_add + (bpm->server_event_queue[thread_index], (u8 *) & evt, + 0 /* do wait for mutex */ )) + clib_warning ("failed to enqueue server rx evt"); + } + + return 0; +} + +/* *INDENT-OFF* */ +static session_cb_vft_t builtin_clients = { + .session_reset_callback = active_open_reset_callback, + .session_connected_callback = active_open_connected_callback, + .session_accept_callback = active_open_create_callback, + .session_disconnect_callback = active_open_disconnect_callback, + .builtin_server_rx_callback = active_open_rx_callback +}; +/* *INDENT-ON* */ + + +static void +create_api_loopbacks (vlib_main_t * vm) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + api_main_t *am = &api_main; + vl_shmem_hdr_t *shmem_hdr; + + shmem_hdr = am->shmem_hdr; + bpm->vl_input_queue = shmem_hdr->vl_input_queue; + bpm->server_client_index = + vl_api_memclnt_create_internal ("proxy_server", bpm->vl_input_queue); + bpm->active_open_client_index = + vl_api_memclnt_create_internal ("proxy_active_open", bpm->vl_input_queue); +} + +static int +server_attach () +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + u8 segment_name[128]; + u64 options[SESSION_OPTIONS_N_OPTIONS]; + vnet_app_attach_args_t _a, *a = &_a; + + memset (a, 0, sizeof (*a)); + memset (options, 0, sizeof (options)); + + a->api_client_index = bpm->server_client_index; + a->session_cb_vft = &builtin_session_cb_vft; + a->options = options; + a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 512 << 20; + a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = bpm->fifo_size; + a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = bpm->fifo_size; + a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = bpm->private_segment_count; + a->options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE] = bpm->private_segment_size; + a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = + bpm->prealloc_fifos ? 
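active_open_connected_callback() above wires the outbound proxy session to the inbound one by reusing its fifos (rx and tx swapped) and taking an extra reference on each, so whichever session is torn down last releases them. A standalone model of that cross-connect, with invented demo_* names:

#include <stdio.h>
#include <stdlib.h>

typedef struct { int refcnt; } demo_fifo_t;
typedef struct { demo_fifo_t *rx, *tx; } demo_session_t;

static demo_fifo_t *
fifo_new (void)
{
  demo_fifo_t *f = calloc (1, sizeof (*f));
  f->refcnt = 1;
  return f;
}

static void
fifo_unref (demo_fifo_t *f)
{
  if (--f->refcnt == 0)
    free (f);
}

int
main (void)
{
  demo_session_t server = { fifo_new (), fifo_new () };
  demo_session_t active_open;

  /* Cross-connect: what the server side received is what the proxy writes
   * out, and what comes back on the outbound side feeds the server's tx. */
  active_open.tx = server.rx;
  active_open.rx = server.tx;

  /* Both sessions now share the same pair of fifos, so account for it. */
  active_open.tx->refcnt++;
  active_open.rx->refcnt++;

  /* Either side can be torn down first without freeing the other's fifos. */
  fifo_unref (active_open.rx);
  fifo_unref (active_open.tx);
  fifo_unref (server.rx);
  fifo_unref (server.tx);
  printf ("all fifos released\n");
  return 0;
}
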
bpm->prealloc_fifos : 1; + + a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + + a->segment_name = segment_name; + a->segment_name_length = ARRAY_LEN (segment_name); + + if (vnet_application_attach (a)) + { + clib_warning ("failed to attach server"); + return -1; + } + bpm->server_app_index = a->app_index; + + return 0; +} + +static int +active_open_attach (void) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + vnet_app_attach_args_t _a, *a = &_a; + u8 segment_name[128]; + u32 segment_name_length; + u64 options[16]; + + segment_name_length = ARRAY_LEN (segment_name); + + memset (a, 0, sizeof (*a)); + memset (options, 0, sizeof (options)); + + a->api_client_index = bpm->active_open_client_index; + a->segment_name = segment_name; + a->segment_name_length = segment_name_length; + a->session_cb_vft = &builtin_clients; + + options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; + options[SESSION_OPTIONS_SEGMENT_SIZE] = 512 << 20; + options[SESSION_OPTIONS_RX_FIFO_SIZE] = bpm->fifo_size; + options[SESSION_OPTIONS_TX_FIFO_SIZE] = bpm->fifo_size; + options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = bpm->private_segment_count; + options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE] = bpm->private_segment_size; + options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = + bpm->prealloc_fifos ? bpm->prealloc_fifos : 1; + + options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP + | APP_OPTIONS_FLAGS_IS_PROXY; + + a->options = options; + + if (vnet_application_attach (a)) + return -1; + + bpm->active_open_app_index = a->app_index; + + return 0; +} + +static int +server_listen () +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + vnet_bind_args_t _a, *a = &_a; + memset (a, 0, sizeof (*a)); + a->app_index = bpm->server_app_index; + a->uri = "tcp://0.0.0.0/23"; + return vnet_bind_uri (a); +} + +static int +server_create (vlib_main_t * vm) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + u32 num_threads; + int i; + + if (bpm->server_client_index == (u32) ~ 0) + create_api_loopbacks (vm); + + num_threads = 1 /* main thread */ + vtm->n_threads; + vec_validate (builtin_proxy_main.server_event_queue, num_threads - 1); + vec_validate (builtin_proxy_main.active_open_event_queue, num_threads - 1); + vec_validate (bpm->rx_buf, num_threads - 1); + + for (i = 0; i < num_threads; i++) + vec_validate (bpm->rx_buf[i], bpm->rcv_buffer_size); + + if (server_attach ()) + { + clib_warning ("failed to attach server app"); + return -1; + } + if (server_listen ()) + { + clib_warning ("failed to start listening"); + return -1; + } + if (active_open_attach ()) + { + clib_warning ("failed to attach active open app"); + return -1; + } + + for (i = 0; i < num_threads; i++) + { + bpm->active_open_event_queue[i] = + session_manager_get_vpp_event_queue (i); + + ASSERT (bpm->active_open_event_queue[i]); + + bpm->server_event_queue[i] = session_manager_get_vpp_event_queue (i); + } + + return 0; +} + +static clib_error_t * +proxy_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + int rv; + u32 tmp; + + bpm->fifo_size = 64 << 10; + bpm->rcv_buffer_size = 1024; + bpm->prealloc_fifos = 0; + bpm->private_segment_count = 0; + bpm->private_segment_size = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "fifo-size %d", &bpm->fifo_size)) + bpm->fifo_size <<= 10; + else if (unformat (input, "rcv-buf-size %d", &bpm->rcv_buffer_size)) + ; + else 
if (unformat (input, "prealloc-fifos %d", &bpm->prealloc_fifos)) + ; + else if (unformat (input, "private-segment-count %d", + &bpm->private_segment_count)) + ; + else if (unformat (input, "private-segment-size %dm", &tmp)) + bpm->private_segment_size = tmp << 20; + else if (unformat (input, "private-segment-size %dg", &tmp)) + bpm->private_segment_size = tmp << 30; + else if (unformat (input, "private-segment-size %d", &tmp)) + bpm->private_segment_size = tmp; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); + + rv = server_create (vm); + switch (rv) + { + case 0: + break; + default: + return clib_error_return (0, "server_create returned %d", rv); + } + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (server_create_command, static) = +{ + .path = "test proxy server", + .short_help = "test proxy server", + .function = proxy_server_create_command_fn, +}; +/* *INDENT-ON* */ + +clib_error_t * +builtin_tcp_proxy_main_init (vlib_main_t * vm) +{ + builtin_proxy_main_t *bpm = &builtin_proxy_main; + bpm->server_client_index = ~0; + bpm->active_open_client_index = ~0; + bpm->proxy_session_by_active_open_handle = hash_create (0, sizeof (uword)); + bpm->proxy_session_by_server_handle = hash_create (0, sizeof (uword)); + + return 0; +} + +VLIB_INIT_FUNCTION (builtin_tcp_proxy_main_init); + +/* +* fd.io coding-style-patch-verification: ON +* +* Local Variables: +* eval: (c-set-style "gnu") +* End: +*/ diff --git a/src/vnet/tcp/builtin_proxy.h b/src/vnet/tcp/builtin_proxy.h new file mode 100644 index 00000000..cf707a15 --- /dev/null +++ b/src/vnet/tcp/builtin_proxy.h @@ -0,0 +1,100 @@ + +/* + * builtin_proxy.h - skeleton vpp engine plug-in header file + * + * Copyright (c) + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef __included_builtin_proxy_h__ +#define __included_builtin_proxy_h__ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +typedef struct +{ + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; + + u64 vpp_server_handle; + u64 vpp_active_open_handle; +} proxy_session_t; + +typedef struct +{ + unix_shared_memory_queue_t *vl_input_queue; /**< vpe input queue */ + /** per-thread vectors */ + unix_shared_memory_queue_t **server_event_queue; + unix_shared_memory_queue_t **active_open_event_queue; + u8 **rx_buf; /**< intermediate rx buffers */ + + u32 cli_node_index; /**< cli process node index */ + u32 server_client_index; /**< server API client handle */ + u32 server_app_index; /**< server app index */ + u32 active_open_client_index; /**< active open API client handle */ + u32 active_open_app_index; /**< active open index after attach */ + + uword *proxy_session_by_server_handle; + uword *proxy_session_by_active_open_handle; + + /* + * Configuration params + */ + u8 *connect_uri; /**< URI for slave's connect */ + u32 configured_segment_size; + u32 fifo_size; + u32 private_segment_count; /**< Number of private fifo segs */ + u32 private_segment_size; /**< size of private fifo segs */ + int rcv_buffer_size; + + /* + * Test state variables + */ + proxy_session_t *sessions; /**< Session pool, shared */ + clib_spinlock_t sessions_lock; + u32 **connection_index_by_thread; + pthread_t client_thread_handle; + + /* + * Flags + */ + u8 is_init; + u8 prealloc_fifos; /**< Request fifo preallocation */ + + /* + * Convenience + */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + ethernet_main_t *ethernet_main; +} builtin_proxy_main_t; + +builtin_proxy_main_t builtin_proxy_main; + +#endif /* __included_builtin_proxy_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 3416678e..2454a831 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -18,25 +18,6 @@ #include #include -/* define message IDs */ -#include - -/* define message structures */ -#define vl_typedefs -#include -#undef vl_typedefs - -/* define generated endian-swappers */ -#define vl_endianfun -#include -#undef vl_endianfun - -/* instantiate all the print functions we know about */ -#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) -#define vl_printfun -#include -#undef vl_printfun - typedef struct { /* @@ -279,46 +260,13 @@ static int create_api_loopback (vlib_main_t * vm) { builtin_server_main_t *bsm = &builtin_server_main; - vl_api_memclnt_create_t _m, *mp = &_m; - extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *); api_main_t *am = &api_main; vl_shmem_hdr_t *shmem_hdr; - uword *event_data = 0, event_type; - int resolved = 0; - - /* - * Create a "loopback" API client connection - * Don't do things like this unless you know what you're doing... 
- */ shmem_hdr = am->shmem_hdr; bsm->vl_input_queue = shmem_hdr->vl_input_queue; - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; - mp->context = 0xFEEDFACE; - mp->input_queue = pointer_to_uword (bsm->vl_input_queue); - strncpy ((char *) mp->name, "tcp_test_server", sizeof (mp->name) - 1); - - vl_api_memclnt_create_t_handler (mp); - - /* Wait for reply */ - bsm->node_index = vlib_get_current_process (vm)->node_runtime.node_index; - vlib_process_wait_for_event_or_clock (vm, 2.0); - event_type = vlib_process_get_events (vm, &event_data); - switch (event_type) - { - case 1: - resolved = 1; - break; - case ~0: - /* timed out */ - break; - default: - clib_warning ("unknown event_type %d", event_type); - } - if (!resolved) - return -1; - + bsm->my_client_index = + vl_api_memclnt_create_internal ("tcp_test_server", bsm->vl_input_queue); return 0; } @@ -413,45 +361,6 @@ server_create (vlib_main_t * vm) return 0; } -/* Get our api client index */ -static void -vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) -{ - vlib_main_t *vm = vlib_get_main (); - builtin_server_main_t *bsm = &builtin_server_main; - bsm->my_client_index = mp->index; - vlib_process_signal_event (vm, bsm->node_index, 1 /* evt */ , - 0 /* data */ ); -} - -#define foreach_tcp_builtin_server_api_msg \ -_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ - -static clib_error_t * -tcp_builtin_server_api_hookup (vlib_main_t * vm) -{ - vl_msg_api_msg_config_t _c, *c = &_c; - - /* Hook up client-side static APIs to our handlers */ -#define _(N,n) do { \ - c->id = VL_API_##N; \ - c->name = #n; \ - c->handler = vl_api_##n##_t_handler; \ - c->cleanup = vl_noop_handler; \ - c->endian = vl_api_##n##_t_endian; \ - c->print = vl_api_##n##_t_print; \ - c->size = sizeof(vl_api_##n##_t); \ - c->traced = 1; /* trace, so these msgs print */ \ - c->replay = 0; /* don't replay client create/delete msgs */ \ - c->message_bounce = 0; /* don't bounce this message */ \ - vl_msg_api_config(c);} while (0); - - foreach_tcp_builtin_server_api_msg; -#undef _ - - return 0; -} - static clib_error_t * server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) @@ -491,7 +400,6 @@ server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, format_unformat_error, input); } - tcp_builtin_server_api_hookup (vm); vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); rv = server_create (vm); -- cgit 1.2.3-korg From 818eb54de01459ed3d823f8a9781bbed0845db82 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Wed, 2 Aug 2017 13:56:13 -0400 Subject: Improve the svm fifo allocator - Round up requested fifo size to the next power of two - Maintain per-segment power-of-two freelists - Allocate fifos in chunks, to amortize alignment overhead - Detach builtin test client application after each run so we can use different fifo sizes each time - Be more suspicious of session / application indices Useful prep work for dynamically resizing fifos. As far as the svm fifo code is concerned, it's OK to set fifo->nitems anywhere in the interval: [0, 1<<(fifo->freelist_index) + FIFO_SEGMENT_MIN_FIFO_SIZE] It's unlikely that setting nitems below the path MTU will work out very well... 
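Illustration (not part of the patch): a minimal sketch of the size rounding and freelist-index mapping described above, assuming the vppinfra max_log2() helper that the patch itself uses and the FIFO_SEGMENT_MIN_FIFO_SIZE (2048) constant introduced below in svm_fifo_segment.h; the helper name is hypothetical.

#include <vppinfra/clib.h>              /* max_log2() */

#define FIFO_SEGMENT_MIN_FIFO_SIZE 2048 /* smallest supported fifo, per svm_fifo_segment.h */

/* Round a requested fifo size up to the next power of two and derive
 * the per-segment freelist index that size will be carved from. */
static inline u32
example_fifo_freelist_index (u32 data_size_in_bytes)
{
  u32 rounded_data_size = 1 << max_log2 (data_size_in_bytes);
  return max_log2 (rounded_data_size)
    - max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE);
}

/* Example: a 3000-byte request rounds up to 4096 bytes and is served from
 * freelist index 1; a 2048-byte request stays on freelist index 0. */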
Change-Id: Idad73a027dfb7412056cb02988b77e300fa7e8a7 Signed-off-by: Dave Barach --- src/svm/svm_fifo.c | 5 +- src/svm/svm_fifo.h | 3 +- src/svm/svm_fifo_segment.c | 130 +++++++++++++++++++++++++++++++++++++----- src/svm/svm_fifo_segment.h | 6 +- src/vnet/session/session.c | 13 ++++- src/vnet/tcp/builtin_client.c | 14 +++++ 6 files changed, 151 insertions(+), 20 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index e478c06e..7f8127cf 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -192,8 +192,11 @@ svm_fifo_t * svm_fifo_create (u32 data_size_in_bytes) { svm_fifo_t *f; + u32 rounded_data_size; - f = clib_mem_alloc_aligned_or_null (sizeof (*f) + data_size_in_bytes, + /* always round fifo data size to the next highest power-of-two */ + rounded_data_size = (1 << (max_log2 (data_size_in_bytes))); + f = clib_mem_alloc_aligned_or_null (sizeof (*f) + rounded_data_size, CLIB_CACHE_LINE_BYTES); if (f == 0) return 0; diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index f10b4d91..84901d02 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -75,7 +75,8 @@ typedef struct _svm_fifo #if SVM_FIFO_TRACE svm_fifo_trace_elem_t *trace; #endif - i8 refcnt; + u32 freelist_index; /**< aka log2(allocated_size) - const. */ + i8 refcnt; /**< reference count */ CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index c04b9d8c..2094ba74 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -17,13 +17,54 @@ svm_fifo_segment_main_t svm_fifo_segment_main; +static void +allocate_new_fifo_chunk (svm_fifo_segment_header_t * fsh, + u32 data_size_in_bytes, int chunk_size) +{ + int freelist_index; + u32 size; + u8 *fifo_space; + u32 rounded_data_size; + svm_fifo_t *f; + int i; + + rounded_data_size = (1 << (max_log2 (data_size_in_bytes))); + freelist_index = max_log2 (rounded_data_size) + - max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE); + + /* Calculate space requirement $$$ round-up data_size_in_bytes */ + size = (sizeof (*f) + rounded_data_size) * chunk_size; + + /* Allocate fifo space. May fail. */ + fifo_space = clib_mem_alloc_aligned_at_offset + (size, CLIB_CACHE_LINE_BYTES, 0 /* align_offset */ , + 0 /* os_out_of_memory */ ); + + /* Out of space.. 
*/ + if (fifo_space == 0) + return; + + /* Carve fifo space */ + f = (svm_fifo_t *) fifo_space; + for (i = 0; i < chunk_size; i++) + { + f->freelist_index = freelist_index; + f->next = fsh->free_fifos[freelist_index]; + fsh->free_fifos[freelist_index] = f; + fifo_space += sizeof (*f) + rounded_data_size; + f = (svm_fifo_t *) fifo_space; + } +} + static void preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, svm_fifo_segment_create_args_t * a) { u32 rx_fifo_size, tx_fifo_size; + u32 rx_rounded_data_size, tx_rounded_data_size; svm_fifo_t *f; u8 *rx_fifo_space, *tx_fifo_space; + int rx_freelist_index, tx_freelist_index; int i; /* Parameter check */ @@ -31,10 +72,39 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, || a->preallocated_fifo_pairs == 0) return; - /* Calculate space requirements */ - rx_fifo_size = (sizeof (*f) + a->rx_fifo_size) * a->preallocated_fifo_pairs; - tx_fifo_size = (sizeof (*f) + a->tx_fifo_size) * a->preallocated_fifo_pairs; + if (a->rx_fifo_size < FIFO_SEGMENT_MIN_FIFO_SIZE || + a->rx_fifo_size > FIFO_SEGMENT_MAX_FIFO_SIZE) + { + clib_warning ("rx fifo_size out of range %d", a->rx_fifo_size); + return; + } + + if (a->tx_fifo_size < FIFO_SEGMENT_MIN_FIFO_SIZE || + a->tx_fifo_size > FIFO_SEGMENT_MAX_FIFO_SIZE) + { + clib_warning ("tx fifo_size out of range %d", a->rx_fifo_size); + return; + } + + rx_rounded_data_size = (1 << (max_log2 (a->rx_fifo_size))); + + rx_freelist_index = max_log2 (a->rx_fifo_size) + - max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE); + + tx_rounded_data_size = (1 << (max_log2 (a->rx_fifo_size))); + + tx_freelist_index = max_log2 (a->tx_fifo_size) + - max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE); + /* Calculate space requirements */ + rx_fifo_size = (sizeof (*f) + rx_rounded_data_size) + * a->preallocated_fifo_pairs; + tx_fifo_size = (sizeof (*f) + tx_rounded_data_size) + * a->preallocated_fifo_pairs; + + vec_validate_init_empty (fsh->free_fifos, + clib_max (rx_freelist_index, tx_freelist_index), + 0); if (0) clib_warning ("rx_fifo_size %u (%d mb), tx_fifo_size %u (%d mb)", rx_fifo_size, rx_fifo_size >> 20, @@ -71,18 +141,20 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, f = (svm_fifo_t *) rx_fifo_space; for (i = 0; i < a->preallocated_fifo_pairs; i++) { - f->next = fsh->free_fifos[FIFO_SEGMENT_RX_FREELIST]; - fsh->free_fifos[FIFO_SEGMENT_RX_FREELIST] = f; - rx_fifo_space += sizeof (*f) + a->rx_fifo_size; + f->freelist_index = rx_freelist_index; + f->next = fsh->free_fifos[rx_freelist_index]; + fsh->free_fifos[rx_freelist_index] = f; + rx_fifo_space += sizeof (*f) + rx_rounded_data_size; f = (svm_fifo_t *) rx_fifo_space; } /* Carve tx fifo space */ f = (svm_fifo_t *) tx_fifo_space; for (i = 0; i < a->preallocated_fifo_pairs; i++) { - f->next = fsh->free_fifos[FIFO_SEGMENT_TX_FREELIST]; - fsh->free_fifos[FIFO_SEGMENT_TX_FREELIST] = f; - tx_fifo_space += sizeof (*f) + a->tx_fifo_size; + f->freelist_index = tx_freelist_index; + f->next = fsh->free_fifos[tx_freelist_index]; + fsh->free_fifos[tx_freelist_index] = f; + tx_fifo_space += sizeof (*f) + tx_rounded_data_size; f = (svm_fifo_t *) tx_fifo_space; } } @@ -277,6 +349,21 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, svm_fifo_segment_header_t *fsh; svm_fifo_t *f; void *oldheap; + int freelist_index; + + /* + * 2K minimum. It's not likely that anything good will happen + * with a 1K FIFO. 
+ */ + if (data_size_in_bytes < FIFO_SEGMENT_MIN_FIFO_SIZE || + data_size_in_bytes > FIFO_SEGMENT_MAX_FIFO_SIZE) + { + clib_warning ("fifo size out of range %d", data_size_in_bytes); + return 0; + } + + freelist_index = max_log2 (data_size_in_bytes) + - max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE); sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; @@ -288,15 +375,24 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, { case FIFO_SEGMENT_RX_FREELIST: case FIFO_SEGMENT_TX_FREELIST: - f = fsh->free_fifos[list_index]; - if (f) + vec_validate_init_empty (fsh->free_fifos, freelist_index, 0); + + f = fsh->free_fifos[freelist_index]; + if (PREDICT_FALSE (f == 0)) { - fsh->free_fifos[list_index] = f->next; + allocate_new_fifo_chunk (fsh, data_size_in_bytes, + FIFO_SEGMENT_ALLOC_CHUNK_SIZE); + f = fsh->free_fifos[freelist_index]; + } + if (PREDICT_TRUE (f != 0)) + { + fsh->free_fifos[freelist_index] = f->next; /* (re)initialize the fifo, as in svm_fifo_create */ memset (f, 0, sizeof (*f)); f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; f->refcnt = 1; + f->freelist_index = freelist_index; goto found; } /* FALLTHROUGH */ @@ -316,6 +412,7 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, ssvm_unlock_non_recursive (sh); return (0); } + f->freelist_index = freelist_index; found: /* If rx_freelist add to active fifos list. When cleaning up segment, @@ -344,6 +441,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, ssvm_shared_header_t *sh; svm_fifo_segment_header_t *fsh; void *oldheap; + int freelist_index; ASSERT (f->refcnt > 0); @@ -353,6 +451,10 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + freelist_index = f->freelist_index; + + ASSERT (freelist_index > 0 && freelist_index < vec_len (fsh->free_fifos)); + ssvm_lock_non_recursive (sh, 2); oldheap = ssvm_push_heap (sh); @@ -369,9 +471,9 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, /* Fall through: we add only rx fifos to active pool */ case FIFO_SEGMENT_TX_FREELIST: /* Add to free list */ - f->next = fsh->free_fifos[list_index]; + f->next = fsh->free_fifos[freelist_index]; f->prev = 0; - fsh->free_fifos[list_index] = f; + fsh->free_fifos[freelist_index] = f; break; case FIFO_SEGMENT_FREELIST_NONE: break; diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index a7a3f469..68bb4d3d 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -27,11 +27,15 @@ typedef enum FIFO_SEGMENT_N_FREELISTS } svm_fifo_segment_freelist_t; +#define FIFO_SEGMENT_MIN_FIFO_SIZE 2048 +#define FIFO_SEGMENT_MAX_FIFO_SIZE (8<<20) /* 8mb max fifo size */ +#define FIFO_SEGMENT_ALLOC_CHUNK_SIZE 32 /* allocation quantum */ + typedef struct { svm_fifo_t *fifos; /**< Linked list of active RX fifos */ u8 *segment_name; /**< Segment name */ - svm_fifo_t *free_fifos[FIFO_SEGMENT_N_FREELISTS]; /**< Free lists */ + svm_fifo_t **free_fifos; /**< Freelists, by fifo size */ } svm_fifo_segment_header_t; typedef struct diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 991bcd5a..533a6c22 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -266,7 +266,13 @@ stream_session_enqueue_notify (stream_session_t * s, u8 block) return 0; /* Get session's server */ - app = application_get (s->app_index); + app = application_get_if_valid (s->app_index); + + if (PREDICT_FALSE (app == 0)) + { + 
clib_warning ("invalid s->app_index = %d", s->app_index); + return 0; + } /* Built-in server? Hand event to the callback... */ if (app->cb_fns.builtin_server_rx_callback) @@ -327,8 +333,9 @@ session_manager_flush_enqueue_events (u32 thread_index) stream_session_t *s0; /* Get session */ - s0 = stream_session_get (session_indices_to_enqueue[i], thread_index); - if (stream_session_enqueue_notify (s0, 0 /* don't block */ )) + s0 = stream_session_get_if_valid (session_indices_to_enqueue[i], + thread_index); + if (s0 == 0 || stream_session_enqueue_notify (s0, 0 /* don't block */ )) { errors++; } diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 938e07ba..5fa54460 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -713,6 +713,20 @@ cleanup: pool_free (tm->sessions); + /* Detach the application, so we can use different fifo sizes next time */ + if (tm->test_client_attached) + { + vnet_app_detach_args_t _da, *da = &_da; + int rv; + + da->app_index = tm->app_index; + + rv = vnet_application_detach (da); + if (rv) + vlib_cli_output (vm, "WARNING: app detach failed..."); + tm->test_client_attached = 0; + tm->app_index = ~0; + } return 0; } -- cgit 1.2.3-korg From 5beec81360146536086f1996869b4ee32ca37ddc Mon Sep 17 00:00:00 2001 From: Jan Srnicek Date: Fri, 24 Mar 2017 10:18:11 +0100 Subject: jvpp: make shm_prefix configurable (VPP-591) svm.c - set default map region root path only if root path is not already present memory_shared.c - added option for tests to send memory region name and root path in one variable, if so name and root path are separated here and set to map region structure so find function can find it properly jvpp-registry.c - added parameters shmPrefix to be able pass + removed sudo restriction specific shared memory prefix that is used while starting python tests(see framework.py) JVppRegistyImpl - added option to specify shmPrefix VppJNIConnection - added option to specify shmPrefix Change-Id: I3f89f867fb9b20eef00fbd497cb0e41b25d6eab7 Signed-off-by: Jan Srnicek Signed-off-by: Matej Perina --- src/svm/svm.c | 3 +- src/vlibmemory/memory_shared.c | 19 ++++++++++-- .../io/fd/vpp/jvpp/JVppRegistryImpl.java | 7 +++++ .../io/fd/vpp/jvpp/VppJNIConnection.java | 23 +++++++++++---- src/vpp-api/java/jvpp-registry/jvpp_registry.c | 34 ++++++++++++---------- 5 files changed, 62 insertions(+), 24 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm.c b/src/svm/svm.c index 600fa744..0442ecb2 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -862,7 +862,8 @@ svm_region_find_or_create (svm_map_region_args_t * a) ASSERT (mp); /* Map the named region from the correct chroot environment */ - a->root_path = (char *) mp->root_path; + if (a->root_path == NULL) + a->root_path = (char *) mp->root_path; /* * See if this region is already known. 
If it is, we're diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index 41aa1231..9bab6573 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -341,12 +341,25 @@ vl_map_shmem (const char *region_name, int is_vlib) struct timespec ts, tsrem; u32 vlib_input_queue_length; + memset (a, 0, sizeof (*a)); + + if (strstr (region_name, "-vpe-api")) + { + char root_path[strlen (region_name)]; + strncpy (root_path, region_name, strlen (region_name) - 8); + a->root_path = root_path; + am->root_path = root_path; + } + if (is_vlib == 0) svm_region_init_chroot (am->root_path); - memset (a, 0, sizeof (*a)); - - a->name = region_name; + if (a->root_path != NULL) + { + a->name = "/vpe-api"; + } + else + a->name = region_name; a->size = am->api_size ? am->api_size : (16 << 20); a->flags = SVM_FLAGS_MHEAP; a->uid = am->api_uid; diff --git a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java index 98ef1c15..6e938ae3 100644 --- a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java +++ b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/JVppRegistryImpl.java @@ -48,6 +48,13 @@ public final class JVppRegistryImpl implements JVppRegistry, ControlPingCallback pingCalls = new HashMap<>(); } + public JVppRegistryImpl(final String clientName, final String shmPrefix) throws IOException { + connection = new VppJNIConnection(clientName, shmPrefix); + connection.connect(); + pluginRegistry = new ConcurrentHashMap<>(); + pingCalls = new HashMap<>(); + } + @Override public VppConnection getConnection() { return connection; diff --git a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java index 320c1283..53eaa790 100644 --- a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java +++ b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/VppJNIConnection.java @@ -17,8 +17,11 @@ package io.fd.vpp.jvpp; import static io.fd.vpp.jvpp.NativeLibraryLoader.loadLibrary; +import static java.lang.String.format; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -30,13 +33,14 @@ import java.util.logging.Logger; */ public final class VppJNIConnection implements VppConnection { private static final Logger LOG = Logger.getLogger(VppJNIConnection.class.getName()); + private static final String DEFAULT_SHM_PREFIX = "/vpe-api"; static { final String libName = "libjvpp_registry.so"; try { loadLibrary(libName, VppJNIConnection.class); } catch (IOException e) { - LOG.log(Level.SEVERE, String.format("Can't find vpp jni library: %s", libName), e); + LOG.log(Level.SEVERE, format("Can't find vpp jni library: %s", libName), e); throw new ExceptionInInitializerError(e); } } @@ -44,6 +48,7 @@ public final class VppJNIConnection implements VppConnection { private ConnectionInfo connectionInfo; private final String clientName; + private final String shmPrefix; private volatile boolean disconnected = false; /** @@ -54,6 +59,12 @@ public final class VppJNIConnection implements VppConnection { */ public VppJNIConnection(final String clientName) { this.clientName = Objects.requireNonNull(clientName, "Null clientName"); + this.shmPrefix = DEFAULT_SHM_PREFIX; + } + + public VppJNIConnection(final String clientName, final String shmPrefix) { + this.clientName = Objects.requireNonNull(clientName, "Null 
clientName"); + this.shmPrefix = Objects.requireNonNull(shmPrefix, "Null shmPrefix"); } /** @@ -73,16 +84,18 @@ public final class VppJNIConnection implements VppConnection { @Override public void connect() throws IOException { - _connect(); + _connect(shmPrefix); } - private void _connect() throws IOException { + private void _connect(final String shmPrefix) throws IOException { + Objects.requireNonNull(shmPrefix, "Shared memory prefix must be defined"); + synchronized (VppJNIConnection.class) { if (connections.containsKey(clientName)) { throw new IOException("Client " + clientName + " already connected"); } - connectionInfo = clientConnect(clientName); + connectionInfo = clientConnect(shmPrefix, clientName); if (connectionInfo.status != 0) { throw new IOException("Connection returned error " + connectionInfo.status); } @@ -130,7 +143,7 @@ public final class VppJNIConnection implements VppConnection { } } - private static native ConnectionInfo clientConnect(String clientName); + private static native ConnectionInfo clientConnect(String shmPrefix, String clientName); private static native void clientDisconnect(); diff --git a/src/vpp-api/java/jvpp-registry/jvpp_registry.c b/src/vpp-api/java/jvpp-registry/jvpp_registry.c index 66adfea0..1e2c0176 100644 --- a/src/vpp-api/java/jvpp-registry/jvpp_registry.c +++ b/src/vpp-api/java/jvpp-registry/jvpp_registry.c @@ -243,14 +243,13 @@ static int send_initial_control_ping() { return rv; } -static int connect_to_vpe(char *name) { +static int connect_to_vpe(char *shm_prefix, char *name) { jvpp_main_t * jm = &jvpp_main; api_main_t * am = &api_main; jvpp_registry_main_t * rm = &jvpp_registry_main; - if (vl_client_connect_to_vlib("/vpe-api", name, 32) < 0) + if (vl_client_connect_to_vlib(shm_prefix, name, 32) < 0) return -1; - jm->my_client_index = am->my_client_index; jm->vl_input_queue = am->shmem_hdr->vl_input_queue; @@ -268,9 +267,15 @@ static int connect_to_vpe(char *name) { } JNIEXPORT jobject JNICALL Java_io_fd_vpp_jvpp_VppJNIConnection_clientConnect( - JNIEnv *env, jclass obj, jstring clientName) { + JNIEnv *env, jclass obj, jstring shmPrefix, jstring clientName) { + /* + * TODO introducing memory prefix as variable can be used in hc2vpp + * to be able to run without root privileges + * https://jira.fd.io/browse/HC2VPP-176 + */ int rv; const char *client_name; + const char *shm_prefix; void vl_msg_reply_handler_hookup(void); jvpp_main_t * jm = &jvpp_main; jvpp_registry_main_t * rm = &jvpp_registry_main; @@ -280,15 +285,6 @@ JNIEXPORT jobject JNICALL Java_io_fd_vpp_jvpp_VppJNIConnection_clientConnect( jmethodID connectionInfoConstructor = (*env)->GetMethodID(env, connectionInfoClass, "", "(JII)V"); - /* - * Bail out now if we're not running as root - */ - if (geteuid() != 0) { - return (*env)->NewObject(env, connectionInfoClass, - connectionInfoConstructor, 0, 0, - VNET_API_ERROR_NOT_RUNNING_AS_ROOT); - } - if (rm->is_connected) { return (*env)->NewObject(env, connectionInfoClass, connectionInfoConstructor, 0, 0, @@ -296,17 +292,25 @@ JNIEXPORT jobject JNICALL Java_io_fd_vpp_jvpp_VppJNIConnection_clientConnect( } client_name = (*env)->GetStringUTFChars(env, clientName, 0); + shm_prefix = (*env)->GetStringUTFChars(env, shmPrefix, 0); + if (!client_name) { return (*env)->NewObject(env, connectionInfoClass, - connectionInfoConstructor, 0, 0, VNET_API_ERROR_INVALID_VALUE); + connectionInfoConstructor, 0, 0, VNET_API_ERROR_INVALID_VALUE, shmPrefix); + } + + if (!shm_prefix) { + return (*env)->NewObject(env, connectionInfoClass, + 
connectionInfoConstructor, 0, 0, VNET_API_ERROR_INVALID_VALUE, shmPrefix); } - rv = connect_to_vpe((char *) client_name); + rv = connect_to_vpe((char *) shm_prefix, (char *) client_name); if (rv < 0) clib_warning("connection failed, rv %d", rv); (*env)->ReleaseStringUTFChars(env, clientName, client_name); + (*env)->ReleaseStringUTFChars(env, shmPrefix, shm_prefix); return (*env)->NewObject(env, connectionInfoClass, connectionInfoConstructor, (jlong) pointer_to_uword (jm->vl_input_queue), -- cgit 1.2.3-korg From 3cdc25ffbaa572639f99e197172c568e4324bc03 Mon Sep 17 00:00:00 2001 From: Ole Troan Date: Thu, 17 Aug 2017 11:07:33 +0200 Subject: API: More gracefully fail when opening shared memory segment fails. API clients would fail with an ASSERT (and core dump) whenever the API shared memory segment could not be opened. This returns an error value to the client's connect instead. Change-Id: Id122a3a090b24b139c382ae09f341bde61fd2540 Signed-off-by: Ole Troan --- src/svm/svm.c | 13 ++++++++----- src/svm/svm_common.h | 2 +- src/vlibmemory/memory_shared.c | 8 ++++++-- 3 files changed, 15 insertions(+), 8 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm.c b/src/svm/svm.c index 0442ecb2..663324e0 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -733,7 +733,7 @@ svm_mutex_cleanup (void) } } -static void +static int svm_region_init_internal (svm_map_region_args_t * a) { svm_region_t *rp; @@ -742,7 +742,7 @@ svm_region_init_internal (svm_map_region_args_t * a) /* guard against klutz calls */ if (root_rp) - return; + return -1; root_rp_refcount++; @@ -757,7 +757,8 @@ svm_region_init_internal (svm_map_region_args_t * a) a->baseva += randomize_baseva; rp = svm_map_region (a); - ASSERT (rp); + if (!rp) + return -1; region_lock (rp, 3); @@ -778,6 +779,8 @@ svm_region_init_internal (svm_map_region_args_t * a) } region_unlock (rp); root_rp = rp; + + return 0; } void @@ -797,7 +800,7 @@ svm_region_init (void) svm_region_init_internal (a); } -void +int svm_region_init_chroot (const char *root_path) { svm_map_region_args_t _a, *a = &_a; @@ -811,7 +814,7 @@ svm_region_init_chroot (const char *root_path) a->uid = 0; a->gid = 0; - svm_region_init_internal (a); + return svm_region_init_internal (a); } void diff --git a/src/svm/svm_common.h b/src/svm/svm_common.h index 1f184432..1f6d83c0 100644 --- a/src/svm/svm_common.h +++ b/src/svm/svm_common.h @@ -110,7 +110,7 @@ typedef struct void *svm_region_find_or_create (svm_map_region_args_t * a); void svm_region_init (void); -void svm_region_init_chroot (const char *root_path); +int svm_region_init_chroot (const char *root_path); void svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid); void svm_region_init_args (svm_map_region_args_t * a); void svm_region_exit (void); diff --git a/src/vlibmemory/memory_shared.c b/src/vlibmemory/memory_shared.c index 9bab6573..fbdabd06 100644 --- a/src/vlibmemory/memory_shared.c +++ b/src/vlibmemory/memory_shared.c @@ -337,7 +337,7 @@ vl_map_shmem (const char *region_name, int is_vlib) void *oldheap; vl_shmem_hdr_t *shmem_hdr = 0; api_main_t *am = &api_main; - int i; + int i, rv; struct timespec ts, tsrem; u32 vlib_input_queue_length; @@ -352,7 +352,11 @@ vl_map_shmem (const char *region_name, int is_vlib) } if (is_vlib == 0) - svm_region_init_chroot (am->root_path); + { + rv = svm_region_init_chroot (am->root_path); + if (rv) + return rv; + } if (a->root_path != NULL) { -- cgit 1.2.3-korg From d135c19a1fde609b82be5a30413d6b9ab43811e3 Mon Sep 17 00:00:00 2001 From: Matej Perina Date: Tue, 18 Jul 2017 
13:59:41 +0200 Subject: jvpp: introducing callback api and future api tests for all plugins (VPP-591) test can be run with: make test TEST=test_jvpp memory_shared.c: declaring and assigning variable in if statement makes it usage outside statement impossible. Looks like memory space assigned to variable declared in statement is freed when statement ends svm.c: - fixed case when root path can have a "/" at beggining - added option for test to operate over shared memory space with /vpe-api name and not create new one with name consisting of root path and region name which would require root permisions Change-Id: Iff1170dc6a5c1be134c152f2757c7ab9b919a8ed Signed-off-by: Matej Perina --- src/svm/svm.c | 2 +- .../io/fd/vpp/jvpp/acl/test/CallbackApiTest.java | 33 +++++++++ .../io/fd/vpp/jvpp/acl/test/FutureApiTest.java | 62 ++++++++++++++++ .../jvpp-acl/io/fd/vpp/jvpp/acl/test/Readme.txt | 4 ++ .../io/fd/vpp/jvpp/core/test/CallbackApiTest.java | 33 +++++++++ .../io/fd/vpp/jvpp/core/test/FutureApiTest.java | 79 +++++++++++++++++++++ .../jvpp-core/io/fd/vpp/jvpp/core/test/Readme.txt | 18 +++++ .../vpp/jvpp/ioamexport/test/CallbackApiTest.java | 33 +++++++++ .../fd/vpp/jvpp/ioamexport/test/FutureApiTest.java | 60 ++++++++++++++++ .../io/fd/vpp/jvpp/ioamexport/test/Readme.txt | 4 ++ .../fd/vpp/jvpp/ioampot/test/CallbackApiTest.java | 33 +++++++++ .../io/fd/vpp/jvpp/ioampot/test/FutureApiTest.java | 66 +++++++++++++++++ .../io/fd/vpp/jvpp/ioampot/test/Readme.txt | 4 ++ .../vpp/jvpp/ioamtrace/test/CallbackApiTest.java | 33 +++++++++ .../fd/vpp/jvpp/ioamtrace/test/FutureApiTest.java | 60 ++++++++++++++++ .../io/fd/vpp/jvpp/ioamtrace/test/Readme.txt | 4 ++ .../io/fd/vpp/jvpp/nat/test/CallbackApiTest.java | 33 +++++++++ .../io/fd/vpp/jvpp/nat/test/FutureApiTest.java | 66 +++++++++++++++++ .../jvpp-nat/io/fd/vpp/jvpp/nat/test/Readme.txt | 4 ++ .../io/fd/vpp/jvpp/AbstractCallbackApiTest.java | 63 +++++++++++++++++ .../jvpp-registry/io/fd/vpp/jvpp/Assertions.java | 32 +++++++++ test/jvpp_connection.py | 54 ++++++++++++++ test/test_jvpp.py | 82 ++++++++++++++++++++++ 23 files changed, 861 insertions(+), 1 deletion(-) create mode 100644 src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/CallbackApiTest.java create mode 100644 src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/FutureApiTest.java create mode 100644 src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/Readme.txt create mode 100644 src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/CallbackApiTest.java create mode 100644 src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/FutureApiTest.java create mode 100644 src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/Readme.txt create mode 100644 src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/CallbackApiTest.java create mode 100644 src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/FutureApiTest.java create mode 100644 src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/Readme.txt create mode 100644 src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/CallbackApiTest.java create mode 100644 src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/FutureApiTest.java create mode 100644 src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/Readme.txt create mode 100644 src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/CallbackApiTest.java create mode 100644 src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/FutureApiTest.java create mode 100644 src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/Readme.txt create mode 100644 
src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/CallbackApiTest.java create mode 100644 src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/FutureApiTest.java create mode 100644 src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/Readme.txt create mode 100644 src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/AbstractCallbackApiTest.java create mode 100644 src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/Assertions.java create mode 100644 test/jvpp_connection.py create mode 100644 test/test_jvpp.py (limited to 'src/svm') diff --git a/src/svm/svm.c b/src/svm/svm.c index 663324e0..14c5bd9b 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -426,7 +426,7 @@ shm_name_from_svm_map_region_args (svm_map_region_args_t * a) if (a->name[0] == '/') name_offset = 1; - shm_name = format (0, "/%s-%s%c", a->root_path, + shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset], &a->name[name_offset], 0); } else diff --git a/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/CallbackApiTest.java b/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/CallbackApiTest.java new file mode 100644 index 00000000..a7bbb7f4 --- /dev/null +++ b/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/CallbackApiTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.fd.vpp.jvpp.acl.test; + +import io.fd.vpp.jvpp.AbstractCallbackApiTest; +import io.fd.vpp.jvpp.acl.JVppAclImpl; + +import java.util.logging.Logger; + +public class CallbackApiTest extends AbstractCallbackApiTest { + + private static Logger LOG = Logger.getLogger(CallbackApiTest.class.getName()); + + + public static void main(String[] args) throws Exception { + LOG.info("Testing ControlPing using Java callback API for core plugin"); + testControlPing(args[0], new JVppAclImpl()); + } +} diff --git a/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/FutureApiTest.java b/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/FutureApiTest.java new file mode 100644 index 00000000..ff1c73c4 --- /dev/null +++ b/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/FutureApiTest.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp.acl.test; + +import io.fd.vpp.jvpp.Assertions; +import io.fd.vpp.jvpp.JVppRegistry; +import io.fd.vpp.jvpp.JVppRegistryImpl; +import io.fd.vpp.jvpp.acl.JVppAclImpl; +import io.fd.vpp.jvpp.acl.dto.AclDetailsReplyDump; +import io.fd.vpp.jvpp.acl.dto.AclDump; +import io.fd.vpp.jvpp.acl.future.FutureJVppAclFacade; + +import java.util.concurrent.CompletableFuture; +import java.util.logging.Logger; + +public class FutureApiTest { + + private static final Logger LOG = Logger.getLogger(FutureApiTest.class.getName()); + + public static void main(String[] args) throws Exception { + testFutureApi(args); + } + + private static void testFutureApi(String[] args) throws Exception { + LOG.info("Testing Java future API for core plugin"); + try (final JVppRegistry registry = new JVppRegistryImpl("FutureApiTest", args[0]); + final FutureJVppAclFacade jvppFacade = new FutureJVppAclFacade(registry, new JVppAclImpl())) { + LOG.info("Successfully connected to VPP"); + + testAclDump(jvppFacade); + + LOG.info("Disconnecting..."); + } + } + + private static void testAclDump(final FutureJVppAclFacade jvpp) throws Exception { + LOG.info("Sending AclDump request..."); + final AclDump request = new AclDump(); + + final CompletableFuture + replyFuture = jvpp.aclDump(request).toCompletableFuture(); + final AclDetailsReplyDump reply = replyFuture.get(); + + Assertions.assertNotNull(reply); + } + + +} diff --git a/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/Readme.txt b/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/Readme.txt new file mode 100644 index 00000000..1b465851 --- /dev/null +++ b/src/vpp-api/java/jvpp-acl/io/fd/vpp/jvpp/acl/test/Readme.txt @@ -0,0 +1,4 @@ +release version: +sudo java -cp build-vpp-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp-native/vpp/vpp-api/java/jvpp-acl-17.10.jar io.fd.vpp.jvpp.acl.test.[test-name] +debug version: +sudo java -cp build-vpp_debug-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-acl-17.10.jar io.fd.vpp.jvpp.acl.test.[test-name] diff --git a/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/CallbackApiTest.java b/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/CallbackApiTest.java new file mode 100644 index 00000000..493116c8 --- /dev/null +++ b/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/CallbackApiTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp.core.test; + +import io.fd.vpp.jvpp.AbstractCallbackApiTest; +import io.fd.vpp.jvpp.core.JVppCoreImpl; + +import java.util.logging.Logger; + +public class CallbackApiTest extends AbstractCallbackApiTest { + + private static Logger LOG = Logger.getLogger(CallbackApiTest.class.getName()); + + + public static void main(String[] args) throws Exception { + LOG.info("Testing ControlPing using Java callback API for core plugin"); + testControlPing(args[0], new JVppCoreImpl()); + } +} diff --git a/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/FutureApiTest.java b/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/FutureApiTest.java new file mode 100644 index 00000000..d3acecc2 --- /dev/null +++ b/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/FutureApiTest.java @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.fd.vpp.jvpp.core.test; + +import io.fd.vpp.jvpp.JVppRegistry; +import io.fd.vpp.jvpp.JVppRegistryImpl; +import io.fd.vpp.jvpp.core.JVppCoreImpl; +import io.fd.vpp.jvpp.core.dto.BridgeDomainDetailsReplyDump; +import io.fd.vpp.jvpp.core.dto.BridgeDomainDump; +import io.fd.vpp.jvpp.core.dto.GetNodeIndex; +import io.fd.vpp.jvpp.core.dto.GetNodeIndexReply; +import io.fd.vpp.jvpp.core.dto.ShowVersion; +import io.fd.vpp.jvpp.core.dto.ShowVersionReply; +import io.fd.vpp.jvpp.core.dto.SwInterfaceDetails; +import io.fd.vpp.jvpp.core.dto.SwInterfaceDetailsReplyDump; +import io.fd.vpp.jvpp.core.dto.SwInterfaceDump; +import io.fd.vpp.jvpp.core.future.FutureJVppCoreFacade; +import java.nio.charset.StandardCharsets; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Future; +import java.util.logging.Level; +import java.util.logging.Logger; + +public class FutureApiTest { + + private static final Logger LOG = Logger.getLogger(FutureApiTest.class.getName()); + + public static void main(String[] args) throws Exception { + testFutureApi(args); + } + + private static void testFutureApi(String[] args) throws Exception { + LOG.info("Testing Java future API for core plugin"); + try (final JVppRegistry registry = new JVppRegistryImpl("FutureApiTest", args[0]); + final FutureJVppCoreFacade jvppFacade = new FutureJVppCoreFacade(registry, new JVppCoreImpl())) { + LOG.info("Successfully connected to VPP"); + + testEmptyBridgeDomainDump(jvppFacade); + + LOG.info("Disconnecting..."); + } + } + + private static void testEmptyBridgeDomainDump(final FutureJVppCoreFacade jvpp) throws Exception { + LOG.info("Sending BridgeDomainDump request..."); + final BridgeDomainDump request = new BridgeDomainDump(); + request.bdId = -1; // dump call + + final CompletableFuture + replyFuture = jvpp.bridgeDomainDump(request).toCompletableFuture(); + final BridgeDomainDetailsReplyDump reply = replyFuture.get(); + + if (reply == null || reply.bridgeDomainDetails == null) { + throw new IllegalStateException("Received null response for empty dump: " + 
reply); + } else { + LOG.info( + String.format( + "Received bridge-domain dump reply with list of bridge-domains: %s", + reply.bridgeDomainDetails)); + } + } + + +} diff --git a/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/Readme.txt b/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/Readme.txt new file mode 100644 index 00000000..b74cf60a --- /dev/null +++ b/src/vpp-api/java/jvpp-core/io/fd/vpp/jvpp/core/test/Readme.txt @@ -0,0 +1,18 @@ +This package contains basic tests for jvpp. To run the tests: + +- Make sure VPP is running +- From VPP's build-root/ folder execute: + - release version: sudo java -cp build-vpp-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp-native/vpp/vpp-api/java/jvpp-core-17.10.jar io.fd.vpp.jvpp.core.test.[test name] + - debug version: sudo java -cp build-vpp_debug-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-core-17.10.jar io.fd.vpp.jvpp.core.test.[test name] + +Available tests: +CallbackApiTest - Similar to ControlPingTest, invokes more complex calls (e.g. interface dump) using low level JVpp APIs +CallbackJVppFacadeNotificationTest - Tests interface notifications using Callback based JVpp facade +CallbackJVppFacadeTest - Execution of more complex calls using Callback based JVpp facade +CallbackNotificationApiTest - Tests interface notifications using low level JVpp APIs +ControlPingTest - Simple test executing a single control ping using low level JVpp APIs +CreateSubInterfaceTest - Tests sub-interface creation +FutureApiNotificationTest - Tests interface notifications using Future based JVpp facade +FutureApiTest - Execution of more complex calls using Future based JVpp facade +L2AclTest - Tests L2 ACL creation +LispAdjacencyTest - Tests lisp adjacency creation and read (custom vpe.api type support showcase) diff --git a/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/CallbackApiTest.java b/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/CallbackApiTest.java new file mode 100644 index 00000000..ba49d77d --- /dev/null +++ b/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/CallbackApiTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp.ioamexport.test; + +import io.fd.vpp.jvpp.AbstractCallbackApiTest; +import io.fd.vpp.jvpp.ioamexport.JVppIoamexportImpl; + +import java.util.logging.Logger; + + +public class CallbackApiTest extends AbstractCallbackApiTest { + + private static Logger LOG = Logger.getLogger(CallbackApiTest.class.getName()); + + public static void main(String[] args) throws Exception { + LOG.info("Testing ControlPing using Java callback API for ioamexport plugin"); + testControlPing(args[0], new JVppIoamexportImpl()); + } +} diff --git a/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/FutureApiTest.java b/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/FutureApiTest.java new file mode 100644 index 00000000..048d2445 --- /dev/null +++ b/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/FutureApiTest.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.fd.vpp.jvpp.ioamexport.test; + + +import io.fd.vpp.jvpp.Assertions; +import io.fd.vpp.jvpp.JVppRegistry; +import io.fd.vpp.jvpp.JVppRegistryImpl; +import io.fd.vpp.jvpp.ioamexport.JVppIoamexportImpl; +import io.fd.vpp.jvpp.ioamexport.dto.IoamExportIp6EnableDisable; +import io.fd.vpp.jvpp.ioamexport.dto.IoamExportIp6EnableDisableReply; +import io.fd.vpp.jvpp.ioamexport.future.FutureJVppIoamexportFacade; + +import java.util.concurrent.Future; +import java.util.logging.Logger; + +public class FutureApiTest { + + private static final Logger LOG = Logger.getLogger(FutureApiTest.class.getName()); + + public static void main(String[] args) throws Exception { + testCallbackApi(args); + } + + private static void testCallbackApi(String[] args) throws Exception { + LOG.info("Testing Java callback API for ioamexport plugin"); + try (final JVppRegistry registry = new JVppRegistryImpl("FutureApiTest", args[0]); + final FutureJVppIoamexportFacade jvpp = new FutureJVppIoamexportFacade(registry, new JVppIoamexportImpl())) { + LOG.info("Successfully connected to VPP"); + + testIoamExportIp6EnableDisable(jvpp); + + LOG.info("Disconnecting..."); + } + } + + private static void testIoamExportIp6EnableDisable(FutureJVppIoamexportFacade jvpp) throws Exception { + LOG.info("Sending IoamExportIp6EnableDisable request..."); + final IoamExportIp6EnableDisable request = new IoamExportIp6EnableDisable(); + + final Future replyFuture = jvpp.ioamExportIp6EnableDisable(request).toCompletableFuture(); + final IoamExportIp6EnableDisableReply reply = replyFuture.get(); + + Assertions.assertNotNull(reply); + } +} diff --git a/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/Readme.txt b/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/Readme.txt new file mode 100644 index 00000000..820071a8 --- /dev/null +++ b/src/vpp-api/java/jvpp-ioamexport/io/fd/vpp/jvpp/ioamexport/test/Readme.txt @@ -0,0 +1,4 @@ +release version: +sudo java -cp 
build-vpp-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-ioamexport-17.10.jar io.fd.vpp.jvpp.ioamexport.test.[test-name] +debug version: +sudo java -cp build-vpp_debug-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-ioamexport-17.10.jar io.fd.vpp.jvpp.ioamexport.test.[test-name] diff --git a/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/CallbackApiTest.java b/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/CallbackApiTest.java new file mode 100644 index 00000000..20b85d89 --- /dev/null +++ b/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/CallbackApiTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.fd.vpp.jvpp.ioampot.test; + +import io.fd.vpp.jvpp.AbstractCallbackApiTest; +import io.fd.vpp.jvpp.ioampot.JVppIoampotImpl; + +import java.util.logging.Logger; + +public class CallbackApiTest extends AbstractCallbackApiTest { + + private static Logger LOG = Logger.getLogger(CallbackApiTest.class.getName()); + + + public static void main(String[] args) throws Exception { + LOG.info("Testing ControlPing using Java callback API for ioampot plugin"); + testControlPing(args[0], new JVppIoampotImpl()); + } +} diff --git a/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/FutureApiTest.java b/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/FutureApiTest.java new file mode 100644 index 00000000..6401c678 --- /dev/null +++ b/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/FutureApiTest.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp.ioampot.test; + + +import io.fd.vpp.jvpp.JVppRegistry; +import io.fd.vpp.jvpp.JVppRegistryImpl; +import io.fd.vpp.jvpp.ioampot.JVppIoampotImpl; +import io.fd.vpp.jvpp.ioampot.dto.PotProfileShowConfigDetailsReplyDump; +import io.fd.vpp.jvpp.ioampot.dto.PotProfileShowConfigDump; +import io.fd.vpp.jvpp.ioampot.future.FutureJVppIoampotFacade; + +import java.util.concurrent.Future; +import java.util.logging.Logger; + +public class FutureApiTest { + + private static final Logger LOG = Logger.getLogger(io.fd.vpp.jvpp.ioampot.test.FutureApiTest.class.getName()); + + public static void main(String[] args) throws Exception { + testCallbackApi(args); + } + + private static void testCallbackApi(String[] args) throws Exception { + LOG.info("Testing Java callback API for ioampot plugin"); + try (final JVppRegistry registry = new JVppRegistryImpl("FutureApiTest", args[0]); + final FutureJVppIoampotFacade jvpp = new FutureJVppIoampotFacade(registry, new JVppIoampotImpl())) { + LOG.info("Successfully connected to VPP"); + + testPotProfileShowConfigDump(jvpp); + + LOG.info("Disconnecting..."); + } + } + + private static void testPotProfileShowConfigDump(FutureJVppIoampotFacade jvpp) throws Exception { + LOG.info("Sending PotProfileShowConfigDump request..."); + final PotProfileShowConfigDump request = new PotProfileShowConfigDump(); + + final Future replyFuture = jvpp.potProfileShowConfigDump(request).toCompletableFuture(); + final PotProfileShowConfigDetailsReplyDump reply = replyFuture.get(); + + if (reply == null || reply.potProfileShowConfigDetails == null) { + throw new IllegalStateException("Received null response for empty dump: " + reply); + } else { + LOG.info( + String.format( + "Received pot profile show config dump reply: %s", + reply.potProfileShowConfigDetails)); + } + } +} diff --git a/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/Readme.txt b/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/Readme.txt new file mode 100644 index 00000000..f3cae262 --- /dev/null +++ b/src/vpp-api/java/jvpp-ioampot/io/fd/vpp/jvpp/ioampot/test/Readme.txt @@ -0,0 +1,4 @@ +release version: +sudo java -cp build-vpp-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-ioampot-17.10.jar io.fd.vpp.jvpp.ioampot.test.[test-name] +debug version: +sudo java -cp build-vpp_debug-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-ioampot-17.10.jar io.fd.vpp.jvpp.ioampot.test.[test-name] diff --git a/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/CallbackApiTest.java b/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/CallbackApiTest.java new file mode 100644 index 00000000..4a71db52 --- /dev/null +++ b/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/CallbackApiTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp.ioamtrace.test; + +import io.fd.vpp.jvpp.AbstractCallbackApiTest; +import io.fd.vpp.jvpp.ioamtrace.JVppIoamtraceImpl; + +import java.util.logging.Logger; + +public class CallbackApiTest extends AbstractCallbackApiTest { + + private static Logger LOG = Logger.getLogger(CallbackApiTest.class.getName()); + + + public static void main(String[] args) throws Exception { + LOG.info("Testing ControlPing using Java callback API for ioamtrace plugin"); + testControlPing(args[0], new JVppIoamtraceImpl()); + } +} diff --git a/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/FutureApiTest.java b/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/FutureApiTest.java new file mode 100644 index 00000000..4e13ed1f --- /dev/null +++ b/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/FutureApiTest.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.fd.vpp.jvpp.ioamtrace.test; + + +import io.fd.vpp.jvpp.Assertions; +import io.fd.vpp.jvpp.JVppRegistry; +import io.fd.vpp.jvpp.JVppRegistryImpl; +import io.fd.vpp.jvpp.ioamtrace.JVppIoamtraceImpl; +import io.fd.vpp.jvpp.ioamtrace.dto.TraceProfileShowConfig; +import io.fd.vpp.jvpp.ioamtrace.dto.TraceProfileShowConfigReply; +import io.fd.vpp.jvpp.ioamtrace.future.FutureJVppIoamtraceFacade; + +import java.util.concurrent.Future; +import java.util.logging.Logger; + +public class FutureApiTest { + + private static final Logger LOG = Logger.getLogger(io.fd.vpp.jvpp.ioamtrace.test.FutureApiTest.class.getName()); + + public static void main(String[] args) throws Exception { + testCallbackApi(args); + } + + private static void testCallbackApi(String[] args) throws Exception { + LOG.info("Testing Java callback API for ioamtrace plugin"); + try (final JVppRegistry registry = new JVppRegistryImpl("FutureApiTest", args[0]); + final FutureJVppIoamtraceFacade jvpp = new FutureJVppIoamtraceFacade(registry, new JVppIoamtraceImpl())) { + LOG.info("Successfully connected to VPP"); + + testTraceProfileShowConfig(jvpp); + + LOG.info("Disconnecting..."); + } + } + + private static void testTraceProfileShowConfig(FutureJVppIoamtraceFacade jvpp) throws Exception { + LOG.info("Sending TraceProfileShowConfig request..."); + final TraceProfileShowConfig request = new TraceProfileShowConfig(); + + final Future replyFuture = jvpp.traceProfileShowConfig(request).toCompletableFuture(); + final TraceProfileShowConfigReply reply = replyFuture.get(); + + Assertions.assertNotNull(reply); + } +} diff --git a/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/Readme.txt b/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/Readme.txt new file mode 100644 index 00000000..9a1ba829 --- /dev/null +++ b/src/vpp-api/java/jvpp-ioamtrace/io/fd/vpp/jvpp/ioamtrace/test/Readme.txt @@ -0,0 +1,4 @@ +release version: +sudo java -cp 
build-vpp-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-ioamtrace-17.10.jar io.fd.vpp.jvpp.ioamtrace.test.[test-name] +debug version: +sudo java -cp build-vpp_debug-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-ioamtrace-17.10.jar io.fd.vpp.jvpp.ioamtrace.test.[test-name] diff --git a/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/CallbackApiTest.java b/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/CallbackApiTest.java new file mode 100644 index 00000000..a6f82148 --- /dev/null +++ b/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/CallbackApiTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.fd.vpp.jvpp.nat.test; + +import io.fd.vpp.jvpp.AbstractCallbackApiTest; +import io.fd.vpp.jvpp.nat.JVppNatImpl; + +import java.util.logging.Logger; + +public class CallbackApiTest extends AbstractCallbackApiTest { + + private static Logger LOG = Logger.getLogger(CallbackApiTest.class.getName()); + + + public static void main(String[] args) throws Exception { + LOG.info("Testing ControlPing using Java callback API for core plugin"); + testControlPing(args[0], new JVppNatImpl()); + } +} diff --git a/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/FutureApiTest.java b/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/FutureApiTest.java new file mode 100644 index 00000000..8643dcf4 --- /dev/null +++ b/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/FutureApiTest.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp.nat.test; + + +import io.fd.vpp.jvpp.JVppRegistry; +import io.fd.vpp.jvpp.JVppRegistryImpl; +import io.fd.vpp.jvpp.nat.JVppNatImpl; +import io.fd.vpp.jvpp.nat.dto.SnatAddressDetailsReplyDump; +import io.fd.vpp.jvpp.nat.dto.SnatAddressDump; +import io.fd.vpp.jvpp.nat.future.FutureJVppNatFacade; + +import java.util.concurrent.Future; +import java.util.logging.Logger; + +public class FutureApiTest { + + private static final Logger LOG = Logger.getLogger(io.fd.vpp.jvpp.nat.test.FutureApiTest.class.getName()); + + public static void main(String[] args) throws Exception { + testCallbackApi(args); + } + + private static void testCallbackApi(String[] args) throws Exception { + LOG.info("Testing Java callback API for snat plugin"); + try (final JVppRegistry registry = new JVppRegistryImpl("FutureApiTest", args[0]); + final FutureJVppNatFacade jvpp = new FutureJVppNatFacade(registry, new JVppNatImpl())) { + LOG.info("Successfully connected to VPP"); + + testAclDump(jvpp); + + LOG.info("Disconnecting..."); + } + } + + private static void testAclDump(FutureJVppNatFacade jvpp) throws Exception { + LOG.info("Sending SnatAddressDump request..."); + final SnatAddressDump request = new SnatAddressDump(); + + final Future replyFuture = jvpp.snatAddressDump(request).toCompletableFuture(); + final SnatAddressDetailsReplyDump reply = replyFuture.get(); + + if (reply == null || reply.snatAddressDetails == null) { + throw new IllegalStateException("Received null response for empty dump: " + reply); + } else { + LOG.info( + String.format( + "Received snat address dump reply with list of snat address: %s", + reply.snatAddressDetails)); + } + } +} diff --git a/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/Readme.txt b/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/Readme.txt new file mode 100644 index 00000000..6f758089 --- /dev/null +++ b/src/vpp-api/java/jvpp-nat/io/fd/vpp/jvpp/nat/test/Readme.txt @@ -0,0 +1,4 @@ +release version: +sudo java -cp build-vpp-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-nat-17.10.jar io.fd.vpp.jvpp.nat.test.[test-name] +debug version: +sudo java -cp build-vpp_debug-native/vpp/vpp-api/java/jvpp-registry-17.10.jar:build-vpp_debug-native/vpp/vpp-api/java/jvpp-nat-17.10.jar io.fd.vpp.jvpp.nat.test.[test-name] diff --git a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/AbstractCallbackApiTest.java b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/AbstractCallbackApiTest.java new file mode 100644 index 00000000..d221d1e0 --- /dev/null +++ b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/AbstractCallbackApiTest.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp; + +import io.fd.vpp.jvpp.callback.ControlPingCallback; +import io.fd.vpp.jvpp.dto.ControlPing; +import io.fd.vpp.jvpp.dto.ControlPingReply; + +public abstract class AbstractCallbackApiTest { + + private static int receivedPingCount = 0; + private static int errorPingCount = 0; + + public static void testControlPing(String shm_prefix, JVpp jvpp) throws Exception { + try (JVppRegistry registry = new JVppRegistryImpl("CallbackApiTest", shm_prefix)) { + + registry.register(jvpp, new ControlPingCallback() { + @Override + public void onControlPingReply(final ControlPingReply reply) { + System.out.printf("Received ControlPingReply: %s%n", reply); + receivedPingCount++; + } + + @Override + public void onError(VppCallbackException ex) { + System.out.printf("Received onError exception: call=%s, reply=%d, context=%d ", ex.getMethodName(), + ex.getErrorCode(), ex.getCtxId()); + errorPingCount++; + } + + }); + System.out.println("Successfully connected to VPP"); + Thread.sleep(1000); + + System.out.println("Sending control ping using JVppRegistry"); + registry.controlPing(jvpp.getClass()); + + Thread.sleep(2000); + + System.out.println("Sending control ping using JVpp plugin"); + jvpp.send(new ControlPing()); + + Thread.sleep(2000); + System.out.println("Disconnecting..."); + Assertions.assertEquals(2, receivedPingCount); + Assertions.assertEquals(0, errorPingCount); + } + } +} diff --git a/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/Assertions.java b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/Assertions.java new file mode 100644 index 00000000..f8b591f5 --- /dev/null +++ b/src/vpp-api/java/jvpp-registry/io/fd/vpp/jvpp/Assertions.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.fd.vpp.jvpp; + +public class Assertions { + + public static void assertEquals(final int expected, final int actual) { + if (expected != actual) { + throw new IllegalArgumentException(String.format("Expected[%s]/Actual[%s]", expected, actual)); + } + } + + public static void assertNotNull(final Object value) { + if (value == null) { + throw new IllegalArgumentException("Variable is null"); + } + } +} diff --git a/test/jvpp_connection.py b/test/jvpp_connection.py new file mode 100644 index 00000000..bb48745c --- /dev/null +++ b/test/jvpp_connection.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +import os +import subprocess +from vpp_papi_provider import VppPapiProvider +from threading import Timer + +from framework import VppTestCase + +# Api files path +API_FILES_PATH = "vpp/vpp-api/java" + +# Registry jar file name prefix +REGISTRY_JAR_PREFIX = "jvpp-registry" + + +class TestJVppConnection(VppTestCase): + + def full_jar_name(self, install_dir, jar_name, version): + return os.path.join(install_dir, API_FILES_PATH, + "{0}-{1}.jar".format(jar_name, version)) + + def jvpp_connection_test(self, api_jar_name, test_class_name, timeout): + install_dir = os.getenv('VPP_TEST_BUILD_DIR') + print("Install directory : {0}".format(install_dir)) + + version_reply = self.vapi.show_version() + version = version_reply.version.split("-")[0] + registry_jar_path = self.full_jar_name(install_dir, + REGISTRY_JAR_PREFIX, version) + print("JVpp Registry jar path : {0}".format(registry_jar_path)) + + api_jar_path = self.full_jar_name(install_dir, api_jar_name, version) + print("Api jar path : {0}".format(api_jar_path)) + + # passes shm prefix as parameter to create connection with same value + command = ["java", "-cp", + "{0}:{1}".format(registry_jar_path, api_jar_path), + test_class_name, "/{0}-vpe-api".format(self.shm_prefix)] + print("Test Command : {0}, Timeout : {1}".format(command, timeout)) + + self.process = subprocess.Popen(command, shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, bufsize=1, + universal_newlines=True) + + out, err = self.process.communicate() + print("Process output : {0}{1}".format(os.linesep, out)) + print("Process error output : {0}{1}".format(os.linesep, err)) + self.assert_equal(self.process.returncode, 0, "process return code") + + def tearDown(self): + print("Tearing down jvpp test") + if self.process.poll() is None: + self.process.kill() diff --git a/test/test_jvpp.py b/test/test_jvpp.py new file mode 100644 index 00000000..664ed2f9 --- /dev/null +++ b/test/test_jvpp.py @@ -0,0 +1,82 @@ +from jvpp_connection import TestJVppConnection + + +class TestJVpp(TestJVppConnection): + """ JVPP Core Test Case """ + + def invoke_for_jvpp_core(self, api_jar_name, test_class_name): + self.jvpp_connection_test(api_jar_name=api_jar_name, + test_class_name=test_class_name, + timeout=10) + + def test_vpp_core_callback_api(self): + """ JVPP Core Callback Api Test Case """ + self.invoke_for_jvpp_core(api_jar_name="jvpp-core", + test_class_name="io.fd.vpp.jvpp.core.test." + "CallbackApiTest") + + def test_vpp_core_future_api(self): + """JVPP Core Future Api Test Case""" + self.invoke_for_jvpp_core(api_jar_name="jvpp-core", + test_class_name="io.fd.vpp.jvpp.core.test." + "FutureApiTest") + + def test_vpp_acl_callback_api(self): + """ JVPP Acl Callback Api Test Case """ + self.invoke_for_jvpp_core(api_jar_name="jvpp-acl", + test_class_name="io.fd.vpp.jvpp.acl.test." 
+ "CallbackApiTest") + + def test_vpp_acl_future_api(self): + """JVPP Acl Future Api Test Case""" + self.invoke_for_jvpp_core(api_jar_name="jvpp-acl", + test_class_name="io.fd.vpp.jvpp.acl.test." + "FutureApiTest") + + def test_vpp_ioamexport_callback_api(self): + """ JVPP Ioamexport Callback Api Test Case """ + self.invoke_for_jvpp_core(api_jar_name="jvpp-ioamexport", + test_class_name="io.fd.vpp.jvpp.ioamexport." + "test.CallbackApiTest") + + def test_vpp_ioamexport_future_api(self): + """JVPP Ioamexport Future Api Test Case""" + self.invoke_for_jvpp_core(api_jar_name="jvpp-ioamexport", + test_class_name="io.fd.vpp.jvpp.ioamexport." + "test.FutureApiTest") + + def test_vpp_ioampot_callback_api(self): + """ JVPP Ioampot Callback Api Test Case """ + self.invoke_for_jvpp_core(api_jar_name="jvpp-ioampot", + test_class_name="io.fd.vpp.jvpp.ioampot." + "test.CallbackApiTest") + + def test_vpp_ioampot_future_api(self): + """JVPP Ioampot Future Api Test Case""" + self.invoke_for_jvpp_core(api_jar_name="jvpp-ioampot", + test_class_name="io.fd.vpp.jvpp.ioampot." + "test.FutureApiTest") + + def test_vpp_ioamtrace_callback_api(self): + """ JVPP Ioamtrace Callback Api Test Case """ + self.invoke_for_jvpp_core(api_jar_name="jvpp-ioamtrace", + test_class_name="io.fd.vpp.jvpp.ioamtrace." + "test.CallbackApiTest") + + def test_vpp_ioamtrace_future_api(self): + """JVPP Ioamtrace Future Api Test Case""" + self.invoke_for_jvpp_core(api_jar_name="jvpp-ioamtrace", + test_class_name="io.fd.vpp.jvpp.ioamtrace." + "test.FutureApiTest") + + def test_vpp_snat_callback_api(self): + """ JVPP Snat Callback Api Test Case """ + self.invoke_for_jvpp_core(api_jar_name="jvpp-nat", + test_class_name="io.fd.vpp.jvpp.nat.test." + "CallbackApiTest") + + def test_vpp_snat_future_api(self): + """JVPP Snat Future Api Test Case""" + self.invoke_for_jvpp_core(api_jar_name="jvpp-nat", + test_class_name="io.fd.vpp.jvpp.nat.test." 
+ "FutureApiTest") -- cgit 1.2.3-korg From 1f152cd6faf96b524b6b7071b5cffe1916f9c5cc Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Fri, 18 Aug 2017 19:28:03 -0700 Subject: tcp: retransmit and multi-buffer segment fixes and improvements - set session state as closed on session manager delete - enable retransmit as opposed to persist timer after persist timer completes - properly discard buffer chain bytes when new data overlaps ooo segments - don't use rxt bytes in snd space estimate used on tx path Change-Id: Id9cab686e532e5fe70c775d5440260e8eb890a9f Signed-off-by: Florin Coras --- src/svm/svm_fifo.c | 14 +- src/vnet/session/segment_manager.c | 1 + src/vnet/session/session.c | 105 ++++++++++++--- src/vnet/session/session_node.c | 26 ++-- src/vnet/session/stream_session.h | 2 +- src/vnet/tcp/tcp.c | 39 +++--- src/vnet/tcp/tcp.h | 23 +++- src/vnet/tcp/tcp_input.c | 27 ++-- src/vnet/tcp/tcp_output.c | 264 +++++++++++++++++++++---------------- 9 files changed, 319 insertions(+), 182 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 7f8127cf..8fe82f56 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -53,10 +53,12 @@ ooo_segment_end_pos (svm_fifo_t * f, ooo_segment_t * s) u8 * format_ooo_segment (u8 * s, va_list * args) { + svm_fifo_t *f = va_arg (*args, svm_fifo_t *); ooo_segment_t *seg = va_arg (*args, ooo_segment_t *); - - s = format (s, "pos %u, len %u, next %d, prev %d", - seg->start, seg->length, seg->next, seg->prev); + u32 normalized_start = (seg->start + f->nitems - f->tail) % f->nitems; + s = format (s, "[%u, %u], len %u, next %d, prev %d", normalized_start, + (normalized_start + seg->length) % f->nitems, seg->length, + seg->next, seg->prev); return s; } @@ -154,7 +156,7 @@ format_ooo_list (u8 * s, va_list * args) while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX) { seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index); - s = format (s, " %U\n", format_ooo_segment, seg); + s = format (s, " %U\n", format_ooo_segment, f, seg); ooo_segment_index = seg->next; } @@ -557,7 +559,6 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems, normalized_offset; - u32 offset_from_tail; f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; @@ -570,8 +571,7 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, normalized_offset = (f->tail + offset) % nitems; /* Will this request fit? 
*/ - offset_from_tail = (nitems + normalized_offset - f->tail) % nitems; - if ((required_bytes + offset_from_tail) > (nitems - cursize)) + if ((required_bytes + offset) > (nitems - cursize)) return -1; svm_fifo_trace_add (f, offset, required_bytes, 1); diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index 262b7faa..43977063 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -224,6 +224,7 @@ segment_manager_del (segment_manager_t * sm) session = stream_session_get (session_index, thread_index); /* Instead of directly removing the session call disconnect */ + session->session_state = SESSION_STATE_CLOSED; session_send_session_evt_to_thread (stream_session_handle (session), FIFO_EVENT_DISCONNECT, thread_index); diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 70a5cd83..6fe99047 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -92,38 +92,104 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, return 0; } -/** Enqueue buffer chain tail */ +/** + * Discards bytes from buffer chain + * + * It discards n_bytes_to_drop starting at first buffer after chain_b + */ +always_inline void +session_enqueue_discard_chain_bytes (vlib_main_t * vm, vlib_buffer_t * b, + vlib_buffer_t ** chain_b, + u32 n_bytes_to_drop) +{ + vlib_buffer_t *next = *chain_b; + u32 to_drop = n_bytes_to_drop; + ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT); + while (to_drop && (next->flags & VLIB_BUFFER_NEXT_PRESENT)) + { + next = vlib_get_buffer (vm, next->next_buffer); + if (next->current_length > to_drop) + { + vlib_buffer_advance (next, to_drop); + to_drop = 0; + } + else + { + to_drop -= next->current_length; + next->current_length = 0; + } + } + *chain_b = next; + + if (to_drop == 0) + b->total_length_not_including_first_buffer -= n_bytes_to_drop; +} + +/** + * Enqueue buffer chain tail + */ always_inline int session_enqueue_chain_tail (stream_session_t * s, vlib_buffer_t * b, u32 offset, u8 is_in_order) { vlib_buffer_t *chain_b; - u32 chain_bi = b->next_buffer, len; + u32 chain_bi, len, diff; vlib_main_t *vm = vlib_get_main (); u8 *data; - u16 written = 0; + u32 written = 0; int rv = 0; + if (is_in_order && offset) + { + diff = offset - b->current_length; + if (diff > b->total_length_not_including_first_buffer) + return 0; + chain_b = b; + session_enqueue_discard_chain_bytes (vm, b, &chain_b, diff); + chain_bi = vlib_get_buffer_index (vm, chain_b); + } + else + chain_bi = b->next_buffer; + do { chain_b = vlib_get_buffer (vm, chain_bi); data = vlib_buffer_get_current (chain_b); len = chain_b->current_length; + if (!len) + continue; if (is_in_order) { rv = svm_fifo_enqueue_nowait (s->server_rx_fifo, len, data); - if (rv < len) + if (rv == len) + { + written += rv; + } + else if (rv < len) { return (rv > 0) ? 
(written + rv) : written; } - written += rv; + else if (rv > len) + { + written += rv; + + /* written more than what was left in chain */ + if (written > b->total_length_not_including_first_buffer) + return written; + + /* drop the bytes that have already been delivered */ + session_enqueue_discard_chain_bytes (vm, b, &chain_b, rv - len); + } } else { rv = svm_fifo_enqueue_with_offset (s->server_rx_fifo, offset, len, data); if (rv) - return -1; + { + clib_warning ("failed to enqueue multi-buffer seg"); + return -1; + } offset += len; } } @@ -155,22 +221,22 @@ stream_session_enqueue_data (transport_connection_t * tc, vlib_buffer_t * b, u32 offset, u8 queue_event, u8 is_in_order) { stream_session_t *s; - int enqueued = 0, rv; + int enqueued = 0, rv, in_order_off; s = stream_session_get (tc->s_index, tc->thread_index); if (is_in_order) { - enqueued = - svm_fifo_enqueue_nowait (s->server_rx_fifo, b->current_length, - vlib_buffer_get_current (b)); - if (PREDICT_FALSE - ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued > 0)) + enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, + b->current_length, + vlib_buffer_get_current (b)); + if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) + && enqueued >= 0)) { - rv = session_enqueue_chain_tail (s, b, 0, 1); - if (rv <= 0) - return enqueued; - enqueued += rv; + in_order_off = enqueued > b->current_length ? enqueued : 0; + rv = session_enqueue_chain_tail (s, b, in_order_off, 1); + if (rv > 0) + enqueued += rv; } } else @@ -179,9 +245,10 @@ stream_session_enqueue_data (transport_connection_t * tc, vlib_buffer_t * b, b->current_length, vlib_buffer_get_current (b)); if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && !rv)) - rv = session_enqueue_chain_tail (s, b, offset + b->current_length, 0); - if (rv) - return -1; + session_enqueue_chain_tail (s, b, offset + b->current_length, 0); + /* if something was enqueued, report even this as success for ooo + * segment handling */ + return rv; } if (queue_event) diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c index fac2b852..cd52742b 100644 --- a/src/vnet/session/session_node.c +++ b/src/vnet/session/session_node.c @@ -76,7 +76,7 @@ session_tx_fifo_chain_tail (session_manager_main_t * smm, vlib_main_t * vm, u8 thread_index, svm_fifo_t * fifo, vlib_buffer_t * b0, u32 bi0, u8 n_bufs_per_seg, u32 left_from_seg, u32 * left_to_snd0, - u16 * n_bufs, u32 * rx_offset, u16 deq_per_buf, + u16 * n_bufs, u32 * tx_offset, u16 deq_per_buf, u8 peek_data) { vlib_buffer_t *chain_b0, *prev_b0; @@ -104,8 +104,8 @@ session_tx_fifo_chain_tail (session_manager_main_t * smm, vlib_main_t * vm, data0 = vlib_buffer_get_current (chain_b0); if (peek_data) { - n_bytes_read = svm_fifo_peek (fifo, *rx_offset, len_to_deq0, data0); - *rx_offset += n_bytes_read; + n_bytes_read = svm_fifo_peek (fifo, *tx_offset, len_to_deq0, data0); + *tx_offset += n_bytes_read; } else { @@ -126,7 +126,8 @@ session_tx_fifo_chain_tail (session_manager_main_t * smm, vlib_main_t * vm, if (to_deq == 0) break; } - ASSERT (to_deq == 0); + ASSERT (to_deq == 0 + && b0->total_length_not_including_first_buffer == left_from_seg); *left_to_snd0 -= left_from_seg; } @@ -144,7 +145,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, transport_proto_vft_t *transport_vft; u32 next_index, next0, *to_next, n_left_to_next, bi0; vlib_buffer_t *b0; - u32 rx_offset = 0, max_dequeue0, n_bytes_per_seg; + u32 tx_offset = 0, max_dequeue0, n_bytes_per_seg, left_for_seg; u16 snd_mss0, n_bufs_per_seg, n_bufs; u8 
*data0; int i, n_bytes_read; @@ -170,11 +171,11 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, if (peek_data) { /* Offset in rx fifo from where to peek data */ - rx_offset = transport_vft->tx_fifo_offset (tc0); + tx_offset = transport_vft->tx_fifo_offset (tc0); } /* Check how much we can pull. If buffering, subtract the offset */ - max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - rx_offset; + max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - tx_offset; /* Nothing to read return */ if (max_dequeue0 == 0) @@ -193,6 +194,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, } else { + /* Expectation is that snd_space0 is already a multiple of snd_mss */ max_len_to_snd0 = snd_space0; } @@ -265,8 +267,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, b0 = vlib_get_buffer (vm, bi0); b0->error = 0; - b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID - | VNET_BUFFER_F_LOCALLY_ORIGINATED; + b0->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED; b0->current_data = 0; b0->total_length_not_including_first_buffer = 0; @@ -274,11 +275,11 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, data0 = vlib_buffer_make_headroom (b0, MAX_HDRS_LEN); if (peek_data) { - n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, rx_offset, + n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, tx_offset, len_to_deq0, data0); /* Keep track of progress locally, transport is also supposed to * increment it independently when pushing the header */ - rx_offset += n_bytes_read; + tx_offset += n_bytes_read; } else { @@ -299,12 +300,11 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, */ if (PREDICT_FALSE (n_bufs_per_seg > 1 && left_to_snd0)) { - u32 left_for_seg; left_for_seg = clib_min (snd_mss0 - n_bytes_read, left_to_snd0); session_tx_fifo_chain_tail (smm, vm, thread_index, s0->server_tx_fifo, b0, bi0, n_bufs_per_seg, left_for_seg, - &left_to_snd0, &n_bufs, &rx_offset, + &left_to_snd0, &n_bufs, &tx_offset, deq_per_buf, peek_data); } diff --git a/src/vnet/session/stream_session.h b/src/vnet/session/stream_session.h index 533cf97f..275052d3 100644 --- a/src/vnet/session/stream_session.h +++ b/src/vnet/session/stream_session.h @@ -56,7 +56,7 @@ typedef struct _stream_session_t u8 session_type; /** State */ - u8 session_state; + volatile u8 session_state; u8 thread_index; diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 6edf52af..197fff96 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -798,7 +798,8 @@ format_tcp_vars (u8 * s, va_list * args) s = format (s, "rtt_seq %u\n", tc->rtt_seq); s = format (s, " tsval_recent %u tsval_recent_age %u\n", tc->tsval_recent, tcp_time_now () - tc->tsval_recent_age); - s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb); + s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb, + tc); if (vec_len (tc->snd_sacks)) s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc); @@ -858,7 +859,7 @@ format_tcp_session (u8 * s, va_list * args) if (tc) s = format (s, "%U", format_tcp_connection, tc, verbose); else - s = format (s, "empty"); + s = format (s, "empty\n"); return s; } @@ -930,7 +931,11 @@ u8 * format_tcp_sack_hole (u8 * s, va_list * args) { sack_scoreboard_hole_t *hole = va_arg (*args, sack_scoreboard_hole_t *); - s = format (s, "[%u, %u]", hole->start, hole->end); + tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); + if (tc) + s = format (s, " [%u, %u]", hole->start - 
tc->iss, hole->end - tc->iss); + else + s = format (s, " [%u, %u]", hole->start, hole->end); return s; } @@ -938,6 +943,7 @@ u8 * format_tcp_scoreboard (u8 * s, va_list * args) { sack_scoreboard_t *sb = va_arg (*args, sack_scoreboard_t *); + tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); sack_scoreboard_hole_t *hole; s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n", sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes); @@ -952,7 +958,7 @@ format_tcp_scoreboard (u8 * s, va_list * args) while (hole) { - s = format (s, "%U", format_tcp_sack_hole, hole); + s = format (s, "%U", format_tcp_sack_hole, hole, tc); hole = scoreboard_next_hole (sb, hole); } @@ -1001,13 +1007,10 @@ tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space) return tc->snd_wnd <= snd_space ? tc->snd_wnd : 0; } - /* If we can't write at least a segment, don't try at all */ + /* If not snd_wnd constrained and we can't write at least a segment, + * don't try at all */ if (PREDICT_FALSE (snd_space < tc->snd_mss)) - { - if (snd_space > clib_min (tc->mss, tc->rcv_opts.mss) - TCP_HDR_LEN_MAX) - return snd_space; - return 0; - } + return 0; /* round down to mss multiple */ return snd_space - (snd_space % tc->snd_mss); @@ -1030,7 +1033,7 @@ tcp_snd_space (tcp_connection_t * tc) if (PREDICT_TRUE (tcp_in_cong_recovery (tc) == 0)) { - snd_space = tcp_available_snd_space (tc); + snd_space = tcp_available_output_snd_space (tc); /* If we haven't gotten dupacks or if we did and have gotten sacked * bytes then we can still send as per Limited Transmit (RFC3042) */ @@ -1051,17 +1054,20 @@ tcp_snd_space (tcp_connection_t * tc) if (tcp_in_recovery (tc)) { tc->snd_nxt = tc->snd_una_max; - snd_space = tcp_available_wnd (tc) - tc->snd_rxt_bytes + snd_space = tcp_available_snd_wnd (tc) - tc->snd_rxt_bytes - (tc->snd_una_max - tc->snd_congestion); if (snd_space <= 0 || (tc->snd_una_max - tc->snd_una) >= tc->snd_wnd) return 0; return tcp_round_snd_space (tc, snd_space); } - /* If in fast recovery, send 1 SMSS if wnd allows */ - if (tcp_in_fastrecovery (tc) - && tcp_available_snd_space (tc) && !tcp_fastrecovery_sent_1_smss (tc)) + /* RFC 5681: When previously unsent data is available and the new value of + * cwnd and the receiver's advertised window allow, a TCP SHOULD send 1*SMSS + * bytes of previously unsent data. 
*/ + if (tcp_in_fastrecovery (tc) && !tcp_fastrecovery_sent_1_smss (tc)) { + if (tcp_available_output_snd_space (tc) < tc->snd_mss) + return 0; tcp_fastrecovery_1_smss_on (tc); return tc->snd_mss; } @@ -1073,7 +1079,8 @@ u32 tcp_session_send_space (transport_connection_t * trans_conn) { tcp_connection_t *tc = (tcp_connection_t *) trans_conn; - return tcp_snd_space (tc); + return clib_min (tcp_snd_space (tc), + tc->snd_wnd - (tc->snd_nxt - tc->snd_una)); } i32 diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 097cc8cf..9e4660b8 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -498,7 +498,9 @@ tcp_half_open_connection_get (u32 conn_index) void tcp_make_ack (tcp_connection_t * ts, vlib_buffer_t * b); void tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b); void tcp_make_synack (tcp_connection_t * ts, vlib_buffer_t * b); -void tcp_send_reset (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4); +void tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, + u8 is_ip4); +void tcp_send_reset (tcp_connection_t * tc); void tcp_send_syn (tcp_connection_t * tc); void tcp_send_fin (tcp_connection_t * tc); void tcp_init_mss (tcp_connection_t * tc); @@ -582,15 +584,30 @@ tcp_loss_wnd (const tcp_connection_t * tc) } always_inline u32 -tcp_available_wnd (const tcp_connection_t * tc) +tcp_available_snd_wnd (const tcp_connection_t * tc) { return clib_min (tc->cwnd, tc->snd_wnd); } +always_inline u32 +tcp_available_output_snd_space (const tcp_connection_t * tc) +{ + u32 available_wnd = tcp_available_snd_wnd (tc); + int flight_size = (int) (tc->snd_nxt - tc->snd_una); + + if (available_wnd <= flight_size) + return 0; + + return available_wnd - flight_size; +} + +/** + * Estimate of how many bytes we can still push into the network + */ always_inline u32 tcp_available_snd_space (const tcp_connection_t * tc) { - u32 available_wnd = tcp_available_wnd (tc); + u32 available_wnd = tcp_available_snd_wnd (tc); u32 flight_size = tcp_flight_size (tc); if (available_wnd <= flight_size) diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 6f9e4c7a..95f9ade1 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1363,7 +1363,7 @@ always_inline int tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, u16 data_len) { - int written; + int written, error = TCP_ERROR_ENQUEUED; ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt)); @@ -1381,12 +1381,12 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, /* Update rcv_nxt */ if (PREDICT_TRUE (written == data_len)) { - tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end; + tc->rcv_nxt += written; } /* If more data written than expected, account for out-of-order bytes. */ else if (written > data_len) { - tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end + written - data_len; + tc->rcv_nxt += written; /* Send ACK confirming the update */ tc->flags |= TCP_CONN_SNDACK; @@ -1400,7 +1400,7 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, * not be enqueued. 
Inform peer */ tc->flags |= TCP_CONN_SNDACK; - return TCP_ERROR_PARTIALLY_ENQUEUED; + error = TCP_ERROR_PARTIALLY_ENQUEUED; } else { @@ -1415,7 +1415,7 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt); } - return TCP_ERROR_ENQUEUED; + return error; } /** Enqueue out-of-order data */ @@ -1495,10 +1495,10 @@ tcp_can_delack (tcp_connection_t * tc) static int tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop) { - u32 discard; + u32 discard, first = b->current_length; vlib_main_t *vm = vlib_get_main (); - /* Handle multi segment packets */ + /* Handle multi-buffer segments */ if (n_bytes_to_drop > b->current_length) { if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT)) @@ -1511,7 +1511,12 @@ tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop) n_bytes_to_drop -= discard; } while (n_bytes_to_drop); + if (n_bytes_to_drop > first) + b->total_length_not_including_first_buffer -= n_bytes_to_drop - first; } + else + vlib_buffer_advance (b, n_bytes_to_drop); + vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop; return 0; } @@ -1908,7 +1913,7 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (ack0 <= tc0->iss || ack0 > tc0->snd_nxt) { if (!tcp_rst (tcp0)) - tcp_send_reset (tc0, b0, is_ip4); + tcp_send_reset_w_pkt (tc0, b0, is_ip4); goto drop; } @@ -1995,7 +2000,7 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, * allocate session send reset */ if (stream_session_connect_notify (&new_tc0->connection, 0)) { - tcp_send_reset (new_tc0, b0, is_ip4); + tcp_send_reset_w_pkt (new_tc0, b0, is_ip4); tcp_connection_cleanup (new_tc0); goto drop; } @@ -2017,7 +2022,7 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (stream_session_connect_notify (&new_tc0->connection, 0)) { tcp_connection_cleanup (new_tc0); - tcp_send_reset (tc0, b0, is_ip4); + tcp_send_reset_w_pkt (tc0, b0, is_ip4); TCP_EVT_DBG (TCP_EVT_RST_SENT, tc0); goto drop; } @@ -2221,7 +2226,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, */ if (!tcp_rcv_ack_is_acceptable (tc0, b0)) { - tcp_send_reset (tc0, b0, is_ip4); + tcp_send_reset_w_pkt (tc0, b0, is_ip4); goto drop; } diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 7da0c073..c56eadf8 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -440,7 +440,8 @@ tcp_init_mss (tcp_connection_t * tc) always_inline int tcp_alloc_tx_buffers (tcp_main_t * tm, u8 thread_index, u32 n_free_buffers) { - vec_validate (tm->tx_buffers[thread_index], n_free_buffers - 1); + vec_validate (tm->tx_buffers[thread_index], + vec_len (tm->tx_buffers[thread_index]) + n_free_buffers - 1); _vec_len (tm->tx_buffers[thread_index]) = vlib_buffer_alloc_from_free_list (vlib_get_main (), tm->tx_buffers[thread_index], @@ -480,27 +481,31 @@ tcp_return_buffer (tcp_main_t * tm) _vec_len (my_tx_buffers) += 1; } -always_inline void +always_inline void * tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b) { - vlib_buffer_t *it = b; - u32 save_free_list = b->flags & VLIB_BUFFER_FREE_LIST_INDEX_MASK; - do - { - it->current_data = 0; - it->current_length = 0; - it->total_length_not_including_first_buffer = 0; - } - while ((it->flags & VLIB_BUFFER_NEXT_PRESENT) - && (it = vlib_get_buffer (vm, it->next_buffer))); - if (b->flags & VLIB_BUFFER_NEXT_PRESENT) vlib_buffer_free_one (vm, b->next_buffer); - b->flags = save_free_list; + b->flags = 0; + b->current_data = 0; + b->current_length = 0; + 
b->total_length_not_including_first_buffer = 0; + vnet_buffer (b)->tcp.flags = 0; /* Leave enough space for headers */ - vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + return vlib_buffer_make_headroom (b, MAX_HDRS_LEN); +} + +always_inline void * +tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b) +{ + ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0); + b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED; + b->total_length_not_including_first_buffer = 0; vnet_buffer (b)->tcp.flags = 0; + + /* Leave enough space for headers */ + return vlib_buffer_make_headroom (b, MAX_HDRS_LEN); } /** @@ -632,6 +637,59 @@ tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, vlib_put_frame_to_node (vm, next_index, f); } +always_inline void +tcp_enqueue_to_output_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, + u8 is_ip4, u8 flush) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + u32 thread_index = vlib_get_thread_index (); + u32 *to_next, next_index; + vlib_frame_t *f; + + b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED; + b->error = 0; + + /* Decide where to send the packet */ + next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index; + + /* Initialize the trajectory trace, if configured */ + if (VLIB_BUFFER_TRACE_TRAJECTORY > 0) + { + b->pre_data[0] = 1; + b->pre_data[1] = next_index; + } + + /* Get frame to v4/6 output node */ + f = tm->tx_frames[!is_ip4][thread_index]; + if (!f) + { + f = vlib_get_frame_to_node (vm, next_index); + ASSERT (f); + tm->tx_frames[!is_ip4][thread_index] = f; + } + to_next = vlib_frame_vector_args (f); + to_next[f->n_vectors] = bi; + f->n_vectors += 1; + if (flush || f->n_vectors == VLIB_FRAME_SIZE) + { + vlib_put_frame_to_node (vm, next_index, f); + tm->tx_frames[!is_ip4][thread_index] = 0; + } +} + +always_inline void +tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4) +{ + tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 0); +} + +always_inline void +tcp_enqueue_to_output_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, + u8 is_ip4) +{ + tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 1); +} + int tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b0, tcp_state_t state, u8 thread_index, u8 is_ip4) @@ -712,9 +770,11 @@ tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b0, /** * Send reset without reusing existing buffer + * + * It extracts connection info out of original packet */ void -tcp_send_reset (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4) +tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4) { vlib_buffer_t *b; u32 bi; @@ -730,9 +790,7 @@ tcp_send_reset (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4) return; b = vlib_get_buffer (vm, bi); - - /* Leave enough space for headers */ - vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + tcp_init_buffer (vm, b); /* Make and write options */ tcp_hdr_len = sizeof (tcp_header_t); @@ -787,6 +845,38 @@ tcp_send_reset (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4) TCP_EVT_DBG (TCP_EVT_RST_SENT, tc); } +/** + * Build and set reset packet for connection + */ +void +tcp_send_reset (tcp_connection_t * tc) +{ + vlib_main_t *vm = vlib_get_main (); + tcp_main_t *tm = vnet_get_tcp_main (); + vlib_buffer_t *b; + u32 bi; + tcp_header_t *th; + u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len; + u8 flags; + + if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) + return; + b = vlib_get_buffer (vm, bi); + tcp_init_buffer (vm, b); + + tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state); + tcp_hdr_opts_len = 
tc->snd_opts_len + sizeof (tcp_header_t); + advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED); + flags = TCP_FLAG_RST; + th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt, + tc->rcv_nxt, tcp_hdr_opts_len, flags, + advertise_wnd); + opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts); + ASSERT (opts_write_len == tc->snd_opts_len); + vnet_buffer (b)->tcp.connection_index = tc->c_c_index; + tcp_enqueue_to_output_now (vm, b, bi, tc->c_is_ip4); +} + void tcp_push_ip_hdr (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b) { @@ -835,9 +925,7 @@ tcp_send_syn (tcp_connection_t * tc) return; b = vlib_get_buffer (vm, bi); - - /* Leave enough space for headers */ - vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + tcp_init_buffer (vm, b); /* Set random initial sequence */ time_now = tcp_time_now (); @@ -875,59 +963,6 @@ tcp_send_syn (tcp_connection_t * tc) TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc); } -always_inline void -tcp_enqueue_to_output_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, - u8 is_ip4, u8 flush) -{ - tcp_main_t *tm = vnet_get_tcp_main (); - u32 thread_index = vlib_get_thread_index (); - u32 *to_next, next_index; - vlib_frame_t *f; - - b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED; - b->error = 0; - - /* Decide where to send the packet */ - next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index; - - /* Initialize the trajectory trace, if configured */ - if (VLIB_BUFFER_TRACE_TRAJECTORY > 0) - { - b->pre_data[0] = 1; - b->pre_data[1] = next_index; - } - - /* Get frame to v4/6 output node */ - f = tm->tx_frames[!is_ip4][thread_index]; - if (!f) - { - f = vlib_get_frame_to_node (vm, next_index); - ASSERT (f); - tm->tx_frames[!is_ip4][thread_index] = f; - } - to_next = vlib_frame_vector_args (f); - to_next[f->n_vectors] = bi; - f->n_vectors += 1; - if (flush || f->n_vectors == VLIB_FRAME_SIZE) - { - vlib_put_frame_to_node (vm, next_index, f); - tm->tx_frames[!is_ip4][thread_index] = 0; - } -} - -always_inline void -tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4) -{ - tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 0); -} - -always_inline void -tcp_enqueue_to_output_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, - u8 is_ip4) -{ - tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 1); -} - /** * Flush tx frame populated by retransmits and timer pops */ @@ -969,7 +1004,7 @@ tcp_send_fin (tcp_connection_t * tc) if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) return; b = vlib_get_buffer (vm, bi); - + /* buffer will be initialized by in tcp_make_fin */ tcp_make_fin (tc, b); tcp_enqueue_to_output_now (vm, b, bi, tc->c_is_ip4); tc->flags |= TCP_CONN_FINSNT; @@ -1013,6 +1048,8 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_header_t *th; data_len = b->current_length + b->total_length_not_including_first_buffer; + ASSERT (!b->total_length_not_including_first_buffer + || (b->flags & VLIB_BUFFER_NEXT_PRESENT)); vnet_buffer (b)->tcp.flags = 0; if (compute_opts) @@ -1106,29 +1143,27 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, u32 offset, * Make sure we can retransmit something */ available_bytes = stream_session_tx_fifo_max_dequeue (&tc->connection); + available_bytes -= offset; if (!available_bytes) return 0; max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes); max_deq_bytes = clib_min (available_bytes, max_deq_bytes); - seg_size = max_deq_bytes + MAX_HDRS_LEN; /* Start is beyond snd_congestion */ start = tc->snd_una + offset; if (seq_geq (start, tc->snd_congestion)) - 
{ - goto done; - } + goto done; /* Don't overshoot snd_congestion */ if (seq_gt (start + max_deq_bytes, tc->snd_congestion)) { max_deq_bytes = tc->snd_congestion - start; if (max_deq_bytes == 0) - { - goto done; - } + goto done; } + seg_size = max_deq_bytes + MAX_HDRS_LEN; + /* * Prepare options */ @@ -1141,7 +1176,7 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, u32 offset, if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) return 0; *b = vlib_get_buffer (vm, bi); - data = vlib_buffer_make_headroom (*b, MAX_HDRS_LEN); + data = tcp_init_buffer (vm, *b); /* Easy case, buffer size greater than mss */ if (PREDICT_TRUE (seg_size <= tm->bytes_per_buffer)) @@ -1162,7 +1197,6 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, u32 offset, int i; n_bufs_per_seg = ceil ((double) seg_size / tm->bytes_per_buffer); - ASSERT (available_bytes >= max_deq_bytes); /* Make sure we have enough buffers */ available_bufs = vec_len (tm->tx_buffers[thread_index]); @@ -1182,8 +1216,6 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, u32 offset, b[0]->current_length = n_bytes; b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; b[0]->total_length_not_including_first_buffer = 0; - - tcp_push_hdr_i (tc, *b, tc->state, 0); max_deq_bytes -= n_bytes; chain_b = *b; @@ -1197,22 +1229,22 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, u32 offset, chain_b->current_data = 0; data = vlib_buffer_get_current (chain_b); n_peeked = stream_session_peek_bytes (&tc->connection, data, - n_bytes, len_to_deq); - n_bytes += n_peeked; + offset + n_bytes, len_to_deq); ASSERT (n_peeked == len_to_deq); + n_bytes += n_peeked; chain_b->current_length = n_peeked; - b[0]->total_length_not_including_first_buffer += - chain_b->current_length; + chain_b->flags = 0; + chain_b->next_buffer = 0; /* update previous buffer */ prev_b->next_buffer = chain_bi; prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT; - /* update current buffer */ - chain_b->next_buffer = 0; - max_deq_bytes -= n_peeked; + b[0]->total_length_not_including_first_buffer += n_peeked; } + + tcp_push_hdr_i (tc, *b, tc->state, 0); } ASSERT (n_bytes > 0); @@ -1348,7 +1380,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) return; b = vlib_get_buffer (vm, bi); - vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + tcp_init_buffer (vm, b); tcp_push_hdr_i (tc, b, tc->state, 1); /* Account for the SYN */ @@ -1409,8 +1441,9 @@ tcp_timer_persist_handler (u32 index) u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; vlib_buffer_t *b; - u32 bi, old_snd_nxt, snd_bytes = 0, available_bytes = 0; + u32 bi, old_snd_nxt, max_snd_bytes, available_bytes, offset; int n_bytes = 0; + u8 *data; tc = tcp_connection_get_if_valid (index, thread_index); @@ -1419,12 +1452,13 @@ tcp_timer_persist_handler (u32 index) /* Make sure timer handle is set to invalid */ tc->timers[TCP_TIMER_PERSIST] = TCP_TIMER_HANDLE_INVALID; + offset = tc->snd_una_max - tc->snd_una; /* Problem already solved or worse */ available_bytes = stream_session_tx_fifo_max_dequeue (&tc->connection); if (tc->state == TCP_STATE_CLOSED || tc->state > TCP_STATE_ESTABLISHED || tc->snd_wnd > tc->snd_mss || tcp_in_recovery (tc) - || !available_bytes) + || !available_bytes || available_bytes <= offset) return; /* Increment RTO backoff */ @@ -1437,18 +1471,17 @@ tcp_timer_persist_handler (u32 index) if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) return; b = vlib_get_buffer (vm, bi); + data = tcp_init_buffer (vm, b); - tcp_validate_txf_size 
(tc, tc->snd_una_max - tc->snd_una); + tcp_validate_txf_size (tc, offset); tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state); - snd_bytes = clib_min (tc->snd_mss, tm->bytes_per_buffer); - n_bytes = stream_session_peek_bytes (&tc->connection, - vlib_buffer_get_current (b), - tc->snd_una_max - tc->snd_una, - snd_bytes); - ASSERT (n_bytes != 0); + max_snd_bytes = clib_min (tc->snd_mss, tm->bytes_per_buffer - MAX_HDRS_LEN); + n_bytes = stream_session_peek_bytes (&tc->connection, data, offset, + max_snd_bytes); b->current_length = n_bytes; - ASSERT (tc->snd_nxt == tc->snd_una_max || tc->rto_boff > 1 - || tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)); + ASSERT (n_bytes != 0 && (tc->snd_nxt == tc->snd_una_max || tc->rto_boff > 1 + || tcp_timer_is_active (tc, + TCP_TIMER_RETRANSMIT))); /* Allow updating of snd_una_max but don't update snd_nxt */ old_snd_nxt = tc->snd_nxt; @@ -1456,8 +1489,8 @@ tcp_timer_persist_handler (u32 index) tc->snd_nxt = old_snd_nxt; tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); - /* Re-enable persist timer */ - tcp_persist_timer_set (tc); + /* Just sent new data, enable retransmit */ + tcp_retransmit_timer_update (tc); } /** @@ -1490,7 +1523,7 @@ void tcp_fast_retransmit_sack (tcp_connection_t * tc) { vlib_main_t *vm = vlib_get_main (); - u32 n_written = 0, offset = 0, max_bytes; + u32 n_written = 0, offset, max_bytes; vlib_buffer_t *b = 0; sack_scoreboard_hole_t *hole; sack_scoreboard_t *sb; @@ -1523,7 +1556,9 @@ tcp_fast_retransmit_sack (tcp_connection_t * tc) * unSACKed sequence number SHOULD be returned, and RescueRxt set to * RecoveryPoint. HighRxt MUST NOT be updated. */ - max_bytes = clib_min (tc->snd_mss, snd_space); + max_bytes = clib_min (tc->snd_mss, + tc->snd_congestion - tc->snd_una); + max_bytes = clib_min (max_bytes, snd_space); offset = tc->snd_congestion - tc->snd_una - max_bytes; sb->rescue_rxt = tc->snd_congestion; tc->snd_nxt = tc->snd_una + offset; @@ -1535,9 +1570,12 @@ tcp_fast_retransmit_sack (tcp_connection_t * tc) break; } - max_bytes = snd_limited ? tc->snd_mss : hole->end - sb->high_rxt; + max_bytes = clib_min (hole->end - sb->high_rxt, snd_space); + max_bytes = snd_limited ? 
clib_min (max_bytes, tc->snd_mss) : max_bytes; + if (max_bytes == 0) + break; offset = sb->high_rxt - tc->snd_una; - tc->snd_nxt = tc->snd_una + offset; + tc->snd_nxt = sb->high_rxt; n_written = tcp_prepare_retransmit_segment (tc, offset, max_bytes, &b); /* Nothing left to retransmit */ @@ -1547,6 +1585,7 @@ tcp_fast_retransmit_sack (tcp_connection_t * tc) bi = vlib_get_buffer_index (vm, b); sb->high_rxt += n_written; tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + ASSERT (n_written <= snd_space); snd_space -= n_written; } @@ -1835,6 +1874,7 @@ tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b) tc = (tcp_connection_t *) tconn; tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, 0); + ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd)); if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc)) { -- cgit 1.2.3-korg From c87c91d8b0e85997debaf575f2e30cc2702edf25 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 16 Aug 2017 19:55:49 -0700 Subject: session: segment manager improvements - cleanup connects segment manager even if first - fix segment manager allocation for listen sessions - improve handling of process private segments (mheaps/main heap) - added segment manager cli Change-Id: Ic2ca97c3622ab2286d5fb5772aeb57680e64f769 Signed-off-by: Florin Coras Signed-off-by: Dave Wallace --- src/svm/svm_fifo_segment.c | 62 +++++-- src/svm/svm_fifo_segment.h | 13 +- src/svm/test_svm_fifo1.c | 10 +- src/uri/vppcom.c | 2 +- src/vnet/session/application.c | 71 ++++++-- src/vnet/session/application.h | 8 +- src/vnet/session/segment_manager.c | 365 +++++++++++++++++++++++++------------ src/vnet/session/segment_manager.h | 29 +-- src/vnet/session/session.c | 9 +- src/vnet/session/session_cli.c | 19 +- src/vnet/session/session_node.c | 3 +- src/vnet/tcp/tcp.c | 6 +- 12 files changed, 407 insertions(+), 190 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 2094ba74..6600a423 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -188,7 +188,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) return (rv); } - /* Note; requested_va updated due to seg base addr randomization */ + /* Note: requested_va updated due to seg base addr randomization */ sm->next_baseva = s->ssvm.requested_va + a->segment_size; sh = s->ssvm.sh; @@ -200,7 +200,6 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) sh->opaque[0] = fsh; s->h = fsh; fsh->segment_name = format (0, "%s%c", a->segment_name, 0); - preallocate_fifo_pairs (fsh, a); ssvm_pop_heap (oldheap); @@ -226,25 +225,19 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) if (a->private_segment_count && a->private_segment_size) { - void *mem; u8 *heap; u32 pagesize = clib_mem_get_page_size (); u32 rnd_size; + rnd_size = (a->private_segment_size + (pagesize - 1)) & ~pagesize; for (i = 0; i < a->private_segment_count; i++) { - rnd_size = (a->private_segment_size + (pagesize - 1)) & ~pagesize; - - mem = mmap (0, rnd_size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, - -1 /* fd */ , 0 /* offset */ ); - - if (mem == MAP_FAILED) + heap = mheap_alloc (0, rnd_size); + if (heap == 0) { - clib_unix_warning ("mmap"); + clib_unix_warning ("mheap alloc"); return -1; } - heap = mheap_alloc (mem, rnd_size); heap_header = mheap_header (heap); heap_header->flags |= MHEAP_FLAG_THREAD_SAFE; vec_add1 (heaps, heap); @@ -279,6 +272,9 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) memset (fsh, 0, 
sizeof (*fsh)); sh->opaque[0] = fsh; s->h = fsh; + fsh->flags = FIFO_SEGMENT_F_IS_PRIVATE; + if (!a->private_segment_count) + fsh->flags |= FIFO_SEGMENT_F_IS_MAIN_HEAP; fsh->segment_name = format (0, "%s%c", a->segment_name, 0); if (a->private_segment_count) @@ -288,7 +284,6 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) preallocate_fifo_pairs (fsh, a); clib_mem_set_heap (oldheap); } - sh->ready = 1; vec_add1 (a->new_segment_indices, s - sm->segments); } @@ -336,8 +331,20 @@ void svm_fifo_segment_delete (svm_fifo_segment_private_t * s) { svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; - ssvm_delete (&s->ssvm); - pool_put (sm->segments, s); + if (s->h->flags & FIFO_SEGMENT_F_IS_PRIVATE) + { + /* Don't try to free vpp's heap! */ + if (!(s->h->flags & FIFO_SEGMENT_F_IS_MAIN_HEAP)) + mheap_free (s->ssvm.sh->heap); + clib_mem_free (s->ssvm.sh); + clib_mem_free (s->h); + pool_put (sm->segments, s); + } + else + { + ssvm_delete (&s->ssvm); + pool_put (sm->segments, s); + } } svm_fifo_t * @@ -352,8 +359,8 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, int freelist_index; /* - * 2K minimum. It's not likely that anything good will happen - * with a 1K FIFO. + * 4K minimum. It's not likely that anything good will happen + * with a smaller FIFO. */ if (data_size_in_bytes < FIFO_SEGMENT_MIN_FIFO_SIZE || data_size_in_bytes > FIFO_SEGMENT_MAX_FIFO_SIZE) @@ -428,6 +435,7 @@ found: } fsh->fifos = f; } + fsh->n_active_fifos++; ssvm_pop_heap (oldheap); ssvm_unlock_non_recursive (sh); @@ -489,6 +497,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, f->master_thread_index = ~0; } + fsh->n_active_fifos--; ssvm_pop_heap (oldheap); ssvm_unlock_non_recursive (sh); } @@ -508,6 +517,25 @@ svm_fifo_segment_index (svm_fifo_segment_private_t * s) return s - svm_fifo_segment_main.segments; } +/** + * Retrieve svm segments pool. Used only for debug purposes. 
+ */ +svm_fifo_segment_private_t * +svm_fifo_segment_segments_pool (void) +{ + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + return sm->segments; +} + +/** + * Get number of active fifos + */ +u32 +svm_fifo_segment_num_fifos (svm_fifo_segment_private_t * fifo_segment) +{ + return fifo_segment->h->n_active_fifos; +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 68bb4d3d..5cc4754a 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -27,15 +27,20 @@ typedef enum FIFO_SEGMENT_N_FREELISTS } svm_fifo_segment_freelist_t; -#define FIFO_SEGMENT_MIN_FIFO_SIZE 2048 +#define FIFO_SEGMENT_MIN_FIFO_SIZE 4096 #define FIFO_SEGMENT_MAX_FIFO_SIZE (8<<20) /* 8mb max fifo size */ -#define FIFO_SEGMENT_ALLOC_CHUNK_SIZE 32 /* allocation quantum */ +#define FIFO_SEGMENT_ALLOC_CHUNK_SIZE 32 /* Allocation quantum */ + +#define FIFO_SEGMENT_F_IS_PRIVATE 1 << 0 /* Private segment */ +#define FIFO_SEGMENT_F_IS_MAIN_HEAP 1 << 1 /* Segment is main heap */ typedef struct { svm_fifo_t *fifos; /**< Linked list of active RX fifos */ u8 *segment_name; /**< Segment name */ svm_fifo_t **free_fifos; /**< Freelists, by fifo size */ + u32 n_active_fifos; /**< Number of active fifos */ + u8 flags; /**< Segment flags */ } svm_fifo_segment_header_t; typedef struct @@ -70,7 +75,7 @@ typedef struct } svm_fifo_segment_create_args_t; static inline svm_fifo_segment_private_t * -svm_fifo_get_segment (u32 segment_index) +svm_fifo_segment_get_segment (u32 segment_index) { svm_fifo_segment_main_t *ssm = &svm_fifo_segment_main; return vec_elt_at_index (ssm->segments, segment_index); @@ -112,6 +117,8 @@ void svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_segment_freelist_t index); void svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds); u32 svm_fifo_segment_index (svm_fifo_segment_private_t * s); +u32 svm_fifo_segment_num_fifos (svm_fifo_segment_private_t * fifo_segment); +svm_fifo_segment_private_t *svm_fifo_segment_segments_pool (void); #endif /* __included_ssvm_fifo_segment_h__ */ diff --git a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c index 63d75845..3bdca949 100644 --- a/src/svm/test_svm_fifo1.c +++ b/src/svm/test_svm_fifo1.c @@ -39,7 +39,7 @@ hello_world (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_indices[0]); + sp = svm_fifo_segment_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); @@ -92,7 +92,7 @@ master (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_indices[0]); + sp = svm_fifo_segment_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); @@ -128,7 +128,7 @@ mempig (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_indices[0]); + sp = svm_fifo_segment_get_segment (a->new_segment_indices[0]); for (i = 0; i < 1000; i++) { @@ -186,7 +186,7 @@ offset (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_indices[0]); + sp = svm_fifo_segment_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 200 << 10, FIFO_SEGMENT_RX_FREELIST); @@ -246,7 +246,7 @@ slave (int verbose) if (rv) return 
clib_error_return (0, "svm_fifo_segment_attach returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_indices[0]); + sp = svm_fifo_segment_get_segment (a->new_segment_indices[0]); sh = sp->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; diff --git a/src/uri/vppcom.c b/src/uri/vppcom.c index 9a4bea8f..c41acd54 100644 --- a/src/uri/vppcom.c +++ b/src/uri/vppcom.c @@ -1076,7 +1076,7 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) session->sm_seg_index = a->new_segment_indices[0]; vec_reset_length (a->new_segment_indices); - seg = svm_fifo_get_segment (session->sm_seg_index); + seg = svm_fifo_segment_get_segment (session->sm_seg_index); rx_fifo = session->server_rx_fifo = svm_fifo_segment_alloc_fifo (seg, vcm->cfg.rx_fifo_size, FIFO_SEGMENT_RX_FREELIST); diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 25a4efa5..d105119c 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -80,8 +80,8 @@ application_new () pool_get (app_pool, app); memset (app, 0, sizeof (*app)); app->index = application_get_index (app); - app->connects_seg_manager = ~0; - app->first_segment_manager = ~0; + app->connects_seg_manager = APP_INVALID_SEGMENT_MANAGER_INDEX; + app->first_segment_manager = APP_INVALID_SEGMENT_MANAGER_INDEX; if (CLIB_DEBUG > 1) clib_warning ("[%d] New app (%d)", getpid (), app->index); return app; @@ -104,14 +104,8 @@ application_del (application_t * app) clib_warning ("[%d] Delete app (%d)", getpid (), app->index); /* - * Cleanup segment managers + * Listener cleanup */ - if ((app->connects_seg_manager != (u32) ~ 0) && - (app->connects_seg_manager != app->first_segment_manager)) - { - sm = segment_manager_get (app->connects_seg_manager); - segment_manager_del (sm); - } /* *INDENT-OFF* */ hash_foreach (handle, index, app->listeners_table, @@ -120,7 +114,6 @@ application_del (application_t * app) })); /* *INDENT-ON* */ - /* Actual listener cleanup */ for (i = 0; i < vec_len (handles); i++) { a->app_index = app->index; @@ -129,10 +122,30 @@ application_del (application_t * app) vnet_unbind (a); } - if (app->first_segment_manager != ~0) + /* + * Connects segment manager cleanup + */ + + if (app->connects_seg_manager != APP_INVALID_SEGMENT_MANAGER_INDEX) + { + sm = segment_manager_get (app->connects_seg_manager); + sm->app_index = SEGMENT_MANAGER_INVALID_APP_INDEX; + segment_manager_init_del (sm); + } + + + /* If first segment manager is used by a listener */ + if (app->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX + && app->first_segment_manager != app->connects_seg_manager) { sm = segment_manager_get (app->first_segment_manager); - segment_manager_first_segment_maybe_del (sm); + /* .. and has no fifos, e.g. it might be used for redirected sessions, + * remove it */ + if (!segment_manager_has_fifos (sm)) + { + sm->app_index = SEGMENT_MANAGER_INVALID_APP_INDEX; + segment_manager_del (sm); + } } application_table_del (app); @@ -159,6 +172,7 @@ application_init (application_t * app, u32 api_client_index, u64 * options, segment_manager_t *sm; segment_manager_properties_t *props; u32 app_evt_queue_size, first_seg_size; + u32 default_rx_fifo_size = 16 << 10, default_tx_fifo_size = 16 << 10; int rv; app_evt_queue_size = options[APP_EVT_QUEUE_SIZE] > 0 ? 
@@ -170,7 +184,11 @@ application_init (application_t * app, u32 api_client_index, u64 * options, props = &app->sm_properties; props->add_segment_size = options[SESSION_OPTIONS_ADD_SEGMENT_SIZE]; props->rx_fifo_size = options[SESSION_OPTIONS_RX_FIFO_SIZE]; + props->rx_fifo_size = + props->rx_fifo_size ? props->rx_fifo_size : default_rx_fifo_size; props->tx_fifo_size = options[SESSION_OPTIONS_TX_FIFO_SIZE]; + props->tx_fifo_size = + props->tx_fifo_size ? props->tx_fifo_size : default_tx_fifo_size; props->add_segment = props->add_segment_size != 0; props->preallocated_fifo_pairs = options[APP_OPTIONS_PREALLOC_FIFO_PAIRS]; props->use_private_segment = options[APP_OPTIONS_FLAGS] @@ -181,6 +199,7 @@ application_init (application_t * app, u32 api_client_index, u64 * options, first_seg_size = options[SESSION_OPTIONS_SEGMENT_SIZE]; if ((rv = segment_manager_init (sm, props, first_seg_size))) return rv; + sm->first_is_protected = 1; app->first_segment_manager = segment_manager_index (sm); app->api_client_index = api_client_index; @@ -225,7 +244,8 @@ application_alloc_segment_manager (application_t * app) { segment_manager_t *sm = 0; - if (app->first_segment_manager != (u32) ~ 0 + /* If the first segment manager is not in use, don't allocate a new one */ + if (app->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX && app->first_segment_manager_in_use == 0) { sm = segment_manager_get (app->first_segment_manager); @@ -302,11 +322,15 @@ application_stop_listen (application_t * srv, u64 handle) ASSERT (indexp); sm = segment_manager_get (*indexp); - segment_manager_del (sm); if (srv->first_segment_manager == *indexp) { + /* Delete sessions but don't remove segment manager */ srv->first_segment_manager_in_use = 0; - srv->first_segment_manager = ~0; + segment_manager_del_sessions (sm); + } + else + { + segment_manager_init_del (sm); } hash_unset (srv->listeners_table, handle); listen_session_del (listener); @@ -379,6 +403,21 @@ application_is_proxy (application_t * app) return !(app->flags & APP_OPTIONS_FLAGS_IS_PROXY); } +int +application_add_segment_notify (u32 app_index, u32 fifo_segment_index) +{ + application_t *app = application_get (app_index); + u32 seg_size = 0; + u8 *seg_name; + + /* Send an API message to the external app, to map new segment */ + ASSERT (app->cb_fns.add_segment_callback); + + segment_manager_get_segment_info (fifo_segment_index, &seg_name, &seg_size); + return app->cb_fns.add_segment_callback (app->api_client_index, seg_name, + seg_size); +} + u8 * format_application_listener (u8 * s, va_list * args) { @@ -449,7 +488,7 @@ application_format_connects (application_t * app, int verbose) svm_fifo_t *fifo; u8 *str; - fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); + fifo_segment = svm_fifo_segment_get_segment (sm->segment_indices[j]); fifo = svm_fifo_segment_get_fifo_list (fifo_segment); while (fifo) { diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 95a39c05..e030c376 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -82,9 +82,12 @@ typedef struct _application */ u32 connects_seg_manager; - /* Lookup tables for listeners. Value is segment manager index */ + /** Lookup tables for listeners. Value is segment manager index */ uword *listeners_table; + /** First segment manager has in the first segment the application's + * event fifo. Depending on what the app does, it may be either used for + * a listener or for connects.
*/ u32 first_segment_manager; u8 first_segment_manager_in_use; @@ -92,6 +95,8 @@ typedef struct _application segment_manager_properties_t sm_properties; } application_t; +#define APP_INVALID_SEGMENT_MANAGER_INDEX ((u32) ~0) + application_t *application_new (); int application_init (application_t * app, u32 api_client_index, u64 * options, @@ -118,6 +123,7 @@ segment_manager_t *application_get_listen_segment_manager (application_t * segment_manager_t *application_get_connect_segment_manager (application_t * app); int application_is_proxy (application_t * app); +int application_add_segment_notify (u32 app_index, u32 fifo_segment_index); #endif /* SRC_VNET_SESSION_APPLICATION_H_ */ diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index 43977063..7cf66ee3 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -42,7 +42,7 @@ void segment_manager_get_segment_info (u32 index, u8 ** name, u32 * size) { svm_fifo_segment_private_t *s; - s = svm_fifo_get_segment (index); + s = svm_fifo_segment_get_segment (index); *name = s->h->segment_name; *size = s->ssvm.ssvm_size; } @@ -56,23 +56,39 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, memset (ca, 0, sizeof (*ca)); - ca->segment_name = (char *) segment_name; - ca->segment_size = segment_size; - ca->rx_fifo_size = sm->properties->rx_fifo_size; - ca->tx_fifo_size = sm->properties->tx_fifo_size; - ca->preallocated_fifo_pairs = sm->properties->preallocated_fifo_pairs; + if (!sm->properties->use_private_segment) + { + ca->segment_name = (char *) segment_name; + ca->segment_size = segment_size; + ca->rx_fifo_size = sm->properties->rx_fifo_size; + ca->tx_fifo_size = sm->properties->tx_fifo_size; + ca->preallocated_fifo_pairs = sm->properties->preallocated_fifo_pairs; - rv = svm_fifo_segment_create (ca); - if (rv) + rv = svm_fifo_segment_create (ca); + if (rv) + { + clib_warning ("svm_fifo_segment_create ('%s', %d) failed", + ca->segment_name, ca->segment_size); + return VNET_API_ERROR_SVM_SEGMENT_CREATE_FAIL; + } + } + else { - clib_warning ("svm_fifo_segment_create ('%s', %d) failed", - ca->segment_name, ca->segment_size); - return VNET_API_ERROR_SVM_SEGMENT_CREATE_FAIL; + ca->segment_name = "process-private-segment"; + ca->segment_size = ~0; + ca->rx_fifo_size = sm->properties->rx_fifo_size; + ca->tx_fifo_size = sm->properties->tx_fifo_size; + ca->preallocated_fifo_pairs = sm->properties->preallocated_fifo_pairs; + ca->private_segment_count = sm->properties->private_segment_count; + ca->private_segment_size = sm->properties->private_segment_size; + + if (svm_fifo_segment_create_process_private (ca)) + clib_warning ("Failed to create process private segment"); + + ASSERT (vec_len (ca->new_segment_indices)); } - vec_append (sm->segment_indices, ca->new_segment_indices); vec_free (ca->new_segment_indices); - return 0; } @@ -106,29 +122,13 @@ session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size) return rv; } -static void - segment_manager_alloc_process_private_segment - (segment_manager_properties_t * props) +segment_manager_t * +segment_manager_new () { - svm_fifo_segment_create_args_t _a, *a = &_a; - - if (private_segment_indices) - return; - - memset (a, 0, sizeof (*a)); - a->segment_name = "process-private-segment"; - a->segment_size = ~0; - a->rx_fifo_size = props->rx_fifo_size; - a->tx_fifo_size = props->tx_fifo_size; - a->preallocated_fifo_pairs = props->preallocated_fifo_pairs; - a->private_segment_count = 
props->private_segment_count; - a->private_segment_size = props->private_segment_size; - - if (svm_fifo_segment_create_process_private (a)) - clib_warning ("Failed to create process private segment"); - - private_segment_indices = a->new_segment_indices; - ASSERT (vec_len (private_segment_indices)); + segment_manager_t *sm; + pool_get (segment_managers, sm); + memset (sm, 0, sizeof (*sm)); + return sm; } /** @@ -147,65 +147,73 @@ segment_manager_init (segment_manager_t * sm, first_seg_size = first_seg_size > 0 ? first_seg_size : default_segment_size; - if (sm->properties->use_private_segment == 0) - { - rv = session_manager_add_first_segment (sm, first_seg_size); - if (rv) - { - clib_warning ("Failed to allocate segment"); - return rv; - } - } - else + rv = session_manager_add_first_segment (sm, first_seg_size); + if (rv) { - if (vec_len (private_segment_indices) == 0) - segment_manager_alloc_process_private_segment (properties); - ASSERT (vec_len (private_segment_indices)); - vec_append (sm->segment_indices, private_segment_indices); + clib_warning ("Failed to allocate segment"); + return rv; } clib_spinlock_init (&sm->lockp); return 0; } -void -segment_manager_first_segment_maybe_del (segment_manager_t * sm) +u8 +segment_manager_has_fifos (segment_manager_t * sm) { - svm_fifo_segment_private_t *fifo_segment; - - /* If the first semgment has no fifos, then delete the 1st segment - */ - fifo_segment = svm_fifo_get_segment (sm->segment_indices[0]); - if (!svm_fifo_segment_has_fifos (fifo_segment)) + svm_fifo_segment_private_t *segment; + /* Weird, but handle it */ + if (vec_len (sm->segment_indices) == 0) + return 0; + if (vec_len (sm->segment_indices) == 1) { - clib_spinlock_lock (&sm->lockp); - svm_fifo_segment_delete (fifo_segment); - vec_del1 (sm->segment_indices, 0); - clib_spinlock_unlock (&sm->lockp); + segment = svm_fifo_segment_get_segment (sm->segment_indices[0]); + if (svm_fifo_segment_num_fifos (segment) == 0) + return 0; } + if (CLIB_DEBUG) + { + svm_fifo_segment_private_t *segment; + int i; + for (i = 1; i < vec_len (sm->segment_indices); i++) + { + segment = svm_fifo_segment_get_segment (sm->segment_indices[i]); + if (!svm_fifo_segment_has_fifos (segment)) + clib_warning ("segment has no fifos!"); + } + } + return 1; } - /** - * Removes segment manager. - * - * Since the fifos allocated in the segment keep backpointers to the sessions - * prior to removing the segment, we call session disconnect. This - * subsequently propages into transport. 
+static void +segment_manager_del_segment (segment_manager_t * sm, u32 segment_index) +{ + svm_fifo_segment_private_t *fifo_segment; + u32 svm_segment_index; + clib_spinlock_lock (&sm->lockp); + svm_segment_index = sm->segment_indices[segment_index]; + fifo_segment = svm_fifo_segment_get_segment (svm_segment_index); + svm_fifo_segment_delete (fifo_segment); + vec_del1 (sm->segment_indices, segment_index); + clib_spinlock_unlock (&sm->lockp); +} + +/** + * Initiate disconnects for all sessions 'owned' by a segment manager */ void -segment_manager_del (segment_manager_t * sm) +segment_manager_del_sessions (segment_manager_t * sm) { int j; svm_fifo_segment_private_t *fifo_segment; + svm_fifo_t *fifo; + ASSERT (vec_len (sm->segment_indices)); /* Across all fifo segments used by the server */ for (j = 0; j < vec_len (sm->segment_indices); j++) { - svm_fifo_t *fifo; - - /* Vector of fifos allocated in the segment */ - fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); + fifo_segment = svm_fifo_segment_get_segment (sm->segment_indices[j]); fifo = svm_fifo_segment_get_fifo_list (fifo_segment); /* @@ -220,14 +228,17 @@ segment_manager_del (segment_manager_t * sm) session_index = fifo->master_session_index; thread_index = fifo->master_thread_index; - session = stream_session_get (session_index, thread_index); /* Instead of directly removing the session call disconnect */ - session->session_state = SESSION_STATE_CLOSED; - session_send_session_evt_to_thread (stream_session_handle (session), - FIFO_EVENT_DISCONNECT, - thread_index); + if (session->session_state != SESSION_STATE_CLOSED) + { + session->session_state = SESSION_STATE_CLOSED; + session_send_session_evt_to_thread (stream_session_handle + (session), + FIFO_EVENT_DISCONNECT, + thread_index); + } fifo = fifo->next; } @@ -235,27 +246,51 @@ segment_manager_del (segment_manager_t * sm) * sessions if the segment can be removed. */ } +} - segment_manager_first_segment_maybe_del (sm); +/** + * Removes segment manager. + * + * Since the fifos allocated in the segment keep backpointers to the sessions + * prior to removing the segment, we call session disconnect. This + * subsequently propagates into transport. + */ +void +segment_manager_del (segment_manager_t * sm) +{ + ASSERT (vec_len (sm->segment_indices) <= 1); + if (vec_len (sm->segment_indices)) + { + /* The first segment in the first segment manager is not removed when + * all fifos are removed. It can only be removed when the manager is + * explicitly deleted/detached by the app.
*/ + if (CLIB_DEBUG) + { + svm_fifo_segment_private_t *fifo_segment; + fifo_segment = + svm_fifo_segment_get_segment (sm->segment_indices[0]); + ASSERT (!svm_fifo_segment_has_fifos (fifo_segment)); + } + segment_manager_del_segment (sm, 0); + } clib_spinlock_free (&sm->lockp); + if (CLIB_DEBUG) + memset (sm, 0xfe, sizeof (*sm)); pool_put (segment_managers, sm); } -static int -segment_manager_notify_app_seg_add (segment_manager_t * sm, - u32 fifo_segment_index) +void +segment_manager_init_del (segment_manager_t * sm) { - application_t *app = application_get (sm->app_index); - u32 seg_size = 0; - u8 *seg_name; - - /* Send an API message to the external app, to map new segment */ - ASSERT (app->cb_fns.add_segment_callback); - - segment_manager_get_segment_info (fifo_segment_index, &seg_name, &seg_size); - return app->cb_fns.add_segment_callback (app->api_client_index, seg_name, - seg_size); + if (segment_manager_has_fifos (sm)) + segment_manager_del_sessions (sm); + else + { + ASSERT (!sm->first_is_protected + || sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX); + segment_manager_del (sm); + } } int @@ -280,16 +315,14 @@ again: for (i = 0; i < vec_len (sm->segment_indices); i++) { *fifo_segment_index = sm->segment_indices[i]; - fifo_segment = svm_fifo_get_segment (*fifo_segment_index); + fifo_segment = svm_fifo_segment_get_segment (*fifo_segment_index); - /* FC: cleanup, make sure sm->properties->xxx_fifo_size always set */ fifo_size = sm->properties->rx_fifo_size; fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size, FIFO_SEGMENT_RX_FREELIST); - /* FC: cleanup, make sure sm->properties->xxx_fifo_size always set */ fifo_size = sm->properties->tx_fifo_size; fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; *server_tx_fifo = @@ -327,7 +360,7 @@ again: { if (added_a_segment) { - clib_warning ("added a segment, still cant allocate a fifo"); + clib_warning ("added a segment, still can't allocate a fifo"); clib_spinlock_unlock (&sm->lockp); return SESSION_ERROR_NEW_SEG_NO_SPACE; } @@ -357,7 +390,8 @@ again: clib_spinlock_unlock (&sm->lockp); if (added_a_segment) - return segment_manager_notify_app_seg_add (sm, *fifo_segment_index); + return application_add_segment_notify (sm->app_index, + *fifo_segment_index); return 0; } @@ -368,32 +402,49 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, { segment_manager_t *sm; svm_fifo_segment_private_t *fifo_segment; + u32 i, segment_index = ~0; + u8 is_first; sm = segment_manager_get_if_valid (rx_fifo->segment_manager); /* It's possible to have no segment manager if the session was removed - * as result of a detach */ + * as result of a detach. */ if (!sm) return; - fifo_segment = svm_fifo_get_segment (svm_segment_index); + fifo_segment = svm_fifo_segment_get_segment (svm_segment_index); svm_fifo_segment_free_fifo (fifo_segment, rx_fifo, FIFO_SEGMENT_RX_FREELIST); svm_fifo_segment_free_fifo (fifo_segment, tx_fifo, FIFO_SEGMENT_TX_FREELIST); - /* Don't try to delete process-private segments */ - if (sm->properties->private_segment_count > 0) - return; - - /* Remove segment only if it holds no fifos and not the first */ - if (sm->segment_indices[0] != svm_segment_index - && !svm_fifo_segment_has_fifos (fifo_segment)) + /* + * Try to remove svm segment if it has no fifos. This can be done only if + * the segment is not the first in the segment manager or if it is first + * and it is not protected. 
Moreover, if the segment is first and the app + * has detached from the segment manager, remove the segment manager. + */ + if (!svm_fifo_segment_has_fifos (fifo_segment)) { - clib_spinlock_lock (&sm->lockp); - svm_fifo_segment_delete (fifo_segment); - vec_del1 (sm->segment_indices, svm_segment_index); - clib_spinlock_unlock (&sm->lockp); + is_first = sm->segment_indices[0] == svm_segment_index; + + /* Remove segment if it holds no fifos or first but not protected */ + if (!is_first || !sm->first_is_protected) + { + /* Find the segment manager segment index */ + for (i = 0; i < vec_len (sm->segment_indices); i++) + if (sm->segment_indices[i] == svm_segment_index) + { + segment_index = i; + break; + } + ASSERT (segment_index != (u32) ~ 0); + segment_manager_del_segment (sm, segment_index); + } + + /* Remove segment manager if no sessions and detached from app */ + if (sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX && is_first) + segment_manager_del (sm); } } @@ -410,14 +461,14 @@ segment_manager_alloc_queue (segment_manager_t * sm, u32 queue_size) ASSERT (sm->segment_indices != 0); - segment = svm_fifo_get_segment (sm->segment_indices[0]); + segment = svm_fifo_segment_get_segment (sm->segment_indices[0]); sh = segment->ssvm.sh; oldheap = ssvm_push_heap (sh); - q = - unix_shared_memory_queue_init (queue_size, sizeof (session_fifo_event_t), - 0 /* consumer pid */ , 0 - /* signal when queue non-empty */ ); + q = unix_shared_memory_queue_init (queue_size, + sizeof (session_fifo_event_t), + 0 /* consumer pid */ , + 0 /* signal when queue non-empty */ ); ssvm_pop_heap (oldheap); return q; } @@ -435,7 +486,7 @@ segment_manager_dealloc_queue (segment_manager_t * sm, ASSERT (sm->segment_indices != 0); - segment = svm_fifo_get_segment (sm->segment_indices[0]); + segment = svm_fifo_segment_get_segment (sm->segment_indices[0]); sh = segment->ssvm.sh; oldheap = ssvm_push_heap (sh); @@ -443,6 +494,90 @@ segment_manager_dealloc_queue (segment_manager_t * sm, ssvm_pop_heap (oldheap); } +static clib_error_t * +segment_manager_show_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + svm_fifo_segment_private_t *segments, *seg; + segment_manager_t *sm; + u8 show_segments = 0, verbose = 0, *name; + uword address; + u64 size; + u32 fifos; + mheap_t *heap_header; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "segments")) + show_segments = 1; + else if (unformat (input, "verbose")) + verbose = 1; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + vlib_cli_output (vm, "%d segment managers allocated", + pool_elts (segment_managers)); + if (verbose && pool_elts (segment_managers)) + { + vlib_cli_output (vm, "%-10s%=15s%=12s", "Index", "App Index", + "Segments"); + + /* *INDENT-OFF* */ + pool_foreach (sm, segment_managers, ({ + vlib_cli_output (vm, "%-10d%=15d%=12d", segment_manager_index(sm), + sm->app_index, vec_len (sm->segment_indices)); + })); + /* *INDENT-ON* */ + + } + if (show_segments) + { + segments = svm_fifo_segment_segments_pool (); + vlib_cli_output (vm, "%d svm fifo segments allocated", + pool_elts (segments)); + vlib_cli_output (vm, "%-20s%=12s%=12s%=15s", "Name", "Size (M)", + "Fifos", "Address"); + + /* *INDENT-OFF* */ + pool_foreach (seg, segments, ({ + if (seg->h->flags & FIFO_SEGMENT_F_IS_PRIVATE) + { + address = pointer_to_uword (seg->ssvm.sh->heap); + if (seg->h->flags & FIFO_SEGMENT_F_IS_MAIN_HEAP) + name = format (0, "main heap"); + else + name = format (0, "private 
heap"); + heap_header = mheap_header (seg->ssvm.sh->heap); + size = heap_header->max_size; + } + else + { + address = seg->ssvm.sh->ssvm_va; + size = seg->ssvm.ssvm_size; + name = seg->ssvm.sh->name; + } + fifos = svm_fifo_segment_num_fifos (seg); + vlib_cli_output (vm, "%-20s%=12u%=12u%=15x", name, size << 20, fifos, + address); + if (seg->h->flags & FIFO_SEGMENT_F_IS_PRIVATE) + vec_free (name); + })); + /* *INDENT-ON* */ + + } + return 0; +} + + /* *INDENT-OFF* */ +VLIB_CLI_COMMAND (segment_manager_show_command, static) = +{ + .path = "show segment-manager", + .short_help = "show segment-manager [segments][verbose]", + .function = segment_manager_show_fn, +}; +/* *INDENT-ON* */ + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h index 41abeb22..6e5b8989 100644 --- a/src/vnet/session/segment_manager.h +++ b/src/vnet/session/segment_manager.h @@ -55,23 +55,25 @@ typedef struct _segment_manager /** Owner app index */ u32 app_index; - /** Pointer to manager properties. Could be shared among all of - * an app's segment managers s*/ + /** + * Pointer to manager properties. Could be shared among all of + * an app's segment managers + */ segment_manager_properties_t *properties; + + /** + * First segment should not be deleted unless segment manager is deleted. + * This also indicates that the segment manager is the first to have been + * allocated for the app. + */ + u8 first_is_protected; } segment_manager_t; +#define SEGMENT_MANAGER_INVALID_APP_INDEX ((u32) ~0) + /** Pool of segment managers */ extern segment_manager_t *segment_managers; -always_inline segment_manager_t * -segment_manager_new () -{ - segment_manager_t *sm; - pool_get (segment_managers, sm); - memset (sm, 0, sizeof (*sm)); - return sm; -} - always_inline segment_manager_t * segment_manager_get (u32 index) { @@ -92,6 +94,7 @@ segment_manager_index (segment_manager_t * sm) return sm - segment_managers; } +segment_manager_t *segment_manager_new (); int segment_manager_init (segment_manager_t * sm, segment_manager_properties_t * properties, @@ -100,9 +103,11 @@ segment_manager_init (segment_manager_t * sm, void segment_manager_get_segment_info (u32 index, u8 ** name, u32 * size); int session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size); -void segment_manager_first_segment_maybe_del (segment_manager_t * sm); int session_manager_add_segment (segment_manager_t * sm); +void segment_manager_del_sessions (segment_manager_t * sm); void segment_manager_del (segment_manager_t * sm); +void segment_manager_init_del (segment_manager_t * sm); +u8 segment_manager_has_fifos (segment_manager_t * sm); int segment_manager_alloc_session_fifos (segment_manager_t * sm, svm_fifo_t ** server_rx_fifo, diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 6fe99047..ee22ccbe 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -457,7 +457,10 @@ stream_session_connect_notify (transport_connection_t * tc, u8 is_fail) /* Get the app's index from the handle we stored when opening connection * and the opaque (api_context for external apps) from transport session * index*/ - app = application_get (handle >> 32); + app = application_get_if_valid (handle >> 32); + if (!app) + return -1; + opaque = tc->s_index; if (!is_fail) @@ -562,9 +565,7 @@ stream_session_delete_notify (transport_connection_t * tc) /* App might've been removed already */ s = stream_session_get_if_valid (tc->s_index, tc->thread_index); if (!s) - { -
return; - } + return; stream_session_delete (s); } diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index f60048f1..028dc9d8 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -64,7 +64,8 @@ format_stream_session (u8 * s, va_list * args) stream_session_get_index (ss)); if (ss->session_state == SESSION_STATE_READY - || ss->session_state == SESSION_STATE_ACCEPTING) + || ss->session_state == SESSION_STATE_ACCEPTING + || ss->session_state == SESSION_STATE_CLOSED) { s = format (s, "%U", tp_vft->format_connection, ss->connection_index, ss->thread_index, verbose); @@ -83,16 +84,6 @@ format_stream_session (u8 * s, va_list * args) s = format (s, "%-40U%v", tp_vft->format_half_open, ss->connection_index, str); } - else if (ss->session_state == SESSION_STATE_CLOSED) - { - s = - format (s, "[CL] %U", tp_vft->format_connection, ss->connection_index, - ss->thread_index, verbose); - if (verbose == 1) - s = format (s, "%v", str); - if (verbose > 1) - s = format (s, "%U", format_stream_session_fifos, ss, verbose); - } else { clib_warning ("Session in state: %d!", ss->session_state); @@ -284,9 +275,9 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, { if (once_per_pool && verbose == 1) { - str = - format (str, "%-50s%-15s%-10s%-10s%-10s", "Connection", - "State", "Rx-f", "Tx-f", "S-idx"); + str = format (str, "%-50s%-15s%-10s%-10s%-10s", + "Connection", "State", "Rx-f", "Tx-f", + "S-idx"); vlib_cli_output (vm, "%v", str); vec_reset_length (str); once_per_pool = 0; diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c index cd52742b..dec6d13c 100644 --- a/src/vnet/session/session_node.c +++ b/src/vnet/session/session_node.c @@ -394,7 +394,6 @@ session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, always_inline stream_session_t * session_event_get_session (session_fifo_event_t * e, u8 thread_index) { - ASSERT (e->fifo->master_thread_index == thread_index); return stream_session_get_if_valid (e->fifo->master_session_index, thread_index); } @@ -643,6 +642,8 @@ skip_dequeue: break; case FIFO_EVENT_BUILTIN_RX: s0 = session_event_get_session (e0, my_thread_index); + if (PREDICT_FALSE (!s0)) + continue; svm_fifo_unset_event (s0->server_rx_fifo); app = application_get (s0->app_index); app->cb_fns.builtin_server_rx_callback (s0); diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 6b2b4759..0a826a52 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -316,8 +316,10 @@ tcp_connection_close (tcp_connection_t * tc) tcp_send_fin (tc); tc->state = TCP_STATE_LAST_ACK; break; + case TCP_STATE_FIN_WAIT_1: + break; default: - clib_warning ("shouldn't be here"); + clib_warning ("state: %u", tc->state); } TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc); @@ -836,6 +838,8 @@ format_tcp_connection (u8 * s, va_list * args) tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); u32 verbose = va_arg (*args, u32); + if (!tc) + return s; s = format (s, "%-50U", format_tcp_connection_id, tc); if (verbose) { -- cgit 1.2.3-korg From b7f1faa7fbd4575f28766e552a73810c6de0ace3 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 29 Aug 2017 11:43:37 -0400 Subject: Add fixed-size, preallocated pool support Simply call pool_init_fixed(...) before using the pool. Note that fixed, preallocated pools live in individually-mmap'ed address segments, except for the free element bitmap. A large fixed pool can exceed 4gb. 
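[Editor's illustration, not part of the patch: a minimal sketch of how a fixed pool might be used, loosely modeled on the test_fpool.c added below. The element type, pool name, heap size, element count and header choices are assumptions for the example only.]

#include <vppinfra/mem.h>
#include <vppinfra/pool.h>

typedef struct
{
  u32 session_index;
  u32 thread_index;
} demo_elt_t;

int
main (int argc, char *argv[])
{
  demo_elt_t *demo_pool = 0, *e;

  /* The element storage is mmap'ed by pool_init_fixed itself, but the
     free-element bitmap still lives on the clib heap, so set that up first */
  clib_mem_init (0, 256 << 20);

  /* Preallocate exactly 4096 elements; a fixed pool never expands */
  pool_init_fixed (demo_pool, 4096);

  pool_get (demo_pool, e);      /* served from the preallocated free list */
  e->session_index = 0;
  e->thread_index = 0;

  pool_put (demo_pool, e);      /* exceeding 4096 outstanding elements is fatal */
  pool_free (demo_pool);        /* unmaps the fixed segment */
  return 0;
}
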
Fix tcp buffer allocator leak, remove broken assert Change-Id: I4421082e12a77c41c6e20f7747f3150dcd01fc26 Signed-off-by: Dave Barach --- src/svm/svm_fifo_segment.c | 2 +- src/vnet/session/application_interface.c | 19 +++++ src/vnet/session/session.c | 32 +++----- src/vnet/session/session_cli.c | 4 +- src/vnet/tcp/tcp.c | 24 +++--- src/vnet/tcp/tcp_output.c | 42 ++++++---- src/vppinfra.am | 26 +++--- src/vppinfra/bihash_24_8.h | 2 +- src/vppinfra/pool.c | 131 +++++++++++++++++++++++++++++++ src/vppinfra/pool.h | 119 +++++++++++++++++++++------- src/vppinfra/test_fpool.c | 69 ++++++++++++++++ src/vppinfra/tw_timer_16t_1w_2048sl.h | 1 + src/vppinfra/tw_timer_16t_2w_512sl.h | 3 +- src/vppinfra/tw_timer_1t_3w_1024sl_ov.h | 1 + src/vppinfra/tw_timer_2t_1w_2048sl.h | 1 + src/vppinfra/tw_timer_4t_3w_256sl.h | 1 + src/vppinfra/tw_timer_4t_3w_4sl_ov.h | 1 + src/vppinfra/tw_timer_template.c | 116 +++++++++++++++++++++++++-- src/vppinfra/tw_timer_template.h | 24 +++++- 19 files changed, 520 insertions(+), 98 deletions(-) create mode 100644 src/vppinfra/pool.c create mode 100644 src/vppinfra/test_fpool.c (limited to 'src/svm') diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 6600a423..97d9976b 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -461,7 +461,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, freelist_index = f->freelist_index; - ASSERT (freelist_index > 0 && freelist_index < vec_len (fsh->free_fifos)); + ASSERT (freelist_index < vec_len (fsh->free_fifos)); ssvm_lock_non_recursive (sh, 2); oldheap = ssvm_push_heap (sh); diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 566a52d7..8dbc3a1a 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -207,11 +207,22 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args) return 0; } +static u8 *cache_uri; +static session_type_t cache_sst; +static transport_endpoint_t *cache_tep; + int parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep) { unformat_input_t _input, *input = &_input; + if (cache_uri && !strncmp (uri, (char *) cache_uri, vec_len (cache_uri))) + { + *sst = cache_sst; + *tep = *cache_tep; + return 0; + } + /* Make sure */ uri = (char *) format (0, "%s%c", uri, 0); @@ -224,6 +235,14 @@ parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep) } unformat_free (input); + vec_free (cache_uri); + cache_uri = (u8 *) uri; + cache_sst = *sst; + if (cache_tep) + clib_mem_free (cache_tep); + cache_tep = clib_mem_alloc (sizeof (*tep)); + *cache_tep = *tep; + return 0; } diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index dcd141f1..17644e29 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -889,32 +889,24 @@ session_manager_main_enable (vlib_main_t * vm) session_vpp_event_queue_allocate (smm, i); /* Preallocate sessions */ - if (num_threads == 1) + if (smm->preallocated_sessions) { - for (i = 0; i < smm->preallocated_sessions; i++) + if (num_threads == 1) { - stream_session_t *ss __attribute__ ((unused)); - pool_get_aligned (smm->sessions[0], ss, CLIB_CACHE_LINE_BYTES); + pool_init_fixed (smm->sessions[0], smm->preallocated_sessions); } - - for (i = 0; i < smm->preallocated_sessions; i++) - pool_put_index (smm->sessions[0], i); - } - else - { - int j; - preallocated_sessions_per_worker = smm->preallocated_sessions / - (num_threads - 1); - - for (j = 1; j < num_threads; j++) + else { 
- for (i = 0; i < preallocated_sessions_per_worker; i++) + int j; + preallocated_sessions_per_worker = + (1.1 * (f64) smm->preallocated_sessions / + (f64) (num_threads - 1)); + + for (j = 1; j < num_threads; j++) { - stream_session_t *ss __attribute__ ((unused)); - pool_get_aligned (smm->sessions[j], ss, CLIB_CACHE_LINE_BYTES); + pool_init_fixed (smm->sessions[j], + preallocated_sessions_per_worker); } - for (i = 0; i < preallocated_sessions_per_worker; i++) - pool_put_index (smm->sessions[j], i); } } diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index 028dc9d8..d9f516be 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -115,8 +115,8 @@ unformat_stream_session_id (unformat_input_t * input, va_list * args) { *proto = TRANSPORT_PROTO_UDP; } - else if (unformat (input, "%U:%d->%U:%d", unformat_ip4_address, &lcl->ip4, - lcl_port, unformat_ip4_address, &rmt->ip4, rmt_port)) + if (unformat (input, "%U:%d->%U:%d", unformat_ip4_address, &lcl->ip4, + lcl_port, unformat_ip4_address, &rmt->ip4, rmt_port)) { *is_ip4 = 1; tuple_is_set = 1; diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 0a826a52..a4c13084 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -1150,6 +1150,10 @@ tcp_timer_establish_handler (u32 conn_index) else { tc = tcp_connection_get (conn_index, vlib_get_thread_index ()); + /* note: the connection may have already disappeared */ + if (PREDICT_FALSE (tc == 0)) + return; + ASSERT (tc->state == TCP_STATE_SYN_RCVD); } tc->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID; @@ -1244,7 +1248,7 @@ tcp_main_enable (vlib_main_t * vm) vlib_thread_main_t *vtm = vlib_get_thread_main (); clib_error_t *error = 0; u32 num_threads; - int i, thread; + int thread; tcp_connection_t *tc __attribute__ ((unused)); u32 preallocated_connections_per_thread; @@ -1297,21 +1301,17 @@ tcp_main_enable (vlib_main_t * vm) } for (; thread < num_threads; thread++) { - for (i = 0; i < preallocated_connections_per_thread; i++) - pool_get (tm->connections[thread], tc); - - for (i = 0; i < preallocated_connections_per_thread; i++) - pool_put_index (tm->connections[thread], i); + if (preallocated_connections_per_thread) + pool_init_fixed (tm->connections[thread], + preallocated_connections_per_thread); } /* - * Preallocate half-open connections + * Use a preallocated half-open connection pool? 
*/ - for (i = 0; i < tm->preallocated_half_open_connections; i++) - pool_get (tm->half_open_connections, tc); - - for (i = 0; i < tm->preallocated_half_open_connections; i++) - pool_put_index (tm->half_open_connections, i); + if (tm->preallocated_half_open_connections) + pool_init_fixed (tm->half_open_connections, + tm->preallocated_half_open_connections); /* Initialize per worker thread tx buffers (used for control messages) */ vec_validate (tm->tx_buffers, num_threads - 1); diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 02555513..15a9dcb4 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -440,13 +440,16 @@ tcp_init_mss (tcp_connection_t * tc) always_inline int tcp_alloc_tx_buffers (tcp_main_t * tm, u8 thread_index, u32 n_free_buffers) { + u32 current_length = vec_len (tm->tx_buffers[thread_index]); + vec_validate (tm->tx_buffers[thread_index], - vec_len (tm->tx_buffers[thread_index]) + n_free_buffers - 1); + current_length + n_free_buffers - 1); _vec_len (tm->tx_buffers[thread_index]) = - vlib_buffer_alloc_from_free_list (vlib_get_main (), - tm->tx_buffers[thread_index], - n_free_buffers, - VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); + current_length + vlib_buffer_alloc_from_free_list (vlib_get_main (), + tm->tx_buffers + [thread_index], + n_free_buffers, + VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); /* buffer shortage, report failure */ if (vec_len (tm->tx_buffers[thread_index]) == 0) { @@ -1293,11 +1296,17 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) if (is_syn) { tc = tcp_half_open_connection_get (index); + /* Note: the connection may have transitioned to ESTABLISHED... */ + if (PREDICT_FALSE (tc == 0)) + return; tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID; } else { tc = tcp_connection_get (index, thread_index); + /* Note: the connection may have been closed and pool_put */ + if (PREDICT_FALSE (tc == 0)) + return; tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID; } @@ -1332,25 +1341,27 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1); - /* Send one segment */ + /* Send one segment. 
Note that n_bytes may be zero due to buffer shortfall */ n_bytes = tcp_prepare_retransmit_segment (tc, 0, tc->snd_mss, &b); - ASSERT (n_bytes); - bi = vlib_get_buffer_index (vm, b); + /* TODO be less aggressive about this */ scoreboard_clear (&tc->sack_sb); if (n_bytes == 0) { - clib_warning ("could not retransmit anything"); - clib_warning ("%U", format_tcp_connection, tc, 2); - + if (b) + { + clib_warning ("retransmit fail: %U", format_tcp_connection, tc, + 2); + ASSERT (tc->rto_boff > 1 && tc->snd_una == tc->snd_congestion); + } /* Try again eventually */ tcp_retransmit_timer_set (tc); - ASSERT (0 || (tc->rto_boff > 1 - && tc->snd_una == tc->snd_congestion)); return; } + bi = vlib_get_buffer_index (vm, b); + /* For first retransmit, record timestamp (Eifel detection RFC3522) */ if (tc->rto_boff == 1) tc->snd_rxt_ts = tcp_time_now (); @@ -1378,7 +1389,10 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) - return; + { + clib_warning ("tcp_get_free_buffer_index FAIL"); + return; + } b = vlib_get_buffer (vm, bi); tcp_init_buffer (vm, b); tcp_push_hdr_i (tc, b, tc->state, 1); diff --git a/src/vppinfra.am b/src/vppinfra.am index 533bacd6..8f01114c 100644 --- a/src/vppinfra.am +++ b/src/vppinfra.am @@ -18,10 +18,11 @@ TESTS = if ENABLE_TESTS TESTS += test_bihash_template \ test_dlist \ - test_elog \ test_elf \ + test_elog \ test_fifo \ test_format \ + test_fpool \ test_hash \ test_heap \ test_longjmp \ @@ -42,17 +43,16 @@ TESTS += test_bihash_template \ test_zvec endif -TESTS += test_bihash_template - noinst_PROGRAMS = $(TESTS) check_PROGRAMS = $(TESTS) test_bihash_template_SOURCES = vppinfra/test_bihash_template.c test_dlist_SOURCES = vppinfra/test_dlist.c -test_elog_SOURCES = vppinfra/test_elog.c test_elf_SOURCES = vppinfra/test_elf.c +test_elog_SOURCES = vppinfra/test_elog.c test_fifo_SOURCES = vppinfra/test_fifo.c test_format_SOURCES = vppinfra/test_format.c +test_fpool_SOURCES = vppinfra/test_fpool.c test_hash_SOURCES = vppinfra/test_hash.c test_heap_SOURCES = vppinfra/test_heap.c test_longjmp_SOURCES = vppinfra/test_longjmp.c @@ -61,8 +61,8 @@ test_md5_SOURCES = vppinfra/test_md5.c test_mheap_SOURCES = vppinfra/test_mheap.c test_pool_iterate_SOURCES = vppinfra/test_pool_iterate.c test_ptclosure_SOURCES = vppinfra/test_ptclosure.c -test_random_SOURCES = vppinfra/test_random.c test_random_isaac_SOURCES = vppinfra/test_random_isaac.c +test_random_SOURCES = vppinfra/test_random.c test_serialize_SOURCES = vppinfra/test_serialize.c test_slist_SOURCES = vppinfra/test_slist.c test_socket_SOURCES = vppinfra/test_socket.c @@ -76,10 +76,11 @@ test_zvec_SOURCES = vppinfra/test_zvec.c # So we'll need -DDEBUG to enable ASSERTs test_bihash_template_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_dlist_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG -test_elog_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_elf_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG +test_elog_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_fifo_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_format_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG +test_fpool_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_hash_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_heap_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_longjmp_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG @@ -90,9 +91,9 @@ test_pool_iterate_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_ptclosure_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_random_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_random_isaac_CPPFLAGS = 
$(AM_CPPFLAGS) -DCLIB_DEBUG -test_socket_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_serialize_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_slist_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG +test_socket_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_time_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_timing_wheel_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_tw_timer_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG @@ -101,10 +102,11 @@ test_zvec_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_bihash_template_LDADD = libvppinfra.la test_dlist_LDADD = libvppinfra.la -test_elog_LDADD = libvppinfra.la test_elf_LDADD = libvppinfra.la +test_elog_LDADD = libvppinfra.la test_fifo_LDADD = libvppinfra.la test_format_LDADD = libvppinfra.la +test_fpool_LDADD = libvppinfra.la test_hash_LDADD = libvppinfra.la test_heap_LDADD = libvppinfra.la test_longjmp_LDADD = libvppinfra.la @@ -113,8 +115,8 @@ test_md5_LDADD = libvppinfra.la test_mheap_LDADD = libvppinfra.la test_pool_iterate_LDADD = libvppinfra.la test_ptclosure_LDADD = libvppinfra.la -test_random_LDADD = libvppinfra.la test_random_isaac_LDADD = libvppinfra.la +test_random_LDADD = libvppinfra.la test_serialize_LDADD = libvppinfra.la test_slist_LDADD = libvppinfra.la test_socket_LDADD = libvppinfra.la @@ -126,10 +128,11 @@ test_zvec_LDADD = libvppinfra.la test_bihash_template_LDFLAGS = -static test_dlist_LDFLAGS = -static -test_elog_LDFLAGS = -static test_elf_LDFLAGS = -static +test_elog_LDFLAGS = -static test_fifo_LDFLAGS = -static test_format_LDFLAGS = -static +test_fpool_LDFLAGS = -static test_hash_LDFLAGS = -static test_heap_LDFLAGS = -static test_longjmp_LDFLAGS = -static @@ -138,8 +141,8 @@ test_md5_LDFLAGS = -static test_mheap_LDFLAGS = -static test_pool_iterate_LDFLAGS = -static test_ptclosure_LDFLAGS = -static -test_random_LDFLAGS = -static test_random_isaac_LDFLAGS = -static +test_random_LDFLAGS = -static test_serialize_LDFLAGS = -static test_slist_LDFLAGS = -static test_socket_LDFLAGS = -static @@ -247,6 +250,7 @@ CLIB_CORE = \ vppinfra/fifo.c \ vppinfra/fheap.c \ vppinfra/format.c \ + vppinfra/pool.c \ vppinfra/graph.c \ vppinfra/hash.c \ vppinfra/heap.c \ diff --git a/src/vppinfra/bihash_24_8.h b/src/vppinfra/bihash_24_8.h index d0be028c..173168fe 100644 --- a/src/vppinfra/bihash_24_8.h +++ b/src/vppinfra/bihash_24_8.h @@ -18,7 +18,7 @@ #define BIHASH_TYPE _24_8 #define BIHASH_KVP_PER_PAGE 4 -#define BIHASH_KVP_CACHE_SIZE 3 +#define BIHASH_KVP_CACHE_SIZE 0 #ifndef __included_bihash_24_8_h__ #define __included_bihash_24_8_h__ diff --git a/src/vppinfra/pool.c b/src/vppinfra/pool.c new file mode 100644 index 00000000..ed83b41a --- /dev/null +++ b/src/vppinfra/pool.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* + Copyright (c) 2001, 2002, 2003, 2004 Eliot Dresselhaus + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +#include + +void +_pool_init_fixed (void **pool_ptr, u32 elt_size, u32 max_elts) +{ + u8 *mmap_base; + u64 vector_size; + u64 free_index_size; + u64 total_size; + u64 page_size; + pool_header_t *fh; + vec_header_t *vh; + u8 *v; + u32 *fi; + u32 i; + u32 set_bits; + + ASSERT (elt_size); + ASSERT (max_elts); + + vector_size = pool_aligned_header_bytes + vec_header_bytes (0) + + (u64) elt_size *max_elts; + + free_index_size = vec_header_bytes (0) + sizeof (u32) * max_elts; + + /* Round up to a cache line boundary */ + vector_size = (vector_size + CLIB_CACHE_LINE_BYTES - 1) + & ~(CLIB_CACHE_LINE_BYTES - 1); + + free_index_size = (free_index_size + CLIB_CACHE_LINE_BYTES - 1) + & ~(CLIB_CACHE_LINE_BYTES - 1); + + total_size = vector_size + free_index_size; + + /* Round up to an even number of pages */ + page_size = clib_mem_get_page_size (); + total_size = (total_size + page_size - 1) & ~(page_size - 1); + + /* mmap demand zero memory */ + + mmap_base = mmap (0, total_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + if (mmap_base == MAP_FAILED) + { + clib_unix_warning ("mmap"); + *pool_ptr = 0; + } + + /* First comes the pool header */ + fh = (pool_header_t *) mmap_base; + /* Find the user vector pointer */ + v = (u8 *) (mmap_base + pool_aligned_header_bytes); + /* Finally, the vector header */ + vh = _vec_find (v); + + fh->free_bitmap = 0; /* No free elts (yet) */ + fh->max_elts = max_elts; + fh->mmap_base = mmap_base; + fh->mmap_size = total_size; + + vh->len = max_elts; + + /* Build the free-index vector */ + vh = (vec_header_t *) (v + vector_size); + vh->len = max_elts; + fi = (u32 *) (vh + 1); + + fh->free_indices = fi; + + /* Set the entire free bitmap */ + clib_bitmap_alloc (fh->free_bitmap, max_elts); + memset (fh->free_bitmap, 0xff, vec_len (fh->free_bitmap) * sizeof (uword)); + + /* Clear any extraneous set bits */ + set_bits = vec_len (fh->free_bitmap) * BITS (uword); + + for (i = max_elts; i < set_bits; i++) + fh->free_bitmap = clib_bitmap_set (fh->free_bitmap, i, 0); + + /* Create the initial free vector */ + for (i = 0; i < max_elts; i++) + fi[i] = (max_elts - 1) - i; + + *pool_ptr = v; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra/pool.h b/src/vppinfra/pool.h index 56536b77..62d5b54e 100644 --- a/src/vppinfra/pool.h +++ b/src/vppinfra/pool.h @@ 
-56,6 +56,16 @@ typedef struct /** Vector of free indices. One element for each set bit in bitmap. */ u32 *free_indices; + + /* The following fields are set for fixed-size, preallocated pools */ + + /** Maximum size of the pool, in elements */ + u32 max_elts; + + /** mmap segment info: base + length */ + u8 *mmap_base; + u64 mmap_size; + } pool_header_t; /** Align pool header so that pointers are naturally aligned. */ @@ -69,6 +79,15 @@ pool_header (void *v) return vec_aligned_header (v, sizeof (pool_header_t), sizeof (void *)); } +extern void _pool_init_fixed (void **, u32, u32); +extern void fpool_free (void *); + +/** initialize a fixed-size, preallocated pool */ +#define pool_init_fixed(pool,max_elts) \ +{ \ + _pool_init_fixed((void **)&(pool),sizeof(pool[0]),max_elts); \ +} + /** Validate a pool */ always_inline void pool_validate (void *v) @@ -98,7 +117,7 @@ pool_header_validate_index (void *v, uword index) do { \ uword __pool_validate_index = (i); \ vec_validate_ha ((v), __pool_validate_index, \ - pool_aligned_header_bytes, /* align */ 0); \ + pool_aligned_header_bytes, /* align */ 0); \ pool_header_validate_index ((v), __pool_validate_index); \ } while (0) @@ -166,34 +185,40 @@ pool_free_elts (void *v) First search free list. If nothing is free extend vector of objects. */ -#define pool_get_aligned(P,E,A) \ -do { \ - pool_header_t * _pool_var (p) = pool_header (P); \ - uword _pool_var (l); \ - \ - _pool_var (l) = 0; \ - if (P) \ - _pool_var (l) = vec_len (_pool_var (p)->free_indices); \ - \ - if (_pool_var (l) > 0) \ - { \ - /* Return free element from free list. */ \ +#define pool_get_aligned(P,E,A) \ +do { \ + pool_header_t * _pool_var (p) = pool_header (P); \ + uword _pool_var (l); \ + \ + _pool_var (l) = 0; \ + if (P) \ + _pool_var (l) = vec_len (_pool_var (p)->free_indices); \ + \ + if (_pool_var (l) > 0) \ + { \ + /* Return free element from free list. */ \ uword _pool_var (i) = _pool_var (p)->free_indices[_pool_var (l) - 1]; \ - (E) = (P) + _pool_var (i); \ - _pool_var (p)->free_bitmap = \ + (E) = (P) + _pool_var (i); \ + _pool_var (p)->free_bitmap = \ clib_bitmap_andnoti (_pool_var (p)->free_bitmap, _pool_var (i)); \ - _vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \ - } \ - else \ - { \ - /* Nothing on free list, make a new element and return it. */ \ - P = _vec_resize (P, \ - /* length_increment */ 1, \ + _vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \ + } \ + else \ + { \ + /* fixed-size, preallocated pools cannot expand */ \ + if ((P) && _pool_var(p)->max_elts) \ + { \ + clib_warning ("can't expand fixed-size pool"); \ + os_out_of_memory(); \ + } \ + /* Nothing on free list, make a new element and return it. */ \ + P = _vec_resize (P, \ + /* length_increment */ 1, \ /* new size */ (vec_len (P) + 1) * sizeof (P[0]), \ - pool_aligned_header_bytes, \ - /* align */ (A)); \ - E = vec_end (P) - 1; \ - } \ + pool_aligned_header_bytes, \ + /* align */ (A)); \ + E = vec_end (P) - 1; \ + } \ } while (0) /** Allocate an object E from a pool P (unspecified alignment). */ @@ -207,7 +232,11 @@ do { \ \ _pool_var (l) = 0; \ if (P) \ + { \ + if (_pool_var (p)->max_elts) \ + return 0; \ _pool_var (l) = vec_len (_pool_var (p)->free_indices); \ + } \ \ /* Free elements, certainly won't expand */ \ if (_pool_var (l) > 0) \ @@ -248,7 +277,16 @@ do { \ /* Add element to free bitmap and to free list. 
*/ \ _pool_var (p)->free_bitmap = \ clib_bitmap_ori (_pool_var (p)->free_bitmap, _pool_var (l)); \ - vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \ + /* Preallocated pool? */ \ + if (_pool_var (p)->max_elts) \ + { \ + ASSERT(_pool_var(l) < _pool_var (p)->max_elts); \ + _pool_var(p)->free_indices[_vec_len(_pool_var(p)->free_indices)] = \ + _pool_var(l); \ + _vec_len(_pool_var(p)->free_indices) += 1; \ + } \ + else \ + vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \ } while (0) /** Free pool element with given index. */ @@ -262,6 +300,17 @@ do { \ #define pool_alloc_aligned(P,N,A) \ do { \ pool_header_t * _p; \ + \ + if ((P)) \ + { \ + _p = pool_header (P); \ + if (_p->max_elts) \ + { \ + clib_warning ("Can't expand fixed-size pool"); \ + os_out_of_memory(); \ + } \ + } \ + \ (P) = _vec_resize ((P), 0, (vec_len (P) + (N)) * sizeof (P[0]), \ pool_aligned_header_bytes, \ (A)); \ @@ -281,8 +330,20 @@ _pool_free (void *v) if (!v) return v; clib_bitmap_free (p->free_bitmap); - vec_free (p->free_indices); - vec_free_h (v, pool_aligned_header_bytes); + + if (p->max_elts) + { + int rv; + + rv = munmap (p->mmap_base, p->mmap_size); + if (rv) + clib_unix_warning ("munmap"); + } + else + { + vec_free (p->free_indices); + vec_free_h (v, pool_aligned_header_bytes); + } return 0; } diff --git a/src/vppinfra/test_fpool.c b/src/vppinfra/test_fpool.c new file mode 100644 index 00000000..e2d67f16 --- /dev/null +++ b/src/vppinfra/test_fpool.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/ + +#include + +/* can be a very large size */ +#define NELTS 1024 + +int +main (int argc, char *argv[]) +{ + u32 *junk = 0; + int i; + u32 *tp = 0; + u32 *indices = 0; + + clib_mem_init (0, 3ULL << 30); + + vec_validate (indices, NELTS - 1); + _vec_len (indices) = 0; + + pool_init_fixed (tp, NELTS); + + for (i = 0; i < NELTS; i++) + { + pool_get (tp, junk); + vec_add1 (indices, junk - tp); + *junk = i; + } + + for (i = 0; i < NELTS; i++) + { + junk = pool_elt_at_index (tp, indices[i]); + ASSERT (*junk == i); + } + + fformat (stdout, "%d pool elts before deletes\n", pool_elts (tp)); + + pool_put_index (tp, indices[12]); + pool_put_index (tp, indices[43]); + + fformat (stdout, "%d pool elts after deletes\n", pool_elts (tp)); + + pool_validate (tp); + + pool_free (tp); + return 0; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra/tw_timer_16t_1w_2048sl.h b/src/vppinfra/tw_timer_16t_1w_2048sl.h index 66cf7d37..761646b3 100644 --- a/src/vppinfra/tw_timer_16t_1w_2048sl.h +++ b/src/vppinfra/tw_timer_16t_1w_2048sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 1 #define TW_SLOTS_PER_RING 2048 diff --git a/src/vppinfra/tw_timer_16t_2w_512sl.h b/src/vppinfra/tw_timer_16t_2w_512sl.h index 00587b8e..029f529d 100644 --- a/src/vppinfra/tw_timer_16t_2w_512sl.h +++ b/src/vppinfra/tw_timer_16t_2w_512sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 2 #define TW_SLOTS_PER_RING 512 @@ -36,7 +37,7 @@ #define LOG2_TW_TIMERS_PER_OBJECT 4 #define TW_SUFFIX _16t_2w_512sl #define TW_FAST_WHEEL_BITMAP 0 -#define TW_TIMER_ALLOW_DUPLICATE_STOP 0 +#define TW_TIMER_ALLOW_DUPLICATE_STOP 1 #include diff --git a/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h index e5e4cc19..0b455e02 100644 --- a/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h +++ b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 3 #define TW_SLOTS_PER_RING 1024 diff --git a/src/vppinfra/tw_timer_2t_1w_2048sl.h b/src/vppinfra/tw_timer_2t_1w_2048sl.h index 98b548b3..6ae86688 100644 --- a/src/vppinfra/tw_timer_2t_1w_2048sl.h +++ b/src/vppinfra/tw_timer_2t_1w_2048sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 1 #define TW_SLOTS_PER_RING 2048 diff --git a/src/vppinfra/tw_timer_4t_3w_256sl.h b/src/vppinfra/tw_timer_4t_3w_256sl.h index 07203de8..16c41bcd 100644 --- a/src/vppinfra/tw_timer_4t_3w_256sl.h +++ b/src/vppinfra/tw_timer_4t_3w_256sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 3 #define TW_SLOTS_PER_RING 256 diff --git a/src/vppinfra/tw_timer_4t_3w_4sl_ov.h b/src/vppinfra/tw_timer_4t_3w_4sl_ov.h index 20a01d05..845ffeac 100644 --- a/src/vppinfra/tw_timer_4t_3w_4sl_ov.h +++ b/src/vppinfra/tw_timer_4t_3w_4sl_ov.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 3 #define 
TW_SLOTS_PER_RING 4 diff --git a/src/vppinfra/tw_timer_template.c b/src/vppinfra/tw_timer_template.c index c0a9685a..aba00142 100644 --- a/src/vppinfra/tw_timer_template.c +++ b/src/vppinfra/tw_timer_template.c @@ -18,6 +18,87 @@ * * */ +#if TW_START_STOP_TRACE_SIZE > 0 + +void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id, + u32 pool_index, u32 handle) +{ + TWT (trace) * t = &tw->traces[tw->trace_index]; + + t->timer_id = timer_id; + t->pool_index = pool_index; + t->handle = handle; + + tw->trace_index++; + if (tw->trace_index == TW_START_STOP_TRACE_SIZE) + { + tw->trace_index = 0; + tw->trace_wrapped++; + } +} + +void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle) +{ + u32 i, start_pos; + TWT (trace) * t; + char *s = "bogus!"; + + /* reverse search for the supplied handle */ + + start_pos = tw->trace_index; + if (start_pos == 0) + start_pos = TW_START_STOP_TRACE_SIZE - 1; + else + start_pos--; + + for (i = start_pos; i > 0; i--) + { + t = &tw->traces[i]; + if (t->handle == handle) + { + switch (t->timer_id) + { + case 0xFF: + s = "stopped"; + break; + case 0xFE: + s = "expired"; + break; + default: + s = "started"; + break; + } + fformat (stderr, "handle 0x%x (%d) %s at trace %d\n", + handle, handle, s, i); + } + } + if (tw->trace_wrapped > 0) + { + for (i = TW_START_STOP_TRACE_SIZE; i >= tw->trace_index; i--) + { + t = &tw->traces[i]; + if (t->handle == handle) + { + switch (t->timer_id) + { + case 0xFF: + s = "stopped"; + break; + case 0xFE: + s = "expired"; + break; + default: + s = "started"; + break; + } + fformat (stderr, "handle 0x%x (%d) %s at trace %d\n", + handle, handle, s, i); + } + } + } +} +#endif /* TW_START_STOP_TRACE_SIZE > 0 */ + static inline u32 TW (make_internal_timer_handle) (u32 pool_index, u32 timer_id) { @@ -127,6 +208,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, t->expiration_time = tw->current_tick + interval; ts = &tw->overflow; timer_addhead (tw->timers, ts->head_index, t - tw->timers); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); +#endif return t - tw->timers; } #endif @@ -177,7 +261,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, ts = &tw->w[TW_TIMER_RING_GLACIER][glacier_ring_offset]; timer_addhead (tw->timers, ts->head_index, t - tw->timers); - +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); +#endif return t - tw->timers; } #endif @@ -193,7 +279,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, ts = &tw->w[TW_TIMER_RING_SLOW][slow_ring_offset]; timer_addhead (tw->timers, ts->head_index, t - tw->timers); - +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); +#endif return t - tw->timers; } #else @@ -208,6 +296,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, #if TW_FAST_WHEEL_BITMAP tw->fast_slot_bitmap = clib_bitmap_set (tw->fast_slot_bitmap, fast_ring_offset, 1); +#endif +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); #endif return t - tw->timers; } @@ -265,6 +356,9 @@ void TW (tw_timer_stop) (TWT (tw_timer_wheel) * tw, u32 handle) if (pool_is_free_index (tw->timers, handle)) return; #endif +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, ~0, ~0, handle); +#endif t = pool_elt_at_index (tw->timers, handle); @@ -302,6 +396,7 @@ TW (tw_timer_wheel_init) (TWT (tw_timer_wheel) * 
tw, tw->timer_interval = timer_interval_in_seconds; tw->ticks_per_second = 1.0 / timer_interval_in_seconds; tw->first_expires_tick = ~0ULL; + vec_validate (tw->expired_timer_handles, 0); _vec_len (tw->expired_timer_handles) = 0; @@ -476,6 +571,9 @@ static inline new_glacier_ring_offset == 0)) { vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } /* Timer moves to the glacier ring */ @@ -536,6 +634,9 @@ static inline t->fast_ring_offset == 0)) { vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } /* Timer expires during slow-wheel tick 0 */ @@ -587,6 +688,9 @@ static inline if (PREDICT_FALSE (t->fast_ring_offset == 0)) { vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } else /* typical case */ @@ -620,6 +724,9 @@ static inline t = pool_elt_at_index (tw->timers, next_index); next_index = t->next; vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } @@ -628,10 +735,7 @@ static inline { /* The callback is optional. We return the u32 * handle vector */ if (tw->expired_timer_callback) - { - tw->expired_timer_callback (callback_vector); - _vec_len (callback_vector) = 0; - } + tw->expired_timer_callback (callback_vector); tw->expired_timer_handles = callback_vector; } diff --git a/src/vppinfra/tw_timer_template.h b/src/vppinfra/tw_timer_template.h index 0404e3f4..0217644d 100644 --- a/src/vppinfra/tw_timer_template.h +++ b/src/vppinfra/tw_timer_template.h @@ -170,6 +170,13 @@ typedef enum } tw_ring_index_t; #endif /* __defined_tw_timer_wheel_slot__ */ +typedef CLIB_PACKED (struct + { + u8 timer_id; + u32 pool_index; + u32 handle; + }) TWT (trace); + typedef struct { /** Timer pool */ @@ -211,11 +218,20 @@ typedef struct /** expired timer callback, receives a vector of handles */ void (*expired_timer_callback) (u32 * expired_timer_handles); - /** vector of expired timers */ + /** vectors of expired timers */ u32 *expired_timer_handles; /** maximum expirations */ u32 max_expirations; + + /** current trace index */ +#if TW_START_STOP_TRACE_SIZE > 0 + /* Start/stop/expire tracing */ + u32 trace_index; + u32 trace_wrapped; + TWT (trace) traces[TW_START_STOP_TRACE_SIZE]; +#endif + } TWT (tw_timer_wheel); u32 TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, @@ -236,6 +252,12 @@ u32 *TW (tw_timer_expire_timers_vec) (TWT (tw_timer_wheel) * tw, f64 now, u32 TW (tw_timer_first_expires_in_ticks) (TWT (tw_timer_wheel) * tw); #endif +#if TW_START_STOP_TRACE_SIZE > 0 +void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle); +void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id, + u32 pool_index, u32 handle); +#endif + /* * fd.io coding-style-patch-verification: ON * -- cgit 1.2.3-korg From 91f3e744a37b9f6f3c87ac45e54142f16fd2d3d5 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Fri, 1 Sep 2017 19:12:11 -0400 Subject: Improve "show segment-manager segments" Clean up private-segment fifo preallocation Change-Id: I53c630ed81d48f2832a204992d73635536926983 Signed-off-by: Dave Barach --- src/svm/svm_fifo_segment.c | 101 +++++++++++++++++++++++++++++++++++-- src/svm/svm_fifo_segment.h | 4 ++ src/vnet/session/segment_manager.c | 54 
+++++++++++++++++--- src/vnet/tcp/builtin_client.c | 14 ++--- src/vnet/tcp/builtin_proxy.c | 16 +++--- src/vnet/tcp/builtin_server.c | 16 +++--- 6 files changed, 176 insertions(+), 29 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 97d9976b..3bdd2b28 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -221,13 +221,13 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) u8 **heaps = 0; mheap_t *heap_header; int segment_count = 1; + u32 rnd_size = 0; int i; if (a->private_segment_count && a->private_segment_size) { u8 *heap; u32 pagesize = clib_mem_get_page_size (); - u32 rnd_size; rnd_size = (a->private_segment_size + (pagesize - 1)) & ~pagesize; for (i = 0; i < a->private_segment_count; i++) @@ -246,7 +246,8 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) } /* Spread preallocated fifo pairs across segments */ - a->preallocated_fifo_pairs /= segment_count; + a->preallocated_fifo_pairs = + (a->preallocated_fifo_pairs + segment_count - 1) / segment_count; /* Allocate segments */ for (i = 0; i < segment_count; i++) @@ -254,7 +255,7 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) pool_get (sm->segments, s); memset (s, 0, sizeof (*s)); - s->ssvm.ssvm_size = ~0; + s->ssvm.ssvm_size = rnd_size; s->ssvm.i_am_master = 1; s->ssvm.my_pid = getpid (); s->ssvm.name = format (0, "%s%c", a->segment_name, 0); @@ -536,6 +537,100 @@ svm_fifo_segment_num_fifos (svm_fifo_segment_private_t * fifo_segment) return fifo_segment->h->n_active_fifos; } +u32 +svm_fifo_segment_num_free_fifos (svm_fifo_segment_private_t * fifo_segment, + u32 fifo_size_in_bytes) +{ + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + svm_fifo_t *f; + int i; + u32 count = 0, rounded_data_size, freelist_index; + + sh = fifo_segment->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + /* Count all free fifos? 
*/ + if (fifo_size_in_bytes == ~0) + { + for (i = 0; i < vec_len (fsh->free_fifos); i++) + { + f = fsh->free_fifos[i]; + if (f == 0) + continue; + + while (f) + { + f = f->next; + count++; + } + } + return count; + } + + rounded_data_size = (1 << (max_log2 (fifo_size_in_bytes))); + freelist_index = max_log2 (rounded_data_size) + - max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE); + + if (freelist_index > vec_len (fsh->free_fifos)) + return 0; + + f = fsh->free_fifos[freelist_index]; + if (f == 0) + return 0; + + while (f) + { + f = f->next; + count++; + } + return count; +} + +/** + * Segment format function + */ +u8 * +format_svm_fifo_segment (u8 * s, va_list * args) +{ + svm_fifo_segment_private_t *sp + = va_arg (*args, svm_fifo_segment_private_t *); + int verbose = va_arg (*args, int); + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + svm_fifo_t *f; + int i; + u32 count; + uword indent = format_get_indent (s) + 2; + + sh = sp->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + s = format (s, "%USegment Heap: %U\n", format_white_space, indent, + format_mheap, sh->heap, verbose); + s = format (s, "%U segment has %u active fifos\n", + format_white_space, indent, svm_fifo_segment_num_fifos (sp)); + + for (i = 0; i < vec_len (fsh->free_fifos); i++) + { + f = fsh->free_fifos[i]; + if (f == 0) + continue; + count = 0; + while (f) + { + f = f->next; + count++; + } + + s = format (s, "%U%-5u Kb: %u free", + format_white_space, indent + 2, + 1 << (i + max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE) - 10), + count); + } + return s; +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 5cc4754a..7c97e9b4 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -118,7 +118,11 @@ void svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, void svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds); u32 svm_fifo_segment_index (svm_fifo_segment_private_t * s); u32 svm_fifo_segment_num_fifos (svm_fifo_segment_private_t * fifo_segment); +u32 svm_fifo_segment_num_free_fifos (svm_fifo_segment_private_t * + fifo_segment, u32 fifo_size_in_bytes); + svm_fifo_segment_private_t *svm_fifo_segment_segments_pool (void); +format_function_t format_svm_fifo_segment; #endif /* __included_ssvm_fifo_segment_h__ */ diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index 7cf66ee3..c23e4c02 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -74,6 +74,11 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, } else { + u32 rx_fifo_size, tx_fifo_size, rx_rounded_data_size, + tx_rounded_data_size; + u32 approx_segment_count; + u64 approx_total_size; + ca->segment_name = "process-private-segment"; ca->segment_size = ~0; ca->rx_fifo_size = sm->properties->rx_fifo_size; @@ -82,6 +87,36 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, ca->private_segment_count = sm->properties->private_segment_count; ca->private_segment_size = sm->properties->private_segment_size; + /* Default to a small private segment */ + if (ca->private_segment_size == 0) + ca->private_segment_size = 128 << 20; + + /* Calculate space requirements */ + rx_rounded_data_size = (1 << (max_log2 (ca->rx_fifo_size))); + tx_rounded_data_size = (1 << (max_log2 (ca->tx_fifo_size))); + + rx_fifo_size = sizeof (svm_fifo_t) + rx_rounded_data_size; + tx_fifo_size = sizeof (svm_fifo_t) + tx_rounded_data_size; + + approx_total_size = (u64) 
ca->preallocated_fifo_pairs + * (rx_fifo_size + tx_fifo_size); + approx_segment_count = + (approx_total_size + + (ca->private_segment_size - 1)) / (u64) ca->private_segment_size; + + /* The user asked us to figure it out... */ + if (ca->private_segment_count == 0) + { + ca->private_segment_count = approx_segment_count; + } + /* Follow directions, but issue a warning */ + else if (approx_segment_count != ca->private_segment_count) + { + clib_warning + ("Honoring segment count %u, but calculated count was %u", + ca->private_segment_count, approx_segment_count); + } + if (svm_fifo_segment_create_process_private (ca)) clib_warning ("Failed to create process private segment"); @@ -503,7 +538,9 @@ segment_manager_show_fn (vlib_main_t * vm, unformat_input_t * input, u8 show_segments = 0, verbose = 0, *name; uword address; u64 size; - u32 fifos; + u32 active_fifos; + u32 free_fifos; + mheap_t *heap_header; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) @@ -536,8 +573,8 @@ segment_manager_show_fn (vlib_main_t * vm, unformat_input_t * input, segments = svm_fifo_segment_segments_pool (); vlib_cli_output (vm, "%d svm fifo segments allocated", pool_elts (segments)); - vlib_cli_output (vm, "%-20s%=12s%=12s%=15s", "Name", "Size (M)", - "Fifos", "Address"); + vlib_cli_output (vm, "%-20s%=12s%=16s%=16s%=16s", "Name", + "HeapSize (M)", "ActiveFifos", "FreeFifos", "Address"); /* *INDENT-OFF* */ pool_foreach (seg, segments, ({ @@ -557,9 +594,14 @@ segment_manager_show_fn (vlib_main_t * vm, unformat_input_t * input, size = seg->ssvm.ssvm_size; name = seg->ssvm.sh->name; } - fifos = svm_fifo_segment_num_fifos (seg); - vlib_cli_output (vm, "%-20s%=12u%=12u%=15x", name, size << 20, fifos, + active_fifos = svm_fifo_segment_num_fifos (seg); + free_fifos = svm_fifo_segment_num_free_fifos (seg, ~0 /* size */); + vlib_cli_output (vm, "%-20v%=16llu%=16u%=16u%16llx", + name, size >> 20ULL, active_fifos, free_fifos, address); + if (verbose) + vlib_cli_output (vm, "%U", + format_svm_fifo_segment, seg, verbose); if (seg->h->flags & FIFO_SEGMENT_F_IS_PRIVATE) vec_free (name); })); @@ -573,7 +615,7 @@ segment_manager_show_fn (vlib_main_t * vm, unformat_input_t * input, VLIB_CLI_COMMAND (segment_manager_show_command, static) = { .path = "show segment-manager", - .short_help = "show segment-manager [segments]", + .short_help = "show segment-manager [segments][verbose]", .function = segment_manager_show_fn, }; /* *INDENT-ON* */ diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 44b1f70e..94e6b4ae 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -563,12 +563,14 @@ test_tcp_clients_command_fn (vlib_main_t * vm, else if (unformat (input, "private-segment-count %d", &tm->private_segment_count)) ; - else if (unformat (input, "private-segment-size %dm", &tmp)) - tm->private_segment_size = tmp << 20; - else if (unformat (input, "private-segment-size %dg", &tmp)) - tm->private_segment_size = tmp << 30; - else if (unformat (input, "private-segment-size %d", &tmp)) - tm->private_segment_size = tmp; + else if (unformat (input, "private-segment-size %U", + unformat_memory_size, &tmp)) + { + if (tmp >= 0x100000000ULL) + return clib_error_return + (0, "private segment size %lld (%llu) too large", tmp, tmp); + tm->private_segment_size = tmp; + } else if (unformat (input, "preallocate-fifos")) tm->prealloc_fifos = 1; else if (unformat (input, "preallocate-sessions")) diff --git a/src/vnet/tcp/builtin_proxy.c b/src/vnet/tcp/builtin_proxy.c index d8cfb11d..91377e76 
100644 --- a/src/vnet/tcp/builtin_proxy.c +++ b/src/vnet/tcp/builtin_proxy.c @@ -523,7 +523,7 @@ proxy_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, { builtin_proxy_main_t *bpm = &builtin_proxy_main; int rv; - u32 tmp; + u64 tmp; bpm->fifo_size = 64 << 10; bpm->rcv_buffer_size = 1024; @@ -542,12 +542,14 @@ proxy_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, else if (unformat (input, "private-segment-count %d", &bpm->private_segment_count)) ; - else if (unformat (input, "private-segment-size %dm", &tmp)) - bpm->private_segment_size = tmp << 20; - else if (unformat (input, "private-segment-size %dg", &tmp)) - bpm->private_segment_size = tmp << 30; - else if (unformat (input, "private-segment-size %d", &tmp)) - bpm->private_segment_size = tmp; + else if (unformat (input, "private-segment-size %U", + unformat_memory_size, &tmp)) + { + if (tmp >= 0x100000000ULL) + return clib_error_return + (0, "private segment size %lld (%llu) too large", tmp, tmp); + bpm->private_segment_size = tmp; + } else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 295c189d..93314529 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -369,7 +369,7 @@ server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, builtin_server_main_t *bsm = &builtin_server_main; u8 server_uri_set = 0; int rv; - u32 tmp; + u64 tmp; bsm->no_echo = 0; bsm->fifo_size = 64 << 10; @@ -392,12 +392,14 @@ server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, else if (unformat (input, "private-segment-count %d", &bsm->private_segment_count)) ; - else if (unformat (input, "private-segment-size %dm", &tmp)) - bsm->private_segment_size = tmp << 20; - else if (unformat (input, "private-segment-size %dg", &tmp)) - bsm->private_segment_size = tmp << 30; - else if (unformat (input, "private-segment-size %d", &tmp)) - bsm->private_segment_size = tmp; + else if (unformat (input, "private-segment-size %U", + unformat_memory_size, &tmp)) + { + if (tmp >= 0x100000000ULL) + return clib_error_return + (0, "private segment size %lld (%llu) too large", tmp, tmp); + bsm->private_segment_size = tmp; + } else if (unformat (input, "uri %s", &bsm->server_uri)) server_uri_set = 1; else -- cgit 1.2.3-korg From 19296116be4754e43751399e25f5206cafc70c1f Mon Sep 17 00:00:00 2001 From: Dave Wallace Date: Thu, 31 Aug 2017 15:54:11 -0400 Subject: Set uid/gid on ssvm segment file. Change-Id: I482bb9654f4dfe240bace5c2b61056cfd04cf018 Signed-off-by: Dave Wallace --- src/svm/ssvm.c | 7 +++++++ src/svm/svm.c | 4 +++- src/svm/svm_common.h | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'src/svm') diff --git a/src/svm/ssvm.c b/src/svm/ssvm.c index e56e6b45..c04982de 100644 --- a/src/svm/ssvm.c +++ b/src/svm/ssvm.c @@ -13,10 +13,12 @@ * limitations under the License. 
*/ #include "ssvm.h" +#include "svm_common.h" int ssvm_master_init (ssvm_private_t * ssvm, u32 master_index) { + svm_main_region_t *smr = svm_get_root_rp ()->data_base; int ssvm_fd; u8 *ssvm_filename; u8 junk = 0; @@ -47,6 +49,11 @@ ssvm_master_init (ssvm_private_t * ssvm, u32 master_index) return SSVM_API_ERROR_CREATE_FAILURE; } + if (fchmod (ssvm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0) + clib_unix_warning ("ssvm segment chmod"); + if (fchown (ssvm_fd, smr->uid, smr->gid) < 0) + clib_unix_warning ("ssvm segment chown"); + if (lseek (ssvm_fd, ssvm->ssvm_size, SEEK_SET) < 0) { clib_unix_warning ("lseek"); diff --git a/src/svm/svm.c b/src/svm/svm.c index 14c5bd9b..f97803cd 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -471,7 +471,7 @@ svm_map_region (svm_map_region_args_t * a) if (svm_fd >= 0) { - if (fchmod (svm_fd, 0770) < 0) + if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0) clib_unix_warning ("segment chmod"); /* This turns out to fail harmlessly if the client starts first */ if (fchown (svm_fd, a->uid, a->gid) < 0) @@ -774,6 +774,8 @@ svm_region_init_internal (svm_map_region_args_t * a) vec_validate (mp, 0); mp->name_hash = hash_create_string (0, sizeof (uword)); mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0; + mp->uid = a->uid; + mp->gid = a->gid; rp->data_base = mp; svm_pop_heap (oldheap); } diff --git a/src/svm/svm_common.h b/src/svm/svm_common.h index 1f6d83c0..ea3ec87a 100644 --- a/src/svm/svm_common.h +++ b/src/svm/svm_common.h @@ -105,6 +105,8 @@ typedef struct svm_subregion_t *subregions; /* subregion pool */ uword *name_hash; u8 *root_path; + int uid; + int gid; } svm_main_region_t; -- cgit 1.2.3-korg From ab7b8d93cf1098970bc17fb4937376bb1ff33a21 Mon Sep 17 00:00:00 2001 From: Chris Luke Date: Thu, 7 Sep 2017 07:40:13 -0400 Subject: Fixes for issues reported by Coverity (VPP-972) Change-Id: I25238debb7081b4467aec4620dfdef33fbef3295 Signed-off-by: Chris Luke --- src/svm/svm_fifo.c | 2 +- src/uri/sock_test_client.c | 2 ++ src/uri/sock_test_server.c | 12 +++++++++--- src/uri/vppcom.c | 17 ++++++++--------- src/vlib/unix/main.c | 8 ++++---- src/vnet/ipsec/ikev2.c | 11 ++++++++++- src/vppinfra/socket.c | 16 ++++++++++++++-- 7 files changed, 48 insertions(+), 20 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 8fe82f56..42eb1ee8 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -443,7 +443,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) } } - ASSERT (bytes >= 0 && bytes <= f->nitems); + ASSERT (bytes <= f->nitems); return bytes; } diff --git a/src/uri/sock_test_client.c b/src/uri/sock_test_client.c index 4319f01b..ab8e5a0e 100644 --- a/src/uri/sock_test_client.c +++ b/src/uri/sock_test_client.c @@ -895,6 +895,8 @@ main (int argc, char **argv) case 'w': fprintf (stderr, "ERROR: Option -%c requires an argument.\n", optopt); + break; + default: if (isprint (optopt)) fprintf (stderr, "ERROR: Unknown option `-%c'.\n", optopt); diff --git a/src/uri/sock_test_server.c b/src/uri/sock_test_server.c index f703b177..29adea25 100644 --- a/src/uri/sock_test_server.c +++ b/src/uri/sock_test_server.c @@ -514,9 +514,15 @@ main (int argc, char **argv) continue; } - else if (strlen ((char *) conn->buf)) - printf ("\nSERVER (fd %d): RX (%d bytes) - '%s'\n", - conn->fd, rx_bytes, conn->buf); + else if (((char *) conn->buf)[0] != 0) + { + // If it looks vaguely like a string, make sure it's terminated + ((char *) conn->buf)[rx_bytes < + conn->buf_size ? 
rx_bytes : + conn->buf_size - 1] = 0; + printf ("\nSERVER (fd %d): RX (%d bytes) - '%s'\n", + conn->fd, rx_bytes, conn->buf); + } } else // rx_bytes < 0 { diff --git a/src/uri/vppcom.c b/src/uri/vppcom.c index aec1295f..aa307f1d 100644 --- a/src/uri/vppcom.c +++ b/src/uri/vppcom.c @@ -1369,23 +1369,18 @@ vppcom_cfg_heapsize (char *conf_fname) argc++; char **tmp = realloc (argv, argc * sizeof (char *)); if (tmp == NULL) - { - fclose (fp); - goto defaulted; - } + goto defaulted; argv = tmp; arg = strndup (p, 1024); if (arg == NULL) - { - fclose (fp); - goto defaulted; - } + goto defaulted; argv[argc - 1] = arg; p = strtok (NULL, " \t\n"); } } fclose (fp); + fp = NULL; char **tmp = realloc (argv, (argc + 1) * sizeof (char *)); if (tmp == NULL) @@ -1438,6 +1433,10 @@ vppcom_cfg_heapsize (char *conf_fname) } defaulted: + if (fp != NULL) + fclose (fp); + if (argv != NULL) + free (argv); if (!clib_mem_init (0, vcl_cfg->heapsize)) clib_warning ("[%d] vppcom heap allocation failure!", vcm->my_pid); else if (VPPCOM_DEBUG > 0) @@ -1687,7 +1686,7 @@ input_done: unformat_free (input); file_done: - if (fd > 0) + if (fd >= 0) close (fd); } diff --git a/src/vlib/unix/main.c b/src/vlib/unix/main.c index c90e1331..3a92b2e3 100644 --- a/src/vlib/unix/main.c +++ b/src/vlib/unix/main.c @@ -434,6 +434,10 @@ unix_config (vlib_main_t * vm, unformat_input_t * input) vlib_default_runtime_dir, 0); } + error = setup_signal_handlers (um); + if (error) + return error; + if (um->pidfile) { if ((error = vlib_unix_validate_runtime_file (um, @@ -448,10 +452,6 @@ unix_config (vlib_main_t * vm, unformat_input_t * input) } } - error = setup_signal_handlers (um); - if (error) - return error; - if (!(um->flags & UNIX_FLAG_INTERACTIVE)) { openlog (vm->name, LOG_CONS | LOG_PERROR | LOG_PID, LOG_DAEMON); diff --git a/src/vnet/ipsec/ikev2.c b/src/vnet/ipsec/ikev2.c index 296654ec..a3dc7b87 100644 --- a/src/vnet/ipsec/ikev2.c +++ b/src/vnet/ipsec/ikev2.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -1595,8 +1596,16 @@ ikev2_create_tunnel_interface (vnet_main_t * vnm, ikev2_sa_t * sa, + sa->profile->lifetime; if (sa->profile->lifetime_jitter) { + // This is not much better than rand(3), which Coverity warns + // is unsuitable for security applications; random_u32 is + // however fast. If this perturbance to the expiration time + // needs to use a better RNG then we may need to use something + // like /dev/urandom which has significant overhead. 
+ u32 rnd = (u32) (vlib_time_now (vnm->vlib_main) * 1e6); + rnd = random_u32 (&rnd); + child->time_to_expiration += - 1 + (rand () % sa->profile->lifetime_jitter); + 1 + (rnd % sa->profile->lifetime_jitter); } } diff --git a/src/vppinfra/socket.c b/src/vppinfra/socket.c index 7ade440c..37dcbbfd 100644 --- a/src/vppinfra/socket.c +++ b/src/vppinfra/socket.c @@ -359,9 +359,21 @@ clib_socket_init (clib_socket_t * s) && s->flags & SOCKET_ALLOW_GROUP_WRITE) { struct stat st = { 0 }; - stat (((struct sockaddr_un *) &addr)->sun_path, &st); + if (stat (((struct sockaddr_un *) &addr)->sun_path, &st) < 0) + { + error = clib_error_return_unix (0, "stat (fd %d, '%s')", + s->fd, s->config); + goto done; + } st.st_mode |= S_IWGRP; - chmod (((struct sockaddr_un *) &addr)->sun_path, st.st_mode); + if (chmod (((struct sockaddr_un *) &addr)->sun_path, st.st_mode) < + 0) + { + error = + clib_error_return_unix (0, "chmod (fd %d, '%s', mode %o)", + s->fd, s->config, st.st_mode); + goto done; + } } } else -- cgit 1.2.3-korg From 9d063047eb1a3738cb0fc9ebebb55793d155bb20 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Thu, 14 Sep 2017 03:08:00 -0400 Subject: session/tcp: improve preallocated segment handling - add preallocated segment flag - don't remove pre-allocated segments except if application detaches - when preallocating fifos in multiple segments, completely fill a segment before moving to the next - detach server application from segment-managers when deleting app - batch syn/syn-ack/fin (re)transmissions - loosen up close-wait and time-wait times Change-Id: I412f53ce601cc83b3acc26aeffd7fa2d52d73b03 Signed-off-by: Florin Coras --- src/svm/svm_fifo_segment.c | 33 +++++++------- src/svm/svm_fifo_segment.h | 1 + src/vnet/session/application.c | 2 + src/vnet/session/segment_manager.c | 73 ++++++++++++++++-------------- src/vnet/session/session.c | 18 +++----- src/vnet/session/session_node.c | 21 +++++---- src/vnet/tcp/builtin_client.c | 2 +- src/vnet/tcp/tcp.c | 7 ++- src/vnet/tcp/tcp.h | 9 ++-- src/vnet/tcp/tcp_debug.h | 9 +--- src/vnet/tcp/tcp_input.c | 81 +++++++++++++++++---------------- src/vnet/tcp/tcp_output.c | 91 ++++++++++++++++++++++++++++++-------- 12 files changed, 209 insertions(+), 138 deletions(-) (limited to 'src/svm') diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 3bdd2b28..da2b7935 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -57,11 +57,12 @@ allocate_new_fifo_chunk (svm_fifo_segment_header_t * fsh, } static void -preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, +preallocate_fifo_pairs (svm_fifo_segment_private_t * s, svm_fifo_segment_create_args_t * a) { - u32 rx_fifo_size, tx_fifo_size; - u32 rx_rounded_data_size, tx_rounded_data_size; + svm_fifo_segment_header_t *fsh = s->h; + u32 rx_fifo_size, tx_fifo_size, pairs_to_allocate; + u32 rx_rounded_data_size, tx_rounded_data_size, pair_size; svm_fifo_t *f; u8 *rx_fifo_space, *tx_fifo_space; int rx_freelist_index, tx_freelist_index; @@ -97,10 +98,11 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, - max_log2 (FIFO_SEGMENT_MIN_FIFO_SIZE); /* Calculate space requirements */ - rx_fifo_size = (sizeof (*f) + rx_rounded_data_size) - * a->preallocated_fifo_pairs; - tx_fifo_size = (sizeof (*f) + tx_rounded_data_size) - * a->preallocated_fifo_pairs; + pair_size = 2 * sizeof (*f) + rx_rounded_data_size + tx_rounded_data_size; + pairs_to_allocate = clib_min (s->ssvm.ssvm_size / pair_size, + a->preallocated_fifo_pairs); + rx_fifo_size = (sizeof (*f) + rx_rounded_data_size) * 
pairs_to_allocate; + tx_fifo_size = (sizeof (*f) + tx_rounded_data_size) * pairs_to_allocate; vec_validate_init_empty (fsh->free_fifos, clib_max (rx_freelist_index, tx_freelist_index), @@ -139,7 +141,7 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, /* Carve rx fifo space */ f = (svm_fifo_t *) rx_fifo_space; - for (i = 0; i < a->preallocated_fifo_pairs; i++) + for (i = 0; i < pairs_to_allocate; i++) { f->freelist_index = rx_freelist_index; f->next = fsh->free_fifos[rx_freelist_index]; @@ -149,7 +151,7 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, } /* Carve tx fifo space */ f = (svm_fifo_t *) tx_fifo_space; - for (i = 0; i < a->preallocated_fifo_pairs; i++) + for (i = 0; i < pairs_to_allocate; i++) { f->freelist_index = tx_freelist_index; f->next = fsh->free_fifos[tx_freelist_index]; @@ -157,6 +159,9 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, tx_fifo_space += sizeof (*f) + tx_rounded_data_size; f = (svm_fifo_t *) tx_fifo_space; } + + /* Account for the pairs allocated */ + a->preallocated_fifo_pairs -= pairs_to_allocate; } /** (master) create an svm fifo segment */ @@ -200,7 +205,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) sh->opaque[0] = fsh; s->h = fsh; fsh->segment_name = format (0, "%s%c", a->segment_name, 0); - preallocate_fifo_pairs (fsh, a); + preallocate_fifo_pairs (s, a); ssvm_pop_heap (oldheap); @@ -245,10 +250,6 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) segment_count = a->private_segment_count; } - /* Spread preallocated fifo pairs across segments */ - a->preallocated_fifo_pairs = - (a->preallocated_fifo_pairs + segment_count - 1) / segment_count; - /* Allocate segments */ for (i = 0; i < segment_count; i++) { @@ -280,9 +281,11 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) if (a->private_segment_count) { + if (i != 0) + fsh->flags |= FIFO_SEGMENT_F_IS_PREALLOCATED; oldheap = clib_mem_get_heap (); clib_mem_set_heap (sh->heap); - preallocate_fifo_pairs (fsh, a); + preallocate_fifo_pairs (s, a); clib_mem_set_heap (oldheap); } sh->ready = 1; diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 7c97e9b4..5b771328 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -33,6 +33,7 @@ typedef enum #define FIFO_SEGMENT_F_IS_PRIVATE 1 << 0 /* Private segment */ #define FIFO_SEGMENT_F_IS_MAIN_HEAP 1 << 1 /* Segment is main heap */ +#define FIFO_SEGMENT_F_IS_PREALLOCATED 1 << 2 /* Segment is preallocated */ typedef struct { diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index d105119c..2b789c5f 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -111,6 +111,8 @@ application_del (application_t * app) hash_foreach (handle, index, app->listeners_table, ({ vec_add1 (handles, handle); + sm = segment_manager_get (index); + sm->app_index = SEGMENT_MANAGER_INVALID_APP_INDEX; })); /* *INDENT-ON* */ diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index c23e4c02..48d02755 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -197,27 +197,24 @@ u8 segment_manager_has_fifos (segment_manager_t * sm) { svm_fifo_segment_private_t *segment; - /* Weird, but handle it */ - if (vec_len (sm->segment_indices) == 0) - return 0; - if (vec_len (sm->segment_indices) == 1) - { - segment = svm_fifo_segment_get_segment (sm->segment_indices[0]); - if (svm_fifo_segment_num_fifos (segment) == 0) - return 0; - } 
- if (CLIB_DEBUG) + int i; + + for (i = 0; i < vec_len (sm->segment_indices); i++) { - svm_fifo_segment_private_t *segment; - int i; - for (i = 1; i < vec_len (sm->segment_indices); i++) - { - segment = svm_fifo_segment_get_segment (sm->segment_indices[i]); - if (!svm_fifo_segment_has_fifos (segment)) - clib_warning ("segment has no fifos!"); - } + segment = svm_fifo_segment_get_segment (sm->segment_indices[i]); + if (CLIB_DEBUG && i && !svm_fifo_segment_has_fifos (segment) + && !(segment->h->flags & FIFO_SEGMENT_F_IS_PREALLOCATED)) + clib_warning ("segment %d has no fifos!", sm->segment_indices[i]); + if (svm_fifo_segment_has_fifos (segment)) + return 1; } - return 1; + return 0; +} + +static u8 +segment_manager_app_detached (segment_manager_t * sm) +{ + return (sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX); } static void @@ -228,6 +225,13 @@ segment_manager_del_segment (segment_manager_t * sm, u32 segment_index) clib_spinlock_lock (&sm->lockp); svm_segment_index = sm->segment_indices[segment_index]; fifo_segment = svm_fifo_segment_get_segment (svm_segment_index); + if (!fifo_segment + || ((fifo_segment->h->flags & FIFO_SEGMENT_F_IS_PREALLOCATED) + && !segment_manager_app_detached (sm))) + { + clib_spinlock_unlock (&sm->lockp); + return; + } svm_fifo_segment_delete (fifo_segment); vec_del1 (sm->segment_indices, segment_index); clib_spinlock_unlock (&sm->lockp); @@ -288,26 +292,29 @@ segment_manager_del_sessions (segment_manager_t * sm) * * Since the fifos allocated in the segment keep backpointers to the sessions * prior to removing the segment, we call session disconnect. This - * subsequently propages into transport. + * subsequently propagates into transport. */ void segment_manager_del (segment_manager_t * sm) { + int i; - ASSERT (vec_len (sm->segment_indices) <= 1); - if (vec_len (sm->segment_indices)) + ASSERT (!segment_manager_has_fifos (sm) + && segment_manager_app_detached (sm)); + + /* If we have empty preallocated segments that haven't been removed, remove + * them now. Apart from that, the first segment in the first segment manager + * is not removed when all fifos are removed. It can only be removed when + * the manager is explicitly deleted/detached by the app. */ + for (i = vec_len (sm->segment_indices) - 1; i >= 0; i--) { - /* The first segment in the first segment manager is not removed when - * all fifos are removed. It can only be removed when the manager is - * explicitly deleted/detached by the app. 
*/ if (CLIB_DEBUG) { - svm_fifo_segment_private_t *fifo_segment; - fifo_segment = - svm_fifo_segment_get_segment (sm->segment_indices[0]); - ASSERT (!svm_fifo_segment_has_fifos (fifo_segment)); + svm_fifo_segment_private_t *segment; + segment = svm_fifo_segment_get_segment (sm->segment_indices[i]); + ASSERT (!svm_fifo_segment_has_fifos (segment)); } - segment_manager_del_segment (sm, 0); + segment_manager_del_segment (sm, i); } clib_spinlock_free (&sm->lockp); if (CLIB_DEBUG) @@ -322,8 +329,7 @@ segment_manager_init_del (segment_manager_t * sm) segment_manager_del_sessions (sm); else { - ASSERT (!sm->first_is_protected - || sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX); + ASSERT (!sm->first_is_protected || segment_manager_app_detached (sm)); segment_manager_del (sm); } } @@ -478,7 +484,8 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, } /* Remove segment manager if no sessions and detached from app */ - if (sm->app_index == SEGMENT_MANAGER_INVALID_APP_INDEX && is_first) + if (segment_manager_app_detached (sm) + && !segment_manager_has_fifos (sm)) segment_manager_del (sm); } } diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 792e6612..dc930ce8 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -453,7 +453,7 @@ stream_session_connect_notify (transport_connection_t * tc, u8 is_fail) st); if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE) { - clib_warning ("half-open was removed!"); + TCP_DBG ("half-open was removed!"); return -1; } @@ -732,6 +732,7 @@ session_send_session_evt_to_thread (u64 session_handle, u32 thread_index) { static u16 serial_number = 0; + u32 tries = 0; session_fifo_event_t evt; unix_shared_memory_queue_t *q; @@ -741,21 +742,14 @@ session_send_session_evt_to_thread (u64 session_handle, evt.event_id = serial_number++; q = session_manager_get_vpp_event_queue (thread_index); - - /* Based on request block (or not) for lack of space */ - if (PREDICT_TRUE (q->cursize < q->maxsize)) + while (unix_shared_memory_queue_add (q, (u8 *) & evt, 1)) { - if (unix_shared_memory_queue_add (q, (u8 *) & evt, - 0 /* do wait for mutex */ )) + if (tries++ == 3) { - clib_warning ("failed to enqueue evt"); + TCP_DBG ("failed to enqueue evt"); + break; } } - else - { - clib_warning ("queue full"); - return; - } } /** diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c index 09687687..d0155849 100644 --- a/src/vnet/session/session_node.c +++ b/src/vnet/session/session_node.c @@ -168,15 +168,19 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, return 0; } + /* Check how much we can pull. */ + max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo); + if (peek_data) { - /* Offset in rx fifo from where to peek data */ + /* Offset in rx fifo from where to peek data */ tx_offset = transport_vft->tx_fifo_offset (tc0); + if (PREDICT_FALSE (tx_offset >= max_dequeue0)) + max_dequeue0 = 0; + else + max_dequeue0 -= tx_offset; } - /* Check how much we can pull. 
If buffering, subtract the offset */ - max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - tx_offset; - /* Nothing to read return */ if (max_dequeue0 == 0) { @@ -277,6 +281,8 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, { n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, tx_offset, len_to_deq0, data0); + if (n_bytes_read <= 0) + goto dequeue_fail; /* Keep track of progress locally, transport is also supposed to * increment it independently when pushing the header */ tx_offset += n_bytes_read; @@ -285,11 +291,10 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, { n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo, len_to_deq0, data0); + if (n_bytes_read <= 0) + goto dequeue_fail; } - if (n_bytes_read <= 0) - goto dequeue_fail; - b0->current_length = n_bytes_read; left_to_snd0 -= n_bytes_read; @@ -616,7 +621,7 @@ skip_dequeue: case FIFO_EVENT_APP_TX: s0 = session_event_get_session (e0, my_thread_index); - if (CLIB_DEBUG && !s0) + if (PREDICT_FALSE (!s0)) { clib_warning ("It's dead, Jim!"); continue; diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 5b4c8679..527b3289 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -510,7 +510,7 @@ clients_connect (vlib_main_t * vm, u8 * uri, u32 n_clients) if ((i % 4) == 0) vlib_process_suspend (vm, 10e-6); ASSERT (i + 1 >= tm->ready_connections); - while (i + 1 - tm->ready_connections > 8000) + while (i + 1 - tm->ready_connections > 1000) { vlib_process_suspend (vm, 100e-6); } diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 04f1e068..f779428f 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -1035,7 +1035,7 @@ tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space) /* If not snd_wnd constrained and we can't write at least a segment, * don't try at all */ if (PREDICT_FALSE (snd_space < tc->snd_mss)) - return 0; + return snd_space < tc->cwnd ? 0 : snd_space; /* round down to mss multiple */ return snd_space - (snd_space % tc->snd_mss); @@ -1167,6 +1167,7 @@ tcp_timer_establish_handler (u32 conn_index) { ASSERT (tc->state == TCP_STATE_SYN_SENT); stream_session_connect_notify (&tc->connection, 1 /* fail */ ); + TCP_DBG ("establish pop: %U", format_tcp_connection, tc, 2); } else { @@ -1174,7 +1175,7 @@ tcp_timer_establish_handler (u32 conn_index) /* note: the connection may have already disappeared */ if (PREDICT_FALSE (tc == 0)) return; - + TCP_DBG ("establish pop: %U", format_tcp_connection, tc, 2); ASSERT (tc->state == TCP_STATE_SYN_RCVD); /* Start cleanup. App wasn't notified yet so use delete notify as * opposed to delete to cleanup session layer state. 
*/ @@ -1369,6 +1370,8 @@ tcp_main_enable (vlib_main_t * vm) vec_validate (tm->tx_frames[0], num_threads - 1); vec_validate (tm->tx_frames[1], num_threads - 1); + vec_validate (tm->ip_lookup_tx_frames[0], num_threads - 1); + vec_validate (tm->ip_lookup_tx_frames[1], num_threads - 1); tm->bytes_per_buffer = vlib_buffer_free_list_buffer_size (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 6020a3de..bb8091af 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -99,8 +99,9 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler; #define TCP_ESTABLISH_TIME 750 /* 75s */ #define TCP_SYN_RCVD_TIME 600 /* 60s */ #define TCP_2MSL_TIME 300 /* 30s */ -#define TCP_CLOSEWAIT_TIME 20 /* 0.1s */ -#define TCP_CLEANUP_TIME 5 /* 0.5s Time to wait before cleanup */ +#define TCP_CLOSEWAIT_TIME 20 /* 2s */ +#define TCP_TIMEWAIT_TIME 20 /* 2s */ +#define TCP_CLEANUP_TIME 10 /* 1s Time to wait before cleanup */ #define TCP_TIMER_PERSIST_MIN 2 /* 0.2s */ #define TCP_RTO_MAX 60 * THZ /* Min max RTO (60s) as per RFC6298 */ @@ -372,8 +373,10 @@ typedef struct _tcp_main /** per-worker tx buffer free lists */ u32 **tx_buffers; - /** per-worker tx frames to 4/6 output nodes */ + /** per-worker tx frames to tcp 4/6 output nodes */ vlib_frame_t **tx_frames[2]; + /** per-worker tx frames to ip 4/6 lookup nodes */ + vlib_frame_t **ip_lookup_tx_frames[2]; /* Per worker-thread timer wheel for connections timers */ tw_timer_wheel_16t_2w_512sl_t *timer_wheels; diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index cf77e6e6..4bc6b42e 100755 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -82,13 +82,7 @@ typedef enum _tcp_dbg_evt * Infra and evt track setup */ -#define TCP_DBG(_tc, _evt, _args...) \ -{ \ - u8 *_tmp = 0; \ - _tmp = format(_tmp, "%U", format_tcp_connection_verbose, _tc); \ - clib_warning("%s", _tmp); \ - vec_free(_tmp); \ -} +#define TCP_DBG(_fmt, _args...) clib_warning (_fmt, ##_args) #define DECLARE_ETD(_tc, _e, _size) \ struct \ @@ -240,6 +234,7 @@ typedef enum _tcp_dbg_evt #define TCP_EVT_DBG(_evt, _args...) CC(_evt, _HANDLER)(_args) #else #define TCP_EVT_DBG(_evt, _args...) +#define TCP_DBG(_fmt, _args...) #endif /* diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 841e72a5..64a07070 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -351,12 +351,17 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, if (tcp_syn (th0)) { /* TODO implement RFC 5961 */ - if (tc0->state != TCP_STATE_SYN_RCVD) - tcp_make_ack (tc0, b0); + if (tc0->state == TCP_STATE_SYN_RCVD) + { + tcp_make_synack (tc0, b0); + TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0); + } else - tcp_make_synack (tc0, b0); + { + tcp_make_ack (tc0, b0); + TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, tc0); + } *next0 = tcp_next_output (tc0->c_is_ip4); - TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0); return -1; } @@ -1747,18 +1752,17 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* 8: check the FIN bit */ if (PREDICT_FALSE (is_fin)) { - /* Enter CLOSE-WAIT and notify session. Don't send ACK, instead - * wait for session to call close. To avoid lingering + /* Enter CLOSE-WAIT and notify session. To avoid lingering * in CLOSE-WAIT, set timer (reuse WAITCLOSE). 
*/ - tc0->state = TCP_STATE_CLOSE_WAIT; - TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0); + /* Account for the FIN if nothing else was received */ if (vnet_buffer (b0)->tcp.data_len == 0) - { - tc0->rcv_nxt += 1; - next0 = TCP_ESTABLISHED_NEXT_DROP; - } + tc0->rcv_nxt += 1; + tcp_make_ack (tc0, b0); + next0 = tcp_next_output (tc0->c_is_ip4); + tc0->state = TCP_STATE_CLOSE_WAIT; stream_session_disconnect_notify (&tc0->connection); tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); + TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0); } done: @@ -1973,6 +1977,12 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, seq0 = vnet_buffer (b0)->tcp.seq_number; tcp0 = tcp_buffer_hdr (b0); + /* Crude check to see if the connection handle does not match + * the packet. Probably connection just switched to established */ + if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port + || tcp0->src_port != tc0->c_rmt_port)) + goto drop; + if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0))) goto drop; @@ -2265,6 +2275,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_header_t *tcp0 = 0; tcp_connection_t *tc0; u32 next0 = TCP_RCV_PROCESS_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED; + u8 is_fin0; bi0 = from[0]; to_next[0] = bi0; @@ -2283,11 +2294,11 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } tcp0 = tcp_buffer_hdr (b0); + is_fin0 = tcp_is_fin (tcp0); /* SYNs, FINs and data consume sequence numbers */ vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number - + tcp_is_syn (tcp0) + tcp_is_fin (tcp0) - + vnet_buffer (b0)->tcp.data_len; + + tcp_is_syn (tcp0) + is_fin0 + vnet_buffer (b0)->tcp.data_len; if (CLIB_DEBUG) { @@ -2384,21 +2395,14 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* If FIN is ACKed */ else if (tc0->snd_una == tc0->snd_una_max) { - tc0->rcv_nxt += 1; tc0->state = TCP_STATE_FIN_WAIT_2; TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0); - if (tcp_fin (tcp0)) - { - /* Stop all timers, 2MSL will be set lower */ - tcp_connection_timers_reset (tc0); - } - else - { - /* Wait for peer to finish sending its data */ - tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, - TCP_2MSL_TIME); - } + /* Stop all retransmit timers because we have nothing more + * to send. Enable waitclose though because we're willing to + * wait for peer's FIN but not indefinitely. 
*/ + tcp_connection_timers_reset (tc0); + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); } break; case TCP_STATE_FIN_WAIT_2: @@ -2434,10 +2438,10 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (!tcp_rcv_ack_is_acceptable (tc0, b0)) goto drop; + tc0->snd_una = vnet_buffer (b0)->tcp.ack_number; /* Apparently our FIN was lost */ - if (tcp_fin (tcp0)) + if (is_fin0) { - /* Don't "make" fin since that increments snd_nxt */ tcp_send_fin (tc0); goto drop; } @@ -2450,8 +2454,6 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, * particular, this makes sure that we won't have dead sessions * when processing events on the tx path */ tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME); - - /* Stop retransmit */ tcp_retransmit_timer_reset (tc0); goto drop; @@ -2466,8 +2468,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto drop; tcp_make_ack (tc0, b0); - tcp_timer_reset (tc0, TCP_TIMER_WAITCLOSE); - tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); goto drop; @@ -2486,6 +2487,8 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, case TCP_STATE_FIN_WAIT_2: if (vnet_buffer (b0)->tcp.data_len) error0 = tcp_segment_rcv (tm, tc0, b0, &next0); + else if (is_fin0) + tc0->rcv_nxt += 1; break; case TCP_STATE_CLOSE_WAIT: case TCP_STATE_CLOSING: @@ -2497,7 +2500,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } /* 8: check the FIN bit */ - if (!tcp_fin (tcp0)) + if (!is_fin0) goto drop; switch (tc0->state) @@ -2527,19 +2530,19 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); break; case TCP_STATE_FIN_WAIT_2: - /* Got FIN, send ACK! */ + /* Got FIN, send ACK! Be more aggressive with resource cleanup */ tc0->state = TCP_STATE_TIME_WAIT; tcp_connection_timers_reset (tc0); - tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME); tcp_make_ack (tc0, b0); next0 = tcp_next_output (is_ip4); TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0); break; case TCP_STATE_TIME_WAIT: - /* Remain in the TIME-WAIT state. Restart the 2 MSL time-wait + /* Remain in the TIME-WAIT state. Restart the time-wait * timeout. 
*/ - tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME); break; } TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0); @@ -3162,9 +3165,9 @@ do { \ TCP_ERROR_NONE); _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); - _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED); + _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); - _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, + _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); #undef _ } diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index b843c926..be29f05f 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -629,9 +629,11 @@ tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b) } always_inline void -tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, - u8 is_ip4) +tcp_enqueue_to_ip_lookup_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, + u8 is_ip4, u8 flush) { + tcp_main_t *tm = vnet_get_tcp_main (); + u32 thread_index = vlib_get_thread_index (); u32 *to_next, next_index; vlib_frame_t *f; @@ -643,13 +645,42 @@ tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, /* Send to IP lookup */ next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index; - f = vlib_get_frame_to_node (vm, next_index); + if (VLIB_BUFFER_TRACE_TRAJECTORY > 0) + { + b->pre_data[0] = 2; + b->pre_data[1] = next_index; + } + + f = tm->ip_lookup_tx_frames[!is_ip4][thread_index]; + if (!f) + { + f = vlib_get_frame_to_node (vm, next_index); + ASSERT (f); + tm->ip_lookup_tx_frames[!is_ip4][thread_index] = f; + } - /* Enqueue the packet */ to_next = vlib_frame_vector_args (f); - to_next[0] = bi; - f->n_vectors = 1; - vlib_put_frame_to_node (vm, next_index, f); + to_next[f->n_vectors] = bi; + f->n_vectors += 1; + if (flush || f->n_vectors == VLIB_FRAME_SIZE) + { + vlib_put_frame_to_node (vm, next_index, f); + tm->ip_lookup_tx_frames[!is_ip4][thread_index] = 0; + } +} + +always_inline void +tcp_enqueue_to_ip_lookup_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, + u8 is_ip4) +{ + tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, 1); +} + +always_inline void +tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, + u8 is_ip4) +{ + tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, 0); } always_inline void @@ -666,8 +697,6 @@ tcp_enqueue_to_output_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, /* Decide where to send the packet */ next_index = is_ip4 ? 
tcp4_output_node.index : tcp6_output_node.index; - - /* Initialize the trajectory trace, if configured */ if (VLIB_BUFFER_TRACE_TRAJECTORY > 0) { b->pre_data[0] = 1; @@ -856,7 +885,7 @@ tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4) ASSERT (!bogus); } - tcp_enqueue_to_ip_lookup (vm, b, bi, is_ip4); + tcp_enqueue_to_ip_lookup_now (vm, b, bi, is_ip4); TCP_EVT_DBG (TCP_EVT_RST_SENT, tc); } @@ -968,7 +997,24 @@ tcp_flush_frame_to_output (vlib_main_t * vm, u8 thread_index, u8 is_ip4) } /** - * Flush both v4 and v6 tx frames for thread index + * Flush ip lookup tx frames populated by timer pops + */ +always_inline void +tcp_flush_frame_to_ip_lookup (vlib_main_t * vm, u8 thread_index, u8 is_ip4) +{ + if (tcp_main.ip_lookup_tx_frames[!is_ip4][thread_index]) + { + u32 next_index; + next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index; + vlib_put_frame_to_node (vm, next_index, + tcp_main.ip_lookup_tx_frames[!is_ip4] + [thread_index]); + tcp_main.ip_lookup_tx_frames[!is_ip4][thread_index] = 0; + } +} + +/** + * Flush v4 and v6 tcp and ip-lookup tx frames for thread index */ void tcp_flush_frames_to_output (u8 thread_index) @@ -976,6 +1022,8 @@ tcp_flush_frames_to_output (u8 thread_index) vlib_main_t *vm = vlib_get_main (); tcp_flush_frame_to_output (vm, thread_index, 1); tcp_flush_frame_to_output (vm, thread_index, 0); + tcp_flush_frame_to_ip_lookup (vm, thread_index, 1); + tcp_flush_frame_to_ip_lookup (vm, thread_index, 0); } /** @@ -984,22 +1032,28 @@ tcp_flush_frames_to_output (u8 thread_index) void tcp_send_fin (tcp_connection_t * tc) { - vlib_buffer_t *b; - u32 bi; tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = vlib_get_main (); + vlib_buffer_t *b; + u32 bi; + u8 fin_snt = 0; + if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) return; b = vlib_get_buffer (vm, bi); - /* buffer will be initialized by in tcp_make_fin */ + fin_snt = tc->flags & TCP_CONN_FINSNT; + if (fin_snt) + tc->snd_nxt = tc->snd_una; tcp_make_fin (tc, b); tcp_enqueue_to_output_now (vm, b, bi, tc->c_is_ip4); - if (!(tc->flags & TCP_CONN_FINSNT)) + if (!fin_snt) { tc->flags |= TCP_CONN_FINSNT; tc->flags &= ~TCP_CONN_FINPNDG; - tc->snd_nxt += 1; + /* Account for the FIN */ + tc->snd_una_max += 1; + tc->snd_nxt = tc->snd_una_max; } tcp_retransmit_timer_force_update (tc); TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc); @@ -1398,7 +1452,8 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) else if (tc->state == TCP_STATE_SYN_RCVD) { tc->rto_boff += 1; - tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); + if (tc->rto_boff > TCP_RTO_SYN_RETRIES) + tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); tc->rtt_ts = 0; if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) @@ -1414,7 +1469,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) else { ASSERT (tc->state == TCP_STATE_CLOSED); - clib_warning ("connection closed ..."); + TCP_DBG ("connection state: %d", tc->state); return; } } -- cgit 1.2.3-korg
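
A minimal standalone sketch of the segment-sizing arithmetic introduced by the
"session/tcp: improve preallocated segment handling" commit above
(session_manager_add_segment_i() and preallocate_fifo_pairs()): fifo sizes are
rounded up to a power of two, a per-pair footprint is derived, preallocated
pairs fill one segment completely before spilling into the next, and the
private segment count defaults to the ceiling of the total footprint over the
segment size. This is an illustration only, not part of any patch above: the
FIFO_HDR_BYTES constant stands in for sizeof (svm_fifo_t), round_pow2() stands
in for 1 << max_log2 (), and the input numbers are made up.

/* Illustrative sketch only; assumptions noted in the text above. */
#include <stdio.h>
#include <stdint.h>

#define FIFO_HDR_BYTES 128	/* assumed stand-in for sizeof (svm_fifo_t) */

/* Round up to the next power of two, like 1 << max_log2 (x). */
static uint64_t
round_pow2 (uint64_t x)
{
  uint64_t p = 1;
  while (p < x)
    p <<= 1;
  return p;
}

int
main (void)
{
  uint64_t rx_fifo_size = 4096;		  /* example rx fifo size */
  uint64_t tx_fifo_size = 4096;		  /* example tx fifo size */
  uint64_t pairs_requested = 100000;	  /* example preallocated_fifo_pairs */
  uint64_t segment_size = 128ULL << 20;	  /* default private segment: 128 MB */

  /* Per-pair footprint: two fifo headers plus both rounded data areas. */
  uint64_t pair_size = 2 * FIFO_HDR_BYTES
    + round_pow2 (rx_fifo_size) + round_pow2 (tx_fifo_size);

  /* Segment count: ceiling of total footprint over segment size. */
  uint64_t total = pairs_requested * pair_size;
  uint64_t segment_count = (total + segment_size - 1) / segment_size;

  /* Each segment is filled before moving to the next one. */
  uint64_t pairs_per_segment = segment_size / pair_size;

  printf ("pair size %lu bytes, %lu pairs/segment, %lu segment(s)\n",
	  (unsigned long) pair_size,
	  (unsigned long) pairs_per_segment, (unsigned long) segment_count);
  return 0;
}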