summary | refs | log | tree | commit | diff | stats
path: root/src/plugins/nat/out2in_ed.c
Age | Commit message (Expand) | Author | Files | Lines
2020-06-27nat: replace speculative buffer enqueue modelKlement Sekera1-476/+439
2020-06-08nat: more long read after short write optimizationKlement Sekera1-99/+120
2020-06-04nat: fixed input validationFilip Varga1-1/+0
2020-05-14nat: ED: store both thread&session idx in hashKlement Sekera1-26/+76
2020-05-13nat: fix LRU blocked by inactive sessionKlement Sekera1-10/+11
2020-05-13nat: perf improvement - replace branchy codeKlement Sekera1-15/+15
2020-05-13nat: handoff next node feature fixFilip Varga1-2/+2
2020-05-08nat: fix per thread data vlib_main_t usage take 2Ole Troan1-6/+7
2020-05-07nat: fix per thread data vlib_main_t usageFilip Varga1-3/+6
2020-05-04nat: per vrf session limitsFilip Varga1-4/+12
2020-04-27nat: improve perf - long read after short writeKlement Sekera1-73/+68
2020-04-24nat: ignore user hash in ED NATKlement Sekera1-41/+7
2020-04-24nat: make usage of vnet_buffer2 transparentKlement Sekera1-4/+4
2020-04-24nat: ED: reduce number of hash tables usedKlement Sekera1-10/+0
2020-04-08nat: ED: global session LRU listKlement Sekera1-5/+1
2020-04-07nat: ED: port re-use algorithmKlement Sekera1-0/+10
2020-04-07nat: don't drop packet for expired sessionKlement Sekera1-2/+2
2020-03-31nat: fix unknown proto translation out2in_edAlexander Chernavin1-2/+2
2020-03-26nat: transitory timeout for TCP CLOSED stateKlement Sekera1-8/+38
2020-03-25nat: fix error countersKlement Sekera1-0/+5
2020-03-13nat: timed out session scavenging upgradeFilip Varga1-2/+15
2020-02-20nat: nat44 split slow and fast pathFilip Varga1-567/+169
2020-01-17nat: refactor of port/address allocation functionsFilip Varga1-4/+4
2020-01-03nat: use SVRKlement Sekera1-546/+221
2019-12-12nat: session cleanup fixFilip Varga1-0/+3
2019-11-19nat: respect arc features (multi worker)Filip Varga1-139/+138
2019-11-05nat: respect udp checksumFilip Varga1-26/+101
2019-11-04nat: revert respect udp checksumOle Troan1-101/+26
2019-10-28nat: respect udp checksumFilip Varga1-26/+101
2019-08-22nat: handoff traffic matching for dynamic NATFilip Varga1-79/+3
2019-07-31nat: elog rewrite for multi-worker supportFilip Varga1-17/+17
2019-05-30NAT: create bypass in correct thread if workers>1Alexander Chernavin1-9/+40
2019-04-26nat: fix ED mode unknown proto session managementMatthew Smith1-1/+10
2019-02-27NAT44: active-passive HA (VPP-1571)Matus Fabian1-12/+24
2019-02-18NAT: VPP-1552 code migration from old multiarch schemeFilip Varga1-31/+16
2019-01-21NAT: VPP-1537 IPFIX per worker processingFilip Varga1-3/+5
2018-12-14NAT: counters (VPP-1484)Matus Fabian1-12/+48
2018-11-29NAT: syslog - sessions logging (VPP-1139)Matus Fabian1-0/+21
2018-11-19NAT44: fix bug in TCP close with output-feature interface (VPP-1493)Matus Fabian1-1/+30
2018-11-14Remove c-11 memcpy checks from perf-critical codeDave Barach1-6/+5
2018-10-08NAT44: do not create session record for identity mapping (VPP-1439)Matus Fabian1-6/+34
2018-09-26NAT44: endpoint-dependent mode session timeout improvement (VPP-1423)Matus Fabian1-0/+14
2018-09-25NAT44: fix nat44_o2i_ed_is_idle_session_cb (VPP-1424)Matus Fabian1-1/+1
2018-09-24NAT44: endpoint-dependent mode session timeout improvement (VPP-1423)Matus Fabian1-3/+3
2018-09-21NAT: Refactoring / Housekeeping (VPP-1415)Matus Fabian1-0/+1868
ass="o">->producer_evtfd = -1; q->consumer_evtfd = -1; clib_memset (&attr, 0, sizeof (attr)); clib_memset (&cattr, 0, sizeof (cattr)); if (pthread_mutexattr_init (&attr)) clib_unix_warning ("mutexattr_init"); if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED)) clib_unix_warning ("pthread_mutexattr_setpshared"); if (pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST)) clib_unix_warning ("setrobust"); if (pthread_mutex_init (&q->mutex, &attr)) clib_unix_warning ("mutex_init"); if (pthread_mutexattr_destroy (&attr)) clib_unix_warning ("mutexattr_destroy"); if (pthread_condattr_init (&cattr)) clib_unix_warning ("condattr_init"); /* prints funny-looking messages in the Linux target */ if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED)) clib_unix_warning ("condattr_setpshared"); if (pthread_cond_init (&q->condvar, &cattr)) clib_unix_warning ("cond_init1"); if (pthread_condattr_destroy (&cattr)) clib_unix_warning ("cond_init2"); return (q); } svm_queue_t * svm_queue_alloc_and_init (int nels, int elsize, int consumer_pid) { svm_queue_t *q; q = clib_mem_alloc_aligned (sizeof (svm_queue_t) + nels * elsize, CLIB_CACHE_LINE_BYTES); clib_memset (q, 0, sizeof (*q)); q = svm_queue_init (q, nels, elsize); q->consumer_pid = consumer_pid; return q; } /* * svm_queue_free */ void svm_queue_free (svm_queue_t * q) { (void) pthread_mutex_destroy (&q->mutex); (void) pthread_cond_destroy (&q->condvar); clib_mem_free (q); } void svm_queue_lock (svm_queue_t * q) { int rv = pthread_mutex_lock (&q->mutex); if (PREDICT_FALSE (rv == EOWNERDEAD)) pthread_mutex_consistent (&q->mutex); } static int svm_queue_trylock (svm_queue_t * q) { int rv = pthread_mutex_trylock (&q->mutex); if (PREDICT_FALSE (rv == EOWNERDEAD)) rv = pthread_mutex_consistent (&q->mutex); return rv; } void svm_queue_unlock (svm_queue_t * q) { pthread_mutex_unlock (&q->mutex); } int svm_queue_is_full (svm_queue_t * q) { return q->cursize == q->maxsize; } static inline void 
svm_queue_send_signal_inline (svm_queue_t * q, u8 is_prod) { if (q->producer_evtfd == -1) { (void) pthread_cond_broadcast (&q->condvar); } else { int __clib_unused rv, fd; u64 data = 1; ASSERT (q->consumer_evtfd > 0 && q->producer_evtfd > 0); fd = is_prod ? q->producer_evtfd : q->consumer_evtfd; rv = write (fd, &data, sizeof (data)); if (PREDICT_FALSE (rv < 0)) clib_unix_warning ("signal write on %d returned %d", fd, rv); } } void svm_queue_send_signal (svm_queue_t * q, u8 is_prod) { svm_queue_send_signal_inline (q, is_prod); } static inline void svm_queue_wait_inline (svm_queue_t * q) { if (q->producer_evtfd == -1) { pthread_cond_wait (&q->condvar, &q->mutex); } else { /* Fake a wait for event. We could use epoll but that would mean * using yet another fd. Should do for now */ u32 cursize = q->cursize; svm_queue_unlock (q); while (q->cursize == cursize) CLIB_PAUSE (); svm_queue_lock (q); } } void svm_queue_wait (svm_queue_t * q) { svm_queue_wait_inline (q); } static inline int svm_queue_timedwait_inline (svm_queue_t * q, double timeout) { struct timespec ts; ts.tv_sec = unix_time_now () + (u32) timeout; ts.tv_nsec = (timeout - (u32) timeout) * 1e9; if (q->producer_evtfd == -1) { return pthread_cond_timedwait (&q->condvar, &q->mutex, &ts); } else { double max_time = unix_time_now () + timeout; u32 cursize = q->cursize; int rv; svm_queue_unlock (q); while (q->cursize == cursize && unix_time_now () < max_time) CLIB_PAUSE (); rv = unix_time_now () < max_time ? 
0 : ETIMEDOUT; svm_queue_lock (q); return rv; } } int svm_queue_timedwait (svm_queue_t * q, double timeout) { return svm_queue_timedwait_inline (q, timeout); } /* * svm_queue_add_nolock */ int svm_queue_add_nolock (svm_queue_t * q, u8 * elem) { i8 *tailp; int need_broadcast = 0; if (PREDICT_FALSE (q->cursize == q->maxsize)) { while (q->cursize == q->maxsize) svm_queue_wait_inline (q); } tailp = (i8 *) (&q->data[0] + q->elsize * q->tail); clib_memcpy_fast (tailp, elem, q->elsize); q->tail++; q->cursize++; need_broadcast = (q->cursize == 1); if (q->tail == q->maxsize) q->tail = 0; if (need_broadcast) svm_queue_send_signal_inline (q, 1); return 0; } void svm_queue_add_raw (svm_queue_t * q, u8 * elem) { i8 *tailp; tailp = (i8 *) (&q->data[0] + q->elsize * q->tail); clib_memcpy_fast (tailp, elem, q->elsize); q->tail = (q->tail + 1) % q->maxsize; q->cursize++; if (q->cursize == 1) svm_queue_send_signal_inline (q, 1); } /* * svm_queue_add */ int svm_queue_add (svm_queue_t * q, u8 * elem, int nowait) { i8 *tailp; int need_broadcast = 0; if (nowait) { /* zero on success */ if (svm_queue_trylock (q)) { return (-1); } } else svm_queue_lock (q); if (PREDICT_FALSE (q->cursize == q->maxsize)) { if (nowait) { svm_queue_unlock (q); return (-2); } while (q->cursize == q->maxsize) svm_queue_wait_inline (q); } tailp = (i8 *) (&q->data[0] + q->elsize * q->tail); clib_memcpy_fast (tailp, elem, q->elsize); q->tail++; q->cursize++; need_broadcast = (q->cursize == 1); if (q->tail == q->maxsize) q->tail = 0; if (need_broadcast) svm_queue_send_signal_inline (q, 1); svm_queue_unlock (q); return 0; } /* * svm_queue_add2 */ int svm_queue_add2 (svm_queue_t * q, u8 * elem, u8 * elem2, int nowait) { i8 *tailp; int need_broadcast = 0; if (nowait) { /* zero on success */ if (svm_queue_trylock (q)) { return (-1); } } else svm_queue_lock (q); if (PREDICT_FALSE (q->cursize + 1 == q->maxsize)) { if (nowait) { svm_queue_unlock (q); return (-2); } while (q->cursize + 1 == q->maxsize) 
svm_queue_wait_inline (q); } tailp = (i8 *) (&q->data[0] + q->elsize * q->tail); clib_memcpy_fast (tailp, elem, q->elsize); q->tail++; q->cursize++; if (q->tail == q->maxsize) q->tail = 0; need_broadcast = (q->cursize == 1); tailp = (i8 *) (&q->data[0] + q->elsize * q->tail); clib_memcpy_fast (tailp, elem2, q->elsize); q->tail++; q->cursize++; if (q->tail == q->maxsize) q->tail = 0; if (need_broadcast) svm_queue_send_signal_inline (q, 1); svm_queue_unlock (q); return 0; } /* * svm_queue_sub */ int svm_queue_sub (svm_queue_t * q, u8 * elem, svm_q_conditional_wait_t cond, u32 time) { i8 *headp; int need_broadcast = 0; int rc = 0; if (cond == SVM_Q_NOWAIT) { /* zero on success */ if (svm_queue_trylock (q)) { return (-1); } } else svm_queue_lock (q); if (PREDICT_FALSE (q->cursize == 0)) { if (cond == SVM_Q_NOWAIT) { svm_queue_unlock (q); return (-2); } else if (cond == SVM_Q_TIMEDWAIT) { while (q->cursize == 0 && rc == 0) rc = svm_queue_timedwait_inline (q, time); if (rc == ETIMEDOUT) { svm_queue_unlock (q); return ETIMEDOUT; } } else { while (q->cursize == 0) svm_queue_wait_inline (q); } } headp = (i8 *) (&q->data[0] + q->elsize * q->head); clib_memcpy_fast (elem, headp, q->elsize); q->head++; /* $$$$ JFC shouldn't this be == 0? 
*/ if (q->cursize == q->maxsize) need_broadcast = 1; q->cursize--; if (q->head == q->maxsize) q->head = 0; if (need_broadcast) svm_queue_send_signal_inline (q, 0); svm_queue_unlock (q); return 0; } int svm_queue_sub2 (svm_queue_t * q, u8 * elem) { int need_broadcast; i8 *headp; svm_queue_lock (q); if (q->cursize == 0) { svm_queue_unlock (q); return -1; } headp = (i8 *) (&q->data[0] + q->elsize * q->head); clib_memcpy_fast (elem, headp, q->elsize); q->head++; need_broadcast = (q->cursize == q->maxsize / 2); q->cursize--; if (PREDICT_FALSE (q->head == q->maxsize)) q->head = 0; svm_queue_unlock (q); if (need_broadcast) svm_queue_send_signal_inline (q, 0); return 0; } int svm_queue_sub_raw (svm_queue_t * q, u8 * elem) { int need_broadcast; i8 *headp; if (PREDICT_FALSE (q->cursize == 0)) { while (q->cursize == 0) ; } headp = (i8 *) (&q->data[0] + q->elsize * q->head); clib_memcpy_fast (elem, headp, q->elsize); need_broadcast = q->cursize == q->maxsize; q->head = (q->head + 1) % q->maxsize; q->cursize--; if (PREDICT_FALSE (need_broadcast)) svm_queue_send_signal_inline (q, 0); return 0; } void svm_queue_set_producer_event_fd (svm_queue_t * q, int fd) { q->producer_evtfd = fd; } void svm_queue_set_consumer_event_fd (svm_queue_t * q, int fd) { q->consumer_evtfd = fd; } /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */