aboutsummaryrefslogtreecommitdiffstats
path: root/src/scripts
AgeCommit message (Expand)AuthorFilesLines
2019-06-18ipsec: ipsec-tun protectNeale Ranns1-0/+71
2019-06-12tcp: add cc stats plotting toolsFlorin Coras2-0/+231
2019-05-31tools: FEATURE.yaml meta-data infrastructureOle Troan1-0/+131
2019-04-30crypto: enforce per-alg crypto key lengthBenoît Ganne1-2/+2
2019-03-26Convert GRE nodes to new buffer APIs and multiarchBenoît Ganne2-0/+124
2019-03-26Simplify adjacency rewrite codeBenoît Ganne1-0/+67
2019-03-04IPSEC: script to bounce IPSEC traffic through a pipe to test encrypt and decryptNeale Ranns1-0/+66
2019-02-20pg: remove no-recycle optionDamjan Marion39-77/+0
2019-01-29cmake: fix out-of-git-tree buildDamjan Marion1-1/+1
2019-01-20Rework of debian packagingDamjan Marion1-0/+37
2018-08-17CMake as an alternative to autotools (experimental)Damjan Marion1-0/+28
2018-06-15NAT44: endpoint dependent mode (VPP-1273)Matus Fabian2-1/+49
2018-03-12License text cleanupDave Barach1-0/+13
2017-12-13NAT64: multi-thread support (VPP-891)Matus Fabian2-0/+86
2017-10-30Remove old Python vppctl scriptChris Luke1-134/+0
2017-10-16udp: refactor udp codeFlorin Coras2-25/+72
2017-08-23NAT: Rename snat plugin to nat (VPP-955)Matus Fabian4-10/+10
2017-08-04SNAT: fix address and port allocation for multiple worker threads (VPP-925)Matus Fabian1-5/+12
2017-05-09Add support for tcp/session buffer chainsFlorin Coras1-2/+17
2017-05-05First commit SR MPLSPablo Camarillo7-118/+11
2017-04-19Fix "make dist" to include version number, docouple it from rpm packagingDamjan Marion1-8/+4
2017-04-13Session layer refactoringFlorin Coras1-1/+2
2017-03-13VPP-659 Improve tcp/session debugging and testingFlorin Coras1-0/+4
2017-03-10VPP-659 TCP improvementsFlorin Coras3-3/+28
2017-03-07DHCP Multiple Servers (VPP-602, VPP-605)Neale Ranns1-1/+2
2017-03-07CGN: Deterministic NAT (VPP-623)Matus Fabian1-0/+108
2017-03-04Cleanup URI code and TCP bugfixingFlorin Coras2-0/+66
2017-03-01VPP-598: tcp stack initial commitDave Barach5-8/+91
2017-02-28vlib: add buffer cloning supportDamjan Marion1-8/+11
2017-02-21dhcp: multiple additionsNeale Ranns1-0/+21
2017-02-02Fix SR multicast post mfib commitNeale Ranns1-0/+58
2017-01-27IP Multicast FIB (mfib)Neale Ranns1-0/+22
2017-01-27Add multi-vpp support back into pythonic vppctlEd Warnicke1-7/+20
2017-01-25[re]Enable per-Adjacency/neighbour countersNeale Ranns1-2/+16
2017-01-21Fix issue in rpm versioning for release buildsDamjan Marion1-1/+1
2017-01-13vppctl: new bash completion for vppctl commandsPadraig Connolly1-0/+30
2017-01-10Revert "vppctl: bash completion for vppctl commands"Damjan Marion1-30/+0
2017-01-09vppctl: bash completion for vppctl commandsPadraig Connolly1-0/+30
2017-01-03fix version.h generation for out-of-tree buildsDamjan Marion1-0/+54
2016-12-28Reorganize source tree to use single autotools instanceDamjan Marion78-0/+3766
/*
 * NOTE(review): this chunk of the file begins mid-function.  Everything up
 * to the first closing brace below (including the stray `ss="n">` prefix,
 * which is residue from an HTML/cgit capture of the source, not original
 * code) is the tail of a function whose definition starts outside this
 * view.  All code tokens are preserved byte-for-byte; only formatting and
 * comments have been changed.
 */
ss="n">l2_packet)
    {
      /* Save ethernet header */
      ctx->l2_header[0] = ((u64 *) ip)[0];
      ctx->l2_header[1] = ((u64 *) ip)[1];
      ctx->l2_header[2] = ((u64 *) ip)[2];
      /* set ip to the true ip header */
      ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
    }

  /*
   * Copy L3 fields.
   * We need to save TOS for ip4 and ip6 packets.
   * Fortunately the TOS field is
   * in the first two bytes of both the ip4 and ip6 headers.
   */
  ctx->ip_tos = *((u16 *) (ip));

  /*
   * Save the ip4 checksum as well. We just blindly save the corresponding two
   * bytes even for ip6 packets.
   */
  ctx->ip4_checksum = ip->checksum;

  return ctx;
}

/**
 * Restore a recycled buffer to the state saved in its replication context.
 *
 * @param vm      vlib main for the calling thread (thread_index is read).
 * @param b0      buffer being recycled; b0->recycle_count indexes the
 *                per-thread context pool.
 * @param is_last non-zero when this is the last replication in the list:
 *                the buffer's original free-list index is restored,
 *                VLIB_BUFFER_RECYCLE is cleared and the context is
 *                returned to its pool.
 * @return the replication context.  NOTE(review): when is_last is set, the
 *         context has already been pool_put() by the time it is returned —
 *         presumably callers must not dereference it in that case; verify
 *         against call sites.
 */
replication_context_t *
replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last)
{
  replication_main_t *rm = &replication_main;
  replication_context_t *ctx;
  uword thread_index = vm->thread_index;
  ip4_header_t *ip;

  /* Get access to the replication context */
  ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count);

  /* Restore vnet buffer state */
  clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer,
	       sizeof (vnet_buffer_opaque_t));

  /* Restore the vlan flags */
  b0->flags &= ~VNET_BUFFER_FLAGS_VLAN_BITS;
  b0->flags |= ctx->flags;

  /* Restore the packet start (current_data) and length */
  vlib_buffer_advance (b0, ctx->current_data - b0->current_data);

  /* Restore packet contents */
  ip = (ip4_header_t *) vlib_buffer_get_current (b0);
  if (ctx->l2_packet)
    {
      /* Restore ethernet header (3 x u64 saved in the context) */
      ((u64 *) ip)[0] = ctx->l2_header[0];
      ((u64 *) ip)[1] = ctx->l2_header[1];
      ((u64 *) ip)[2] = ctx->l2_header[2];
      /* set ip to the true ip header */
      ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
    }

  /* Restore L3 fields: TOS bytes (shared ip4/ip6 layout) and ip4 checksum */
  *((u16 *) (ip)) = ctx->ip_tos;
  ip->checksum = ctx->ip4_checksum;

  if (is_last)
    {
      /*
       * This is the last replication in the list.
       * Restore original buffer free functionality.
       */
      vlib_buffer_set_free_list_index (b0, ctx->saved_free_list_index);
      b0->flags &= ~VLIB_BUFFER_RECYCLE;

      /* Free context back to its pool */
      pool_put (rm->contexts[thread_index], ctx);
    }

  return ctx;
}

/*
 * fish pkts back from the recycle queue/freelist
 * un-flatten the context chains
 *
 * Free-list callback: batches the buffer indices accumulated on the
 * recycle free list into frames of up to VLIB_FRAME_SIZE and hands them
 * to the recycle (feature) node recorded in the first buffer's
 * replication context.
 */
static void
replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
{
  vlib_frame_t *f = 0;
  u32 n_left_from;
  u32 n_left_to_next = 0;
  u32 n_this_frame = 0;
  u32 *from;
  u32 *to_next = 0;
  u32 bi0, pi0;
  vlib_buffer_t *b0;
  int i;
  replication_main_t *rm = &replication_main;
  replication_context_t *ctx;
  u32 feature_node_index = 0;
  uword thread_index = vm->thread_index;

  /*
   * All buffers in the list are destined to the same recycle node.
   * Pull the recycle node index from the first buffer.
   * Note: this could be sped up if the node index were stuffed into
   * the freelist itself.
   */
  if (vec_len (fl->buffers) > 0)
    {
      bi0 = fl->buffers[0];
      b0 = vlib_get_buffer (vm, bi0);
      ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count);
      feature_node_index = ctx->recycle_node_index;
    }

  /* buffers */
  /*
   * NOTE(review): the outer loop runs twice, but `from`/`n_left_from` are
   * only initialized when i == 0 and the inner while drains n_left_from to
   * zero, so the i == 1 pass is a no-op — apparently leftover from an
   * earlier version that walked two lists.
   */
  for (i = 0; i < 2; i++)
    {
      if (i == 0)
	{
	  from = fl->buffers;
	  n_left_from = vec_len (from);
	}

      while (n_left_from > 0)
	{
	  /* Current frame full (or none yet open): flush and open a new one */
	  if (PREDICT_FALSE (n_left_to_next == 0))
	    {
	      if (f)
		{
		  f->n_vectors = n_this_frame;
		  vlib_put_frame_to_node (vm, feature_node_index, f);
		}

	      f = vlib_get_frame_to_node (vm, feature_node_index);
	      to_next = vlib_frame_vector_args (f);
	      n_left_to_next = VLIB_FRAME_SIZE;
	      n_this_frame = 0;
	    }

	  bi0 = from[0];
	  /* Prefetch the next buffer to hide memory latency */
	  if (PREDICT_TRUE (n_left_from > 1))
	    {
	      pi0 = from[1];
	      vlib_prefetch_buffer_with_index (vm, pi0, LOAD);
	    }

	  b0 = vlib_get_buffer (vm, bi0);

	  /* Mark that this buffer was just recycled */
	  b0->flags |= VLIB_BUFFER_IS_RECYCLED;

#if (CLIB_DEBUG > 0)
	  if (vm->buffer_main->callbacks_registered == 0)
	    vlib_buffer_set_known_state (bi0, VLIB_BUFFER_KNOWN_ALLOCATED);
#endif

	  /* If buffer is traced, mark frame as traced */
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    f->flags |= VLIB_FRAME_TRACE;

	  to_next[0] = bi0;
	  from++;
	  to_next++;
	  n_this_frame++;
	  n_left_to_next--;
	  n_left_from--;
	}
    }

  vec_reset_length (fl->buffers);

  /* Flush the final, possibly partially-filled frame */
  if (f)
    {
      ASSERT (n_this_frame);
      f->n_vectors = n_this_frame;
      vlib_put_frame_to_node (vm, feature_node_index, f);
    }
}

/**
 * Module initialization: create the "replication-recycle" buffer free
 * list, register replication_recycle_callback on it, and size the
 * per-thread replication-context pool vector.
 *
 * @return 0 (success); registered via VLIB_INIT_FUNCTION below.
 */
clib_error_t *
replication_init (vlib_main_t * vm)
{
  replication_main_t *rm = &replication_main;
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *fl;
  __attribute__ ((unused)) replication_context_t *ctx;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  rm->vlib_main = vm;
  rm->vnet_main = vnet_get_main ();
  rm->recycle_list_index =
    vlib_buffer_create_free_list (vm, 1024 /* fictional */ ,
				  "replication-recycle");

  fl = pool_elt_at_index (bm->buffer_free_list_pool, rm->recycle_list_index);

  /* Hook the recycle callback so queued buffers are replayed to their node */
  fl->buffers_added_to_freelist_function = replication_recycle_callback;

  /* Verify the replication context is the expected size */
  ASSERT (sizeof (replication_context_t) == 128);	/* 2 cache lines */

  /* One context pool per vlib main (worker thread) */
  vec_validate (rm->contexts, tm->n_vlib_mains - 1);
  return 0;
}

VLIB_INIT_FUNCTION (replication_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */