aboutsummaryrefslogtreecommitdiffstats
path: root/README.md
blob: 0503680761046633ec66afe99dec5f72767d44b9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
Vector Packet Processing
========================

## Introduction

The VPP platform is an extensible framework that provides out-of-the-box
production quality switch/router functionality. It is the open source version
of Cisco's Vector Packet Processing (VPP) technology: a high performance,
packet-processing stack that can run on commodity CPUs.

The benefits of this implementation of VPP are its high performance, proven
technology, its modularity and flexibility, and rich feature set.

For more information on VPP and its features please visit the
[FD.io website](http://fd.io/) and
[What is VPP?](https://wiki.fd.io/view/VPP/What_is_VPP%3F) pages.


## Changes

Details of the changes leading up to this version of VPP can be found under
doc/releasenotes.


## Directory layout

| Directory name         | Description                                 |
| ---------------------- | ------------------------------------------- |
| build-data             | Build metadata                              |
| build-root             | Build output directory                      |
| docs                   | Sphinx Documentation                        |
| dpdk                   | DPDK patches and build infrastructure       |
| extras/libmemif        | Client library for memif                    |
| src/examples           | VPP example code                            |
| src/plugins            | VPP bundled plugins directory               |
| src/svm                | Shared virtual memory allocation library    |
| src/tests              | Standalone tests (not part of test harness) |
| src/vat                | VPP API test program                        |
| src/vlib               | VPP application library                     |
| src/vlibapi            | VPP API library                             |
| src/vlibmemory         | VPP Memory management                       |
| src/vnet               | VPP networking                              |
| src/vpp                | VPP application                             |
| src/vpp-api            | VPP application API bindings                |
| src/vppinfra           | VPP core library                            |
| src/vpp/api            | Not-yet-relocated API bindings              |
| test                   | Unit tests and Python test harness          |

## Getting started

In general anyone interested in building, developing or running VPP should
consult the [VPP wiki](https://wiki.fd.io/view/VPP) for more complete
documentation.

In particular, readers are recommended to take a look at [Pulling, Building,
Running, Hacking, Pushing](https://wiki.fd.io/view/VPP/Pulling,_Building,_Running,_Hacking_and_Pushing_VPP_Code)
which provides extensive step-by-step coverage of the topic.

For the impatient, some salient information is distilled below.


### Quick-start: On an existing Linux host

To install system dependencies, build VPP and then install it, simply run the
build script. This should be performed by a non-privileged user with `sudo`
access from the project base directory:

    ./extras/vagrant/build.sh

If you want a more fine-grained approach because you intend to do some
development work, the `Makefile` in the root directory of the source tree
provides several convenience shortcuts as `make` targets that may be of
interest. To see the available targets run:

    make


### Quick-start: Vagrant

The directory `extras/vagrant` contains a `Vagrantfile` and supporting
scripts to bootstrap a working VPP inside a Vagrant-managed Virtual Machine.
This VM can then be used to test concepts with VPP or as a development
platform to extend VPP. Some obvious caveats apply when using a VM for VPP
since its performance will never match that of bare metal; if your work is
timing or performance sensitive, consider using bare metal in addition or
instead of the VM.

For this to work you will need a working installation of Vagrant. Instructions
for this can be found on the
[Setting up Vagrant wiki page](https://wiki.fd.io/view/DEV/Setting_Up_Vagrant).


## More information

Several modules provide documentation, see @subpage user_doc for more
end-user-oriented information. Also see @subpage dev_doc for developer notes.

Visit the [VPP wiki](https://wiki.fd.io/view/VPP) for details on more
advanced building strategies and other development notes.
#include <vnet/lisp-cp/lisp_cp_messages.h>
#include <vnet/udp/udp_packet.h>

/**
 * Compute the UDP checksum for an IPv4 packet.
 *
 * Sums the UDP header + payload in 16-bit words, then adds the IPv4
 * pseudo-header (source/destination address, protocol, UDP length) and
 * folds the carries per the Internet checksum algorithm.
 *
 * @param b    pointer to the UDP header (checksum field must be zero)
 * @param len  UDP length (header + payload) in bytes
 * @param src  IPv4 source address, as 4 raw bytes
 * @param dst  IPv4 destination address, as 4 raw bytes
 * @return one's complement checksum in network byte order
 */
u16
udp_ip4_checksum (const void *b, u32 len, u8 * src, u8 * dst)
{
  const u16 *buf = b;
  u16 *ip_src = (u16 *) src;
  u16 *ip_dst = (u16 *) dst;
  u32 length = len;		/* preserved: len is consumed by the loop below */
  u32 sum = 0;

  while (len > 1)
    {
      sum += *buf++;
      /* Fold early to keep the accumulator from overflowing 32 bits */
      if (sum & 0x80000000)
	sum = (sum & 0xFFFF) + (sum >> 16);
      len -= 2;
    }

  /* Add the final byte, zero-padded, if the packet length is odd */
  if (len & 1)
    sum += *((u8 *) buf);

  /* Add the pseudo-header */
  sum += *(ip_src++);
  sum += *ip_src;
  sum += *(ip_dst++);
  sum += *ip_dst;
  sum += clib_host_to_net_u16 (IP_PROTOCOL_UDP);
  sum += clib_host_to_net_u16 (length);

  /* Fold any remaining carries into the low 16 bits */
  while (sum >> 16)
    sum = (sum & 0xFFFF) + (sum >> 16);

  /* Return the one's complement of the sum */
  return ((u16) (~sum));
}

/**
 * Compute the UDP checksum for an IPv6 packet.
 *
 * Builds the IPv6 pseudo-header (RFC 2460 section 8.1) in a local union,
 * sums it together with the UDP header + payload in 16-bit words, folds
 * the carries and returns the one's complement.
 *
 * @param ip6  IPv6 header supplying source/destination addresses
 * @param up   pointer to the UDP header (checksum field must be zero)
 * @param len  UDP length (header + payload) in bytes
 * @return one's complement checksum
 */
u16
udp_ip6_checksum (ip6_header_t * ip6, udp_header_t * up, u32 len)
{
  size_t i;
  register const u16 *sp;
  u32 sum;
  union
  {
    struct
    {
      ip6_address_t ph_src;
      ip6_address_t ph_dst;
      u32 ph_len;
      u8 ph_zero[3];
      u8 ph_nxt;
    } ph;
    u16 pa[20];
  } phu;

  /* pseudo-header */
  clib_memset (&phu, 0, sizeof (phu));
  phu.ph.ph_src = ip6->src_address;
  phu.ph.ph_dst = ip6->dst_address;
  phu.ph.ph_len = clib_host_to_net_u32 (len);
  phu.ph.ph_nxt = IP_PROTOCOL_UDP;

  sum = 0;
  for (i = 0; i < sizeof (phu.pa) / sizeof (phu.pa[0]); i++)
    sum += phu.pa[i];

  sp = (const u16 *) up;
  for (i = 0; i < (len & ~1); i += 2)
    sum += *sp++;

  /* Odd trailing byte contributes as the high byte of a 16-bit word */
  if (len & 1)
    sum += clib_host_to_net_u16 ((*(const u8 *) sp) << 8);

  while (sum > 0xffff)
    sum = (sum & 0xffff) + (sum >> 16);
  sum = ~sum & 0xffff;

  return (sum);
}

/**
 * Dispatch UDP checksum computation by IP version.
 *
 * @param uh       UDP header (checksum field must be zero)
 * @param udp_len  UDP length (header + payload) in bytes
 * @param ih       IP header (ip4_header_t or ip6_header_t per @p version)
 * @param version  IP4 or IP6
 * @return checksum, or ~0 (all ones) on unknown IP version
 */
u16
udp_checksum (udp_header_t * uh, u32 udp_len, void *ih, u8 version)
{
  switch (version)
    {
    case IP4:
      return (udp_ip4_checksum (uh, udp_len,
				((ip4_header_t *) ih)->src_address.as_u8,
				((ip4_header_t *) ih)->dst_address.as_u8));
    case IP6:
      return (udp_ip6_checksum (ih, uh, udp_len));
    default:
      return ~0;
    }
}

/**
 * Prepend a UDP header to the buffer.
 *
 * The UDP length covers the new header plus the current buffer chain
 * contents. The checksum is left zero for the caller to fill in (or
 * offload).
 *
 * @param vm  vlib main
 * @param b   buffer to prepend to
 * @param sp  source port (host byte order)
 * @param dp  destination port (host byte order)
 * @return pointer to the newly written UDP header
 */
void *
pkt_push_udp (vlib_main_t * vm, vlib_buffer_t * b, u16 sp, u16 dp)
{
  udp_header_t *uh;
  u16 udp_len = sizeof (udp_header_t) + vlib_buffer_length_in_chain (vm, b);

  uh = vlib_buffer_push_uninit (b, sizeof (*uh));

  uh->src_port = clib_host_to_net_u16 (sp);
  uh->dst_port = clib_host_to_net_u16 (dp);
  uh->length = clib_host_to_net_u16 (udp_len);
  uh->checksum = 0;
  return uh;
}

/**
 * Prepend an IP header (v4 or v6, chosen by the address AFI) to the buffer.
 *
 * @param vm            vlib main
 * @param b             buffer to prepend to
 * @param src           source IP address
 * @param dst           destination IP address (must match @p src AFI)
 * @param proto         IP protocol number for the payload
 * @param csum_offload  request checksum offload (IPv4 only)
 * @return pointer to the new IP header, or 0 on AFI mismatch
 */
void *
pkt_push_ip (vlib_main_t * vm, vlib_buffer_t * b, ip_address_t * src,
	     ip_address_t * dst, u32 proto, u8 csum_offload)
{
  if (ip_addr_version (src) != ip_addr_version (dst))
    {
      clib_warning ("src %U and dst %U IP have different AFI! Discarding!",
		    format_ip_address, src, format_ip_address, dst);
      return 0;
    }

  switch (ip_addr_version (src))
    {
    case IP4:
      return vlib_buffer_push_ip4 (vm, b, &ip_addr_v4 (src),
				   &ip_addr_v4 (dst), proto, csum_offload);
      break;
    case IP6:
      return vlib_buffer_push_ip6 (vm, b, &ip_addr_v6 (src),
				   &ip_addr_v6 (dst), proto);
      break;
    }

  return 0;
}

/**
 * Prepend UDP and IP headers, computing or offloading the UDP checksum.
 *
 * With @p csum_offload set, the checksum is left zero, the offload flag
 * and l3/l4 header offsets are recorded in the buffer metadata for the
 * NIC to complete. Otherwise the checksum is computed in software.
 *
 * @param vm            vlib main
 * @param b             buffer to prepend to
 * @param sp            UDP source port (host byte order)
 * @param dp            UDP destination port (host byte order)
 * @param sip           source IP address
 * @param dip           destination IP address
 * @param csum_offload  non-zero to request hardware checksum offload
 * @return pointer to the new IP header, or 0 on failure
 */
void *
pkt_push_udp_and_ip (vlib_main_t * vm, vlib_buffer_t * b, u16 sp,
		     u16 dp, ip_address_t * sip, ip_address_t * dip,
		     u8 csum_offload)
{
  u16 udpsum;
  udp_header_t *uh;
  void *ih;

  uh = pkt_push_udp (vm, b, sp, dp);

  if (csum_offload)
    {
      ih = pkt_push_ip (vm, b, sip, dip, IP_PROTOCOL_UDP, 1);
      b->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
      vnet_buffer (b)->l3_hdr_offset = (u8 *) ih - b->data;
      vnet_buffer (b)->l4_hdr_offset = (u8 *) uh - b->data;
      uh->checksum = 0;
    }
  else
    {
      ih = pkt_push_ip (vm, b, sip, dip, IP_PROTOCOL_UDP, 0);
      udpsum = udp_checksum (uh, clib_net_to_host_u16 (uh->length), ih,
			     ip_addr_version (sip));
      if (udpsum == (u16) ~ 0)
	{
	  clib_warning ("Failed UDP checksum! Discarding");
	  return 0;
	}
      /* clear flags used for csum since we're not offloading */
      b->flags &= ~(VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_IS_IP6);
      uh->checksum = udpsum;
    }

  return ih;
}

/**
 * Prepend a zeroed LISP encapsulated-control-message (ECM) header.
 *
 * @param b  buffer to prepend to
 * @return pointer to the new ECM header
 */
void *
pkt_push_ecm_hdr (vlib_buffer_t * b)
{
  ecm_hdr_t *h;
  h = vlib_buffer_push_uninit (b, sizeof (h[0]));

  clib_memset (h, 0, sizeof (h[0]));
  h->type = LISP_ENCAP_CONTROL_TYPE;
  clib_memset (h->reserved2, 0, sizeof (h->reserved2));

  return h;
}

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */