summaryrefslogtreecommitdiffstats
path: root/README.md
blob: 4cc283b5e530fd12246a33599306cb375fdc26ac (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
Vector Packet Processing
========================

## Introduction

The VPP platform is an extensible framework that provides out-of-the-box
production quality switch/router functionality. It is the open source version
of Cisco's Vector Packet Processing (VPP) technology: a high performance,
packet-processing stack that can run on commodity CPUs.

The benefits of this implementation of VPP are its high performance, proven
technology, its modularity and flexibility, and rich feature set.

For more information on VPP and its features please visit the
[FD.io website](http://fd.io/) and
[What is VPP?](https://wiki.fd.io/view/VPP/What_is_VPP%3F) pages.


## Changes

Details of the changes leading up to this version of VPP can be found under
@ref release_notes.


## Directory layout

| Directory name         | Description                                 |
| ---------------------- | ------------------------------------------- |
|      build-data        | Build metadata                              |
|      build-root        | Build output directory                      |
|      doxygen           | Documentation generator configuration       |
|      dpdk              | DPDK patches and build infrastructure       |
| @ref extras/libmemif   | Client library for memif                    |
| @ref src/examples      | VPP example code                            |
| @ref src/plugins       | VPP bundled plugins directory               |
| @ref src/svm           | Shared virtual memory allocation library    |
|      src/tests         | Standalone tests (not part of test harness) |
|      src/vat           | VPP API test program                        |
| @ref src/vlib          | VPP application library                     |
| @ref src/vlibapi       | VPP API library                             |
| @ref src/vlibmemory    | VPP Memory management                       |
| @ref src/vnet          | VPP networking                              |
| @ref src/vpp           | VPP application                             |
| @ref src/vpp-api       | VPP application API bindings                |
| @ref src/vppinfra      | VPP core library                            |
| @ref src/vpp/api       | Not-yet-relocated API bindings              |
|      test              | Unit tests and Python test harness          |

## Getting started

In general anyone interested in building, developing or running VPP should
consult the [VPP wiki](https://wiki.fd.io/view/VPP) for more complete
documentation.

In particular, readers are recommended to take a look at
[Pulling, Building, Running, Hacking, Pushing](https://wiki.fd.io/view/VPP/Pulling,_Building,_Running,_Hacking_and_Pushing_VPP_Code)
which provides extensive step-by-step coverage of the topic.

For the impatient, some salient information is distilled below.


### Quick-start: On an existing Linux host

To install system dependencies, build VPP and then install it, simply run the
build script. This should be performed by a non-privileged user with `sudo`
access from the project base directory:

    ./extras/vagrant/build.sh

If you want a more fine-grained approach because you intend to do some
development work, the `Makefile` in the root directory of the source tree
provides several convenience shortcuts as `make` targets that may be of
interest. To see the available targets run:

    make


### Quick-start: Vagrant

The directory `extras/vagrant` contains a `Vagrantfile` and supporting
scripts to bootstrap a working VPP inside a Vagrant-managed Virtual Machine.
This VM can then be used to test concepts with VPP or as a development
platform to extend VPP. Some obvious caveats apply when using a VM for VPP
since its performance will never match that of bare metal; if your work is
timing or performance sensitive, consider using bare metal in addition to,
or instead of, the VM.

For this to work you will need a working installation of Vagrant. Instructions
for this can be found on the
[Setting up Vagrant wiki page](https://wiki.fd.io/view/DEV/Setting_Up_Vagrant).


## More information

Several modules provide documentation, see @subpage user_doc for more
end-user-oriented information. Also see @subpage dev_doc for developer notes.

Visit the [VPP wiki](https://wiki.fd.io/view/VPP) for details on more
advanced building strategies and other development notes.


## Test Framework

There is PyDoc generated documentation available for the VPP test framework.
See @ref test_framework_doc for details.
__included_vat_h__ #define __included_vat_h__ #include <stdio.h> #include <setjmp.h> #include <vppinfra/clib.h> #include <vppinfra/format.h> #include <vppinfra/error.h> #include <vppinfra/time.h> #include <vppinfra/macros.h> #include <vnet/vnet.h> #include <vlib/vlib.h> #include <vlib/unix/unix.h> #include <vlibapi/api.h> #include <vlibmemory/api.h> #include "vat/json_format.h" #include <vlib/vlib.h> typedef struct { u8 *interface_name; u32 sw_if_index; /* * Subinterface ID. A number 0-N to uniquely identify this * subinterface under the super interface */ u32 sub_id; /* 0 = dot1q, 1=dot1ad */ u8 sub_dot1ad; /* Number of tags 0-2 */ u8 sub_number_of_tags; u16 sub_outer_vlan_id; u16 sub_inner_vlan_id; u8 sub_exact_match; u8 sub_default; u8 sub_outer_vlan_id_any; u8 sub_inner_vlan_id_any; /* vlan tag rewrite */ u32 vtr_op; u32 vtr_push_dot1q; u32 vtr_tag1; u32 vtr_tag2; } sw_interface_subif_t; typedef struct { u8 ip[16]; u8 prefix_length; } ip_address_details_t; typedef struct { u8 present; ip_address_details_t *addr; } ip_details_t; typedef struct { u64 packets; u64 bytes; } interface_counter_t; typedef struct { struct in_addr address; u8 address_length; u64 packets; u64 bytes; } ip4_fib_counter_t; typedef struct { struct in6_addr address; u8 address_length; u64 packets; u64 bytes; } ip6_fib_counter_t; typedef struct { /* vpe input queue */ unix_shared_memory_queue_t *vl_input_queue; /* interface name table */ uword *sw_if_index_by_interface_name; /* subinterface table */ sw_interface_subif_t *sw_if_subif_table; /* Graph node table */ uword *graph_node_index_by_name; vlib_node_t **graph_nodes; /* ip tables */ ip_details_t *ip_details_by_sw_if_index[2]; /* sw_if_index of currently processed interface */ u32 current_sw_if_index; /* remember that we are dumping ipv6 */ u8 is_ipv6; /* function table */ uword *function_by_name; /* help strings */ uword *help_by_name; /* macro table */ macro_main_t macro_main; /* Errors by number */ uword *error_string_by_error_number; /* 
Main thread can spin (w/ timeout) here if needed */ u32 async_mode; u32 async_errors; volatile u32 result_ready; volatile i32 retval; volatile u32 sw_if_index; volatile u8 *shmem_result; volatile u8 *cmd_reply; /* our client index */ u32 my_client_index; /* Time is of the essence... */ clib_time_t clib_time; /* Unwind (so we can quit) */ jmp_buf jump_buf; int jump_buf_set; volatile int do_exit; /* temporary parse buffer */ unformat_input_t *input; /* input buffer */ u8 *inbuf; /* stdio input / output FILEs */ FILE *ifp, *ofp; u8 *current_file; u32 input_line_number; /* exec mode toggle */ int exec_mode; /* Regenerate the interface table */ volatile int regenerate_interface_table; /* flag for JSON output format */ u8 json_output; /* flag for interface event display */ u8 interface_event_display; /* JSON tree used in composing dump api call results */ vat_json_node_t json_tree; /* counters */ u64 **simple_interface_counters; interface_counter_t **combined_interface_counters; ip4_fib_counter_t **ip4_fib_counters; u32 *ip4_fib_counters_vrf_id_by_index; ip6_fib_counter_t **ip6_fib_counters; u32 *ip6_fib_counters_vrf_id_by_index; /* Convenience */ vlib_main_t *vlib_main; } vat_main_t; vat_main_t vat_main; static inline f64 vat_time_now (vat_main_t * vam) { #if VPP_API_TEST_BUILTIN return vlib_time_now (vam->vlib_main); #else return clib_time_now (&vam->clib_time); #endif } #if VPP_API_TEST_BUILTIN #define errmsg(fmt,args...) \ do { \ vat_main_t *__vam = &vat_main; \ vlib_cli_output (__vam->vlib_main, fmt, ##args); \ } while(0); #else #define errmsg(fmt,args...) 
\ do { \ vat_main_t *__vam = &vat_main; \ if(__vam->ifp != stdin) \ fformat(__vam->ofp,"%s(%d): \n", __vam->current_file, \ __vam->input_line_number); \ fformat(__vam->ofp, fmt "\n", ##args); \ fflush(__vam->ofp); \ } while(0); #endif void vat_api_hookup (vat_main_t * vam); int api_sw_interface_dump (vat_main_t * vam); void do_one_file (vat_main_t * vam); int exec (vat_main_t * vam); /* Plugin API library functions */ char *vat_plugin_path; char *vat_plugin_name_filter; void vat_plugin_api_reference (void); uword unformat_sw_if_index (unformat_input_t * input, va_list * args); uword unformat_ip4_address (unformat_input_t * input, va_list * args); uword unformat_ethernet_address (unformat_input_t * input, va_list * args); uword unformat_ethernet_type_host_byte_order (unformat_input_t * input, va_list * args); uword unformat_ip6_address (unformat_input_t * input, va_list * args); u8 *format_ip4_address (u8 * s, va_list * args); u8 *format_ethernet_address (u8 * s, va_list * args); #if VPP_API_TEST_BUILTIN #define print api_cli_output void api_cli_output (void *, const char *fmt, ...); #else #define print fformat_append_cr void fformat_append_cr (FILE *, const char *fmt, ...); #endif #endif /* __included_vat_h__ */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */