author     Christian Ehrhardt <christian.ehrhardt@canonical.com>   2016-12-08 14:07:29 +0100
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>   2016-12-08 14:10:05 +0100
commit     6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9
tree       1b1fb3f903b2282e261ade69e3c17952b3fd3464 /examples
parent     32e04ea00cd159613e04acef75e52bfca6eeff2f
Imported Upstream version 16.11
Change-Id: I1944c65ddc88a9ad70f8c0eb6731552b84fbcb77
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'examples')
66 files changed, 3416 insertions, 2715 deletions
diff --git a/examples/Makefile b/examples/Makefile index 18b41b90..d49c7f29 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -61,7 +61,6 @@ ifneq ($(PQOS_INSTALL_PATH),) DIRS-y += l2fwd-cat endif DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += l2fwd-crypto -DIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += l2fwd-ivshmem DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += l2fwd-jobstats DIRS-y += l2fwd-keepalive DIRS-y += l2fwd-keepalive/ka-agent diff --git a/examples/bond/main.c b/examples/bond/main.c index 776fad0a..6402c6b3 100644 --- a/examples/bond/main.c +++ b/examples/bond/main.c @@ -67,7 +67,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_log.h> #include <rte_mempool.h> #include <rte_mbuf.h> diff --git a/examples/dpdk_qat/crypto.c b/examples/dpdk_qat/crypto.c index 8954bf87..02032f30 100644 --- a/examples/dpdk_qat/crypto.c +++ b/examples/dpdk_qat/crypto.c @@ -53,7 +53,6 @@ #include <rte_lcore.h> #include <rte_atomic.h> #include <rte_branch_prediction.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_string_fns.h> diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c index 3c6112d7..aa9b1d5c 100644 --- a/examples/dpdk_qat/main.c +++ b/examples/dpdk_qat/main.c @@ -62,7 +62,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_ip.h> diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c index e5eedcc1..73d50b69 100644 --- a/examples/exception_path/main.c +++ b/examples/exception_path/main.c @@ -65,7 +65,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_log.h> #include <rte_mempool.h> #include <rte_mbuf.h> diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c index 2f452648..e1e32c66 100644 --- a/examples/ip_fragmentation/main.c +++ b/examples/ip_fragmentation/main.c @@ -64,7 +64,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_lpm.h> diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h index 6a6fdd97..f8b84e09 100644 --- a/examples/ip_pipeline/app.h +++ b/examples/ip_pipeline/app.h @@ -177,19 +177,29 @@ struct app_pktq_tm_params { uint32_t burst_write; }; +struct app_pktq_tap_params { + char *name; + uint32_t parsed; + uint32_t burst_read; + uint32_t burst_write; + uint32_t dropless; + uint64_t n_retries; + uint32_t mempool_id; /* Position in the app->mempool_params */ +}; + struct app_pktq_source_params { char *name; uint32_t parsed; uint32_t mempool_id; /* Position in the app->mempool_params array */ uint32_t burst; - char *file_name; /* Full path of PCAP file to be copied to mbufs */ + const char *file_name; /* Full path of PCAP file to be copied to mbufs */ uint32_t n_bytes_per_pkt; }; struct app_pktq_sink_params { char *name; uint8_t parsed; - char *file_name; /* Full path of PCAP file to be copied to mbufs */ + const char *file_name; /* Full path of PCAP file to be copied to mbufs */ uint32_t n_pkts_to_dump; }; @@ -204,6 +214,7 @@ enum app_pktq_in_type { APP_PKTQ_IN_HWQ, APP_PKTQ_IN_SWQ, APP_PKTQ_IN_TM, + APP_PKTQ_IN_TAP, APP_PKTQ_IN_KNI, APP_PKTQ_IN_SOURCE, }; @@ -217,6 +228,7 @@ enum app_pktq_out_type { APP_PKTQ_OUT_HWQ, APP_PKTQ_OUT_SWQ, APP_PKTQ_OUT_TM, + APP_PKTQ_OUT_TAP, APP_PKTQ_OUT_KNI, APP_PKTQ_OUT_SINK, }; @@ -441,6 +453,10 @@ struct app_eal_params { 
#define APP_MAX_PKTQ_TM APP_MAX_LINKS +#ifndef APP_MAX_PKTQ_TAP +#define APP_MAX_PKTQ_TAP APP_MAX_LINKS +#endif + #define APP_MAX_PKTQ_KNI APP_MAX_LINKS #ifndef APP_MAX_PKTQ_SOURCE @@ -494,6 +510,7 @@ struct app_params { struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT]; struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ]; struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM]; + struct app_pktq_tap_params tap_params[APP_MAX_PKTQ_TAP]; struct app_pktq_kni_params kni_params[APP_MAX_PKTQ_KNI]; struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE]; struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK]; @@ -506,6 +523,7 @@ struct app_params { uint32_t n_pktq_hwq_out; uint32_t n_pktq_swq; uint32_t n_pktq_tm; + uint32_t n_pktq_tap; uint32_t n_pktq_kni; uint32_t n_pktq_source; uint32_t n_pktq_sink; @@ -520,6 +538,7 @@ struct app_params { struct app_link_data link_data[APP_MAX_LINKS]; struct rte_ring *swq[APP_MAX_PKTQ_SWQ]; struct rte_sched_port *tm[APP_MAX_PKTQ_TM]; + int tap[APP_MAX_PKTQ_TAP]; #ifdef RTE_LIBRTE_KNI struct rte_kni *kni[APP_MAX_PKTQ_KNI]; #endif /* RTE_LIBRTE_KNI */ @@ -786,6 +805,66 @@ app_tm_get_reader(struct app_params *app, } static inline uint32_t +app_tap_get_readers(struct app_params *app, struct app_pktq_tap_params *tap) +{ + uint32_t pos = tap - app->tap_params; + uint32_t n_pipelines = RTE_MIN(app->n_pipelines, + RTE_DIM(app->pipeline_params)); + uint32_t n_readers = 0, i; + + for (i = 0; i < n_pipelines; i++) { + struct app_pipeline_params *p = &app->pipeline_params[i]; + uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in)); + uint32_t j; + + for (j = 0; j < n_pktq_in; j++) { + struct app_pktq_in_params *pktq = &p->pktq_in[j]; + + if ((pktq->type == APP_PKTQ_IN_TAP) && + (pktq->id == pos)) + n_readers++; + } + } + + return n_readers; +} + +static inline struct app_pipeline_params * +app_tap_get_reader(struct app_params *app, + struct app_pktq_tap_params *tap, + uint32_t *pktq_in_id) +{ + struct app_pipeline_params *reader = NULL; + uint32_t pos = tap - app->tap_params; + uint32_t n_pipelines = RTE_MIN(app->n_pipelines, + RTE_DIM(app->pipeline_params)); + uint32_t n_readers = 0, id = 0, i; + + for (i = 0; i < n_pipelines; i++) { + struct app_pipeline_params *p = &app->pipeline_params[i]; + uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in)); + uint32_t j; + + for (j = 0; j < n_pktq_in; j++) { + struct app_pktq_in_params *pktq = &p->pktq_in[j]; + + if ((pktq->type == APP_PKTQ_IN_TAP) && + (pktq->id == pos)) { + n_readers++; + reader = p; + id = j; + } + } + } + + if (n_readers != 1) + return NULL; + + *pktq_in_id = id; + return reader; +} + +static inline uint32_t app_kni_get_readers(struct app_params *app, struct app_pktq_kni_params *kni) { uint32_t pos = kni - app->kni_params; @@ -1043,6 +1122,68 @@ app_tm_get_writer(struct app_params *app, } static inline uint32_t +app_tap_get_writers(struct app_params *app, struct app_pktq_tap_params *tap) +{ + uint32_t pos = tap - app->tap_params; + uint32_t n_pipelines = RTE_MIN(app->n_pipelines, + RTE_DIM(app->pipeline_params)); + uint32_t n_writers = 0, i; + + for (i = 0; i < n_pipelines; i++) { + struct app_pipeline_params *p = &app->pipeline_params[i]; + uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out, + RTE_DIM(p->pktq_out)); + uint32_t j; + + for (j = 0; j < n_pktq_out; j++) { + struct app_pktq_out_params *pktq = &p->pktq_out[j]; + + if ((pktq->type == APP_PKTQ_OUT_TAP) && + (pktq->id == pos)) + n_writers++; + } + } + + return n_writers; +} + +static inline struct 
app_pipeline_params * +app_tap_get_writer(struct app_params *app, + struct app_pktq_tap_params *tap, + uint32_t *pktq_out_id) +{ + struct app_pipeline_params *writer = NULL; + uint32_t pos = tap - app->tap_params; + uint32_t n_pipelines = RTE_MIN(app->n_pipelines, + RTE_DIM(app->pipeline_params)); + uint32_t n_writers = 0, id = 0, i; + + for (i = 0; i < n_pipelines; i++) { + struct app_pipeline_params *p = &app->pipeline_params[i]; + uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out, + RTE_DIM(p->pktq_out)); + uint32_t j; + + for (j = 0; j < n_pktq_out; j++) { + struct app_pktq_out_params *pktq = &p->pktq_out[j]; + + if ((pktq->type == APP_PKTQ_OUT_TAP) && + (pktq->id == pos)) { + n_writers++; + writer = p; + id = j; + } + } + } + + if (n_writers != 1) + return NULL; + + *pktq_out_id = id; + return writer; +} + +static inline uint32_t app_kni_get_writers(struct app_params *app, struct app_pktq_kni_params *kni) { uint32_t pos = kni - app->kni_params; diff --git a/examples/ip_pipeline/config/network_layers.cfg b/examples/ip_pipeline/config/network_layers.cfg index 8054d9fe..397b5d77 100644 --- a/examples/ip_pipeline/config/network_layers.cfg +++ b/examples/ip_pipeline/config/network_layers.cfg @@ -203,12 +203,16 @@ type = PASS-THROUGH ; Loop-back (UDP place-holder) core = 1 pktq_in = SWQ2 pktq_out = SWQ0 +swap = 282 286 ; IPSRC <-> IPDST +swap = 290 292 ; PORTSRC <-> PORTDST [PIPELINE5] type = PASS-THROUGH ; Loop-back (TCP place-holder) core = 1 pktq_in = SWQ3 pktq_out = SWQ1 +swap = 282 286 ; IPSRC <-> IPDST +swap = 290 292 ; PORTSRC <-> PORTDST [PIPELINE6] type = PASS-THROUGH ; Drop (ICMP place-holder) diff --git a/examples/ip_pipeline/config/network_layers.sh b/examples/ip_pipeline/config/network_layers.sh index 3b86bebd..449b0069 100644 --- a/examples/ip_pipeline/config/network_layers.sh +++ b/examples/ip_pipeline/config/network_layers.sh @@ -56,7 +56,7 @@ p 1 route add 100.3.0.0 16 port 3 ether 10.3.0.2 # Prio = 1 (High): [SA = ANY, DA = 10.1.0.1, SP = ANY, DP = 1001, PROTO = UDP] => Allow # Prio = 1 (High): [SA = ANY, DA = 10.2.0.1, SP = ANY, DP = 1002, PROTO = UDP] => Allow # Prio = 1 (High): [SA = ANY, DA = 10.3.0.1, SP = ANY, DP = 1003, PROTO = UDP] => Allow -p 1 firewall add default 1 #SINK0 +p 2 firewall add default 1 #SINK0 p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.0.0.1 32 0 65535 1000 1000 17 0xF port 0 p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.1.0.1 32 0 65535 1001 1001 17 0xF port 0 p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.2.0.1 32 0 65535 1002 1002 17 0xF port 0 @@ -72,8 +72,8 @@ p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.3.0.1 32 0 65535 1003 1003 17 0xF # TCP [SA = 100.2.0.10, DA = 10.2.0.1, SP = 1002, DP = 80] => socket ID = 2 # TCP [SA = 100.3.0.10, DA = 10.3.0.1, SP = 1003, DP = 80] => socket ID = 3 p 3 flow add default 1 #SINK1 -p 3 flow add ipv4 100.0.0.10 10.0.0.1 1000 80 6 port 1 id 0 -p 3 flow add ipv4 100.1.0.10 10.1.0.1 1001 80 6 port 1 id 1 -p 3 flow add ipv4 100.2.0.10 10.2.0.1 1002 80 6 port 1 id 2 -p 3 flow add ipv4 100.3.0.10 10.3.0.1 1003 80 6 port 1 id 3 +p 3 flow add ipv4 100.0.0.10 10.0.0.1 1000 80 6 port 0 id 0 +p 3 flow add ipv4 100.1.0.10 10.1.0.1 1001 80 6 port 0 id 1 +p 3 flow add ipv4 100.2.0.10 10.2.0.1 1002 80 6 port 0 id 2 +p 3 flow add ipv4 100.3.0.10 10.3.0.1 1003 80 6 port 0 id 3 #p 3 flow ls diff --git a/examples/ip_pipeline/config/tap.cfg b/examples/ip_pipeline/config/tap.cfg new file mode 100644 index 00000000..10d35ebb --- /dev/null +++ b/examples/ip_pipeline/config/tap.cfg @@ -0,0 +1,64 @@ +; BSD LICENSE +; +; Copyright(c) 2016 
Intel Corporation. All rights reserved. +; All rights reserved. +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions +; are met: +; +; * Redistributions of source code must retain the above copyright +; notice, this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright +; notice, this list of conditions and the following disclaimer in +; the documentation and/or other materials provided with the +; distribution. +; * Neither the name of Intel Corporation nor the names of its +; contributors may be used to endorse or promote products derived +; from this software without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +; ______________ ______________________ +; | | TAP0 | | +; RXQ0.0 --->| |------->|--+ | +; | | TAP1 | | br0 | +; TXQ1.0 <---| |<-------|<-+ | +; | Pass-through | | Linux Kernel | +; | (P1) | | Network Stack | +; | | TAP1 | | +; RXQ1.0 --->| |------->|--+ | +; | | TAP0 | | br0 | +; TXQ0.0 <---| |<-------|<-+ | +; |______________| |______________________| +; +; Configure Linux kernel bridge between TAP0 and TAP1 interfaces: +; [Linux]$ ifconfig TAP0 up +; [Linux]$ ifconfig TAP1 up +; [Linux]$ brctl addbr "br0" +; [Linux]$ brctl addif br0 TAP0 +; [Linux]$ brctl addif br0 TAP1 +; [Linux]$ ifconfig br0 up + +[EAL] +log_level = 0 + +[PIPELINE0] +type = MASTER +core = 0 + +[PIPELINE1] +type = PASS-THROUGH +core = 1 +pktq_in = RXQ0.0 TAP1 RXQ1.0 TAP0 +pktq_out = TAP0 TXQ1.0 TAP1 TXQ0.0 diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c index af1b6284..dd9d4d8b 100644 --- a/examples/ip_pipeline/config_check.c +++ b/examples/ip_pipeline/config_check.c @@ -316,6 +316,36 @@ check_tms(struct app_params *app) } static void +check_taps(struct app_params *app) +{ + uint32_t i; + + for (i = 0; i < app->n_pktq_tap; i++) { + struct app_pktq_tap_params *p = &app->tap_params[i]; + uint32_t n_readers = app_tap_get_readers(app, p); + uint32_t n_writers = app_tap_get_writers(app, p); + + APP_CHECK((n_readers != 0), + "%s has no reader\n", p->name); + + APP_CHECK((n_readers == 1), + "%s has more than one reader\n", p->name); + + APP_CHECK((n_writers != 0), + "%s has no writer\n", p->name); + + APP_CHECK((n_writers == 1), + "%s has more than one writer\n", p->name); + + APP_CHECK((p->burst_read > 0), + "%s read burst size is 0\n", p->name); + + APP_CHECK((p->burst_write > 0), + "%s write burst size is 0\n", p->name); + } +} + +static void check_knis(struct app_params *app) { uint32_t i; @@ -476,6 +506,7 @@ app_config_check(struct app_params *app) check_txqs(app); check_swqs(app); check_tms(app); + check_taps(app); check_knis(app); 
check_sources(app); check_sinks(app); diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c index 8fe81577..8b372e94 100644 --- a/examples/ip_pipeline/config_parse.c +++ b/examples/ip_pipeline/config_parse.c @@ -189,6 +189,15 @@ struct app_pktq_tm_params default_tm_params = { .burst_write = 32, }; +struct app_pktq_tap_params default_tap_params = { + .parsed = 0, + .burst_read = 32, + .burst_write = 32, + .dropless = 0, + .n_retries = 0, + .mempool_id = 0, +}; + struct app_pktq_kni_params default_kni_params = { .parsed = 0, .socket_id = 0, @@ -207,7 +216,7 @@ struct app_pktq_source_params default_source_params = { .parsed = 0, .mempool_id = 0, .burst = 32, - .file_name = NULL, + .file_name = "./config/packets.pcap", .n_bytes_per_pkt = 0, }; @@ -852,6 +861,9 @@ parse_pipeline_pktq_in(struct app_params *app, type = APP_PKTQ_IN_TM; id = APP_PARAM_ADD(app->tm_params, name); APP_PARAM_ADD_LINK_FOR_TM(app, name); + } else if (validate_name(name, "TAP", 1) == 0) { + type = APP_PKTQ_IN_TAP; + id = APP_PARAM_ADD(app->tap_params, name); } else if (validate_name(name, "KNI", 1) == 0) { type = APP_PKTQ_IN_KNI; id = APP_PARAM_ADD(app->kni_params, name); @@ -901,6 +913,9 @@ parse_pipeline_pktq_out(struct app_params *app, type = APP_PKTQ_OUT_TM; id = APP_PARAM_ADD(app->tm_params, name); APP_PARAM_ADD_LINK_FOR_TM(app, name); + } else if (validate_name(name, "TAP", 1) == 0) { + type = APP_PKTQ_OUT_TAP; + id = APP_PARAM_ADD(app->tap_params, name); } else if (validate_name(name, "KNI", 1) == 0) { type = APP_PKTQ_OUT_KNI; id = APP_PARAM_ADD(app->kni_params, name); @@ -1896,6 +1911,88 @@ parse_tm(struct app_params *app, } static void +parse_tap(struct app_params *app, + const char *section_name, + struct rte_cfgfile *cfg) +{ + struct app_pktq_tap_params *param; + struct rte_cfgfile_entry *entries; + int n_entries, i; + ssize_t param_idx; + + n_entries = rte_cfgfile_section_num_entries(cfg, section_name); + PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name); + + entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry)); + PARSE_ERROR_MALLOC(entries != NULL); + + rte_cfgfile_section_entries(cfg, section_name, entries, n_entries); + + param_idx = APP_PARAM_ADD(app->tap_params, section_name); + param = &app->tap_params[param_idx]; + PARSE_CHECK_DUPLICATE_SECTION(param); + + for (i = 0; i < n_entries; i++) { + struct rte_cfgfile_entry *ent = &entries[i]; + + if (strcmp(ent->name, "burst_read") == 0) { + int status = parser_read_uint32( + ¶m->burst_read, ent->value); + + PARSE_ERROR((status == 0), section_name, + ent->name); + continue; + } + + if (strcmp(ent->name, "burst_write") == 0) { + int status = parser_read_uint32( + ¶m->burst_write, ent->value); + + PARSE_ERROR((status == 0), section_name, + ent->name); + continue; + } + + if (strcmp(ent->name, "dropless") == 0) { + int status = parser_read_arg_bool(ent->value); + + PARSE_ERROR((status != -EINVAL), section_name, + ent->name); + param->dropless = status; + continue; + } + + if (strcmp(ent->name, "n_retries") == 0) { + int status = parser_read_uint64(¶m->n_retries, + ent->value); + + PARSE_ERROR((status == 0), section_name, + ent->name); + continue; + } + + if (strcmp(ent->name, "mempool") == 0) { + int status = validate_name(ent->value, + "MEMPOOL", 1); + ssize_t idx; + + PARSE_ERROR((status == 0), section_name, + ent->name); + + idx = APP_PARAM_ADD(app->mempool_params, ent->value); + param->mempool_id = idx; + + continue; + } + + /* unrecognized */ + PARSE_ERROR_INVALID(0, section_name, ent->name); + } + + 
free(entries); +} + +static void parse_kni(struct app_params *app, const char *section_name, struct rte_cfgfile *cfg) @@ -2286,6 +2383,7 @@ static const struct config_section cfg_file_scheme[] = { {"TXQ", 2, parse_txq}, {"SWQ", 1, parse_swq}, {"TM", 1, parse_tm}, + {"TAP", 1, parse_tap}, {"KNI", 1, parse_kni}, {"SOURCE", 1, parse_source}, {"SINK", 1, parse_sink}, @@ -2425,6 +2523,7 @@ app_config_parse(struct app_params *app, const char *file_name) APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out); APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq); APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm); + APP_PARAM_COUNT(app->tap_params, app->n_pktq_tap); APP_PARAM_COUNT(app->kni_params, app->n_pktq_kni); APP_PARAM_COUNT(app->source_params, app->n_pktq_source); APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink); @@ -2789,6 +2888,30 @@ save_tm_params(struct app_params *app, FILE *f) } static void +save_tap_params(struct app_params *app, FILE *f) +{ + struct app_pktq_tap_params *p; + size_t i, count; + + count = RTE_DIM(app->tap_params); + for (i = 0; i < count; i++) { + p = &app->tap_params[i]; + if (!APP_PARAM_VALID(p)) + continue; + + fprintf(f, "[%s]\n", p->name); + fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read); + fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write); + fprintf(f, "%s = %s\n", "dropless", p->dropless ? "yes" : "no"); + fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries); + fprintf(f, "%s = %s\n", "mempool", + app->mempool_params[p->mempool_id].name); + + fputc('\n', f); + } +} + +static void save_kni_params(struct app_params *app, FILE *f) { struct app_pktq_kni_params *p; @@ -2942,6 +3065,9 @@ save_pipeline_params(struct app_params *app, FILE *f) case APP_PKTQ_IN_TM: name = app->tm_params[pp->id].name; break; + case APP_PKTQ_IN_TAP: + name = app->tap_params[pp->id].name; + break; case APP_PKTQ_IN_KNI: name = app->kni_params[pp->id].name; break; @@ -2979,6 +3105,9 @@ save_pipeline_params(struct app_params *app, FILE *f) case APP_PKTQ_OUT_TM: name = app->tm_params[pp->id].name; break; + case APP_PKTQ_OUT_TAP: + name = app->tap_params[pp->id].name; + break; case APP_PKTQ_OUT_KNI: name = app->kni_params[pp->id].name; break; @@ -3067,6 +3196,7 @@ app_config_save(struct app_params *app, const char *file_name) save_txq_params(app, file); save_swq_params(app, file); save_tm_params(app, file); + save_tap_params(app, file); save_kni_params(app, file); save_source_params(app, file); save_sink_params(app, file); @@ -3083,10 +3213,6 @@ app_config_init(struct app_params *app) memcpy(app, &app_params_default, sizeof(struct app_params)); - /* configure default_source_params */ - default_source_params.file_name = strdup("./config/packets.pcap"); - PARSE_ERROR_MALLOC(default_source_params.file_name != NULL); - for (i = 0; i < RTE_DIM(app->mempool_params); i++) memcpy(&app->mempool_params[i], &mempool_params_default, @@ -3117,6 +3243,11 @@ app_config_init(struct app_params *app) &default_tm_params, sizeof(default_tm_params)); + for (i = 0; i < RTE_DIM(app->tap_params); i++) + memcpy(&app->tap_params[i], + &default_tap_params, + sizeof(default_tap_params)); + for (i = 0; i < RTE_DIM(app->kni_params); i++) memcpy(&app->kni_params[i], &default_kni_params, diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c index 891b3274..3b36b53a 100644 --- a/examples/ip_pipeline/init.c +++ b/examples/ip_pipeline/init.c @@ -34,6 +34,14 @@ #include <inttypes.h> #include <stdio.h> #include <string.h> +#include <netinet/in.h> +#ifdef RTE_EXEC_ENV_LINUXAPP 
+#include <linux/if.h> +#include <linux/if_tun.h> +#endif +#include <fcntl.h> +#include <sys/ioctl.h> +#include <unistd.h> #include <rte_cycles.h> #include <rte_ethdev.h> @@ -606,29 +614,12 @@ app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp) } } -static int -app_link_is_virtual(struct app_link_params *p) -{ - uint32_t pmd_id = p->pmd_id; - struct rte_eth_dev *dev = &rte_eth_devices[pmd_id]; - - if (dev->dev_type == RTE_ETH_DEV_VIRTUAL) - return 1; - - return 0; -} - void app_link_up_internal(struct app_params *app, struct app_link_params *cp) { uint32_t i; int status; - if (app_link_is_virtual(cp)) { - cp->state = 1; - return; - } - /* For each link, add filters for IP of current link */ if (cp->ip != 0) { for (i = 0; i < app->n_links; i++) { @@ -736,11 +727,6 @@ app_link_down_internal(struct app_params *app, struct app_link_params *cp) uint32_t i; int status; - if (app_link_is_virtual(cp)) { - cp->state = 0; - return; - } - /* PMD link down */ status = rte_eth_dev_set_link_down(cp->pmd_id); if (status < 0) @@ -1176,6 +1162,44 @@ app_init_tm(struct app_params *app) } } +#ifndef RTE_EXEC_ENV_LINUXAPP +static void +app_init_tap(struct app_params *app) { + if (app->n_pktq_tap == 0) + return; + + rte_panic("TAP device not supported.\n"); +} +#else +static void +app_init_tap(struct app_params *app) +{ + uint32_t i; + + for (i = 0; i < app->n_pktq_tap; i++) { + struct app_pktq_tap_params *p_tap = &app->tap_params[i]; + struct ifreq ifr; + int fd, status; + + APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name); + + fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK); + if (fd < 0) + rte_panic("Cannot open file /dev/net/tun\n"); + + memset(&ifr, 0, sizeof(ifr)); + ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */ + snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name); + + status = ioctl(fd, TUNSETIFF, (void *) &ifr); + if (status < 0) + rte_panic("TAP setup error\n"); + + app->tap[i] = fd; + } +} +#endif + #ifdef RTE_LIBRTE_KNI static int kni_config_network_interface(uint8_t port_id, uint8_t if_up) { @@ -1392,6 +1416,24 @@ void app_pipeline_params_get(struct app_params *app, out->burst_size = app->tm_params[in->id].burst_read; break; } +#ifdef RTE_EXEC_ENV_LINUXAPP + case APP_PKTQ_IN_TAP: + { + struct app_pktq_tap_params *tap_params = + &app->tap_params[in->id]; + struct app_mempool_params *mempool_params = + &app->mempool_params[tap_params->mempool_id]; + struct rte_mempool *mempool = + app->mempool[tap_params->mempool_id]; + + out->type = PIPELINE_PORT_IN_FD_READER; + out->params.fd.fd = app->tap[in->id]; + out->params.fd.mtu = mempool_params->buffer_size; + out->params.fd.mempool = mempool; + out->burst_size = app->tap_params[in->id].burst_read; + break; + } +#endif #ifdef RTE_LIBRTE_KNI case APP_PKTQ_IN_KNI: { @@ -1536,6 +1578,19 @@ void app_pipeline_params_get(struct app_params *app, app->tm_params[in->id].burst_write; break; } +#ifdef RTE_EXEC_ENV_LINUXAPP + case APP_PKTQ_OUT_TAP: + { + struct rte_port_fd_writer_params *params = + &out->params.fd; + + out->type = PIPELINE_PORT_OUT_FD_WRITER; + params->fd = app->tap[in->id]; + params->tx_burst_sz = + app->tap_params[in->id].burst_write; + break; + } +#endif #ifdef RTE_LIBRTE_KNI case APP_PKTQ_OUT_KNI: { @@ -1752,6 +1807,7 @@ int app_init(struct app_params *app) app_init_link(app); app_init_swq(app); app_init_tm(app); + app_init_tap(app); app_init_kni(app); app_init_msgq(app); diff --git a/examples/ip_pipeline/pipeline/pipeline_common_be.c b/examples/ip_pipeline/pipeline/pipeline_common_be.c index 
50dcb694..347e72b5 100644 --- a/examples/ip_pipeline/pipeline/pipeline_common_be.c +++ b/examples/ip_pipeline/pipeline/pipeline_common_be.c @@ -32,7 +32,6 @@ */ #include <rte_common.h> -#include <rte_ring.h> #include <rte_malloc.h> #include "pipeline_common_be.h" diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.c b/examples/ip_pipeline/pipeline/pipeline_common_fe.c index cd1d082a..75211878 100644 --- a/examples/ip_pipeline/pipeline/pipeline_common_fe.c +++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.c @@ -36,7 +36,6 @@ #include <unistd.h> #include <rte_common.h> -#include <rte_ring.h> #include <rte_malloc.h> #include <cmdline_rdline.h> #include <cmdline_parse.h> @@ -157,6 +156,7 @@ app_pipeline_track_pktq_out_to_link(struct app_params *app, break; } + case APP_PKTQ_OUT_TAP: case APP_PKTQ_OUT_SINK: default: return NULL; diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough.c b/examples/ip_pipeline/pipeline/pipeline_passthrough.c index 63ce1472..2c9eb2e3 100644 --- a/examples/ip_pipeline/pipeline/pipeline_passthrough.c +++ b/examples/ip_pipeline/pipeline/pipeline_passthrough.c @@ -52,7 +52,7 @@ app_pipeline_passthrough_track(struct pipeline_params *p, if (status) return -1; - if (pp.lb_hash_enabled) + if (pp.dma_hash_lb_enabled) return -1; *port_out = port_in / (p->n_ports_in / p->n_ports_out); diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c index 356f02d0..8b71a7d4 100644 --- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c +++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c @@ -31,6 +31,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include <stdio.h> #include <string.h> #include <rte_common.h> @@ -45,10 +46,17 @@ #include "parser.h" #include "hash_func.h" +#define SWAP_DIM (PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX * \ + (PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX / sizeof(uint64_t))) + struct pipeline_passthrough { struct pipeline p; struct pipeline_passthrough_params params; rte_table_hash_op_hash f_hash; + uint32_t swap_field0_offset[SWAP_DIM]; + uint32_t swap_field1_offset[SWAP_DIM]; + uint64_t swap_field_mask[SWAP_DIM]; + uint32_t swap_n_fields; } __rte_cache_aligned; static pipeline_msg_req_handler handlers[] = { @@ -69,7 +77,7 @@ static pipeline_msg_req_handler handlers[] = { }; static inline __attribute__((always_inline)) void -pkt_work( +pkt_work_dma( struct rte_mbuf *pkt, void *arg, uint32_t dma_size, @@ -114,7 +122,7 @@ pkt_work( } static inline __attribute__((always_inline)) void -pkt4_work( +pkt4_work_dma( struct rte_mbuf **pkts, void *arg, uint32_t dma_size, @@ -209,148 +217,231 @@ pkt4_work( } } -#define PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \ +static inline __attribute__((always_inline)) void +pkt_work_swap( + struct rte_mbuf *pkt, + void *arg) +{ + struct pipeline_passthrough *p = arg; + uint32_t i; + + /* Read(field0, field1), compute(field0, field1), write(field0, field1) */ + for (i = 0; i < p->swap_n_fields; i++) { + uint64_t *field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, + p->swap_field0_offset[i]); + uint64_t *field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, + p->swap_field1_offset[i]); + uint64_t mask = p->swap_field_mask[i]; + + uint64_t field0 = *field0_ptr; + uint64_t field1 = *field1_ptr; + + *field0_ptr = (field0 & (~mask)) + (field1 & mask); + *field1_ptr = (field0 & mask) + (field1 & (~mask)); + } +} + +static inline __attribute__((always_inline)) void +pkt4_work_swap( + struct rte_mbuf 
**pkts, + void *arg) +{ + struct pipeline_passthrough *p = arg; + uint32_t i; + + /* Read(field0, field1), compute(field0, field1), write(field0, field1) */ + for (i = 0; i < p->swap_n_fields; i++) { + uint64_t *pkt0_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[0], + p->swap_field0_offset[i]); + uint64_t *pkt1_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[1], + p->swap_field0_offset[i]); + uint64_t *pkt2_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[2], + p->swap_field0_offset[i]); + uint64_t *pkt3_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[3], + p->swap_field0_offset[i]); + + uint64_t *pkt0_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[0], + p->swap_field1_offset[i]); + uint64_t *pkt1_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[1], + p->swap_field1_offset[i]); + uint64_t *pkt2_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[2], + p->swap_field1_offset[i]); + uint64_t *pkt3_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[3], + p->swap_field1_offset[i]); + + uint64_t mask = p->swap_field_mask[i]; + + uint64_t pkt0_field0 = *pkt0_field0_ptr; + uint64_t pkt1_field0 = *pkt1_field0_ptr; + uint64_t pkt2_field0 = *pkt2_field0_ptr; + uint64_t pkt3_field0 = *pkt3_field0_ptr; + + uint64_t pkt0_field1 = *pkt0_field1_ptr; + uint64_t pkt1_field1 = *pkt1_field1_ptr; + uint64_t pkt2_field1 = *pkt2_field1_ptr; + uint64_t pkt3_field1 = *pkt3_field1_ptr; + + *pkt0_field0_ptr = (pkt0_field0 & (~mask)) + (pkt0_field1 & mask); + *pkt1_field0_ptr = (pkt1_field0 & (~mask)) + (pkt1_field1 & mask); + *pkt2_field0_ptr = (pkt2_field0 & (~mask)) + (pkt2_field1 & mask); + *pkt3_field0_ptr = (pkt3_field0 & (~mask)) + (pkt3_field1 & mask); + + *pkt0_field1_ptr = (pkt0_field0 & mask) + (pkt0_field1 & (~mask)); + *pkt1_field1_ptr = (pkt1_field0 & mask) + (pkt1_field1 & (~mask)); + *pkt2_field1_ptr = (pkt2_field0 & mask) + (pkt2_field1 & (~mask)); + *pkt3_field1_ptr = (pkt3_field0 & mask) + (pkt3_field1 & (~mask)); + } +} + +#define PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \ static inline void \ -pkt_work_size##dma_size##_hash##hash_enabled \ +pkt_work_dma_size##dma_size##_hash##hash_enabled \ ##_lb##lb_hash##_pw##port_pow2( \ struct rte_mbuf *pkt, \ void *arg) \ { \ - pkt_work(pkt, arg, dma_size, hash_enabled, lb_hash, port_pow2); \ + pkt_work_dma(pkt, arg, dma_size, hash_enabled, lb_hash, port_pow2); \ } -#define PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \ +#define PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \ static inline void \ -pkt4_work_size##dma_size##_hash##hash_enabled \ +pkt4_work_dma_size##dma_size##_hash##hash_enabled \ ##_lb##lb_hash##_pw##port_pow2( \ struct rte_mbuf **pkts, \ void *arg) \ { \ - pkt4_work(pkts, arg, dma_size, hash_enabled, lb_hash, port_pow2); \ + pkt4_work_dma(pkts, arg, dma_size, hash_enabled, lb_hash, port_pow2); \ } -#define port_in_ah(dma_size, hash_enabled, lb_hash, port_pow2) \ -PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \ -PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \ -PIPELINE_PORT_IN_AH(port_in_ah_size##dma_size##_hash \ +#define port_in_ah_dma(dma_size, hash_enabled, lb_hash, port_pow2) \ +PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \ +PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \ +PIPELINE_PORT_IN_AH(port_in_ah_dma_size##dma_size##_hash \ ##hash_enabled##_lb##lb_hash##_pw##port_pow2, \ - pkt_work_size##dma_size##_hash##hash_enabled \ + pkt_work_dma_size##dma_size##_hash##hash_enabled \ ##_lb##lb_hash##_pw##port_pow2, \ - pkt4_work_size##dma_size##_hash##hash_enabled \ + 
pkt4_work_dma_size##dma_size##_hash##hash_enabled \ ##_lb##lb_hash##_pw##port_pow2) #define port_in_ah_lb(dma_size, hash_enabled, lb_hash, port_pow2) \ -PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \ -PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \ +PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \ +PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \ PIPELINE_PORT_IN_AH_HIJACK_ALL( \ - port_in_ah_size##dma_size##_hash##hash_enabled \ + port_in_ah_lb_size##dma_size##_hash##hash_enabled \ ##_lb##lb_hash##_pw##port_pow2, \ - pkt_work_size##dma_size##_hash##hash_enabled \ + pkt_work_dma_size##dma_size##_hash##hash_enabled \ ##_lb##lb_hash##_pw##port_pow2, \ - pkt4_work_size##dma_size##_hash##hash_enabled \ + pkt4_work_dma_size##dma_size##_hash##hash_enabled \ ##_lb##lb_hash##_pw##port_pow2) -/* Port in AH (dma_size, hash_enabled, lb_hash, port_pow2) */ +PIPELINE_PORT_IN_AH(port_in_ah_swap, pkt_work_swap, pkt4_work_swap) + + +/* Port in AH DMA(dma_size, hash_enabled, lb_hash, port_pow2) */ -port_in_ah(8, 0, 0, 0) -port_in_ah(8, 1, 0, 0) +port_in_ah_dma(8, 0, 0, 0) +port_in_ah_dma(8, 1, 0, 0) port_in_ah_lb(8, 1, 1, 0) port_in_ah_lb(8, 1, 1, 1) -port_in_ah(16, 0, 0, 0) -port_in_ah(16, 1, 0, 0) +port_in_ah_dma(16, 0, 0, 0) +port_in_ah_dma(16, 1, 0, 0) port_in_ah_lb(16, 1, 1, 0) port_in_ah_lb(16, 1, 1, 1) -port_in_ah(24, 0, 0, 0) -port_in_ah(24, 1, 0, 0) +port_in_ah_dma(24, 0, 0, 0) +port_in_ah_dma(24, 1, 0, 0) port_in_ah_lb(24, 1, 1, 0) port_in_ah_lb(24, 1, 1, 1) -port_in_ah(32, 0, 0, 0) -port_in_ah(32, 1, 0, 0) +port_in_ah_dma(32, 0, 0, 0) +port_in_ah_dma(32, 1, 0, 0) port_in_ah_lb(32, 1, 1, 0) port_in_ah_lb(32, 1, 1, 1) -port_in_ah(40, 0, 0, 0) -port_in_ah(40, 1, 0, 0) +port_in_ah_dma(40, 0, 0, 0) +port_in_ah_dma(40, 1, 0, 0) port_in_ah_lb(40, 1, 1, 0) port_in_ah_lb(40, 1, 1, 1) -port_in_ah(48, 0, 0, 0) -port_in_ah(48, 1, 0, 0) +port_in_ah_dma(48, 0, 0, 0) +port_in_ah_dma(48, 1, 0, 0) port_in_ah_lb(48, 1, 1, 0) port_in_ah_lb(48, 1, 1, 1) -port_in_ah(56, 0, 0, 0) -port_in_ah(56, 1, 0, 0) +port_in_ah_dma(56, 0, 0, 0) +port_in_ah_dma(56, 1, 0, 0) port_in_ah_lb(56, 1, 1, 0) port_in_ah_lb(56, 1, 1, 1) -port_in_ah(64, 0, 0, 0) -port_in_ah(64, 1, 0, 0) +port_in_ah_dma(64, 0, 0, 0) +port_in_ah_dma(64, 1, 0, 0) port_in_ah_lb(64, 1, 1, 0) port_in_ah_lb(64, 1, 1, 1) static rte_pipeline_port_in_action_handler get_port_in_ah(struct pipeline_passthrough *p) { - if (p->params.dma_enabled == 0) + if ((p->params.dma_enabled == 0) && + (p->params.swap_enabled == 0)) return NULL; + if (p->params.swap_enabled) + return port_in_ah_swap; + if (p->params.dma_hash_enabled) { - if (p->params.lb_hash_enabled) { + if (p->params.dma_hash_lb_enabled) { if (rte_is_power_of_2(p->p.n_ports_out)) switch (p->params.dma_size) { - case 8: return port_in_ah_size8_hash1_lb1_pw1; - case 16: return port_in_ah_size16_hash1_lb1_pw1; - case 24: return port_in_ah_size24_hash1_lb1_pw1; - case 32: return port_in_ah_size32_hash1_lb1_pw1; - case 40: return port_in_ah_size40_hash1_lb1_pw1; - case 48: return port_in_ah_size48_hash1_lb1_pw1; - case 56: return port_in_ah_size56_hash1_lb1_pw1; - case 64: return port_in_ah_size64_hash1_lb1_pw1; + case 8: return port_in_ah_lb_size8_hash1_lb1_pw1; + case 16: return port_in_ah_lb_size16_hash1_lb1_pw1; + case 24: return port_in_ah_lb_size24_hash1_lb1_pw1; + case 32: return port_in_ah_lb_size32_hash1_lb1_pw1; + case 40: return port_in_ah_lb_size40_hash1_lb1_pw1; + case 48: return port_in_ah_lb_size48_hash1_lb1_pw1; + case 56: return port_in_ah_lb_size56_hash1_lb1_pw1; + 
case 64: return port_in_ah_lb_size64_hash1_lb1_pw1; default: return NULL; } else switch (p->params.dma_size) { - case 8: return port_in_ah_size8_hash1_lb1_pw0; - case 16: return port_in_ah_size16_hash1_lb1_pw0; - case 24: return port_in_ah_size24_hash1_lb1_pw0; - case 32: return port_in_ah_size32_hash1_lb1_pw0; - case 40: return port_in_ah_size40_hash1_lb1_pw0; - case 48: return port_in_ah_size48_hash1_lb1_pw0; - case 56: return port_in_ah_size56_hash1_lb1_pw0; - case 64: return port_in_ah_size64_hash1_lb1_pw0; + case 8: return port_in_ah_lb_size8_hash1_lb1_pw0; + case 16: return port_in_ah_lb_size16_hash1_lb1_pw0; + case 24: return port_in_ah_lb_size24_hash1_lb1_pw0; + case 32: return port_in_ah_lb_size32_hash1_lb1_pw0; + case 40: return port_in_ah_lb_size40_hash1_lb1_pw0; + case 48: return port_in_ah_lb_size48_hash1_lb1_pw0; + case 56: return port_in_ah_lb_size56_hash1_lb1_pw0; + case 64: return port_in_ah_lb_size64_hash1_lb1_pw0; default: return NULL; } } else switch (p->params.dma_size) { - case 8: return port_in_ah_size8_hash1_lb0_pw0; - case 16: return port_in_ah_size16_hash1_lb0_pw0; - case 24: return port_in_ah_size24_hash1_lb0_pw0; - case 32: return port_in_ah_size32_hash1_lb0_pw0; - case 40: return port_in_ah_size40_hash1_lb0_pw0; - case 48: return port_in_ah_size48_hash1_lb0_pw0; - case 56: return port_in_ah_size56_hash1_lb0_pw0; - case 64: return port_in_ah_size64_hash1_lb0_pw0; + case 8: return port_in_ah_dma_size8_hash1_lb0_pw0; + case 16: return port_in_ah_dma_size16_hash1_lb0_pw0; + case 24: return port_in_ah_dma_size24_hash1_lb0_pw0; + case 32: return port_in_ah_dma_size32_hash1_lb0_pw0; + case 40: return port_in_ah_dma_size40_hash1_lb0_pw0; + case 48: return port_in_ah_dma_size48_hash1_lb0_pw0; + case 56: return port_in_ah_dma_size56_hash1_lb0_pw0; + case 64: return port_in_ah_dma_size64_hash1_lb0_pw0; default: return NULL; } } else switch (p->params.dma_size) { - case 8: return port_in_ah_size8_hash0_lb0_pw0; - case 16: return port_in_ah_size16_hash0_lb0_pw0; - case 24: return port_in_ah_size24_hash0_lb0_pw0; - case 32: return port_in_ah_size32_hash0_lb0_pw0; - case 40: return port_in_ah_size40_hash0_lb0_pw0; - case 48: return port_in_ah_size48_hash0_lb0_pw0; - case 56: return port_in_ah_size56_hash0_lb0_pw0; - case 64: return port_in_ah_size64_hash0_lb0_pw0; + case 8: return port_in_ah_dma_size8_hash0_lb0_pw0; + case 16: return port_in_ah_dma_size16_hash0_lb0_pw0; + case 24: return port_in_ah_dma_size24_hash0_lb0_pw0; + case 32: return port_in_ah_dma_size32_hash0_lb0_pw0; + case 40: return port_in_ah_dma_size40_hash0_lb0_pw0; + case 48: return port_in_ah_dma_size48_hash0_lb0_pw0; + case 56: return port_in_ah_dma_size56_hash0_lb0_pw0; + case 64: return port_in_ah_dma_size64_hash0_lb0_pw0; default: return NULL; } } @@ -362,17 +453,19 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p, uint32_t dma_dst_offset_present = 0; uint32_t dma_src_offset_present = 0; uint32_t dma_src_mask_present = 0; + char dma_mask_str[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2 + 1]; uint32_t dma_size_present = 0; uint32_t dma_hash_offset_present = 0; - uint32_t lb_present = 0; + uint32_t dma_hash_lb_present = 0; uint32_t i; - char dma_mask_str[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2 + 1]; /* default values */ p->dma_enabled = 0; p->dma_hash_enabled = 0; - p->lb_hash_enabled = 0; + p->dma_hash_lb_enabled = 0; memset(p->dma_src_mask, 0xFF, sizeof(p->dma_src_mask)); + p->swap_enabled = 0; + p->swap_n_fields = 0; for (i = 0; i < params->n_args; i++) { char *arg_name = 
params->args_name[i]; @@ -485,7 +578,6 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p, params->name, arg_name, arg_value); p->dma_hash_enabled = 1; - p->dma_enabled = 1; continue; } @@ -493,19 +585,39 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p, /* load_balance mode */ if (strcmp(arg_name, "lb") == 0) { PIPELINE_PARSE_ERR_DUPLICATE( - lb_present == 0, + dma_hash_lb_present == 0, params->name, arg_name); - lb_present = 1; + dma_hash_lb_present = 1; + + if (strcmp(arg_value, "hash") || + strcmp(arg_value, "HASH")) - if ((strcmp(arg_value, "hash") == 0) || - (strcmp(arg_value, "HASH") == 0)) - p->lb_hash_enabled = 1; - else PIPELINE_PARSE_ERR_INV_VAL(0, params->name, arg_name, arg_value); + p->dma_hash_lb_enabled = 1; + + continue; + } + + /* swap */ + if (strcmp(arg_name, "swap") == 0) { + uint32_t a, b, n_args; + int len; + + n_args = sscanf(arg_value, "%" SCNu32 " %" SCNu32 "%n", + &a, &b, &len); + PIPELINE_PARSE_ERR_INV_VAL(((n_args == 2) && + ((size_t) len == strlen(arg_value))), + params->name, arg_name, arg_value); + + p->swap_field0_offset[p->swap_n_fields] = a; + p->swap_field1_offset[p->swap_n_fields] = b; + p->swap_n_fields++; + p->swap_enabled = 1; + continue; } @@ -514,6 +626,9 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p, } /* Check correlations between arguments */ + PIPELINE_ARG_CHECK((p->dma_enabled + p->swap_enabled < 2), + "Parse error in section \"%s\": DMA and SWAP actions are both enabled", + params->name); PIPELINE_ARG_CHECK((dma_dst_offset_present == p->dma_enabled), "Parse error in section \"%s\": missing entry " "\"dma_dst_offset\"", params->name); @@ -523,12 +638,12 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p, PIPELINE_ARG_CHECK((dma_size_present == p->dma_enabled), "Parse error in section \"%s\": missing entry " "\"dma_size\"", params->name); - PIPELINE_ARG_CHECK((dma_hash_offset_present == p->dma_enabled), - "Parse error in section \"%s\": missing entry " - "\"dma_hash_offset\"", params->name); - PIPELINE_ARG_CHECK((p->lb_hash_enabled <= p->dma_hash_enabled), - "Parse error in section \"%s\": missing entry " - "\"dma_hash_offset\"", params->name); + PIPELINE_ARG_CHECK((p->dma_hash_enabled <= p->dma_enabled), + "Parse error in section \"%s\": missing all DMA entries", + params->name); + PIPELINE_ARG_CHECK((p->dma_hash_lb_enabled <= p->dma_hash_enabled), + "Parse error in section \"%s\": missing all DMA hash entries ", + params->name); if (dma_src_mask_present) { uint32_t dma_size = p->dma_size; @@ -547,7 +662,7 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p, "dma_src_mask", dma_mask_str); } - if (p->lb_hash_enabled) + if (p->dma_hash_lb_enabled) PIPELINE_ARG_CHECK((params->n_ports_out > 1), "Parse error in section \"%s\": entry \"lb\" not " "allowed for single output port pipeline", @@ -562,7 +677,6 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p, return 0; } - static rte_table_hash_op_hash get_hash_function(struct pipeline_passthrough *p) { @@ -580,6 +694,47 @@ get_hash_function(struct pipeline_passthrough *p) } } +static int +pipeline_passthrough_swap_convert(struct pipeline_passthrough *p) +{ + uint32_t i; + + p->swap_n_fields = 0; + + for (i = 0; i < p->params.swap_n_fields; i++) { + uint32_t offset0 = p->params.swap_field0_offset[i]; + uint32_t offset1 = p->params.swap_field1_offset[i]; + uint32_t size = offset1 - offset0; + uint32_t j; + + /* Check */ + if ((offset0 >= offset1) || + (size > 
PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX) || + (p->swap_n_fields >= SWAP_DIM)) + return -1; + + for (j = 0; j < (size / sizeof(uint64_t)); j++) { + p->swap_field0_offset[p->swap_n_fields] = offset0; + p->swap_field1_offset[p->swap_n_fields] = offset1; + p->swap_field_mask[p->swap_n_fields] = UINT64_MAX; + p->swap_n_fields++; + offset0 += sizeof(uint64_t); + offset1 += sizeof(uint64_t); + } + if (size % sizeof(uint64_t)) { + uint32_t n_bits = (size % sizeof(uint64_t)) * 8; + + p->swap_field0_offset[p->swap_n_fields] = offset0; + p->swap_field1_offset[p->swap_n_fields] = offset1; + p->swap_field_mask[p->swap_n_fields] = + RTE_LEN2MASK(n_bits, uint64_t); + p->swap_n_fields++; + } + } + + return 0; +} + static void* pipeline_passthrough_init(struct pipeline_params *params, __rte_unused void *arg) @@ -609,6 +764,8 @@ pipeline_passthrough_init(struct pipeline_params *params, /* Parse arguments */ if (pipeline_passthrough_parse_args(&p_pt->params, params)) return NULL; + if (pipeline_passthrough_swap_convert(p_pt)) + return NULL; p_pt->f_hash = get_hash_function(p_pt); /* Pipeline */ @@ -712,7 +869,7 @@ pipeline_passthrough_init(struct pipeline_params *params, /* Add entries to tables */ for (i = 0; i < p->n_ports_in; i++) { - uint32_t port_out_id = (p_pt->params.lb_hash_enabled == 0) ? + uint32_t port_out_id = (p_pt->params.dma_hash_lb_enabled == 0) ? (i / (p->n_ports_in / p->n_ports_out)) : 0; diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h index 9368cec7..decb2684 100644 --- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h +++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h @@ -38,6 +38,14 @@ #define PIPELINE_PASSTHROUGH_DMA_SIZE_MAX 64 +#ifndef PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX +#define PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX 8 +#endif + +#ifndef PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX +#define PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX 16 +#endif + struct pipeline_passthrough_params { uint32_t dma_enabled; uint32_t dma_dst_offset; @@ -47,7 +55,13 @@ struct pipeline_passthrough_params { uint32_t dma_hash_enabled; uint32_t dma_hash_offset; - uint32_t lb_hash_enabled; + + uint32_t dma_hash_lb_enabled; + + uint32_t swap_enabled; + uint32_t swap_field0_offset[PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX]; + uint32_t swap_field1_offset[PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX]; + uint32_t swap_n_fields; }; int diff --git a/examples/ip_pipeline/pipeline_be.h b/examples/ip_pipeline/pipeline_be.h index b562472b..0cfcc809 100644 --- a/examples/ip_pipeline/pipeline_be.h +++ b/examples/ip_pipeline/pipeline_be.h @@ -39,6 +39,7 @@ #include <rte_port_frag.h> #include <rte_port_ras.h> #include <rte_port_sched.h> +#include <rte_port_fd.h> #include <rte_port_source_sink.h> #ifdef RTE_LIBRTE_KNI #include <rte_port_kni.h> @@ -52,6 +53,7 @@ enum pipeline_port_in_type { PIPELINE_PORT_IN_RING_READER_IPV4_FRAG, PIPELINE_PORT_IN_RING_READER_IPV6_FRAG, PIPELINE_PORT_IN_SCHED_READER, + PIPELINE_PORT_IN_FD_READER, PIPELINE_PORT_IN_KNI_READER, PIPELINE_PORT_IN_SOURCE, }; @@ -65,6 +67,7 @@ struct pipeline_port_in_params { struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag; struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag; struct rte_port_sched_reader_params sched; + struct rte_port_fd_reader_params fd; #ifdef RTE_LIBRTE_KNI struct rte_port_kni_reader_params kni; #endif @@ -89,6 +92,8 @@ pipeline_port_in_params_convert(struct pipeline_port_in_params *p) return (void *) &p->params.ring_ipv6_frag; case 
PIPELINE_PORT_IN_SCHED_READER: return (void *) &p->params.sched; + case PIPELINE_PORT_IN_FD_READER: + return (void *) &p->params.fd; #ifdef RTE_LIBRTE_KNI case PIPELINE_PORT_IN_KNI_READER: return (void *) &p->params.kni; @@ -116,6 +121,8 @@ pipeline_port_in_params_get_ops(struct pipeline_port_in_params *p) return &rte_port_ring_reader_ipv6_frag_ops; case PIPELINE_PORT_IN_SCHED_READER: return &rte_port_sched_reader_ops; + case PIPELINE_PORT_IN_FD_READER: + return &rte_port_fd_reader_ops; #ifdef RTE_LIBRTE_KNI case PIPELINE_PORT_IN_KNI_READER: return &rte_port_kni_reader_ops; @@ -137,6 +144,7 @@ enum pipeline_port_out_type { PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS, PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS, PIPELINE_PORT_OUT_SCHED_WRITER, + PIPELINE_PORT_OUT_FD_WRITER, PIPELINE_PORT_OUT_KNI_WRITER, PIPELINE_PORT_OUT_KNI_WRITER_NODROP, PIPELINE_PORT_OUT_SINK, @@ -154,6 +162,7 @@ struct pipeline_port_out_params { struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras; struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras; struct rte_port_sched_writer_params sched; + struct rte_port_fd_writer_params fd; #ifdef RTE_LIBRTE_KNI struct rte_port_kni_writer_params kni; struct rte_port_kni_writer_nodrop_params kni_nodrop; @@ -184,6 +193,8 @@ pipeline_port_out_params_convert(struct pipeline_port_out_params *p) return (void *) &p->params.ring_ipv6_ras; case PIPELINE_PORT_OUT_SCHED_WRITER: return (void *) &p->params.sched; + case PIPELINE_PORT_OUT_FD_WRITER: + return (void *) &p->params.fd; #ifdef RTE_LIBRTE_KNI case PIPELINE_PORT_OUT_KNI_WRITER: return (void *) &p->params.kni; @@ -219,6 +230,8 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p) return &rte_port_ring_writer_ipv6_ras_ops; case PIPELINE_PORT_OUT_SCHED_WRITER: return &rte_port_sched_writer_ops; + case PIPELINE_PORT_OUT_FD_WRITER: + return &rte_port_fd_writer_ops; #ifdef RTE_LIBRTE_KNI case PIPELINE_PORT_OUT_KNI_WRITER: return &rte_port_kni_writer_ops; diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c index ef09a2ed..50fe4228 100644 --- a/examples/ip_reassembly/main.c +++ b/examples/ip_reassembly/main.c @@ -65,7 +65,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_malloc.h> diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile index 06b6db1e..17e91551 100644 --- a/examples/ipsec-secgw/Makefile +++ b/examples/ipsec-secgw/Makefile @@ -53,6 +53,7 @@ endif # # all source are stored in SRCS-y # +SRCS-y += parser.c SRCS-y += ipsec.c SRCS-y += esp.c SRCS-y += sp4.c diff --git a/examples/ipsec-secgw/ep0.cfg b/examples/ipsec-secgw/ep0.cfg new file mode 100644 index 00000000..299aa9e0 --- /dev/null +++ b/examples/ipsec-secgw/ep0.cfg @@ -0,0 +1,160 @@ +########################################################################### +# IPSEC-SECGW Endpoint sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint 0 configuration. 
To use this configuration file, +# add the following command-line option: +# +# -f ./ep0.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 out esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 in esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 out esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 10 pri 1 dst 0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 in esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +#SA rules +sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 dst 
172.16.2.5 + +sa out 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa out 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa out 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa out 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa out 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa in 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa in 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa in 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa in 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +#Routing rules +rt ipv4 dst 172.16.2.5/32 port 0 +rt ipv4 dst 172.16.2.6/32 port 1 +rt ipv4 dst 192.168.175.0/24 port 0 +rt ipv4 dst 192.168.176.0/24 port 1 +rt ipv4 dst 192.168.240.0/24 port 0 +rt ipv4 dst 192.168.241.0/24 port 1 +rt ipv4 dst 192.168.115.0/24 port 2 +rt ipv4 dst 192.168.116.0/24 port 3 +rt ipv4 dst 192.168.65.0/24 port 2 +rt ipv4 dst 192.168.66.0/24 port 3 +rt ipv4 dst 192.168.185.0/24 port 2 +rt ipv4 dst 192.168.186.0/24 port 3 +rt 
ipv4 dst 192.168.210.0/24 port 2 +rt ipv4 dst 192.168.211.0/24 port 3 +rt ipv4 dst 192.168.245.0/24 port 2 +rt ipv4 dst 192.168.246.0/24 port 3 + +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:5555/116 port 0 +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:6666/116 port 1 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/examples/ipsec-secgw/ep1.cfg b/examples/ipsec-secgw/ep1.cfg new file mode 100644 index 00000000..3f6ff811 --- /dev/null +++ b/examples/ipsec-secgw/ep1.cfg @@ -0,0 +1,160 @@ +########################################################################### +# IPSEC-SECGW Endpoint1 sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint1 configuration. To use this configuration file, +# add the following command-line option: +# +# -f ./ep1.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 in esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 out esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 in esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 10 pri 1 dst 
0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 out esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +#SA rules +sa in 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 + +sa in 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa in 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa in 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa in 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa in 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa out 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa out 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ 
+b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa out 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa out 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +#Routing rules +rt ipv4 dst 172.16.1.5/32 port 0 +rt ipv4 dst 172.16.1.6/32 port 1 +rt ipv4 dst 192.168.185.0/24 port 0 +rt ipv4 dst 192.168.186.0/24 port 1 +rt ipv4 dst 192.168.245.0/24 port 0 +rt ipv4 dst 192.168.246.0/24 port 1 +rt ipv4 dst 192.168.105.0/24 port 2 +rt ipv4 dst 192.168.106.0/24 port 3 +rt ipv4 dst 192.168.55.0/24 port 2 +rt ipv4 dst 192.168.56.0/24 port 3 +rt ipv4 dst 192.168.175.0/24 port 2 +rt ipv4 dst 192.168.176.0/24 port 3 +rt ipv4 dst 192.168.200.0/24 port 2 +rt ipv4 dst 192.168.201.0/24 port 3 +rt ipv4 dst 192.168.240.0/24 port 2 +rt ipv4 dst 192.168.241.0/24 port 3 + +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:5555/116 port 0 +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:6666/116 port 1 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c index 05caa77a..ec5a2e62 100644 --- a/examples/ipsec-secgw/esp.c +++ b/examples/ipsec-secgw/esp.c @@ -50,21 +50,6 @@ #include "esp.h" #include "ipip.h" -static inline void -random_iv_u64(uint64_t *buf, uint16_t n) -{ - uint32_t left = n & 0x7; - uint32_t i; - - RTE_ASSERT((n & 0x3) == 0); - - for (i = 0; i < (n >> 3); i++) - buf[i] = rte_rand(); - - if (left) - *((uint32_t *)&buf[i]) = (uint32_t)lrand48(); -} - int esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, struct rte_crypto_op *cop) @@ -98,22 +83,62 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, return -EINVAL; } - sym_cop = (struct rte_crypto_sym_op *)(cop + 1); + sym_cop = get_sym_cop(cop); sym_cop->m_src = m; sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len; sym_cop->cipher.data.length = payload_len; - sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*, - ip_hdr_len + sizeof(struct esp_hdr)); - sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m, - ip_hdr_len + sizeof(struct esp_hdr)); - sym_cop->cipher.iv.length = sa->iv_len; + struct cnt_blk *icb; + uint8_t *aad; + uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr)); + + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + sym_cop->cipher.iv.data = iv; + sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m, + ip_hdr_len + sizeof(struct esp_hdr)); + 
sym_cop->cipher.iv.length = sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + case RTE_CRYPTO_CIPHER_AES_GCM: + icb = get_cnt_blk(m); + icb->salt = sa->salt; + memcpy(&icb->iv, iv, 8); + icb->cnt = rte_cpu_to_be_32(1); + sym_cop->cipher.iv.data = (uint8_t *)icb; + sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m, + (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->cipher.iv.length = 16; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } - sym_cop->auth.data.offset = ip_hdr_len; - sym_cop->auth.data.length = sizeof(struct esp_hdr) + - sa->iv_len + payload_len; + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case RTE_CRYPTO_AUTH_SHA1_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct esp_hdr) + + sa->iv_len + payload_len; + break; + case RTE_CRYPTO_AUTH_AES_GCM: + aad = get_aad(m); + memcpy(aad, iv - sizeof(struct esp_hdr), 8); + sym_cop->auth.aad.data = aad; + sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->auth.aad.length = 8; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*, rte_pktmbuf_pkt_len(m) - sa->digest_len); @@ -282,10 +307,32 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, sa->seq++; esp->spi = rte_cpu_to_be_32(sa->spi); - esp->seq = rte_cpu_to_be_32(sa->seq); + esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq); - if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC) - random_iv_u64((uint64_t *)(esp + 1), sa->iv_len); + uint64_t *iv = (uint64_t *)(esp + 1); + + sym_cop = get_sym_cop(cop); + sym_cop->m_src = m; + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + memset(iv, 0, sa->iv_len); + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct esp_hdr); + sym_cop->cipher.data.length = pad_payload_len + sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + case RTE_CRYPTO_CIPHER_AES_GCM: + *iv = sa->seq; + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct esp_hdr) + sa->iv_len; + sym_cop->cipher.data.length = pad_payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } /* Fill pad_len using default sequential scheme */ for (i = 0; i < pad_len - 2; i++) @@ -293,22 +340,37 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, padding[pad_len - 2] = pad_len - 2; padding[pad_len - 1] = nlp; - sym_cop = (struct rte_crypto_sym_op *)(cop + 1); - - sym_cop->m_src = m; - sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) + - sa->iv_len; - sym_cop->cipher.data.length = pad_payload_len; - - sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *, - ip_hdr_len + sizeof(struct esp_hdr)); + struct cnt_blk *icb = get_cnt_blk(m); + icb->salt = sa->salt; + icb->iv = sa->seq; + icb->cnt = rte_cpu_to_be_32(1); + sym_cop->cipher.iv.data = (uint8_t *)icb; sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m, - ip_hdr_len + sizeof(struct esp_hdr)); - sym_cop->cipher.iv.length = sa->iv_len; + (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->cipher.iv.length = 16; - sym_cop->auth.data.offset = ip_hdr_len; - sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len + - pad_payload_len; + uint8_t *aad; + + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case 
RTE_CRYPTO_AUTH_SHA1_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct esp_hdr) + + sa->iv_len + pad_payload_len; + break; + case RTE_CRYPTO_AUTH_AES_GCM: + aad = get_aad(m); + memcpy(aad, esp, 8); + sym_cop->auth.aad.data = aad; + sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->auth.aad.length = 8; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, rte_pktmbuf_pkt_len(m) - sa->digest_len); diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 266ae205..5a4c9b71 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -72,6 +72,7 @@ #include <rte_cryptodev.h> #include "ipsec.h" +#include "parser.h" #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 @@ -81,6 +82,7 @@ #define NB_MBUF (32000) +#define CDEV_QUEUE_DESC 2048 #define CDEV_MAP_ENTRIES 1024 #define CDEV_MP_NB_OBJS 2048 #define CDEV_MP_CACHE_SZ 64 @@ -88,8 +90,6 @@ #define OPTION_CONFIG "config" #define OPTION_SINGLE_SA "single-sa" -#define OPTION_EP0 "ep0" -#define OPTION_EP1 "ep1" #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ @@ -158,7 +158,6 @@ static uint32_t enabled_port_mask; static uint32_t unprotected_port_mask; static int32_t promiscuous_on = 1; static int32_t numa_on = 1; /**< NUMA is enabled by default. */ -static int32_t ep = -1; /**< Endpoint configuration (0 or 1) */ static uint32_t nb_lcores; static uint32_t single_sa; static uint32_t single_sa_idx; @@ -838,7 +837,7 @@ print_usage(const char *prgname) { printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK" " --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore]" - " --single-sa SAIDX --ep0|--ep1\n" + " --single-sa SAIDX -f CONFIG_FILE\n" " -p PORTMASK: hexadecimal bitmask of ports to configure\n" " -P : enable promiscuous mode\n" " -u PORTMASK: hexadecimal bitmask of unprotected ports\n" @@ -846,8 +845,8 @@ print_usage(const char *prgname) "rx queues configuration\n" " --single-sa SAIDX: use single SA index for outbound, " "bypassing the SP\n" - " --ep0: Configure as Endpoint 0\n" - " --ep1: Configure as Endpoint 1\n", prgname); + " -f CONFIG_FILE: Configuration file path\n", + prgname); } static int32_t @@ -960,18 +959,6 @@ parse_args_long_options(struct option *lgopts, int32_t option_index) } } - if (__STRNCMP(optname, OPTION_EP0)) { - printf("endpoint 0\n"); - ep = 0; - ret = 0; - } - - if (__STRNCMP(optname, OPTION_EP1)) { - printf("endpoint 1\n"); - ep = 1; - ret = 0; - } - return ret; } #undef __STRNCMP @@ -986,14 +973,13 @@ parse_args(int32_t argc, char **argv) static struct option lgopts[] = { {OPTION_CONFIG, 1, 0, 0}, {OPTION_SINGLE_SA, 1, 0, 0}, - {OPTION_EP0, 0, 0, 0}, - {OPTION_EP1, 0, 0, 0}, {NULL, 0, 0, 0} }; + int32_t f_present = 0; argvopt = argv; - while ((opt = getopt_long(argc, argvopt, "p:Pu:", + while ((opt = getopt_long(argc, argvopt, "p:Pu:f:", lgopts, &option_index)) != EOF) { switch (opt) { @@ -1017,6 +1003,21 @@ parse_args(int32_t argc, char **argv) return -1; } break; + case 'f': + if (f_present == 1) { + printf("\"-f\" option present more than " + "once!\n"); + print_usage(prgname); + return -1; + } + if (parse_cfg_file(optarg) < 0) { + printf("parsing file \"%s\" failed\n", + optarg); + print_usage(prgname); + return -1; + } + f_present = 1; + break; case 0: if (parse_args_long_options(lgopts, option_index)) { 
print_usage(prgname); @@ -1029,6 +1030,11 @@ parse_args(int32_t argc, char **argv) } } + if (f_present == 0) { + printf("Mandatory option \"-f\" not present\n"); + return -1; + } + if (optind >= 0) argv[optind-1] = prgname; @@ -1267,7 +1273,7 @@ cryptodevs_init(void) rte_panic("Failed to initialize crypodev %u\n", cdev_id); - qp_conf.nb_descriptors = CDEV_MP_NB_OBJS; + qp_conf.nb_descriptors = CDEV_QUEUE_DESC; for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) if (rte_cryptodev_queue_pair_setup(cdev_id, qp, &qp_conf, dev_conf.socket_id)) @@ -1411,9 +1417,6 @@ main(int32_t argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid parameters\n"); - if (ep < 0) - rte_exit(EXIT_FAILURE, "need to choose either EP0 or EP1\n"); - if ((unprotected_port_mask & enabled_port_mask) != unprotected_port_mask) rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n", @@ -1443,13 +1446,13 @@ main(int32_t argc, char **argv) if (socket_ctx[socket_id].mbuf_pool) continue; - sa_init(&socket_ctx[socket_id], socket_id, ep); + sa_init(&socket_ctx[socket_id], socket_id); - sp4_init(&socket_ctx[socket_id], socket_id, ep); + sp4_init(&socket_ctx[socket_id], socket_id); - sp6_init(&socket_ctx[socket_id], socket_id, ep); + sp6_init(&socket_ctx[socket_id], socket_id); - rt_init(&socket_ctx[socket_id], socket_id, ep); + rt_init(&socket_ctx[socket_id], socket_id); pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF); } diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index 1e87d0df..f49143b9 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -124,6 +124,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, priv->sa = sa; priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; rte_prefetch0(&priv->sym_cop); priv->cop.sym = &priv->sym_cop; diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index a442a74a..dbc8c2cb 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -90,11 +90,14 @@ struct ip_addr { } ip; }; +#define MAX_KEY_SIZE 20 + struct ipsec_sa { uint32_t spi; uint32_t cdev_id_qp; + uint64_t seq; + uint32_t salt; struct rte_cryptodev_sym_session *crypto_session; - uint32_t seq; enum rte_crypto_cipher_algorithm cipher_algo; enum rte_crypto_auth_algorithm auth_algo; uint16_t digest_len; @@ -106,14 +109,20 @@ struct ipsec_sa { #define TRANSPORT (1 << 2) struct ip_addr src; struct ip_addr dst; + uint8_t cipher_key[MAX_KEY_SIZE]; + uint16_t cipher_key_len; + uint8_t auth_key[MAX_KEY_SIZE]; + uint16_t auth_key_len; + uint16_t aad_len; struct rte_crypto_sym_xform *xforms; } __rte_cache_aligned; struct ipsec_mbuf_metadata { + uint8_t buf[32]; struct ipsec_sa *sa; struct rte_crypto_op cop; struct rte_crypto_sym_op sym_cop; -}; +} __rte_cache_aligned; struct cdev_qp { uint16_t id; @@ -151,6 +160,12 @@ struct socket_ctx { struct rte_mempool *mbuf_pool; }; +struct cnt_blk { + uint32_t salt; + uint64_t iv; + uint32_t cnt; +} __attribute__((packed)); + uint16_t ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t len); @@ -171,6 +186,28 @@ get_priv(struct rte_mbuf *m) return RTE_PTR_ADD(m, sizeof(struct rte_mbuf)); } +static inline void * +get_cnt_blk(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[0]; +} + +static inline void * +get_aad(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[16]; +} + +static inline void * 
+get_sym_cop(struct rte_crypto_op *cop) +{ + return (cop + 1); +} + int inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx); @@ -183,15 +220,15 @@ outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], struct ipsec_sa *sa[], uint16_t nb_pkts); void -sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep); +sp4_init(struct socket_ctx *ctx, int32_t socket_id); void -sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep); +sp6_init(struct socket_ctx *ctx, int32_t socket_id); void -sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep); +sa_init(struct socket_ctx *ctx, int32_t socket_id); void -rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep); +rt_init(struct socket_ctx *ctx, int32_t socket_id); #endif /* __IPSEC_H__ */ diff --git a/examples/ipsec-secgw/parser.c b/examples/ipsec-secgw/parser.c new file mode 100644 index 00000000..9d0ea462 --- /dev/null +++ b/examples/ipsec-secgw/parser.c @@ -0,0 +1,591 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <rte_common.h> +#include <rte_crypto.h> + +#include <cmdline_parse_string.h> +#include <cmdline_parse_num.h> +#include <cmdline_parse_ipaddr.h> +#include <cmdline_socket.h> +#include <cmdline.h> + +#include "ipsec.h" +#include "parser.h" + +#define PARSE_DELIMITER " \f\n\r\t\v" +static int +parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) +{ + uint32_t i; + + if ((string == NULL) || + (tokens == NULL) || + (*n_tokens < 1)) + return -EINVAL; + + for (i = 0; i < *n_tokens; i++) { + tokens[i] = strtok_r(string, PARSE_DELIMITER, &string); + if (tokens[i] == NULL) + break; + } + + if ((i == *n_tokens) && + (NULL != strtok_r(string, PARSE_DELIMITER, &string))) + return -E2BIG; + + *n_tokens = i; + return 0; +} + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. 
+ * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr(digits, ch); + if (pch != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned dbloct_count = 0; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr((xdigits = xdigits_l), ch); + if (pch == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + /* if we already have 8 double octets, having a colon + * means error */ + if (dbloct_count == 8) + return 0; + + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. 
+ */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} + +int +parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask) +{ + char ip_str[256] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strncpy(ip_str, token, pch - token); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strncpy(ip_str, token, sizeof(ip_str) - 1); + if (mask) + *mask = 0; + } + + if (strlen(ip_str) >= INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton4(ip_str, (unsigned char *)ipv4) != 1) + return -EINVAL; + + return 0; +} + +int +parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask) +{ + char ip_str[256] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strncpy(ip_str, token, pch - token); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strncpy(ip_str, token, sizeof(ip_str) - 1); + if (mask) + *mask = 0; + } + + if (strlen(ip_str) >= INET6_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton6(ip_str, (unsigned char *)ipv6) != 1) + return -EINVAL; + + return 0; +} + +int +parse_range(const char *token, uint16_t *low, uint16_t *high) +{ + char ch; + char num_str[20]; + uint32_t pos; + int range_low = -1; + int range_high = -1; + + if (!low || !high) + return -1; + + memset(num_str, 0, 20); + pos = 0; + + while ((ch = *token++) != '\0') { + if (isdigit(ch)) { + if (pos >= 19) + return -1; + num_str[pos++] = ch; + } else if (ch == ':') { + if (range_low != -1) + return -1; + range_low = atoi(num_str); + memset(num_str, 0, 20); + pos = 0; + } + } + + if (strlen(num_str) == 0) + return -1; + + range_high = atoi(num_str); + + *low = (uint16_t)range_low; + *high = (uint16_t)range_high; + + return 0; +} + +/** sp add parse */ +struct cfg_sp_add_cfg_item { + cmdline_fixed_string_t sp_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sp_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sp_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK((parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0), status, "too many arguments"); + + if (status->status < 0) + return; + + if (strcmp(tokens[0], "ipv4") == 0) { + parse_sp4_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else if (strcmp(tokens[0], "ipv6") == 0) { + parse_sp6_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else { + APP_CHECK(0, status, "unrecognizable input %s\n", + tokens[0]); + return; + } +} + +static cmdline_parse_token_string_t cfg_sp_add_sp_str = + TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, + sp_keyword, "sp"); + +static cmdline_parse_token_string_t cfg_sp_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sp_add_rule = { + .f = cfg_sp_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sp_add_sp_str, + (void *) &cfg_sp_add_multi_str, + NULL, + }, +}; + +/* sa add parse */ +struct cfg_sa_add_cfg_item { + cmdline_fixed_string_t sa_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sa_add_cfg_item_parsed(void 
*parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sa_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0, status, "too many arguments\n"); + + parse_sa_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_sa_add_sa_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, + sa_keyword, "sa"); + +static cmdline_parse_token_string_t cfg_sa_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sa_add_rule = { + .f = cfg_sa_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sa_add_sa_str, + (void *) &cfg_sa_add_multi_str, + NULL, + }, +}; + +/* rt add parse */ +struct cfg_rt_add_cfg_item { + cmdline_fixed_string_t rt_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_rt_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_rt_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string( + params->multi_string, tokens, &n_tokens) == 0, + status, "too many arguments\n"); + if (status->status < 0) + return; + + parse_rt_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_rt_add_rt_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, + rt_keyword, "rt"); + +static cmdline_parse_token_string_t cfg_rt_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_rt_add_rule = { + .f = cfg_rt_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_rt_add_rt_str, + (void *) &cfg_rt_add_multi_str, + NULL, + }, +}; + +/** set of cfg items */ +cmdline_parse_ctx_t ipsec_ctx[] = { + (cmdline_parse_inst_t *)&cfg_sp_add_rule, + (cmdline_parse_inst_t *)&cfg_sa_add_rule, + (cmdline_parse_inst_t *)&cfg_rt_add_rule, + NULL, +}; + +int +parse_cfg_file(const char *cfg_filename) +{ + struct cmdline *cl = cmdline_stdin_new(ipsec_ctx, ""); + FILE *f = fopen(cfg_filename, "r"); + char str[1024] = {0}, *get_s = NULL; + uint32_t line_num = 0; + struct parse_status status = {0}; + + if (f == NULL) { + rte_panic("Error: invalid file descriptor %s\n", cfg_filename); + goto error_exit; + } + + if (cl == NULL) { + rte_panic("Error: cannot create cmdline instance\n"); + goto error_exit; + } + + cfg_sp_add_rule.data = &status; + cfg_sa_add_rule.data = &status; + cfg_rt_add_rule.data = &status; + + do { + char oneline[1024]; + char *pos; + get_s = fgets(oneline, 1024, f); + + if (!get_s) + break; + + line_num++; + + if (strlen(oneline) > 1022) { + rte_panic("%s:%u: error: " + "the line contains more characters the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + + /* process comment char '#' */ + if (oneline[0] == '#') + continue; + + pos = strchr(oneline, '#'); + if (pos != NULL) + *pos = '\0'; + + /* process line concatenator '\' */ + pos = strchr(oneline, 92); + if (pos != NULL) { + if (pos != oneline+strlen(oneline) - 2) { + rte_panic("%s:%u: error: " + "no character should exist after '\\'\n", + cfg_filename, line_num); + goto error_exit; + } + + *pos = '\0'; + + if (strlen(oneline) + strlen(str) > 1022) 
{ + rte_panic("%s:%u: error: " + "the concatenated line contains more characters the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + + strncpy(str + strlen(str), oneline, + strlen(oneline)); + + continue; + } + + /* copy the line to str and process */ + if (strlen(oneline) + strlen(str) > 1022) { + rte_panic("%s:%u: error: " + "the line contains more characters the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + strncpy(str + strlen(str), oneline, + strlen(oneline)); + + str[strlen(str)] = '\n'; + if (cmdline_parse(cl, str) < 0) { + rte_panic("%s:%u: error: parsing \"%s\" failed\n", + cfg_filename, line_num, str); + goto error_exit; + } + + if (status.status < 0) { + rte_panic("%s:%u: error: %s", cfg_filename, + line_num, status.parse_msg); + goto error_exit; + } + + memset(str, 0, 1024); + } while (1); + + cmdline_stdin_exit(cl); + fclose(f); + + return 0; + +error_exit: + if (cl) + cmdline_stdin_exit(cl); + if (f) + fclose(f); + + return -1; +} diff --git a/examples/ipsec-secgw/parser.h b/examples/ipsec-secgw/parser.h new file mode 100644 index 00000000..d31ae016 --- /dev/null +++ b/examples/ipsec-secgw/parser.h @@ -0,0 +1,116 @@ +/* BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#ifndef __PARSER_H +#define __PARSER_H + +struct parse_status { + int status; + char parse_msg[256]; +}; + +#define APP_CHECK(exp, status, fmt, ...) 
\ +do { \ + if (!(exp)) { \ + sprintf(status->parse_msg, fmt "\n", \ + ## __VA_ARGS__); \ + status->status = -1; \ + } else \ + status->status = 0; \ +} while (0) + +#define APP_CHECK_PRESENCE(val, str, status) \ + APP_CHECK(val == 0, status, \ + "item \"%s\" already present", str) + +#define APP_CHECK_TOKEN_EQUAL(tokens, index, ref, status) \ + APP_CHECK(strcmp(tokens[index], ref) == 0, status, \ + "unrecognized input \"%s\": expect \"%s\"\n", \ + tokens[index], ref) + +static inline int +is_str_num(const char *str) +{ + uint32_t i; + + for (i = 0; i < strlen(str); i++) + if (!isdigit(str[i])) + return -1; + + return 0; +} + +#define APP_CHECK_TOKEN_IS_NUM(tokens, index, status) \ + APP_CHECK(is_str_num(tokens[index]) == 0, status, \ + "input \"%s\" is not valid number string", tokens[index]) + + +#define INCREMENT_TOKEN_INDEX(index, max_num, status) \ +do { \ + APP_CHECK(index + 1 < max_num, status, "reaching the end of " \ + "the token array"); \ + index++; \ +} while (0) + +int +parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask); + +int +parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask); + +int +parse_range(const char *token, uint16_t *low, uint16_t *high); + +void +parse_sp4_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +parse_sp6_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +parse_sa_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +parse_rt_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +int +parse_cfg_file(const char *cfg_filename); + +#endif diff --git a/examples/ipsec-secgw/rt.c b/examples/ipsec-secgw/rt.c index fa5f0420..e03c5f0b 100644 --- a/examples/ipsec-secgw/rt.c +++ b/examples/ipsec-secgw/rt.c @@ -41,6 +41,7 @@ #include <rte_ip.h> #include "ipsec.h" +#include "parser.h" #define RT_IPV4_MAX_RULES 1024 #define RT_IPV6_MAX_RULES 1024 @@ -57,135 +58,106 @@ struct ip6_route { uint8_t if_out; }; -static struct ip4_route rt_ip4_ep0[] = { - /* Outbound */ - /* Tunnels */ - { IPv4(172, 16, 2, 5), 32, 0 }, - { IPv4(172, 16, 2, 6), 32, 1 }, - /* Transport */ - { IPv4(192, 168, 175, 0), 24, 0 }, - { IPv4(192, 168, 176, 0), 24, 1 }, - /* Bypass */ - { IPv4(192, 168, 240, 0), 24, 0 }, - { IPv4(192, 168, 241, 0), 24, 1 }, +struct ip4_route rt_ip4[RT_IPV4_MAX_RULES]; +uint32_t nb_rt_ip4; - /* Inbound */ - /* Tunnels */ - { IPv4(192, 168, 115, 0), 24, 2 }, - { IPv4(192, 168, 116, 0), 24, 3 }, - { IPv4(192, 168, 65, 0), 24, 2 }, - { IPv4(192, 168, 66, 0), 24, 3 }, - /* Transport */ - { IPv4(192, 168, 185, 0), 24, 2 }, - { IPv4(192, 168, 186, 0), 24, 3 }, - /* NULL */ - { IPv4(192, 168, 210, 0), 24, 2 }, - { IPv4(192, 168, 211, 0), 24, 3 }, - /* Bypass */ - { IPv4(192, 168, 245, 0), 24, 2 }, - { IPv4(192, 168, 246, 0), 24, 3 }, -}; - -static struct ip6_route rt_ip6_ep0[] = { - /* Outbound */ - /* Tunnels */ - { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, - 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 }, 116, 0 }, - { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, - 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 }, 116, 1 }, - /* Transport */ - { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 }, - { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 }, - /* Inbound */ - /* Tunnels */ - { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, - 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 
116, 2 }, - { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, - 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 }, - { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, - 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 }, - { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, - 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 }, - /* Transport */ - { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 }, - { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 }, -}; - -static struct ip4_route rt_ip4_ep1[] = { - /* Outbound */ - /* Tunnels */ - { IPv4(172, 16, 1, 5), 32, 0 }, - { IPv4(172, 16, 1, 6), 32, 1 }, - /* Transport */ - { IPv4(192, 168, 185, 0), 24, 0 }, - { IPv4(192, 168, 186, 0), 24, 1 }, - /* Bypass */ - { IPv4(192, 168, 245, 0), 24, 0 }, - { IPv4(192, 168, 246, 0), 24, 1 }, +struct ip6_route rt_ip6[RT_IPV4_MAX_RULES]; +uint32_t nb_rt_ip6; - /* Inbound */ - /* Tunnels */ - { IPv4(192, 168, 105, 0), 24, 2 }, - { IPv4(192, 168, 106, 0), 24, 3 }, - { IPv4(192, 168, 55, 0), 24, 2 }, - { IPv4(192, 168, 56, 0), 24, 3 }, - /* Transport */ - { IPv4(192, 168, 175, 0), 24, 2 }, - { IPv4(192, 168, 176, 0), 24, 3 }, - /* NULL */ - { IPv4(192, 168, 200, 0), 24, 2 }, - { IPv4(192, 168, 201, 0), 24, 3 }, - /* Bypass */ - { IPv4(192, 168, 240, 0), 24, 2 }, - { IPv4(192, 168, 241, 0), 24, 3 }, -}; +void +parse_rt_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + uint32_t ti; + uint32_t *n_rts = NULL; + struct ip4_route *route_ipv4 = NULL; + struct ip6_route *route_ipv6 = NULL; + + if (strcmp(tokens[0], "ipv4") == 0) { + n_rts = &nb_rt_ip4; + route_ipv4 = &rt_ip4[*n_rts]; + + APP_CHECK(*n_rts <= RT_IPV4_MAX_RULES - 1, status, + "too many rt rules, abort insertion\n"); + if (status->status < 0) + return; + + } else if (strcmp(tokens[0], "ipv6") == 0) { + n_rts = &nb_rt_ip6; + route_ipv6 = &rt_ip6[*n_rts]; + + APP_CHECK(*n_rts <= RT_IPV6_MAX_RULES - 1, status, + "too many rt rules, abort insertion\n"); + if (status->status < 0) + return; + } else { + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[0]); + return; + } -static struct ip6_route rt_ip6_ep1[] = { - /* Outbound */ - /* Tunnels */ - { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 }, 116, 0 }, - { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 }, 116, 1 }, - /* Transport */ - { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 }, - { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 }, + for (ti = 1; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "dst") == 0) { + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (route_ipv4 != NULL) { + struct in_addr ip; + uint32_t depth = 0; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, &depth) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + route_ipv4->ip = rte_bswap32( + (uint32_t)ip.s_addr); + route_ipv4->depth = (uint8_t)depth; + } else { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK(parse_ipv6_addr(tokens[ti], + &ip, &depth) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 address", + tokens[ti]); + if (status->status < 0) + return; + memcpy(route_ipv6->ip, 
ip.s6_addr, 16); + route_ipv6->depth = (uint8_t)depth; + } + } + + if (strcmp(tokens[ti], "port") == 0) { + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + if (route_ipv4 != NULL) + route_ipv4->if_out = atoi(tokens[ti]); + else + route_ipv6->if_out = atoi(tokens[ti]); + } + } - /* Inbound */ - /* Tunnels */ - { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, - 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 116, 2 }, - { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, - 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 }, - { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, - 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 }, - { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, - 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 }, - /* Transport */ - { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 }, - { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 }, -}; + *n_rts = *n_rts + 1; +} void -rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) +rt_init(struct socket_ctx *ctx, int32_t socket_id) { char name[PATH_MAX]; uint32_t i; int32_t ret; struct rte_lpm *lpm; struct rte_lpm6 *lpm6; - struct ip4_route *rt; - struct ip6_route *rt6; char a, b, c, d; - uint32_t nb_routes, nb_routes6; struct rte_lpm_config conf = { 0 }; struct rte_lpm6_config conf6 = { 0 }; @@ -200,23 +172,12 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) rte_exit(EXIT_FAILURE, "IPv6 Routing Table for socket %u " "already initialized\n", socket_id); + if (nb_rt_ip4 == 0 && nb_rt_ip6 == 0) + RTE_LOG(WARNING, IPSEC, "No Routing rule specified\n"); + printf("Creating IPv4 Routing Table (RT) context with %u max routes\n", RT_IPV4_MAX_RULES); - if (ep == 0) { - rt = rt_ip4_ep0; - nb_routes = RTE_DIM(rt_ip4_ep0); - rt6 = rt_ip6_ep0; - nb_routes6 = RTE_DIM(rt_ip6_ep0); - } else if (ep == 1) { - rt = rt_ip4_ep1; - nb_routes = RTE_DIM(rt_ip4_ep1); - rt6 = rt_ip6_ep1; - nb_routes6 = RTE_DIM(rt_ip6_ep1); - } else - rte_exit(EXIT_FAILURE, "Invalid EP value %u. 
Only 0 or 1 " - "supported.\n", ep); - /* create the LPM table */ snprintf(name, sizeof(name), "%s_%u", "rt_ip4", socket_id); conf.max_rules = RT_IPV4_MAX_RULES; @@ -227,15 +188,17 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) "on socket %d\n", name, socket_id); /* populate the LPM table */ - for (i = 0; i < nb_routes; i++) { - ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].if_out); + for (i = 0; i < nb_rt_ip4; i++) { + ret = rte_lpm_add(lpm, rt_ip4[i].ip, rt_ip4[i].depth, + rt_ip4[i].if_out); if (ret < 0) rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " "LPM table on socket %d\n", i, name, socket_id); - uint32_t_to_char(rt[i].ip, &a, &b, &c, &d); + uint32_t_to_char(rt_ip4[i].ip, &a, &b, &c, &d); printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n", - a, b, c, d, rt[i].depth, rt[i].if_out); + a, b, c, d, rt_ip4[i].depth, + rt_ip4[i].if_out); } snprintf(name, sizeof(name), "%s_%u", "rt_ip6", socket_id); @@ -247,24 +210,24 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) "on socket %d\n", name, socket_id); /* populate the LPM table */ - for (i = 0; i < nb_routes6; i++) { - ret = rte_lpm6_add(lpm6, rt6[i].ip, rt6[i].depth, - rt6[i].if_out); + for (i = 0; i < nb_rt_ip6; i++) { + ret = rte_lpm6_add(lpm6, rt_ip6[i].ip, rt_ip6[i].depth, + rt_ip6[i].if_out); if (ret < 0) rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " "LPM table on socket %d\n", i, name, socket_id); printf("LPM6: Adding route " " %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n", - (uint16_t)((rt6[i].ip[0] << 8) | rt6[i].ip[1]), - (uint16_t)((rt6[i].ip[2] << 8) | rt6[i].ip[3]), - (uint16_t)((rt6[i].ip[4] << 8) | rt6[i].ip[5]), - (uint16_t)((rt6[i].ip[6] << 8) | rt6[i].ip[7]), - (uint16_t)((rt6[i].ip[8] << 8) | rt6[i].ip[9]), - (uint16_t)((rt6[i].ip[10] << 8) | rt6[i].ip[11]), - (uint16_t)((rt6[i].ip[12] << 8) | rt6[i].ip[13]), - (uint16_t)((rt6[i].ip[14] << 8) | rt6[i].ip[15]), - rt6[i].depth, rt6[i].if_out); + (uint16_t)((rt_ip6[i].ip[0] << 8) | rt_ip6[i].ip[1]), + (uint16_t)((rt_ip6[i].ip[2] << 8) | rt_ip6[i].ip[3]), + (uint16_t)((rt_ip6[i].ip[4] << 8) | rt_ip6[i].ip[5]), + (uint16_t)((rt_ip6[i].ip[6] << 8) | rt_ip6[i].ip[7]), + (uint16_t)((rt_ip6[i].ip[8] << 8) | rt_ip6[i].ip[9]), + (uint16_t)((rt_ip6[i].ip[10] << 8) | rt_ip6[i].ip[11]), + (uint16_t)((rt_ip6[i].ip[12] << 8) | rt_ip6[i].ip[13]), + (uint16_t)((rt_ip6[i].ip[14] << 8) | rt_ip6[i].ip[15]), + rt_ip6[i].depth, rt_ip6[i].if_out); } ctx->rt_ip4 = (struct rt_ctx *)lpm; diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index 4439e0f5..8c4406cf 100644 --- a/examples/ipsec-secgw/sa.c +++ b/examples/ipsec-secgw/sa.c @@ -45,246 +45,501 @@ #include <rte_byteorder.h> #include <rte_errno.h> #include <rte_ip.h> +#include <rte_random.h> #include "ipsec.h" #include "esp.h" +#include "parser.h" + +struct supported_cipher_algo { + const char *keyword; + enum rte_crypto_cipher_algorithm algo; + uint16_t iv_len; + uint16_t block_size; + uint16_t key_len; +}; -/* SAs Outbound */ -const struct ipsec_sa sa_out[] = { - { - .spi = 5, - .src.ip.ip4 = IPv4(172, 16, 1, 5), - .dst.ip.ip4 = IPv4(172, 16, 2, 5), - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = IP4_TUNNEL - }, - { - .spi = 6, - .src.ip.ip4 = IPv4(172, 16, 1, 6), - .dst.ip.ip4 = IPv4(172, 16, 2, 6), - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags 
= IP4_TUNNEL - }, - { - .spi = 10, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = TRANSPORT - }, - { - .spi = 11, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = TRANSPORT - }, - { - .spi = 15, - .src.ip.ip4 = IPv4(172, 16, 1, 5), - .dst.ip.ip4 = IPv4(172, 16, 2, 5), - .cipher_algo = RTE_CRYPTO_CIPHER_NULL, - .auth_algo = RTE_CRYPTO_AUTH_NULL, - .digest_len = 0, - .iv_len = 0, - .block_size = 4, - .flags = IP4_TUNNEL - }, - { - .spi = 16, - .src.ip.ip4 = IPv4(172, 16, 1, 6), - .dst.ip.ip4 = IPv4(172, 16, 2, 6), - .cipher_algo = RTE_CRYPTO_CIPHER_NULL, - .auth_algo = RTE_CRYPTO_AUTH_NULL, - .digest_len = 0, - .iv_len = 0, - .block_size = 4, - .flags = IP4_TUNNEL - }, - { - .spi = 25, - .src.ip.ip6.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 }, - .dst.ip.ip6.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, - 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 }, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = IP6_TUNNEL - }, - { - .spi = 26, - .src.ip.ip6.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 }, - .dst.ip.ip6.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, - 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 }, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = IP6_TUNNEL - }, +struct supported_auth_algo { + const char *keyword; + enum rte_crypto_auth_algorithm algo; + uint16_t digest_len; + uint16_t key_len; + uint8_t aad_len; + uint8_t key_not_req; }; -/* SAs Inbound */ -const struct ipsec_sa sa_in[] = { - { - .spi = 105, - .src.ip.ip4 = IPv4(172, 16, 2, 5), - .dst.ip.ip4 = IPv4(172, 16, 1, 5), - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = IP4_TUNNEL - }, +const struct supported_cipher_algo cipher_algos[] = { { - .spi = 106, - .src.ip.ip4 = IPv4(172, 16, 2, 6), - .dst.ip.ip4 = IPv4(172, 16, 1, 6), - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = IP4_TUNNEL + .keyword = "null", + .algo = RTE_CRYPTO_CIPHER_NULL, + .iv_len = 0, + .block_size = 4, + .key_len = 0 }, { - .spi = 110, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = TRANSPORT + .keyword = "aes-128-cbc", + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .iv_len = 16, + .block_size = 16, + .key_len = 16 }, { - .spi = 111, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = TRANSPORT + .keyword = "aes-128-gcm", + .algo = RTE_CRYPTO_CIPHER_AES_GCM, + .iv_len = 8, + .block_size = 4, + .key_len = 20 }, { - .spi = 115, - .src.ip.ip4 = IPv4(172, 16, 2, 5), - .dst.ip.ip4 = IPv4(172, 16, 1, 5), - .cipher_algo = RTE_CRYPTO_CIPHER_NULL, - .auth_algo = RTE_CRYPTO_AUTH_NULL, - .digest_len = 0, - .iv_len = 0, - .block_size = 4, - .flags = IP4_TUNNEL - }, + .keyword = "aes-128-ctr", + .algo = 
RTE_CRYPTO_CIPHER_AES_CTR, + .iv_len = 8, + .block_size = 16, /* XXX AESNI MB limition, should be 4 */ + .key_len = 20 + } +}; + +const struct supported_auth_algo auth_algos[] = { { - .spi = 116, - .src.ip.ip4 = IPv4(172, 16, 2, 6), - .dst.ip.ip4 = IPv4(172, 16, 1, 6), - .cipher_algo = RTE_CRYPTO_CIPHER_NULL, - .auth_algo = RTE_CRYPTO_AUTH_NULL, - .digest_len = 0, - .iv_len = 0, - .block_size = 4, - .flags = IP4_TUNNEL + .keyword = "null", + .algo = RTE_CRYPTO_AUTH_NULL, + .digest_len = 0, + .key_len = 0, + .key_not_req = 1 }, { - .spi = 125, - .src.ip.ip6.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, - 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 }, - .dst.ip.ip6.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 }, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = IP6_TUNNEL + .keyword = "sha1-hmac", + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .digest_len = 12, + .key_len = 20 }, { - .spi = 126, - .src.ip.ip6.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, - 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 }, - .dst.ip.ip6.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 }, - .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC, - .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .digest_len = 12, - .iv_len = 16, - .block_size = 16, - .flags = IP6_TUNNEL - }, + .keyword = "aes-128-gcm", + .algo = RTE_CRYPTO_AUTH_AES_GCM, + .digest_len = 16, + .aad_len = 8, + .key_not_req = 1 + } }; -static uint8_t cipher_key[256] = "sixteenbytes key"; +struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES]; +uint32_t nb_sa_out; + +struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES]; +uint32_t nb_sa_in; + +static const struct supported_cipher_algo * +find_match_cipher_algo(const char *cipher_keyword) +{ + size_t i; -/* AES CBC xform */ -const struct rte_crypto_sym_xform aescbc_enc_xf = { - NULL, - RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { RTE_CRYPTO_CIPHER_OP_ENCRYPT, RTE_CRYPTO_CIPHER_AES_CBC, - .key = { cipher_key, 16 } } + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + const struct supported_cipher_algo *algo = + &cipher_algos[i]; + + if (strcmp(cipher_keyword, algo->keyword) == 0) + return algo; } -}; -const struct rte_crypto_sym_xform aescbc_dec_xf = { - NULL, - RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { RTE_CRYPTO_CIPHER_OP_DECRYPT, RTE_CRYPTO_CIPHER_AES_CBC, - .key = { cipher_key, 16 } } + return NULL; +} + +static const struct supported_auth_algo * +find_match_auth_algo(const char *auth_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + const struct supported_auth_algo *algo = + &auth_algos[i]; + + if (strcmp(auth_keyword, algo->keyword) == 0) + return algo; } -}; -static uint8_t auth_key[256] = "twentybytes hash key"; + return NULL; +} -/* SHA1 HMAC xform */ -const struct rte_crypto_sym_xform sha1hmac_gen_xf = { - NULL, - RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { RTE_CRYPTO_AUTH_OP_GENERATE, RTE_CRYPTO_AUTH_SHA1_HMAC, - .key = { auth_key, 20 }, 12, 0 } +/** parse_key_string + * parse x:x:x:x.... 
hex number key string into uint8_t *key + * return: + * > 0: number of bytes parsed + * 0: failed + */ +static uint32_t +parse_key_string(const char *key_str, uint8_t *key) +{ + const char *pt_start = key_str, *pt_end = key_str; + uint32_t nb_bytes = 0; + + while (pt_end != NULL) { + char sub_str[3] = {0}; + + pt_end = strchr(pt_start, ':'); + + if (pt_end == NULL) { + if (strlen(pt_start) > 2) + return 0; + strncpy(sub_str, pt_start, 2); + } else { + if (pt_end - pt_start > 2) + return 0; + + strncpy(sub_str, pt_start, pt_end - pt_start); + pt_start = pt_end + 1; + } + + key[nb_bytes++] = strtol(sub_str, NULL, 16); } -}; -const struct rte_crypto_sym_xform sha1hmac_verify_xf = { - NULL, - RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_AUTH_SHA1_HMAC, - .key = { auth_key, 20 }, 12, 0 } + return nb_bytes; +} + +void +parse_sa_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct ipsec_sa *rule = NULL; + uint32_t ti; /*token index*/ + uint32_t *ri /*rule index*/; + uint32_t cipher_algo_p = 0; + uint32_t auth_algo_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t mode_p = 0; + + if (strcmp(tokens[0], "in") == 0) { + ri = &nb_sa_in; + + APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, + "too many sa rules, abort insertion\n"); + if (status->status < 0) + return; + + rule = &sa_in[*ri]; + } else { + ri = &nb_sa_out; + + APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, + "too many sa rules, abort insertion\n"); + if (status->status < 0) + return; + + rule = &sa_out[*ri]; } -}; -/* AES CBC xform */ -const struct rte_crypto_sym_xform null_cipher_xf = { - NULL, - RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { .algo = RTE_CRYPTO_CIPHER_NULL } + /* spi number */ + APP_CHECK_TOKEN_IS_NUM(tokens, 1, status); + if (status->status < 0) + return; + rule->spi = atoi(tokens[1]); + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "mode") == 0) { + APP_CHECK_PRESENCE(mode_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (strcmp(tokens[ti], "ipv4-tunnel") == 0) + rule->flags = IP4_TUNNEL; + else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) + rule->flags = IP6_TUNNEL; + else if (strcmp(tokens[ti], "transport") == 0) + rule->flags = TRANSPORT; + else { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + mode_p = 1; + continue; + } + + if (strcmp(tokens[ti], "cipher_algo") == 0) { + const struct supported_cipher_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_cipher_algo(tokens[ti]); + + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + rule->cipher_algo = algo->algo; + rule->block_size = algo->block_size; + rule->iv_len = algo->iv_len; + rule->cipher_key_len = algo->key_len; + + /* for NULL algorithm, no cipher key required */ + if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { + cipher_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"cipher_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = 
parse_key_string(tokens[ti], + rule->cipher_key); + APP_CHECK(key_len == rule->cipher_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC) + rule->salt = (uint32_t)rte_rand(); + + if ((algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) || + (algo->algo == RTE_CRYPTO_CIPHER_AES_GCM)) { + key_len -= 4; + rule->cipher_key_len = key_len; + memcpy(&rule->salt, + &rule->cipher_key[key_len], 4); + } + + cipher_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "auth_algo") == 0) { + const struct supported_auth_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(auth_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_auth_algo(tokens[ti]); + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + rule->auth_algo = algo->algo; + rule->auth_key_len = algo->key_len; + rule->digest_len = algo->digest_len; + rule->aad_len = algo->key_len; + + /* NULL algorithm and combined algos do not + * require auth key + */ + if (algo->key_not_req) { + auth_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "auth_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"auth_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->auth_key); + APP_CHECK(key_len == rule->auth_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + auth_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (rule->flags == IP4_TUNNEL) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->src.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (rule->flags == IP6_TUNNEL) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->src.ip.ip6.ip6_b, + ip.s6_addr, 16); + } else if (rule->flags == TRANSPORT) { + APP_CHECK(0, status, "unrecognized input " + "\"%s\"", tokens[ti]); + return; + } + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (rule->flags == IP4_TUNNEL) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->dst.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (rule->flags == IP6_TUNNEL) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16); + } else if 
(rule->flags == TRANSPORT) { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + dst_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; } -}; -const struct rte_crypto_sym_xform null_auth_xf = { - NULL, - RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { .algo = RTE_CRYPTO_AUTH_NULL } + APP_CHECK(cipher_algo_p == 1, status, "missing cipher options"); + if (status->status < 0) + return; + + APP_CHECK(auth_algo_p == 1, status, "missing auth options"); + if (status->status < 0) + return; + + APP_CHECK(mode_p == 1, status, "missing mode option"); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static inline void +print_one_sa_rule(const struct ipsec_sa *sa, int inbound) +{ + uint32_t i; + uint8_t a, b, c, d; + + printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi); + + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + if (cipher_algos[i].algo == sa->cipher_algo) { + printf("%s ", cipher_algos[i].keyword); + break; + } } -}; + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + if (auth_algos[i].algo == sa->auth_algo) { + printf("%s ", auth_algos[i].keyword); + break; + } + } + + printf("mode:"); + + switch (sa->flags) { + case IP4_TUNNEL: + printf("IP4Tunnel "); + uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a); + uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a); + break; + case IP6_TUNNEL: + printf("IP6Tunnel "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->src.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->src.ip.ip6.ip6_b[i]); + } + printf(" "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->dst.ip.ip6.ip6_b[i]); + } + break; + case TRANSPORT: + printf("Transport"); + break; + } + printf("\n"); +} struct sa_ctx { struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; @@ -347,25 +602,55 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], } if (inbound) { - if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { - sa_ctx->xf[idx].a = null_auth_xf; - sa_ctx->xf[idx].b = null_cipher_xf; - } else { - sa_ctx->xf[idx].a = sha1hmac_verify_xf; - sa_ctx->xf[idx].b = aescbc_dec_xf; - } + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].b.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].b.cipher.op = + RTE_CRYPTO_CIPHER_OP_DECRYPT; + sa_ctx->xf[idx].b.next = NULL; + + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].a.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].a.auth.add_auth_data_length = + sa->aad_len; + sa_ctx->xf[idx].a.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].a.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].a.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].a.auth.op = + RTE_CRYPTO_AUTH_OP_VERIFY; + } else { /* outbound */ - if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { - sa_ctx->xf[idx].a = null_cipher_xf; - sa_ctx->xf[idx].b = null_auth_xf; - } else { - sa_ctx->xf[idx].a = aescbc_enc_xf; - sa_ctx->xf[idx].b = sha1hmac_gen_xf; - } + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].a.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].a.cipher.op = + 
RTE_CRYPTO_CIPHER_OP_ENCRYPT; + sa_ctx->xf[idx].a.next = NULL; + + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].b.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].b.auth.add_auth_data_length = + sa->aad_len; + sa_ctx->xf[idx].b.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].b.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].b.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].b.auth.op = + RTE_CRYPTO_AUTH_OP_GENERATE; } + sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b; sa_ctx->xf[idx].b.next = NULL; sa->xforms = &sa_ctx->xf[idx].a; + + print_one_sa_rule(sa, inbound); } return 0; @@ -386,10 +671,8 @@ sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], } void -sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) +sa_init(struct socket_ctx *ctx, int32_t socket_id) { - const struct ipsec_sa *sa_out_entries, *sa_in_entries; - uint32_t nb_out_entries, nb_in_entries; const char *name; if (ctx == NULL) @@ -403,35 +686,30 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already " "initialized\n", socket_id); - if (ep == 0) { - sa_out_entries = sa_out; - nb_out_entries = RTE_DIM(sa_out); - sa_in_entries = sa_in; - nb_in_entries = RTE_DIM(sa_in); - } else if (ep == 1) { - sa_out_entries = sa_in; - nb_out_entries = RTE_DIM(sa_in); - sa_in_entries = sa_out; - nb_in_entries = RTE_DIM(sa_out); - } else - rte_exit(EXIT_FAILURE, "Invalid EP value %u. " - "Only 0 or 1 supported.\n", ep); + if (nb_sa_in > 0) { + name = "sa_in"; + ctx->sa_in = sa_create(name, socket_id); + if (ctx->sa_in == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); - name = "sa_in"; - ctx->sa_in = sa_create(name, socket_id); - if (ctx->sa_in == NULL) - rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s " - "in socket %d\n", rte_errno, name, socket_id); - - name = "sa_out"; - ctx->sa_out = sa_create(name, socket_id); - if (ctx->sa_out == NULL) - rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s " - "in socket %d\n", rte_errno, name, socket_id); + sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in); + } else + RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n"); - sa_in_add_rules(ctx->sa_in, sa_in_entries, nb_in_entries); + if (nb_sa_out > 0) { + name = "sa_out"; + ctx->sa_out = sa_create(name, socket_id); + if (ctx->sa_out == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); - sa_out_add_rules(ctx->sa_out, sa_out_entries, nb_out_entries); + sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out); + } else + RTE_LOG(WARNING, IPSEC, "No SA Outbound rule " + "specified\n"); } int diff --git a/examples/ipsec-secgw/sp4.c b/examples/ipsec-secgw/sp4.c index 9c4b256b..38c72a92 100644 --- a/examples/ipsec-secgw/sp4.c +++ b/examples/ipsec-secgw/sp4.c @@ -42,8 +42,9 @@ #include <rte_ip.h> #include "ipsec.h" +#include "parser.h" -#define MAX_ACL_RULE_NUM 1000 +#define MAX_ACL_RULE_NUM 1024 /* * Rule and trace formats definitions. 
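[Editorial note, not part of the patch: the sa.c hunks above drop the hard-coded aescbc/sha1hmac xform constants and instead have sa_add_rules() build a two-element rte_crypto_sym_xform chain per parsed SA rule. The ordering is the essential detail: inbound SAs chain auth (VERIFY) first and cipher (DECRYPT) second, outbound SAs chain cipher (ENCRYPT) first and auth (GENERATE) second. The self-contained sketch below restates that chaining outside the example; the function name and all parameters are placeholders chosen here for illustration, while the struct fields and enum values are the ones used in the diff.]

#include <stdint.h>
#include <string.h>
#include <rte_cryptodev.h>

/* Sketch: build the inbound or outbound ESP xform chain for one SA rule. */
static void
chain_cipher_auth(struct rte_crypto_sym_xform *cipher_xf,
		struct rte_crypto_sym_xform *auth_xf,
		enum rte_crypto_cipher_algorithm cipher_algo,
		uint8_t *cipher_key, uint16_t cipher_key_len,
		enum rte_crypto_auth_algorithm auth_algo,
		uint8_t *auth_key, uint16_t auth_key_len,
		uint16_t digest_len, int inbound)
{
	memset(cipher_xf, 0, sizeof(*cipher_xf));
	memset(auth_xf, 0, sizeof(*auth_xf));

	cipher_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf->cipher.algo = cipher_algo;
	cipher_xf->cipher.key.data = cipher_key;
	cipher_xf->cipher.key.length = cipher_key_len;

	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->auth.algo = auth_algo;
	auth_xf->auth.key.data = auth_key;
	auth_xf->auth.key.length = auth_key_len;
	auth_xf->auth.digest_length = digest_len;

	if (inbound) {
		/* Decrypt path: verify the digest first, then decrypt. */
		auth_xf->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		cipher_xf->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		auth_xf->next = cipher_xf;
		cipher_xf->next = NULL;
	} else {
		/* Encrypt path: encrypt first, then generate the digest. */
		cipher_xf->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		auth_xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		cipher_xf->next = auth_xf;
		auth_xf->next = NULL;
	}
}

[The head of the resulting chain (auth_xf for inbound, cipher_xf for outbound) is what the patch stores in sa->xforms and later passes to session creation; keys and the digest length would come from the parsed "cipher_key"/"auth_key" tokens and the algorithm tables added above.]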
@@ -113,211 +114,306 @@ struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = { RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs)); -const struct acl4_rules acl4_rules_out[] = { - { - .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 105, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 106, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 175, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 176, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 200, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 201, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 55, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 56, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 240, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 241, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port 
*/ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} +struct acl4_rules acl4_rules_out[MAX_ACL_RULE_NUM]; +uint32_t nb_acl4_rules_out; + +struct acl4_rules acl4_rules_in[MAX_ACL_RULE_NUM]; +uint32_t nb_acl4_rules_in; + +void +parse_sp4_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl4_rules *rule_ipv4 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl4_rules_in; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, + "too many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv4 = &acl4_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl4_rules_out; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, + "too many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv4 = &acl4_rules_out[*ri]; + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; } -}; -const struct acl4_rules acl4_rules_in[] = { - { - .data = {.userdata = PROTECT(105), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 115, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(106), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 116, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 185, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 186, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(115), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 210, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(116), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 211, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 65, 
0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 66, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 245, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1}, - /* destination IPv4 */ - .field[2] = {.value.u32 = IPv4(192, 168, 246, 0), - .mask_range.u32 = 24,}, - /* source port */ - .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} + rule_ipv4->data.category_mask = 1; + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if (strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = + PROTECT(atoi(tokens[ti])); + + protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv4->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if 
(strcmp(tokens[ti], "src") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[1].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[1].mask_range.u32 = + depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[2].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[2].mask_range.u32 = + depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv4->field[0].value.u8 = (uint8_t)low; + rule_ipv4->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[3].value.u16 = port_low; + rule_ipv4->field[3].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[4].value.u16 = port_low; + rule_ipv4->field[4].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; } -}; + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} static void print_one_ip4_rule(const struct acl4_rules *rule, int32_t extra) @@ -406,11 +502,9 @@ 
acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules, } void -sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) +sp4_init(struct socket_ctx *ctx, int32_t socket_id) { const char *name; - const struct acl4_rules *rules_out, *rules_in; - uint32_t nb_out_rules, nb_in_rules; if (ctx == NULL) rte_exit(EXIT_FAILURE, "NULL context.\n"); @@ -423,25 +517,19 @@ sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already " "initialized\n", socket_id); - if (ep == 0) { - rules_out = acl4_rules_out; - nb_out_rules = RTE_DIM(acl4_rules_out); - rules_in = acl4_rules_in; - nb_in_rules = RTE_DIM(acl4_rules_in); - } else if (ep == 1) { - rules_out = acl4_rules_in; - nb_out_rules = RTE_DIM(acl4_rules_in); - rules_in = acl4_rules_out; - nb_in_rules = RTE_DIM(acl4_rules_out); + if (nb_acl4_rules_in > 0) { + name = "sp_ip4_in"; + ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_in, nb_acl4_rules_in); } else - rte_exit(EXIT_FAILURE, "Invalid EP value %u. " - "Only 0 or 1 supported.\n", ep); - - name = "sp_ip4_in"; - ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, socket_id, - rules_in, nb_in_rules); + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Inbound rule " + "specified\n"); - name = "sp_ip4_out"; - ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, socket_id, - rules_out, nb_out_rules); + if (nb_acl4_rules_out > 0) { + name = "sp_ip4_out"; + ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_out, nb_acl4_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Outbound rule " + "specified\n"); } diff --git a/examples/ipsec-secgw/sp6.c b/examples/ipsec-secgw/sp6.c index 1dda11a4..62fb492c 100644 --- a/examples/ipsec-secgw/sp6.c +++ b/examples/ipsec-secgw/sp6.c @@ -42,8 +42,9 @@ #include <rte_ip.h> #include "ipsec.h" +#include "parser.h" -#define MAX_ACL_RULE_NUM 1000 +#define MAX_ACL_RULE_NUM 1024 enum { IP6_PROTO, @@ -144,155 +145,363 @@ struct rte_acl_field_def ip6_defs[IP6_NUM] = { RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs)); -const struct acl6_rules acl6_rules_out[] = { - { - .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 
= 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} +struct acl6_rules acl6_rules_out[MAX_ACL_RULE_NUM]; +uint32_t nb_acl6_rules_out; + +struct acl6_rules acl6_rules_in[MAX_ACL_RULE_NUM]; +uint32_t nb_acl6_rules_in; + +void +parse_sp6_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl6_rules *rule_ipv6 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl6_rules_in; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, "too " + "many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv6 = &acl6_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl6_rules_out; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, "too " + "many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv6 = &acl6_rules_out[*ri]; + + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; } -}; -const struct acl6_rules acl6_rules_in[] = { - { - .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 
32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} - }, - { - .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1}, - /* destination IPv6 */ - .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,}, - .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,}, - .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,}, - .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,}, - /* source port */ - .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, - /* destination port */ - .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} + rule_ipv6->data.category_mask = 1; + + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if (strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = + PROTECT(atoi(tokens[ti])); + + protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" 
and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv6->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[1].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[1].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[2].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[2].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[3].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[3].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[4].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[4].mask_range.u32 = + (depth > 32) ? 32 : depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[5].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[5].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? 
(depth - 32) : 0; + rule_ipv6->field[6].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[6].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[7].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[7].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[8].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[8].mask_range.u32 = + (depth > 32) ? 32 : depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv6->field[0].value.u8 = (uint8_t)low; + rule_ipv6->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[9].value.u16 = port_low; + rule_ipv6->field[9].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[10].value.u16 = port_low; + rule_ipv6->field[10].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; } -}; + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} static inline void print_one_ip6_rule(const struct acl6_rules *rule, int32_t extra) @@ -407,11 +616,9 @@ acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules, } void -sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) +sp6_init(struct socket_ctx *ctx, int32_t socket_id) { const char *name; - const struct acl6_rules *rules_out, *rules_in; - uint32_t 
nb_out_rules, nb_in_rules; if (ctx == NULL) rte_exit(EXIT_FAILURE, "NULL context.\n"); @@ -424,25 +631,19 @@ sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep) rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u " "already initialized\n", socket_id); - if (ep == 0) { - rules_out = acl6_rules_out; - nb_out_rules = RTE_DIM(acl6_rules_out); - rules_in = acl6_rules_in; - nb_in_rules = RTE_DIM(acl6_rules_in); - } else if (ep == 1) { - rules_out = acl6_rules_in; - nb_out_rules = RTE_DIM(acl6_rules_in); - rules_in = acl6_rules_out; - nb_in_rules = RTE_DIM(acl6_rules_out); + if (nb_acl6_rules_in > 0) { + name = "sp_ip6_in"; + ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_in, nb_acl6_rules_in); } else - rte_exit(EXIT_FAILURE, "Invalid EP value %u. " - "Only 0 or 1 supported.\n", ep); + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Inbound rule " + "specified\n"); - name = "sp_ip6_in"; - ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, socket_id, - rules_in, nb_in_rules); - - name = "sp_ip6_out"; - ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, socket_id, - rules_out, nb_out_rules); + if (nb_acl6_rules_out > 0) { + name = "sp_ip6_out"; + ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_out, nb_acl6_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Outbound rule " + "specified\n"); } diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c index f013d927..708d76e9 100644 --- a/examples/ipv4_multicast/main.c +++ b/examples/ipv4_multicast/main.c @@ -63,7 +63,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_malloc.h> diff --git a/examples/kni/main.c b/examples/kni/main.c index f9fc61e0..57313d11 100644 --- a/examples/kni/main.c +++ b/examples/kni/main.c @@ -65,7 +65,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_log.h> #include <rte_mempool.h> #include <rte_mbuf.h> diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c index 6cfa9168..43fef59e 100644 --- a/examples/l2fwd-crypto/main.c +++ b/examples/l2fwd-crypto/main.c @@ -72,7 +72,6 @@ #include <rte_per_lcore.h> #include <rte_prefetch.h> #include <rte_random.h> -#include <rte_ring.h> #include <rte_hexdump.h> enum cdev_type { @@ -341,16 +340,24 @@ fill_supported_algorithm_tables(void) strcpy(supported_auth_algo[i], "NOT_SUPPORTED"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM"); + strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GMAC], "AES_GMAC"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC"); + strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5], "MD5"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC], "AES_XCBC_MAC"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC"); + strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1], "SHA1"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC"); + strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224], "SHA224"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC"); + strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256], "SHA256"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC"); + strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384], "SHA384"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC"); + 
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512], "SHA512"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2"); + strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_ZUC_EIA3], "ZUC_EIA3"); strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9"); for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++) @@ -361,7 +368,10 @@ fill_supported_algorithm_tables(void) strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM"); strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL"); strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2"); + strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_ZUC_EEA3], "ZUC_EEA3"); strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8"); + strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CTR], "3DES_CTR"); + strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CBC], "3DES_CBC"); } @@ -474,9 +484,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m, rte_pktmbuf_pkt_len(m) - cparams->digest_length); op->sym->auth.digest.length = cparams->digest_length; - /* For SNOW3G/KASUMI algorithms, offset/length must be in bits */ + /* For wireless algorithms, offset/length must be in bits */ if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || - cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9) { + cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 || + cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { op->sym->auth.data.offset = ipdata_offset << 3; op->sym->auth.data.length = data_len << 3; } else { @@ -496,9 +507,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m, op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr; op->sym->cipher.iv.length = cparams->iv.length; - /* For SNOW3G algorithms, offset/length must be in bits */ + /* For wireless algorithms, offset/length must be in bits */ if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || - cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8) { + cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 || + cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) { op->sym->cipher.data.offset = ipdata_offset << 3; op->sym->cipher.data.length = data_len << 3; } else { diff --git a/examples/l2fwd-ivshmem/Makefile b/examples/l2fwd-ivshmem/Makefile deleted file mode 100644 index 5f1d1728..00000000 --- a/examples/l2fwd-ivshmem/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -ifeq ($(RTE_SDK),) -$(error "Please define RTE_SDK environment variable") -endif - -# Default target, can be overriden by command line or environment -RTE_TARGET ?= x86_64-ivshmem-linuxapp-gcc - -include $(RTE_SDK)/mk/rte.vars.mk - -DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += host guest - -include $(RTE_SDK)/mk/rte.extsubdir.mk diff --git a/examples/l2fwd-ivshmem/guest/Makefile b/examples/l2fwd-ivshmem/guest/Makefile deleted file mode 100644 index 3ca73b43..00000000 --- a/examples/l2fwd-ivshmem/guest/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -ifeq ($(RTE_SDK),) -$(error "Please define RTE_SDK environment variable") -endif - -# Default target, can be overriden by command line or environment -RTE_TARGET ?= x86_64-ivshmem-linuxapp-gcc - -include $(RTE_SDK)/mk/rte.vars.mk - -# binary name -APP = guest - -# all source are stored in SRCS-y -SRCS-y := guest.c - -CFLAGS += -O3 -CFLAGS += $(WERROR_FLAGS) - -include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/l2fwd-ivshmem/guest/guest.c b/examples/l2fwd-ivshmem/guest/guest.c deleted file mode 100644 index 7c49521b..00000000 --- a/examples/l2fwd-ivshmem/guest/guest.c +++ /dev/null @@ -1,452 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <stdint.h> -#include <unistd.h> -#include <getopt.h> -#include <signal.h> -#include <sys/mman.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <sys/queue.h> -#include <sys/file.h> -#include <unistd.h> -#include <limits.h> -#include <errno.h> -#include <sys/ioctl.h> -#include <sys/time.h> - -#include <rte_common.h> -#include <rte_eal_memconfig.h> -#include <rte_log.h> -#include <rte_memory.h> -#include <rte_memcpy.h> -#include <rte_memzone.h> -#include <rte_eal.h> -#include <rte_per_lcore.h> -#include <rte_launch.h> -#include <rte_atomic.h> -#include <rte_cycles.h> -#include <rte_prefetch.h> -#include <rte_lcore.h> -#include <rte_per_lcore.h> -#include <rte_branch_prediction.h> -#include <rte_interrupts.h> -#include <rte_pci.h> -#include <rte_random.h> -#include <rte_debug.h> -#include <rte_ether.h> -#include <rte_ethdev.h> -#include <rte_ring.h> -#include <rte_mempool.h> -#include <rte_mbuf.h> -#include <rte_ivshmem.h> - -#include "../include/common.h" - -#define MAX_RX_QUEUE_PER_LCORE 16 -#define MAX_TX_QUEUE_PER_PORT 16 -struct lcore_queue_conf { - unsigned n_rx_port; - unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; - struct mbuf_table rx_mbufs[RTE_MAX_ETHPORTS]; - struct vm_port_param * port_param[MAX_RX_QUEUE_PER_LCORE]; -} __rte_cache_aligned; -static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; - -/* Print out statistics on packets dropped */ -static void -print_stats(void) -{ - uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; - unsigned portid; - - total_packets_dropped = 0; - total_packets_tx = 0; - total_packets_rx = 0; - - const char clr[] = { 27, '[', '2', 'J', '\0' }; - const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' }; - - /* Clear screen and move to top left */ - printf("%s%s", clr, topLeft); - - printf("\nPort statistics ===================================="); - - for (portid = 0; portid < ctrl->nb_ports; portid++) { - /* skip ports that are not enabled 
*/ - printf("\nStatistics for port %u ------------------------------" - "\nPackets sent: %24"PRIu64 - "\nPackets received: %20"PRIu64 - "\nPackets dropped: %21"PRIu64, - portid, - ctrl->vm_ports[portid].stats.tx, - ctrl->vm_ports[portid].stats.rx, - ctrl->vm_ports[portid].stats.dropped); - - total_packets_dropped += ctrl->vm_ports[portid].stats.dropped; - total_packets_tx += ctrl->vm_ports[portid].stats.tx; - total_packets_rx += ctrl->vm_ports[portid].stats.rx; - } - printf("\nAggregate statistics ===============================" - "\nTotal packets sent: %18"PRIu64 - "\nTotal packets received: %14"PRIu64 - "\nTotal packets dropped: %15"PRIu64, - total_packets_tx, - total_packets_rx, - total_packets_dropped); - printf("\n====================================================\n"); -} - -/* display usage */ -static void -l2fwd_ivshmem_usage(const char *prgname) -{ - printf("%s [EAL options] -- [-q NQ -T PERIOD]\n" - " -q NQ: number of queue (=ports) per lcore (default is 1)\n" - " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n", - prgname); -} - -static unsigned int -l2fwd_ivshmem_parse_nqueue(const char *q_arg) -{ - char *end = NULL; - unsigned long n; - - /* parse hexadecimal string */ - n = strtoul(q_arg, &end, 10); - if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) - return 0; - if (n == 0) - return 0; - if (n >= MAX_RX_QUEUE_PER_LCORE) - return 0; - - return n; -} - -static int -l2fwd_ivshmem_parse_timer_period(const char *q_arg) -{ - char *end = NULL; - int n; - - /* parse number string */ - n = strtol(q_arg, &end, 10); - if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) - return -1; - if (n >= MAX_TIMER_PERIOD) - return -1; - - return n; -} - -/* Parse the argument given in the command line of the application */ -static int -l2fwd_ivshmem_parse_args(int argc, char **argv) -{ - int opt, ret; - char **argvopt; - int option_index; - char *prgname = argv[0]; - static struct option lgopts[] = { - {NULL, 0, 0, 0} - }; - - argvopt = argv; - - while ((opt = getopt_long(argc, argvopt, "q:p:T:", - lgopts, &option_index)) != EOF) { - - switch (opt) { - - /* nqueue */ - case 'q': - l2fwd_ivshmem_rx_queue_per_lcore = l2fwd_ivshmem_parse_nqueue(optarg); - if (l2fwd_ivshmem_rx_queue_per_lcore == 0) { - printf("invalid queue number\n"); - l2fwd_ivshmem_usage(prgname); - return -1; - } - break; - - /* timer period */ - case 'T': - timer_period = l2fwd_ivshmem_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND; - if (timer_period < 0) { - printf("invalid timer period\n"); - l2fwd_ivshmem_usage(prgname); - return -1; - } - break; - - /* long options */ - case 0: - l2fwd_ivshmem_usage(prgname); - return -1; - - default: - l2fwd_ivshmem_usage(prgname); - return -1; - } - } - - if (optind >= 0) - argv[optind-1] = prgname; - - ret = optind-1; - optind = 0; /* reset getopt lib */ - return ret; -} - -/* - * this loop is getting packets from RX rings of each port, and puts them - * into TX rings of destination ports. 
- */ -static void -fwd_loop(void) -{ - - struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; - struct rte_mbuf **m_table; - struct rte_mbuf *m; - struct rte_ring *rx, *tx; - unsigned lcore_id, len; - uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc; - unsigned i, j, portid, nb_rx; - struct lcore_queue_conf *qconf; - struct ether_hdr *eth; - void *tmp; - - prev_tsc = 0; - timer_tsc = 0; - - lcore_id = rte_lcore_id(); - qconf = &lcore_queue_conf[lcore_id]; - - if (qconf->n_rx_port == 0) { - RTE_LOG(INFO, L2FWD_IVSHMEM, "lcore %u has nothing to do\n", lcore_id); - return; - } - - RTE_LOG(INFO, L2FWD_IVSHMEM, "entering main loop on lcore %u\n", lcore_id); - - for (i = 0; i < qconf->n_rx_port; i++) { - portid = qconf->rx_port_list[i]; - RTE_LOG(INFO, L2FWD_IVSHMEM, " -- lcoreid=%u portid=%u\n", lcore_id, - portid); - } - - while (ctrl->state == STATE_FWD) { - cur_tsc = rte_rdtsc(); - - diff_tsc = cur_tsc - prev_tsc; - - /* - * Read packet from RX queues and send it to TX queues - */ - for (i = 0; i < qconf->n_rx_port; i++) { - - portid = qconf->rx_port_list[i]; - - len = qconf->rx_mbufs[portid].len; - - rx = ctrl->vm_ports[portid].rx_ring; - tx = ctrl->vm_ports[portid].dst->tx_ring; - - m_table = qconf->rx_mbufs[portid].m_table; - - /* if we have something in the queue, try and transmit it down */ - if (len != 0) { - - /* if we succeed in sending the packets down, mark queue as free */ - if (rte_ring_enqueue_bulk(tx, (void**) m_table, len) == 0) { - ctrl->vm_ports[portid].stats.tx += len; - qconf->rx_mbufs[portid].len = 0; - len = 0; - } - } - - nb_rx = rte_ring_count(rx); - - nb_rx = RTE_MIN(nb_rx, (unsigned) MAX_PKT_BURST); - - if (nb_rx == 0) - continue; - - /* if we can get packets into the m_table */ - if (nb_rx < (RTE_DIM(qconf->rx_mbufs[portid].m_table) - len)) { - - /* this situation cannot exist, so if we fail to dequeue, that - * means something went horribly wrong, hence the failure. 
*/ - if (rte_ring_dequeue_bulk(rx, (void**) pkts_burst, nb_rx) < 0) { - ctrl->state = STATE_FAIL; - return; - } - - ctrl->vm_ports[portid].stats.rx += nb_rx; - - /* put packets into the queue */ - for (j = 0; j < nb_rx; j++) { - m = pkts_burst[j]; - - rte_prefetch0(rte_pktmbuf_mtod(m, void *)); - - m_table[len + j] = m; - - eth = rte_pktmbuf_mtod(m, struct ether_hdr *); - - /* 02:00:00:00:00:xx */ - tmp = &eth->d_addr.addr_bytes[0]; - *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)portid << 40); - - /* src addr */ - ether_addr_copy(&ctrl->vm_ports[portid].dst->ethaddr, - &eth->s_addr); - } - qconf->rx_mbufs[portid].len += nb_rx; - - } - - } - - /* if timer is enabled */ - if (timer_period > 0) { - - /* advance the timer */ - timer_tsc += diff_tsc; - - /* if timer has reached its timeout */ - if (unlikely(timer_tsc >= (uint64_t) timer_period)) { - - /* do this only on master core */ - if (lcore_id == rte_get_master_lcore()) { - print_stats(); - /* reset the timer */ - timer_tsc = 0; - } - } - } - - prev_tsc = cur_tsc; - } -} - -static int -l2fwd_ivshmem_launch_one_lcore(__attribute__((unused)) void *dummy) -{ - fwd_loop(); - return 0; -} - -int -main(int argc, char **argv) -{ - struct lcore_queue_conf *qconf; - const struct rte_memzone * mz; - int ret; - uint8_t portid; - unsigned rx_lcore_id, lcore_id; - - /* init EAL */ - ret = rte_eal_init(argc, argv); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); - argc -= ret; - argv += ret; - - /* parse application arguments (after the EAL ones) */ - ret = l2fwd_ivshmem_parse_args(argc, argv); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid l2fwd-ivshmem arguments\n"); - - /* find control structure */ - mz = rte_memzone_lookup(CTRL_MZ_NAME); - if (mz == NULL) - rte_exit(EXIT_FAILURE, "Cannot find control memzone\n"); - - ctrl = (struct ivshmem_ctrl*) mz->addr; - - /* lock the ctrl so that we don't have conflicts with anything else */ - rte_spinlock_lock(&ctrl->lock); - - if (ctrl->state == STATE_FWD) - rte_exit(EXIT_FAILURE, "Forwarding already started!\n"); - - rx_lcore_id = 0; - qconf = NULL; - - /* Initialize the port/queue configuration of each logical core */ - for (portid = 0; portid < ctrl->nb_ports; portid++) { - - /* get the lcore_id for this port */ - while (rte_lcore_is_enabled(rx_lcore_id) == 0 || - lcore_queue_conf[rx_lcore_id].n_rx_port == - l2fwd_ivshmem_rx_queue_per_lcore) { - rx_lcore_id++; - if (rx_lcore_id >= RTE_MAX_LCORE) - rte_exit(EXIT_FAILURE, "Not enough cores\n"); - } - - if (qconf != &lcore_queue_conf[rx_lcore_id]) - /* Assigned a new logical core in the loop above. */ - qconf = &lcore_queue_conf[rx_lcore_id]; - - qconf->rx_port_list[qconf->n_rx_port] = portid; - qconf->port_param[qconf->n_rx_port] = &ctrl->vm_ports[portid]; - qconf->n_rx_port++; - - printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid); - } - - sigsetup(); - - /* indicate that we are ready to forward */ - ctrl->state = STATE_FWD; - - /* unlock */ - rte_spinlock_unlock(&ctrl->lock); - - /* launch per-lcore init on every lcore */ - rte_eal_mp_remote_launch(l2fwd_ivshmem_launch_one_lcore, NULL, CALL_MASTER); - RTE_LCORE_FOREACH_SLAVE(lcore_id) { - if (rte_eal_wait_lcore(lcore_id) < 0) - return -1; - } - - return 0; -} diff --git a/examples/l2fwd-ivshmem/host/Makefile b/examples/l2fwd-ivshmem/host/Makefile deleted file mode 100644 index f91419e9..00000000 --- a/examples/l2fwd-ivshmem/host/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 
-# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -ifeq ($(RTE_SDK),) -$(error "Please define RTE_SDK environment variable") -endif - -# Default target, can be overriden by command line or environment -RTE_TARGET ?= x86_64-ivshmem-linuxapp-gcc - -include $(RTE_SDK)/mk/rte.vars.mk - -# binary name -APP = host - -# all source are stored in SRCS-y -SRCS-y := host.c - -CFLAGS += -O3 -CFLAGS += $(WERROR_FLAGS) - -include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/l2fwd-ivshmem/host/host.c b/examples/l2fwd-ivshmem/host/host.c deleted file mode 100644 index da7b00d9..00000000 --- a/examples/l2fwd-ivshmem/host/host.c +++ /dev/null @@ -1,895 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <unistd.h> -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <limits.h> -#include <inttypes.h> -#include <getopt.h> -#include <signal.h> - -#include <rte_eal.h> -#include <rte_cycles.h> -#include <rte_eal_memconfig.h> -#include <rte_debug.h> -#include <rte_ether.h> -#include <rte_ethdev.h> -#include <rte_string_fns.h> -#include <rte_ivshmem.h> -#include <rte_ring.h> -#include <rte_mempool.h> -#include <rte_mbuf.h> - -#include "../include/common.h" - -/* - * Configurable number of RX/TX ring descriptors - */ -#define RTE_TEST_RX_DESC_DEFAULT 128 -#define RTE_TEST_TX_DESC_DEFAULT 512 -static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; -static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; - -#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ - -/* mask of enabled ports */ -static uint32_t l2fwd_ivshmem_enabled_port_mask = 0; - -static struct ether_addr l2fwd_ivshmem_ports_eth_addr[RTE_MAX_ETHPORTS]; - -#define NB_MBUF 8192 - -#define MAX_RX_QUEUE_PER_LCORE 16 -#define MAX_TX_QUEUE_PER_PORT 16 -struct lcore_queue_conf { - unsigned n_rx_port; - unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; - struct vm_port_param * port_param[MAX_RX_QUEUE_PER_LCORE]; - struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; - struct mbuf_table rx_mbufs[RTE_MAX_ETHPORTS]; -} __rte_cache_aligned; -static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; - -static const struct rte_eth_conf port_conf = { - .rxmode = { - .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 0, /**< CRC stripped by hardware */ - }, - .txmode = { - .mq_mode = ETH_MQ_TX_NONE, - }, -}; - -#define METADATA_NAME "l2fwd_ivshmem" -#define CMDLINE_OPT_FWD_CONF "fwd-conf" - -#define QEMU_CMD_FMT "/tmp/ivshmem_qemu_cmdline_%s" - -struct port_statistics port_statistics[RTE_MAX_ETHPORTS]; - -struct rte_mempool * l2fwd_ivshmem_pktmbuf_pool = NULL; - -/* Print out statistics on packets dropped */ -static void -print_stats(void) -{ - uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; - uint64_t total_vm_packets_dropped = 0; - uint64_t total_vm_packets_tx, total_vm_packets_rx; - unsigned portid; - - total_packets_dropped = 0; - total_packets_tx = 0; - total_packets_rx = 0; - total_vm_packets_tx = 0; - total_vm_packets_rx = 0; - - const char clr[] = { 27, '[', '2', 'J', '\0' }; - const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' }; - - /* Clear screen and move to top left */ - printf("%s%s", clr, topLeft); - - printf("\nPort statistics ===================================="); - - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - /* skip disabled ports */ - if ((l2fwd_ivshmem_enabled_port_mask & (1 << portid)) == 0) - continue; - printf("\nStatistics for port %u ------------------------------" - "\nPackets sent: %24"PRIu64 - "\nPackets 
received: %20"PRIu64 - "\nPackets dropped: %21"PRIu64, - portid, - port_statistics[portid].tx, - port_statistics[portid].rx, - port_statistics[portid].dropped); - - total_packets_dropped += port_statistics[portid].dropped; - total_packets_tx += port_statistics[portid].tx; - total_packets_rx += port_statistics[portid].rx; - } - - printf("\nVM statistics ======================================"); - for (portid = 0; portid < ctrl->nb_ports; portid++) { - printf("\nStatistics for port %u ------------------------------" - "\nPackets sent: %24"PRIu64 - "\nPackets received: %20"PRIu64, - portid, - ctrl->vm_ports[portid].stats.tx, - ctrl->vm_ports[portid].stats.rx); - - total_vm_packets_dropped += ctrl->vm_ports[portid].stats.dropped; - total_vm_packets_tx += ctrl->vm_ports[portid].stats.tx; - total_vm_packets_rx += ctrl->vm_ports[portid].stats.rx; - } - printf("\nAggregate statistics ===============================" - "\nTotal packets sent: %18"PRIu64 - "\nTotal packets received: %14"PRIu64 - "\nTotal packets dropped: %15"PRIu64 - "\nTotal VM packets sent: %15"PRIu64 - "\nTotal VM packets received: %11"PRIu64, - total_packets_tx, - total_packets_rx, - total_packets_dropped, - total_vm_packets_tx, - total_vm_packets_rx); - printf("\n====================================================\n"); -} - -static int -print_to_file(const char *cmdline, const char *config_name) -{ - FILE *file; - char path[PATH_MAX]; - - snprintf(path, sizeof(path), QEMU_CMD_FMT, config_name); - file = fopen(path, "w"); - if (file == NULL) { - RTE_LOG(ERR, L2FWD_IVSHMEM, "Could not open '%s' \n", path); - return -1; - } - - RTE_LOG(DEBUG, L2FWD_IVSHMEM, "QEMU command line for config '%s': %s \n", - config_name, cmdline); - - fprintf(file, "%s\n", cmdline); - fclose(file); - return 0; -} - -static int -generate_ivshmem_cmdline(const char *config_name) -{ - char cmdline[PATH_MAX]; - if (rte_ivshmem_metadata_cmdline_generate(cmdline, sizeof(cmdline), - config_name) < 0) - return -1; - - if (print_to_file(cmdline, config_name) < 0) - return -1; - - rte_ivshmem_metadata_dump(stdout, config_name); - return 0; -} - -/* display usage */ -static void -l2fwd_ivshmem_usage(const char *prgname) -{ - printf("%s [EAL options] -- -p PORTMASK [-q NQ -T PERIOD]\n" - " -p PORTMASK: hexadecimal bitmask of ports to configure\n" - " -q NQ: number of queue (=ports) per lcore (default is 1)\n" - " -T PERIOD: statistics will be refreshed each PERIOD seconds " - "(0 to disable, 10 default, 86400 maximum)\n", - prgname); -} - -static unsigned int -l2fwd_ivshmem_parse_nqueue(const char *q_arg) -{ - char *end = NULL; - unsigned long n; - - /* parse hexadecimal string */ - n = strtoul(q_arg, &end, 10); - if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) - return 0; - if (n == 0) - return 0; - if (n >= MAX_RX_QUEUE_PER_LCORE) - return 0; - - return n; -} - -static int -l2fwd_ivshmem_parse_portmask(const char *portmask) -{ - char *end = NULL; - unsigned long pm; - - /* parse hexadecimal string */ - pm = strtoul(portmask, &end, 16); - if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) - return -1; - - if (pm == 0) - return -1; - - return pm; -} - -static int -l2fwd_ivshmem_parse_timer_period(const char *q_arg) -{ - char *end = NULL; - int n; - - /* parse number string */ - n = strtol(q_arg, &end, 10); - if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) - return -1; - if (n >= MAX_TIMER_PERIOD) - return -1; - - return n; -} - -/* Parse the argument given in the command line of the application */ -static int 
-l2fwd_ivshmem_parse_args(int argc, char **argv) -{ - int opt, ret; - char **argvopt; - int option_index; - char *prgname = argv[0]; - static struct option lgopts[] = { - {CMDLINE_OPT_FWD_CONF, 1, 0, 0}, - {NULL, 0, 0, 0} - }; - - argvopt = argv; - - while ((opt = getopt_long(argc, argvopt, "q:p:T:", - lgopts, &option_index)) != EOF) { - - switch (opt) { - /* portmask */ - case 'p': - l2fwd_ivshmem_enabled_port_mask = l2fwd_ivshmem_parse_portmask(optarg); - if (l2fwd_ivshmem_enabled_port_mask == 0) { - printf("invalid portmask\n"); - l2fwd_ivshmem_usage(prgname); - return -1; - } - break; - - /* nqueue */ - case 'q': - l2fwd_ivshmem_rx_queue_per_lcore = l2fwd_ivshmem_parse_nqueue(optarg); - if (l2fwd_ivshmem_rx_queue_per_lcore == 0) { - printf("invalid queue number\n"); - l2fwd_ivshmem_usage(prgname); - return -1; - } - break; - - /* timer period */ - case 'T': - timer_period = l2fwd_ivshmem_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND; - if (timer_period < 0) { - printf("invalid timer period\n"); - l2fwd_ivshmem_usage(prgname); - return -1; - } - break; - - /* long options */ - case 0: - l2fwd_ivshmem_usage(prgname); - return -1; - - default: - l2fwd_ivshmem_usage(prgname); - return -1; - } - } - - if (optind >= 0) - argv[optind-1] = prgname; - - ret = optind-1; - optind = 0; /* reset getopt lib */ - return ret; -} - -/* Check the link status of all ports in up to 9s, and print them finally */ -static void -check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) -{ -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ - uint8_t portid, count, all_ports_up, print_flag = 0; - struct rte_eth_link link; - - printf("\nChecking link status"); - fflush(stdout); - for (count = 0; count <= MAX_CHECK_TIME; count++) { - all_ports_up = 1; - for (portid = 0; portid < port_num; portid++) { - if ((port_mask & (1 << portid)) == 0) - continue; - memset(&link, 0, sizeof(link)); - rte_eth_link_get_nowait(portid, &link); - /* print link status if flag set */ - if (print_flag == 1) { - if (link.link_status) - printf("Port %d Link Up - speed %u " - "Mbps - %s\n", (uint8_t)portid, - (unsigned)link.link_speed, - (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
- ("full-duplex") : ("half-duplex\n")); - else - printf("Port %d Link Down\n", - (uint8_t)portid); - continue; - } - /* clear all_ports_up flag if any link down */ - if (link.link_status == ETH_LINK_DOWN) { - all_ports_up = 0; - break; - } - } - /* after finally printing all link status, get out */ - if (print_flag == 1) - break; - - if (all_ports_up == 0) { - printf("."); - fflush(stdout); - rte_delay_ms(CHECK_INTERVAL); - } - - /* set the print_flag if all ports up or timeout */ - if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { - print_flag = 1; - printf("done\n"); - } - } -} - -/* Send the burst of packets on an output interface */ -static int -l2fwd_ivshmem_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) -{ - struct rte_mbuf **m_table; - unsigned ret; - unsigned queueid =0; - - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); - port_statistics[port].tx += ret; - if (unlikely(ret < n)) { - port_statistics[port].dropped += (n - ret); - do { - rte_pktmbuf_free(m_table[ret]); - } while (++ret < n); - } - - return 0; -} - -/* Enqueue packets for TX and prepare them to be sent on the network */ -static int -l2fwd_ivshmem_send_packet(struct rte_mbuf *m, uint8_t port) -{ - unsigned lcore_id, len; - struct lcore_queue_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_queue_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - l2fwd_ivshmem_send_burst(qconf, MAX_PKT_BURST, port); - len = 0; - } - - qconf->tx_mbufs[port].len = len; - return 0; -} - -static int -l2fwd_ivshmem_receive_burst(struct lcore_queue_conf *qconf, unsigned portid, - unsigned vm_port) -{ - struct rte_mbuf ** m; - struct rte_ring * rx; - unsigned len, pkt_idx; - - m = qconf->rx_mbufs[portid].m_table; - len = qconf->rx_mbufs[portid].len; - rx = qconf->port_param[vm_port]->rx_ring; - - /* if enqueueing failed, ring is probably full, so drop the packets */ - if (rte_ring_enqueue_bulk(rx, (void**) m, len) < 0) { - port_statistics[portid].dropped += len; - - pkt_idx = 0; - do { - rte_pktmbuf_free(m[pkt_idx]); - } while (++pkt_idx < len); - } - else - /* increment rx stats by however many packets we managed to receive */ - port_statistics[portid].rx += len; - - return 0; -} - -/* Enqueue packets for RX and prepare them to be sent to VM */ -static int -l2fwd_ivshmem_receive_packets(struct rte_mbuf ** m, unsigned n, unsigned portid, - unsigned vm_port) -{ - unsigned lcore_id, len, pkt_idx; - struct lcore_queue_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_queue_conf[lcore_id]; - - len = qconf->rx_mbufs[portid].len; - pkt_idx = 0; - - /* enqueue packets */ - while (pkt_idx < n && len < MAX_PKT_BURST * 2) { - qconf->rx_mbufs[portid].m_table[len++] = m[pkt_idx++]; - } - - /* increment queue len by however many packets we managed to receive */ - qconf->rx_mbufs[portid].len += pkt_idx; - - /* drop the unreceived packets */ - if (unlikely(pkt_idx < n)) { - port_statistics[portid].dropped += n - pkt_idx; - do { - rte_pktmbuf_free(m[pkt_idx]); - } while (++pkt_idx < n); - } - - /* drain the queue halfway through the maximum capacity */ - if (unlikely(qconf->rx_mbufs[portid].len >= MAX_PKT_BURST)) - l2fwd_ivshmem_receive_burst(qconf, portid, vm_port); - - return 0; -} - -/* loop for host forwarding mode. 
- * the data flow is as follows: - * 1) get packets from TX queue and send it out from a given port - * 2) RX packets from given port and enqueue them on RX ring - * 3) dequeue packets from TX ring and put them on TX queue for a given port - */ -static void -fwd_loop(void) -{ - struct rte_mbuf *pkts_burst[MAX_PKT_BURST * 2]; - struct rte_mbuf *m; - unsigned lcore_id; - uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc; - unsigned i, j, portid, nb_rx; - struct lcore_queue_conf *qconf; - struct rte_ring *tx; - const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; - - prev_tsc = 0; - timer_tsc = 0; - - lcore_id = rte_lcore_id(); - qconf = &lcore_queue_conf[lcore_id]; - - if (qconf->n_rx_port == 0) { - RTE_LOG(INFO, L2FWD_IVSHMEM, "lcore %u has nothing to do\n", lcore_id); - return; - } - - RTE_LOG(INFO, L2FWD_IVSHMEM, "entering main loop on lcore %u\n", lcore_id); - - for (i = 0; i < qconf->n_rx_port; i++) { - - portid = qconf->rx_port_list[i]; - RTE_LOG(INFO, L2FWD_IVSHMEM, " -- lcoreid=%u portid=%u\n", lcore_id, - portid); - } - - while (ctrl->state == STATE_FWD) { - - cur_tsc = rte_rdtsc(); - - /* - * Burst queue drain - */ - diff_tsc = cur_tsc - prev_tsc; - if (unlikely(diff_tsc > drain_tsc)) { - - /* - * TX - */ - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - if (qconf->tx_mbufs[portid].len == 0) - continue; - l2fwd_ivshmem_send_burst(qconf, - qconf->tx_mbufs[portid].len, - (uint8_t) portid); - qconf->tx_mbufs[portid].len = 0; - } - - /* - * RX - */ - for (i = 0; i < qconf->n_rx_port; i++) { - portid = qconf->rx_port_list[i]; - if (qconf->rx_mbufs[portid].len == 0) - continue; - l2fwd_ivshmem_receive_burst(qconf, portid, i); - qconf->rx_mbufs[portid].len = 0; - } - - /* if timer is enabled */ - if (timer_period > 0) { - - /* advance the timer */ - timer_tsc += diff_tsc; - - /* if timer has reached its timeout */ - if (unlikely(timer_tsc >= (uint64_t) timer_period)) { - - /* do this only on master core */ - if (lcore_id == rte_get_master_lcore()) { - print_stats(); - /* reset the timer */ - timer_tsc = 0; - } - } - } - - prev_tsc = cur_tsc; - } - - /* - * packet RX and forwarding - */ - for (i = 0; i < qconf->n_rx_port; i++) { - - /* RX packets from port and put them on RX ring */ - portid = qconf->rx_port_list[i]; - nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, - pkts_burst, MAX_PKT_BURST); - - if (nb_rx != 0) - l2fwd_ivshmem_receive_packets(pkts_burst, nb_rx, portid, i); - - /* dequeue packets from TX ring and send them to TX queue */ - tx = qconf->port_param[i]->tx_ring; - - nb_rx = rte_ring_count(tx); - - nb_rx = RTE_MIN(nb_rx, (unsigned) MAX_PKT_BURST); - - if (nb_rx == 0) - continue; - - /* should not happen */ - if (unlikely(rte_ring_dequeue_bulk(tx, (void**) pkts_burst, nb_rx) < 0)) { - ctrl->state = STATE_FAIL; - return; - } - - for (j = 0; j < nb_rx; j++) { - m = pkts_burst[j]; - l2fwd_ivshmem_send_packet(m, portid); - } - } - } -} - -static int -l2fwd_ivshmem_launch_one_lcore(__attribute__((unused)) void *dummy) -{ - fwd_loop(); - return 0; -} - -int main(int argc, char **argv) -{ - char name[RTE_RING_NAMESIZE]; - struct rte_ring *r; - struct lcore_queue_conf *qconf; - struct rte_eth_dev_info dev_info; - uint8_t portid, port_nr; - uint8_t nb_ports, nb_ports_available; - uint8_t nb_ports_in_mask; - int ret; - unsigned lcore_id, rx_lcore_id; - - /* init EAL */ - ret = rte_eal_init(argc, argv); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); - argc -= ret; - argv += ret; - - /* parse application arguments (after 
the EAL ones) */ - ret = l2fwd_ivshmem_parse_args(argc, argv); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid l2fwd-ivshmem arguments\n"); - - /* create a shared mbuf pool */ - l2fwd_ivshmem_pktmbuf_pool = - rte_pktmbuf_pool_create(MBUF_MP_NAME, NB_MBUF, 32, - 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); - if (l2fwd_ivshmem_pktmbuf_pool == NULL) - rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); - - nb_ports = rte_eth_dev_count(); - if (nb_ports == 0) - rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); - - /* - * reserve memzone to communicate with VMs - we cannot use rte_malloc here - * because while it is technically possible, it is a very bad idea to share - * the heap between two primary processes. - */ - ctrl_mz = rte_memzone_reserve(CTRL_MZ_NAME, sizeof(struct ivshmem_ctrl), - SOCKET_ID_ANY, 0); - if (ctrl_mz == NULL) - rte_exit(EXIT_FAILURE, "Cannot reserve control memzone\n"); - ctrl = (struct ivshmem_ctrl*) ctrl_mz->addr; - - memset(ctrl, 0, sizeof(struct ivshmem_ctrl)); - - /* - * Each port is assigned an output port. - */ - nb_ports_in_mask = 0; - for (portid = 0; portid < nb_ports; portid++) { - /* skip ports that are not enabled */ - if ((l2fwd_ivshmem_enabled_port_mask & (1 << portid)) == 0) - continue; - if (portid % 2) { - ctrl->vm_ports[nb_ports_in_mask].dst = &ctrl->vm_ports[nb_ports_in_mask-1]; - ctrl->vm_ports[nb_ports_in_mask-1].dst = &ctrl->vm_ports[nb_ports_in_mask]; - } - - nb_ports_in_mask++; - - rte_eth_dev_info_get(portid, &dev_info); - } - if (nb_ports_in_mask % 2) { - printf("Notice: odd number of ports in portmask.\n"); - ctrl->vm_ports[nb_ports_in_mask-1].dst = - &ctrl->vm_ports[nb_ports_in_mask-1]; - } - - rx_lcore_id = 0; - qconf = NULL; - - printf("Initializing ports configuration...\n"); - - nb_ports_available = nb_ports; - - /* Initialise each port */ - for (portid = 0; portid < nb_ports; portid++) { - - /* skip ports that are not enabled */ - if ((l2fwd_ivshmem_enabled_port_mask & (1 << portid)) == 0) { - printf("Skipping disabled port %u\n", (unsigned) portid); - nb_ports_available--; - continue; - } - - /* init port */ - printf("Initializing port %u... 
", (unsigned) portid); - fflush(stdout); - ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", - ret, (unsigned) portid); - - rte_eth_macaddr_get(portid,&l2fwd_ivshmem_ports_eth_addr[portid]); - - /* init one RX queue */ - fflush(stdout); - ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, - rte_eth_dev_socket_id(portid), - NULL, - l2fwd_ivshmem_pktmbuf_pool); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n", - ret, (unsigned) portid); - - /* init one TX queue on each port */ - fflush(stdout); - ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, - rte_eth_dev_socket_id(portid), - NULL); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", - ret, (unsigned) portid); - - /* Start device */ - ret = rte_eth_dev_start(portid); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n", - ret, (unsigned) portid); - - printf("done: \n"); - - rte_eth_promiscuous_enable(portid); - - printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n", - (unsigned) portid, - l2fwd_ivshmem_ports_eth_addr[portid].addr_bytes[0], - l2fwd_ivshmem_ports_eth_addr[portid].addr_bytes[1], - l2fwd_ivshmem_ports_eth_addr[portid].addr_bytes[2], - l2fwd_ivshmem_ports_eth_addr[portid].addr_bytes[3], - l2fwd_ivshmem_ports_eth_addr[portid].addr_bytes[4], - l2fwd_ivshmem_ports_eth_addr[portid].addr_bytes[5]); - - /* initialize port stats */ - memset(&port_statistics, 0, sizeof(port_statistics)); - } - - if (!nb_ports_available) { - rte_exit(EXIT_FAILURE, - "All available ports are disabled. Please set portmask.\n"); - } - port_nr = 0; - - /* Initialize the port/queue configuration of each logical core */ - for (portid = 0; portid < nb_ports; portid++) { - if ((l2fwd_ivshmem_enabled_port_mask & (1 << portid)) == 0) - continue; - - /* get the lcore_id for this port */ - while (rte_lcore_is_enabled(rx_lcore_id) == 0 || - lcore_queue_conf[rx_lcore_id].n_rx_port == - l2fwd_ivshmem_rx_queue_per_lcore) { - rx_lcore_id++; - if (rx_lcore_id >= RTE_MAX_LCORE) - rte_exit(EXIT_FAILURE, "Not enough cores\n"); - } - - if (qconf != &lcore_queue_conf[rx_lcore_id]) - /* Assigned a new logical core in the loop above. */ - qconf = &lcore_queue_conf[rx_lcore_id]; - - - rte_eth_macaddr_get(portid, &ctrl->vm_ports[port_nr].ethaddr); - - qconf->rx_port_list[qconf->n_rx_port] = portid; - qconf->port_param[qconf->n_rx_port] = &ctrl->vm_ports[port_nr]; - qconf->n_rx_port++; - port_nr++; - printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid); - } - - check_all_ports_link_status(nb_ports_available, l2fwd_ivshmem_enabled_port_mask); - - /* create rings for each VM port (several ports can be on the same VM). - * note that we store the pointers in ctrl - that way, they are the same - * and valid across all VMs because ctrl is also in DPDK memory */ - for (portid = 0; portid < nb_ports_available; portid++) { - - /* RX ring. SP/SC because it's only used by host and a single VM */ - snprintf(name, sizeof(name), "%s%i", RX_RING_PREFIX, portid); - r = rte_ring_create(name, NB_MBUF, - SOCKET_ID_ANY, RING_F_SP_ENQ | RING_F_SC_DEQ); - if (r == NULL) - rte_exit(EXIT_FAILURE, "Cannot create ring %s\n", name); - - ctrl->vm_ports[portid].rx_ring = r; - - /* TX ring. 
SP/SC because it's only used by host and a single VM */ - snprintf(name, sizeof(name), "%s%i", TX_RING_PREFIX, portid); - r = rte_ring_create(name, NB_MBUF, - SOCKET_ID_ANY, RING_F_SP_ENQ | RING_F_SC_DEQ); - if (r == NULL) - rte_exit(EXIT_FAILURE, "Cannot create ring %s\n", name); - - ctrl->vm_ports[portid].tx_ring = r; - } - - /* create metadata, output cmdline */ - if (rte_ivshmem_metadata_create(METADATA_NAME) < 0) - rte_exit(EXIT_FAILURE, "Cannot create IVSHMEM metadata\n"); - - if (rte_ivshmem_metadata_add_memzone(ctrl_mz, METADATA_NAME)) - rte_exit(EXIT_FAILURE, "Cannot add memzone to IVSHMEM metadata\n"); - - if (rte_ivshmem_metadata_add_mempool(l2fwd_ivshmem_pktmbuf_pool, METADATA_NAME)) - rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to IVSHMEM metadata\n"); - - for (portid = 0; portid < nb_ports_available; portid++) { - if (rte_ivshmem_metadata_add_ring(ctrl->vm_ports[portid].rx_ring, - METADATA_NAME) < 0) - rte_exit(EXIT_FAILURE, "Cannot add ring %s to IVSHMEM metadata\n", - ctrl->vm_ports[portid].rx_ring->name); - if (rte_ivshmem_metadata_add_ring(ctrl->vm_ports[portid].tx_ring, - METADATA_NAME) < 0) - rte_exit(EXIT_FAILURE, "Cannot add ring %s to IVSHMEM metadata\n", - ctrl->vm_ports[portid].tx_ring->name); - } - generate_ivshmem_cmdline(METADATA_NAME); - - ctrl->nb_ports = nb_ports_available; - - printf("Waiting for VM to initialize...\n"); - - /* wait for VM to initialize */ - while (ctrl->state != STATE_FWD) { - if (ctrl->state == STATE_FAIL) - rte_exit(EXIT_FAILURE, "VM reported failure\n"); - - sleep(1); - } - - printf("Done!\n"); - - sigsetup(); - - /* launch per-lcore init on every lcore */ - rte_eal_mp_remote_launch(l2fwd_ivshmem_launch_one_lcore, NULL, CALL_MASTER); - RTE_LCORE_FOREACH_SLAVE(lcore_id) { - if (rte_eal_wait_lcore(lcore_id) < 0) - return -1; - } - - if (ctrl->state == STATE_FAIL) - rte_exit(EXIT_FAILURE, "VM reported failure\n"); - - return 0; -} diff --git a/examples/l2fwd-ivshmem/include/common.h b/examples/l2fwd-ivshmem/include/common.h deleted file mode 100644 index 8564d32b..00000000 --- a/examples/l2fwd-ivshmem/include/common.h +++ /dev/null @@ -1,111 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _IVSHMEM_COMMON_H_ -#define _IVSHMEM_COMMON_H_ - -#define RTE_LOGTYPE_L2FWD_IVSHMEM RTE_LOGTYPE_USER1 - -#define CTRL_MZ_NAME "CTRL_MEMZONE" -#define MBUF_MP_NAME "MBUF_MEMPOOL" -#define RX_RING_PREFIX "RX_" -#define TX_RING_PREFIX "TX_" - -/* A tsc-based timer responsible for triggering statistics printout */ -#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */ -#define MAX_TIMER_PERIOD 86400 /* 1 day max */ -static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */ - -#define DIM(x)\ - (sizeof(x)/sizeof(x)[0]) - -#define MAX_PKT_BURST 32 - -const struct rte_memzone * ctrl_mz; - -enum l2fwd_state { - STATE_NONE = 0, - STATE_FWD, - STATE_EXIT, - STATE_FAIL -}; - -/* Per-port statistics struct */ -struct port_statistics { - uint64_t tx; - uint64_t rx; - uint64_t dropped; -} __rte_cache_aligned; - -struct mbuf_table { - unsigned len; - struct rte_mbuf *m_table[MAX_PKT_BURST * 2]; /**< allow up to two bursts */ -}; - -struct vm_port_param { - struct rte_ring * rx_ring; /**< receiving ring for current port */ - struct rte_ring * tx_ring; /**< transmitting ring for current port */ - struct vm_port_param * dst; /**< current port's destination port */ - volatile struct port_statistics stats; /**< statistics for current port */ - struct ether_addr ethaddr; /**< Ethernet address of the port */ -}; - -/* control structure, to synchronize host and VM */ -struct ivshmem_ctrl { - rte_spinlock_t lock; - uint8_t nb_ports; /**< total nr of ports */ - volatile enum l2fwd_state state; /**< report state */ - struct vm_port_param vm_ports[RTE_MAX_ETHPORTS]; -}; - -struct ivshmem_ctrl * ctrl; - -static unsigned int l2fwd_ivshmem_rx_queue_per_lcore = 1; - -static void sighandler(int __rte_unused s) -{ - ctrl->state = STATE_EXIT; -} - -static void sigsetup(void) -{ - struct sigaction sigIntHandler; - - sigIntHandler.sa_handler = sighandler; - sigemptyset(&sigIntHandler.sa_mask); - sigIntHandler.sa_flags = 0; - - sigaction(SIGINT, &sigIntHandler, NULL); -} - -#endif /* _IVSHMEM_COMMON_H_ */ diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c index 614ea604..dd9201b2 100644 --- a/examples/l2fwd-jobstats/main.c +++ b/examples/l2fwd-jobstats/main.c @@ -59,7 +59,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_spinlock.h> diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c index 84a59eb6..60cccdb1 100644 --- a/examples/l2fwd-keepalive/main.c +++ b/examples/l2fwd-keepalive/main.c @@ -66,7 +66,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_timer.h> diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c index 88979216..b2f58519 100644 --- a/examples/l2fwd/main.c +++ b/examples/l2fwd/main.c @@ -68,12 +68,14 @@ #include <rte_debug.h> #include 
<rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> static volatile bool force_quit; +/* MAC updating enabled by default */ +static int mac_updating = 1; + #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1 #define NB_MBUF 8192 @@ -186,23 +188,32 @@ print_stats(void) } static void -l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) +l2fwd_mac_updating(struct rte_mbuf *m, unsigned dest_portid) { struct ether_hdr *eth; void *tmp; - unsigned dst_port; - int sent; - struct rte_eth_dev_tx_buffer *buffer; - dst_port = l2fwd_dst_ports[portid]; eth = rte_pktmbuf_mtod(m, struct ether_hdr *); /* 02:00:00:00:00:xx */ tmp = &eth->d_addr.addr_bytes[0]; - *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40); + *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40); /* src addr */ - ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr); + ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr); +} + +static void +l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) +{ + unsigned dst_port; + int sent; + struct rte_eth_dev_tx_buffer *buffer; + + dst_port = l2fwd_dst_ports[portid]; + + if (mac_updating) + l2fwd_mac_updating(m, dst_port); buffer = tx_buffer[dst_port]; sent = rte_eth_tx_buffer(dst_port, 0, buffer, m); @@ -322,7 +333,11 @@ l2fwd_usage(const char *prgname) printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" " -p PORTMASK: hexadecimal bitmask of ports to configure\n" " -q NQ: number of queue (=ports) per lcore (default is 1)\n" - " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n", + " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n" + " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n" + " When enabled:\n" + " - The source MAC address is replaced by the TX port MAC address\n" + " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n", prgname); } @@ -386,6 +401,8 @@ l2fwd_parse_args(int argc, char **argv) int option_index; char *prgname = argv[0]; static struct option lgopts[] = { + { "mac-updating", no_argument, &mac_updating, 1}, + { "no-mac-updating", no_argument, &mac_updating, 0}, {NULL, 0, 0, 0} }; @@ -428,8 +445,7 @@ l2fwd_parse_args(int argc, char **argv) /* long options */ case 0: - l2fwd_usage(prgname); - return -1; + break; default: l2fwd_usage(prgname); @@ -542,6 +558,8 @@ main(int argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n"); + printf("MAC updating %s\n", mac_updating ? 
"enabled" : "disabled"); + /* convert to number of cycles */ timer_period *= rte_get_timer_hz(); diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c index 16f6110e..3cfbb40e 100644 --- a/examples/l3fwd-acl/main.c +++ b/examples/l3fwd-acl/main.c @@ -63,7 +63,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_ip.h> diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c index f746960e..b65d683c 100644 --- a/examples/l3fwd-power/main.c +++ b/examples/l3fwd-power/main.c @@ -66,7 +66,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_ip.h> diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c index ca01b112..f56e8db9 100644 --- a/examples/l3fwd-vf/main.c +++ b/examples/l3fwd-vf/main.c @@ -65,7 +65,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_ip.h> diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h index d8798b7d..011ba148 100644 --- a/examples/l3fwd/l3fwd.h +++ b/examples/l3fwd/l3fwd.h @@ -68,7 +68,7 @@ #define MASK_ETH 0x3f /* Hash parameters. */ -#ifdef RTE_ARCH_X86_64 +#ifdef RTE_ARCH_64 /* default to 4 million hash entries (approx) */ #define L3FWD_HASH_ENTRIES (1024*1024*4) #else diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c index def5a024..9cc44603 100644 --- a/examples/l3fwd/l3fwd_em.c +++ b/examples/l3fwd/l3fwd_em.c @@ -47,7 +47,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_cycles.h> #include <rte_mbuf.h> @@ -58,13 +57,17 @@ #include "l3fwd.h" -#ifdef RTE_MACHINE_CPUFLAG_SSE4_2 +#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32) +#define EM_HASH_CRC 1 +#endif + +#ifdef EM_HASH_CRC #include <rte_hash_crc.h> #define DEFAULT_HASH_FUNC rte_hash_crc #else #include <rte_jhash.h> #define DEFAULT_HASH_FUNC rte_jhash -#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */ +#endif #define IPV6_ADDR_LEN 16 @@ -169,17 +172,17 @@ ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len, t = k->proto; p = (const uint32_t *)&k->port_src; -#ifdef RTE_MACHINE_CPUFLAG_SSE4_2 +#ifdef EM_HASH_CRC init_val = rte_hash_crc_4byte(t, init_val); init_val = rte_hash_crc_4byte(k->ip_src, init_val); init_val = rte_hash_crc_4byte(k->ip_dst, init_val); init_val = rte_hash_crc_4byte(*p, init_val); -#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */ +#else init_val = rte_jhash_1word(t, init_val); init_val = rte_jhash_1word(k->ip_src, init_val); init_val = rte_jhash_1word(k->ip_dst, init_val); init_val = rte_jhash_1word(*p, init_val); -#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */ +#endif return init_val; } @@ -191,16 +194,16 @@ ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, const union ipv6_5tuple_host *k; uint32_t t; const uint32_t *p; -#ifdef RTE_MACHINE_CPUFLAG_SSE4_2 +#ifdef EM_HASH_CRC const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3; const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3; -#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */ +#endif k = data; t = k->proto; p = (const uint32_t *)&k->port_src; -#ifdef RTE_MACHINE_CPUFLAG_SSE4_2 +#ifdef EM_HASH_CRC ip_src0 = (const uint32_t *) k->ip_src; ip_src1 = (const uint32_t *)(k->ip_src+4); ip_src2 = (const uint32_t *)(k->ip_src+8); @@ -219,14 +222,14 @@ 
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, init_val = rte_hash_crc_4byte(*ip_dst2, init_val); init_val = rte_hash_crc_4byte(*ip_dst3, init_val); init_val = rte_hash_crc_4byte(*p, init_val); -#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */ +#else init_val = rte_jhash_1word(t, init_val); init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val); init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val); init_val = rte_jhash_1word(*p, init_val); -#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */ +#endif return init_val; } @@ -259,8 +262,16 @@ em_mask_key(void *key, xmm_t mask) return vandq_s32(data, mask); } +#elif defined(RTE_MACHINE_CPUFLAG_ALTIVEC) +static inline xmm_t +em_mask_key(void *key, xmm_t mask) +{ + xmm_t data = vec_ld(0, (xmm_t *)(key)); + + return vec_and(data, mask); +} #else -#error No vector engine (SSE, NEON) available, check your toolchain +#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain #endif static inline uint8_t diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c index d941bdfd..f6212697 100644 --- a/examples/l3fwd/l3fwd_lpm.c +++ b/examples/l3fwd/l3fwd_lpm.c @@ -46,7 +46,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_cycles.h> #include <rte_mbuf.h> diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index acedd20e..7223e773 100644 --- a/examples/l3fwd/main.c +++ b/examples/l3fwd/main.c @@ -66,7 +66,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_ip.h> diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c index 04dc3e40..14a038b7 100644 --- a/examples/link_status_interrupt/main.c +++ b/examples/link_status_interrupt/main.c @@ -67,7 +67,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c index 3f6ddee5..157fd528 100644 --- a/examples/load_balancer/config.c +++ b/examples/load_balancer/config.c @@ -63,7 +63,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_ip.h> diff --git a/examples/load_balancer/main.c b/examples/load_balancer/main.c index 7ede3585..c97bf6fa 100644 --- a/examples/load_balancer/main.c +++ b/examples/load_balancer/main.c @@ -64,7 +64,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_ip.h> diff --git a/examples/multi_process/l2fwd_fork/flib.c b/examples/multi_process/l2fwd_fork/flib.c index 343f09f1..85bbc2d3 100644 --- a/examples/multi_process/l2fwd_fork/flib.c +++ b/examples/multi_process/l2fwd_fork/flib.c @@ -70,7 +70,6 @@ #include <rte_debug.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_ring.h> #include <rte_mempool.h> #include <rte_mbuf.h> #include <rte_string_fns.h> diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c index 6bbff076..d30ff4a4 100644 --- a/examples/multi_process/symmetric_mp/main.c +++ b/examples/multi_process/symmetric_mp/main.c @@ -64,7 +64,6 @@ #include <rte_debug.h> #include <rte_atomic.h> #include <rte_branch_prediction.h> -#include <rte_ring.h> #include 
<rte_debug.h> #include <rte_interrupts.h> #include <rte_pci.h> diff --git a/examples/packet_ordering/Makefile b/examples/packet_ordering/Makefile index 9e080a30..de066c4c 100644 --- a/examples/packet_ordering/Makefile +++ b/examples/packet_ordering/Makefile @@ -34,7 +34,7 @@ $(error "Please define RTE_SDK environment variable") endif # Default target, can be overridden by command line or environment -RTE_TARGET ?= x86_64-ivshmem-linuxapp-gcc +RTE_TARGET ?= x86_64-native-linuxapp-gcc include $(RTE_SDK)/mk/rte.vars.mk diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h index b858b55e..031d8afc 100644 --- a/examples/performance-thread/common/lthread_int.h +++ b/examples/performance-thread/common/lthread_int.h @@ -69,10 +69,10 @@ #include <pthread.h> #include <time.h> +#include <rte_memory.h> #include <rte_cycles.h> #include <rte_per_lcore.h> #include <rte_timer.h> -#include <rte_ring.h> #include <rte_atomic_64.h> #include <rte_spinlock.h> #include <ctx.h> diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c index 4961089b..29c501ca 100644 --- a/examples/quota_watermark/qwctl/qwctl.c +++ b/examples/quota_watermark/qwctl/qwctl.c @@ -41,7 +41,6 @@ #include <rte_log.h> #include <rte_memzone.h> -#include <rte_ring.h> #include <cmdline_rdline.h> #include <cmdline_parse.h> diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c index 622f248a..1d6d4635 100644 --- a/examples/tep_termination/main.c +++ b/examples/tep_termination/main.c @@ -1151,8 +1151,7 @@ print_stats(void) } /** - * Main function, does initialisation and calls the per-lcore functions. The CUSE - * device is also registered here to handle the IOCTLs. + * Main function, does initialisation and calls the per-lcore functions. */ int main(int argc, char *argv[]) @@ -1253,14 +1252,12 @@ main(int argc, char *argv[]) } rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF); - /* Register CUSE device to handle IOCTLs. */ ret = rte_vhost_driver_register((char *)&dev_basename, 0); if (ret != 0) - rte_exit(EXIT_FAILURE, "CUSE device setup failure.\n"); + rte_exit(EXIT_FAILURE, "failed to register vhost driver.\n"); rte_vhost_driver_callback_register(&virtio_net_device_ops); - /* Start CUSE session. */ rte_vhost_driver_session_start(); return 0; diff --git a/examples/tep_termination/vxlan.c b/examples/tep_termination/vxlan.c index 9142c8d9..9f0e8362 100644 --- a/examples/tep_termination/vxlan.c +++ b/examples/tep_termination/vxlan.c @@ -141,14 +141,17 @@ process_inner_cksums(struct ether_hdr *eth_hdr, union tunnel_offload_info *info) ethertype, ol_flags); } else if (l4_proto == IPPROTO_TCP) { tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len); - ol_flags |= PKT_TX_TCP_CKSUM; - tcp_hdr->cksum = get_psd_sum(l3_hdr, ethertype, - ol_flags); + /* Put PKT_TX_TCP_SEG bit setting before get_psd_sum(), because + * it depends on PKT_TX_TCP_SEG to calculate pseudo-header + * checksum. 
+ */ if (tso_segsz != 0) { ol_flags |= PKT_TX_TCP_SEG; info->tso_segsz = tso_segsz; info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; } + ol_flags |= PKT_TX_TCP_CKSUM; + tcp_hdr->cksum = get_psd_sum(l3_hdr, ethertype, ol_flags); } else if (l4_proto == IPPROTO_SCTP) { sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len); @@ -237,6 +240,8 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id) m->outer_l2_len = sizeof(struct ether_hdr); m->outer_l3_len = sizeof(struct ipv4_hdr); + ol_flags |= PKT_TX_TUNNEL_VXLAN; + m->ol_flags |= ol_flags; m->tso_segsz = tx_offload.tso_segsz; diff --git a/examples/vhost/main.c b/examples/vhost/main.c index 92a98233..07098594 100644 --- a/examples/vhost/main.c +++ b/examples/vhost/main.c @@ -90,9 +90,6 @@ /* Size of buffers used for snprintfs. */ #define MAX_PRINT_BUFF 6072 -/* Maximum character device basename size. */ -#define MAX_BASENAME_SZ 10 - /* Maximum long option length for option parsing. */ #define MAX_LONG_OPT_SZ 64 @@ -109,9 +106,6 @@ static uint32_t num_devices; static struct rte_mempool *mbuf_pool; static int mergeable; -/* Do vlan strip on host, enabled on default */ -static uint32_t vlan_strip = 1; - /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */ typedef enum { VM2VM_DISABLED = 0, @@ -133,14 +127,16 @@ static uint32_t enable_tx_csum; static uint32_t enable_tso; static int client_mode; +static int dequeue_zero_copy; /* Specify timeout (in useconds) between retries on RX. */ static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US; /* Specify the number of retries on RX. */ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES; -/* Character device basename. Can be set by user. */ -static char dev_basename[MAX_BASENAME_SZ] = "vhost-net"; +/* Socket file paths. Can be set by user */ +static char *socket_files; +static int nb_sockets; /* empty vmdq configuration structure. Filled in programatically */ static struct rte_eth_conf vmdq_conf_default = { @@ -299,6 +295,17 @@ port_init(uint8_t port) rx_ring_size = RTE_TEST_RX_DESC_DEFAULT; tx_ring_size = RTE_TEST_TX_DESC_DEFAULT; + + /* + * When dequeue zero copy is enabled, guest Tx used vring will be + * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc + * (tx_ring_size here) must be small enough so that the driver will + * hit the free threshold easily and free mbufs timely. Otherwise, + * guest Tx vring would be starved. + */ + if (dequeue_zero_copy) + tx_ring_size = 64; + tx_rings = (uint16_t)rte_lcore_count(); retval = validate_num_devices(MAX_DEVICES); @@ -392,17 +399,18 @@ port_init(uint8_t port) } /* - * Set character device basename. + * Set socket file path. 
*/ static int -us_vhost_parse_basename(const char *q_arg) +us_vhost_parse_socket_path(const char *q_arg) { /* parse number string */ - - if (strnlen(q_arg, MAX_BASENAME_SZ) > MAX_BASENAME_SZ) + if (strnlen(q_arg, PATH_MAX) > PATH_MAX) return -1; - else - snprintf((char*)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg); + + socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1)); + snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg); + nb_sockets++; return 0; } @@ -462,7 +470,7 @@ us_vhost_usage(const char *prgname) RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n" " --vm2vm [0|1|2]\n" " --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n" - " --dev-basename <name>\n" + " --socket-file <path>\n" " --nb-devices ND\n" " -p PORTMASK: Set mask for ports to be used by application\n" " --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n" @@ -470,12 +478,12 @@ us_vhost_usage(const char *prgname) " --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n" " --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n" " --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n" - " --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n" " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n" - " --dev-basename: The basename to be used for the character device.\n" + " --socket-file: The path of the socket file.\n" " --tx-csum [0|1] disable/enable TX checksum offload.\n" " --tso [0|1] disable/enable TCP segment offload.\n" - " --client register a vhost-user socket as client mode.\n", + " --client register a vhost-user socket as client mode.\n" + " --dequeue-zero-copy enables dequeue zero copy\n", prgname); } @@ -495,12 +503,12 @@ us_vhost_parse_args(int argc, char **argv) {"rx-retry-delay", required_argument, NULL, 0}, {"rx-retry-num", required_argument, NULL, 0}, {"mergeable", required_argument, NULL, 0}, - {"vlan-strip", required_argument, NULL, 0}, {"stats", required_argument, NULL, 0}, - {"dev-basename", required_argument, NULL, 0}, + {"socket-file", required_argument, NULL, 0}, {"tx-csum", required_argument, NULL, 0}, {"tso", required_argument, NULL, 0}, {"client", no_argument, &client_mode, 1}, + {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1}, {NULL, 0, 0, 0}, }; @@ -618,27 +626,12 @@ us_vhost_parse_args(int argc, char **argv) } } - /* Enable/disable RX VLAN strip on host. */ - if (!strncmp(long_option[option_index].name, - "vlan-strip", MAX_LONG_OPT_SZ)) { - ret = parse_num_opt(optarg, 1); - if (ret == -1) { - RTE_LOG(INFO, VHOST_CONFIG, - "Invalid argument for VLAN strip [0|1]\n"); - us_vhost_usage(prgname); - return -1; - } else { - vlan_strip = !!ret; - vmdq_conf_default.rxmode.hw_vlan_strip = - vlan_strip; - } - } - /* Enable/disable stats. */ if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) { ret = parse_num_opt(optarg, INT32_MAX); if (ret == -1) { - RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n"); + RTE_LOG(INFO, VHOST_CONFIG, + "Invalid argument for stats [0..N]\n"); us_vhost_usage(prgname); return -1; } else { @@ -646,10 +639,13 @@ us_vhost_parse_args(int argc, char **argv) } } - /* Set character device basename. 
-			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
-				if (us_vhost_parse_basename(optarg) == -1) {
-					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
+			/* Set socket file path. */
+			if (!strncmp(long_option[option_index].name,
+				"socket-file", MAX_LONG_OPT_SZ)) {
+				if (us_vhost_parse_socket_path(optarg) == -1) {
+					RTE_LOG(INFO, VHOST_CONFIG,
+					"Invalid argument for socket name (Max %d characters)\n",
+					PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
@@ -761,10 +757,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);
-	/* Enable stripping of the vlan tag as we handle routing. */
-	if (vlan_strip)
-		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
-			(uint16_t)vdev->vmdq_rx_q, 1);
+	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;
@@ -1340,14 +1333,27 @@ print_stats(void)
	}
}
+static void
+unregister_drivers(int socket_num)
+{
+	int i, ret;
+
+	for (i = 0; i < socket_num; i++) {
+		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
+		if (ret != 0)
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Fail to unregister vhost driver for %s.\n",
+				socket_files + i * PATH_MAX);
+	}
+}
+
/* When we receive a INT signal, unregister vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
-	int ret = rte_vhost_driver_unregister((char *)&dev_basename);
-	if (ret != 0)
-		rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
+	unregister_drivers(nb_sockets);
+
	exit(0);
}
@@ -1403,15 +1409,14 @@ create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
}
/*
- * Main function, does initialisation and calls the per-lcore functions. The CUSE
- * device is also registered here to handle the IOCTLs.
+ * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
-	int ret;
+	int ret, i;
	uint8_t portid;
	static pthread_t tid;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1509,14 +1514,22 @@ main(int argc, char *argv[])
	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;
-	/* Register vhost(cuse or user) driver to handle vhost messages. */
-	ret = rte_vhost_driver_register(dev_basename, flags);
-	if (ret != 0)
-		rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
+	if (dequeue_zero_copy)
+		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+
+	/* Register vhost user driver to handle vhost messages. */
+	for (i = 0; i < nb_sockets; i++) {
+		ret = rte_vhost_driver_register
+			(socket_files + i * PATH_MAX, flags);
+		if (ret != 0) {
+			unregister_drivers(i);
+			rte_exit(EXIT_FAILURE,
+				"vhost driver register failure.\n");
+		}
+	}
	rte_vhost_driver_callback_register(&virtio_net_device_ops);
-	/* Start CUSE session. */
	rte_vhost_driver_session_start();
	return 0;
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index 2e403576..f4dbaa48 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -1407,8 +1407,7 @@ print_stats(void)
int init_virtio_net(struct virtio_net_device_ops const * const ops);
/*
- * Main function, does initialisation and calls the per-lcore functions. The CUSE
- * device is also registered here to handle the IOCTLs.
+ * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 360492ba..f639355d 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -63,7 +63,6 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_ring.h>
#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 617263b4..35ffffad 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -63,7 +63,6 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_ring.h>
#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
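
The tep_termination hunks near the top of this diff set PKT_TX_TCP_CKSUM on the inner TCP header and mark the packet with the PKT_TX_TUNNEL_VXLAN flag introduced in 16.11. The following is a minimal sketch, not taken from the example code, of how an application can fill the mbuf TX offload fields for a VXLAN-encapsulated TCP segment: the helper name request_inner_tso and its parameters are illustrative only, it assumes the headers have already been parsed by the caller, and it uses rte_ipv4_phdr_cksum() in place of the example's local get_psd_sum() wrapper.

/*
 * Illustrative sketch: request inner TCP checksum/TSO on a VXLAN packet.
 * Header pointers and packet layout are assumed to be set up by the caller.
 */
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

static void
request_inner_tso(struct rte_mbuf *m, struct ipv4_hdr *inner_ip,
		  struct tcp_hdr *inner_tcp, uint16_t tso_segsz)
{
	uint64_t ol_flags = 0;

	/* Inner L2/L3/L4 lengths describe the encapsulated packet. */
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->l4_len = (inner_tcp->data_off & 0xf0) >> 2;

	/* Outer headers are consumed by the VXLAN tunnel offload. */
	m->outer_l2_len = sizeof(struct ether_hdr);
	m->outer_l3_len = sizeof(struct ipv4_hdr);

	ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	ol_flags |= PKT_TX_TUNNEL_VXLAN;	/* TX flag added in 16.11 */

	if (tso_segsz != 0) {
		ol_flags |= PKT_TX_TCP_SEG;
		m->tso_segsz = tso_segsz;
	}

	/* Hardware checksum/TSO starts from the pseudo-header checksum. */
	inner_tcp->cksum = rte_ipv4_phdr_cksum(inner_ip, ol_flags);

	m->ol_flags |= ol_flags;
}

The pseudo-header checksum has to be written into the TCP header before the mbuf is handed to the PMD, since the hardware completes the checksum from that seed when PKT_TX_TCP_CKSUM or PKT_TX_TCP_SEG is set.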
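
The vhost changes above replace the single --dev-basename character device with any number of --socket-file paths and add a --dequeue-zero-copy option that maps to the RTE_VHOST_USER_DEQUEUE_ZERO_COPY registration flag. A minimal sketch of that registration pattern follows; the helper name register_sockets and the caller-provided paths array are illustrative and stand in for the example's PATH_MAX-strided socket_files buffer.

/*
 * Illustrative sketch: register several vhost-user sockets with the
 * 16.11 registration flags used in the diff above.
 */
#include <stdint.h>
#include <rte_virtio_net.h>

static int
register_sockets(const char *paths[], int n, int client, int zero_copy)
{
	uint64_t flags = 0;
	int i;

	if (client)
		flags |= RTE_VHOST_USER_CLIENT;
	if (zero_copy)
		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

	for (i = 0; i < n; i++) {
		if (rte_vhost_driver_register(paths[i], flags) != 0) {
			/* Roll back the sockets registered so far. */
			while (i--)
				rte_vhost_driver_unregister(paths[i]);
			return -1;
		}
	}
	return 0;
}

Rolling back the already-registered sockets on failure mirrors the example's unregister_drivers(i) call before rte_exit(); and when the zero-copy flag is used, the small tx_ring_size (64) chosen in port_init() keeps mbufs recycling quickly so the guest Tx vring is not starved.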