Diffstat (limited to 'src/plugins')
48 files changed, 6255 insertions, 199 deletions
diff --git a/src/plugins/builtinurl/builtinurl.api b/src/plugins/builtinurl/builtinurl.api index f292fd77a8e..80efa73c725 100644 --- a/src/plugins/builtinurl/builtinurl.api +++ b/src/plugins/builtinurl/builtinurl.api @@ -35,6 +35,7 @@ option version = "1.0.0"; */ autoreply define builtinurl_enable { + option deprecated="incorporated in http_static plugin"; /* Client identifier, set from api_main.my_client_index */ u32 client_index; diff --git a/src/plugins/dev_octeon/CMakeLists.txt b/src/plugins/dev_octeon/CMakeLists.txt index e8abf1a3389..c6271ecdfba 100644 --- a/src/plugins/dev_octeon/CMakeLists.txt +++ b/src/plugins/dev_octeon/CMakeLists.txt @@ -1,7 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright(c) 2022 Cisco Systems, Inc. -if (NOT VPP_PLATFORM_NAME STREQUAL "octeon10") +if (NOT VPP_PLATFORM_NAME STREQUAL "octeon10" AND NOT VPP_PLATFORM_NAME STREQUAL "octeon9") return() endif() @@ -21,6 +21,10 @@ endif() include_directories (${OCTEON_ROC_DIR}/) +if (VPP_PLATFORM_NAME STREQUAL "octeon9") + add_compile_definitions(PLATFORM_OCTEON9) +endif() + add_vpp_plugin(dev_octeon SOURCES init.c @@ -31,6 +35,7 @@ add_vpp_plugin(dev_octeon rx_node.c tx_node.c flow.c + counter.c MULTIARCH_SOURCES rx_node.c diff --git a/src/plugins/dev_octeon/counter.c b/src/plugins/dev_octeon/counter.c new file mode 100644 index 00000000000..dd73684c386 --- /dev/null +++ b/src/plugins/dev_octeon/counter.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2024 Marvell. + * SPDX-License-Identifier: Apache-2.0 + * https://spdx.org/licenses/Apache-2.0.html + */ + +#include <vnet/vnet.h> +#include <vnet/dev/dev.h> +#include <vnet/dev/pci.h> +#include <vnet/dev/counters.h> +#include <dev_octeon/octeon.h> +#include <dev_octeon/common.h> + +VLIB_REGISTER_LOG_CLASS (oct_log, static) = { + .class_name = "oct", + .subclass_name = "counters", +}; + +typedef enum +{ + OCT_PORT_CTR_RX_BYTES, + OCT_PORT_CTR_TX_BYTES, + OCT_PORT_CTR_RX_PACKETS, + OCT_PORT_CTR_TX_PACKETS, + OCT_PORT_CTR_RX_DROPS, + OCT_PORT_CTR_TX_DROPS, + OCT_PORT_CTR_RX_DROP_BYTES, + OCT_PORT_CTR_RX_UCAST, + OCT_PORT_CTR_TX_UCAST, + OCT_PORT_CTR_RX_MCAST, + OCT_PORT_CTR_TX_MCAST, + OCT_PORT_CTR_RX_BCAST, + OCT_PORT_CTR_TX_BCAST, + OCT_PORT_CTR_RX_FCS, + OCT_PORT_CTR_RX_ERR, + OCT_PORT_CTR_RX_DROP_MCAST, + OCT_PORT_CTR_RX_DROP_BCAST, + OCT_PORT_CTR_RX_DROP_L3_MCAST, + OCT_PORT_CTR_RX_DROP_L3_BCAST, +} oct_port_counter_id_t; + +vnet_dev_counter_t oct_port_counters[] = { + VNET_DEV_CTR_RX_BYTES (OCT_PORT_CTR_RX_BYTES), + VNET_DEV_CTR_RX_PACKETS (OCT_PORT_CTR_RX_PACKETS), + VNET_DEV_CTR_RX_DROPS (OCT_PORT_CTR_RX_DROPS), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_DROP_BYTES, RX, BYTES, "drop bytes"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_UCAST, RX, PACKETS, "unicast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_MCAST, RX, PACKETS, "multicast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_BCAST, RX, PACKETS, "broadcast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_FCS, RX, PACKETS, "fcs"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_ERR, RX, PACKETS, "error"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_DROP_MCAST, RX, PACKETS, + "drop multicast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_DROP_BCAST, RX, PACKETS, + "drop broadcast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_DROP_L3_MCAST, RX, PACKETS, + "drop L3 multicast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_RX_DROP_L3_BCAST, RX, PACKETS, + "drop L3 broadcast"), + + VNET_DEV_CTR_TX_BYTES (OCT_PORT_CTR_TX_BYTES), + VNET_DEV_CTR_TX_PACKETS (OCT_PORT_CTR_TX_PACKETS), + VNET_DEV_CTR_TX_DROPS (OCT_PORT_CTR_TX_DROPS), + VNET_DEV_CTR_VENDOR 
(OCT_PORT_CTR_TX_UCAST, TX, PACKETS, "unicast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_TX_MCAST, TX, PACKETS, "multicast"), + VNET_DEV_CTR_VENDOR (OCT_PORT_CTR_TX_BCAST, TX, PACKETS, "broadcast"), +}; + +typedef enum +{ + OCT_RXQ_CTR_BYTES, + OCT_RXQ_CTR_PKTS, + OCT_RXQ_CTR_DROPS, + OCT_RXQ_CTR_DROP_BYTES, + OCT_RXQ_CTR_ERR, +} oct_rxq_counter_id_t; + +vnet_dev_counter_t oct_rxq_counters[] = { + VNET_DEV_CTR_RX_BYTES (OCT_RXQ_CTR_BYTES), + VNET_DEV_CTR_RX_PACKETS (OCT_RXQ_CTR_PKTS), + VNET_DEV_CTR_RX_DROPS (OCT_RXQ_CTR_DROPS), + VNET_DEV_CTR_VENDOR (OCT_RXQ_CTR_DROP_BYTES, RX, BYTES, "drop bytes"), + VNET_DEV_CTR_VENDOR (OCT_RXQ_CTR_ERR, RX, PACKETS, "error"), +}; + +typedef enum +{ + OCT_TXQ_CTR_BYTES, + OCT_TXQ_CTR_PKTS, + OCT_TXQ_CTR_DROPS, + OCT_TXQ_CTR_DROP_BYTES, +} oct_txq_counter_id_t; + +vnet_dev_counter_t oct_txq_counters[] = { + VNET_DEV_CTR_TX_BYTES (OCT_TXQ_CTR_BYTES), + VNET_DEV_CTR_TX_PACKETS (OCT_TXQ_CTR_PKTS), + VNET_DEV_CTR_TX_DROPS (OCT_TXQ_CTR_DROPS), + VNET_DEV_CTR_VENDOR (OCT_TXQ_CTR_DROP_BYTES, TX, BYTES, "drop bytes"), +}; + +static vnet_dev_rv_t +oct_roc_err (vnet_dev_t *dev, int rv, char *fmt, ...) +{ + u8 *s = 0; + va_list va; + + va_start (va, fmt); + s = va_format (s, fmt, &va); + va_end (va); + + log_err (dev, "%v - ROC error %s (%d)", s, roc_error_msg_get (rv), rv); + + vec_free (s); + return VNET_DEV_ERR_INTERNAL; +} + +void +oct_port_add_counters (vlib_main_t *vm, vnet_dev_port_t *port) +{ + vnet_dev_port_add_counters (vm, port, oct_port_counters, + ARRAY_LEN (oct_port_counters)); + + foreach_vnet_dev_port_rx_queue (rxq, port) + { + vnet_dev_rx_queue_add_counters (vm, rxq, oct_rxq_counters, + ARRAY_LEN (oct_rxq_counters)); + } + + foreach_vnet_dev_port_tx_queue (txq, port) + { + vnet_dev_tx_queue_add_counters (vm, txq, oct_txq_counters, + ARRAY_LEN (oct_txq_counters)); + } +} + +vnet_dev_rv_t +oct_port_get_stats (vlib_main_t *vm, vnet_dev_port_t *port) +{ + vnet_dev_t *dev = port->dev; + oct_device_t *cd = vnet_dev_get_data (dev); + struct roc_nix *nix = cd->nix; + int rrv; + struct roc_nix_stats stats; + + if ((rrv = roc_nix_stats_get (nix, &stats))) + return oct_roc_err (dev, rrv, "roc_nix_stats_get() failed"); + + foreach_vnet_dev_counter (c, port->counter_main) + { + switch (c->user_data) + { + case OCT_PORT_CTR_RX_BYTES: + vnet_dev_counter_value_update (vm, c, stats.rx_octs); + break; + case OCT_PORT_CTR_TX_BYTES: + vnet_dev_counter_value_update (vm, c, stats.tx_octs); + break; + case OCT_PORT_CTR_RX_PACKETS: + vnet_dev_counter_value_update ( + vm, c, stats.rx_ucast + stats.rx_bcast + stats.rx_mcast); + break; + case OCT_PORT_CTR_TX_PACKETS: + vnet_dev_counter_value_update ( + vm, c, stats.tx_ucast + stats.tx_bcast + stats.tx_mcast); + break; + case OCT_PORT_CTR_RX_DROPS: + vnet_dev_counter_value_update (vm, c, stats.rx_drop); + break; + case OCT_PORT_CTR_TX_DROPS: + vnet_dev_counter_value_update (vm, c, stats.tx_drop); + break; + case OCT_PORT_CTR_RX_DROP_BYTES: + vnet_dev_counter_value_update (vm, c, stats.rx_drop_octs); + break; + case OCT_PORT_CTR_RX_UCAST: + vnet_dev_counter_value_update (vm, c, stats.rx_ucast); + break; + case OCT_PORT_CTR_TX_UCAST: + vnet_dev_counter_value_update (vm, c, stats.tx_ucast); + break; + case OCT_PORT_CTR_RX_MCAST: + vnet_dev_counter_value_update (vm, c, stats.rx_mcast); + break; + case OCT_PORT_CTR_TX_MCAST: + vnet_dev_counter_value_update (vm, c, stats.tx_mcast); + break; + case OCT_PORT_CTR_RX_BCAST: + vnet_dev_counter_value_update (vm, c, stats.rx_bcast); + break; + case OCT_PORT_CTR_TX_BCAST: + 
vnet_dev_counter_value_update (vm, c, stats.tx_bcast); + break; + case OCT_PORT_CTR_RX_FCS: + vnet_dev_counter_value_update (vm, c, stats.rx_fcs); + break; + case OCT_PORT_CTR_RX_ERR: + vnet_dev_counter_value_update (vm, c, stats.rx_err); + break; + case OCT_PORT_CTR_RX_DROP_MCAST: + vnet_dev_counter_value_update (vm, c, stats.rx_drop_mcast); + break; + case OCT_PORT_CTR_RX_DROP_BCAST: + vnet_dev_counter_value_update (vm, c, stats.rx_drop_bcast); + break; + case OCT_PORT_CTR_RX_DROP_L3_MCAST: + vnet_dev_counter_value_update (vm, c, stats.rx_drop_l3_mcast); + break; + case OCT_PORT_CTR_RX_DROP_L3_BCAST: + vnet_dev_counter_value_update (vm, c, stats.rx_drop_l3_bcast); + break; + default: + ASSERT (0); + } + } + + return VNET_DEV_OK; +} + +vnet_dev_rv_t +oct_rxq_get_stats (vlib_main_t *vm, vnet_dev_port_t *port, + vnet_dev_rx_queue_t *rxq) +{ + oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq); + struct roc_nix_stats_queue qstats; + vnet_dev_t *dev = port->dev; + oct_device_t *cd = vnet_dev_get_data (dev); + struct roc_nix *nix = cd->nix; + int rrv; + + if ((rrv = roc_nix_stats_queue_get (nix, crq->rq.qid, 1, &qstats))) + return oct_roc_err (dev, rrv, "roc_nix_stats_queue_get() failed"); + + foreach_vnet_dev_counter (c, rxq->counter_main) + { + switch (c->user_data) + { + case OCT_RXQ_CTR_BYTES: + vnet_dev_counter_value_update (vm, c, qstats.rx_octs); + break; + case OCT_RXQ_CTR_PKTS: + vnet_dev_counter_value_update (vm, c, qstats.rx_pkts); + break; + case OCT_RXQ_CTR_DROPS: + vnet_dev_counter_value_update (vm, c, qstats.rx_drop_pkts); + break; + case OCT_RXQ_CTR_DROP_BYTES: + vnet_dev_counter_value_update (vm, c, qstats.rx_drop_octs); + break; + case OCT_RXQ_CTR_ERR: + vnet_dev_counter_value_update (vm, c, qstats.rx_error_pkts); + break; + default: + ASSERT (0); + } + } + + return VNET_DEV_OK; +} + +vnet_dev_rv_t +oct_txq_get_stats (vlib_main_t *vm, vnet_dev_port_t *port, + vnet_dev_tx_queue_t *txq) +{ + oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq); + struct roc_nix_stats_queue qstats; + vnet_dev_t *dev = port->dev; + oct_device_t *cd = vnet_dev_get_data (dev); + struct roc_nix *nix = cd->nix; + int rrv; + + if ((rrv = roc_nix_stats_queue_get (nix, ctq->sq.qid, 0, &qstats))) + return oct_roc_err (dev, rrv, "roc_nix_stats_queue_get() failed"); + + foreach_vnet_dev_counter (c, txq->counter_main) + { + switch (c->user_data) + { + case OCT_TXQ_CTR_BYTES: + vnet_dev_counter_value_update (vm, c, qstats.tx_octs); + break; + case OCT_TXQ_CTR_PKTS: + vnet_dev_counter_value_update (vm, c, qstats.tx_pkts); + break; + case OCT_TXQ_CTR_DROPS: + vnet_dev_counter_value_update (vm, c, qstats.tx_drop_pkts); + break; + case OCT_TXQ_CTR_DROP_BYTES: + vnet_dev_counter_value_update (vm, c, qstats.tx_drop_octs); + break; + default: + ASSERT (0); + } + } + + return VNET_DEV_OK; +} + +void +oct_port_clear_counters (vlib_main_t *vm, vnet_dev_port_t *port) +{ + vnet_dev_t *dev = port->dev; + oct_device_t *cd = vnet_dev_get_data (dev); + struct roc_nix *nix = cd->nix; + int rrv; + + if ((rrv = roc_nix_stats_reset (nix))) + oct_roc_err (dev, rrv, "roc_nix_stats_reset() failed"); +} + +void +oct_rxq_clear_counters (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq) +{ + oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq); + vnet_dev_t *dev = rxq->port->dev; + oct_device_t *cd = vnet_dev_get_data (dev); + struct roc_nix *nix = cd->nix; + int rrv; + + if ((rrv = roc_nix_stats_queue_reset (nix, crq->rq.qid, 1))) + oct_roc_err (dev, rrv, + "roc_nix_stats_queue_reset() failed for rx queue %u", + rxq->queue_id); +} + +void 
+oct_txq_clear_counters (vlib_main_t *vm, vnet_dev_tx_queue_t *txq) +{ + oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq); + vnet_dev_t *dev = txq->port->dev; + oct_device_t *cd = vnet_dev_get_data (dev); + struct roc_nix *nix = cd->nix; + int rrv; + + if ((rrv = roc_nix_stats_queue_reset (nix, ctq->sq.qid, 0))) + oct_roc_err (dev, rrv, + "roc_nix_stats_queue_reset() failed for tx queue %u", + txq->queue_id); +} diff --git a/src/plugins/dev_octeon/flow.c b/src/plugins/dev_octeon/flow.c index 1c367a036ab..35aabde76a7 100644 --- a/src/plugins/dev_octeon/flow.c +++ b/src/plugins/dev_octeon/flow.c @@ -46,6 +46,8 @@ VLIB_REGISTER_LOG_CLASS (oct_log, static) = { (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \ (f->type == VNET_FLOW_TYPE_IP4_GTPU)) +#define FLOW_IS_GENERIC_TYPE(f) (f->type == VNET_FLOW_TYPE_GENERIC) + #define OCT_FLOW_UNSUPPORTED_ACTIONS(f) \ ((f->actions == VNET_FLOW_ACTION_BUFFER_ADVANCE) || \ (f->actions == VNET_FLOW_ACTION_REDIRECT_TO_NODE)) @@ -71,6 +73,9 @@ VLIB_REGISTER_LOG_CLASS (oct_log, static) = { _ (62, FLOW_KEY_TYPE_L3_DST, "l3-dst-only") \ _ (63, FLOW_KEY_TYPE_L3_SRC, "l3-src-only") +#define GTPU_PORT 2152 +#define VXLAN_PORT 4789 + typedef struct { u16 src_port; @@ -87,6 +92,27 @@ typedef struct u32 teid; } gtpu_header_t; +typedef struct +{ + u8 layer; + u16 nxt_proto; + vnet_dev_port_t *port; + struct roc_npc_item_info *items; + struct + { + u8 *spec; + u8 *mask; + u16 off; + } oct_drv; + struct + { + u8 *spec; + u8 *mask; + u16 off; + u16 len; + } generic; +} oct_flow_parse_state; + static void oct_flow_convert_rss_types (u64 *key, u64 rss_types) { @@ -183,6 +209,320 @@ oct_flow_rule_create (vnet_dev_port_t *port, struct roc_npc_action *actions, return VNET_DEV_OK; } +static int +oct_parse_l2 (oct_flow_parse_state *pst) +{ + struct roc_npc_flow_item_eth *eth_spec = + (struct roc_npc_flow_item_eth *) &pst->oct_drv.spec[pst->oct_drv.off]; + struct roc_npc_flow_item_eth *eth_mask = + (struct roc_npc_flow_item_eth *) &pst->oct_drv.mask[pst->oct_drv.off]; + ethernet_header_t *eth_hdr_mask = + (ethernet_header_t *) &pst->generic.mask[pst->generic.off]; + ethernet_header_t *eth_hdr = + (ethernet_header_t *) &pst->generic.spec[pst->generic.off]; + u16 tpid, etype; + + tpid = etype = clib_net_to_host_u16 (eth_hdr->type); + clib_memcpy_fast (eth_spec, eth_hdr, sizeof (ethernet_header_t)); + clib_memcpy_fast (eth_mask, eth_hdr_mask, sizeof (ethernet_header_t)); + eth_spec->has_vlan = 0; + + pst->items[pst->layer].spec = (void *) eth_spec; + pst->items[pst->layer].mask = (void *) eth_mask; + pst->items[pst->layer].size = sizeof (ethernet_header_t); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_ETH; + pst->generic.off += sizeof (ethernet_header_t); + pst->oct_drv.off += sizeof (struct roc_npc_flow_item_eth); + pst->layer++; + + /* Parse VLAN Tags if any */ + struct roc_npc_flow_item_vlan *vlan_spec = + (struct roc_npc_flow_item_vlan *) &pst->oct_drv.spec[pst->oct_drv.off]; + struct roc_npc_flow_item_vlan *vlan_mask = + (struct roc_npc_flow_item_vlan *) &pst->oct_drv.mask[pst->oct_drv.off]; + ethernet_vlan_header_t *vlan_hdr, *vlan_hdr_mask; + u8 vlan_cnt = 0; + + while (tpid == ETHERNET_TYPE_DOT1AD || tpid == ETHERNET_TYPE_VLAN) + { + if (pst->generic.off >= pst->generic.len) + break; + + vlan_hdr = + (ethernet_vlan_header_t *) &pst->generic.spec[pst->generic.off]; + vlan_hdr_mask = + (ethernet_vlan_header_t *) &pst->generic.mask[pst->generic.off]; + tpid = etype = clib_net_to_host_u16 (vlan_hdr->type); + clib_memcpy (&vlan_spec[vlan_cnt], vlan_hdr, + sizeof 
(ethernet_vlan_header_t)); + clib_memcpy (&vlan_mask[vlan_cnt], vlan_hdr_mask, + sizeof (ethernet_vlan_header_t)); + pst->items[pst->layer].spec = (void *) &vlan_spec[vlan_cnt]; + pst->items[pst->layer].mask = (void *) &vlan_mask[vlan_cnt]; + pst->items[pst->layer].size = sizeof (ethernet_vlan_header_t); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_VLAN; + pst->generic.off += sizeof (ethernet_vlan_header_t); + pst->oct_drv.off += sizeof (struct roc_npc_flow_item_vlan); + pst->layer++; + vlan_cnt++; + } + + /* Inner most vlan tag */ + if (vlan_cnt) + vlan_spec[vlan_cnt - 1].has_more_vlan = 0; + + pst->nxt_proto = etype; + return 0; +} + +static int +oct_parse_l3 (oct_flow_parse_state *pst) +{ + + if (pst->generic.off >= pst->generic.len || pst->nxt_proto == 0) + return 0; + + if (pst->nxt_proto == ETHERNET_TYPE_MPLS) + { + int label_stack_bottom = 0; + do + { + + u8 *mpls_spec = &pst->generic.spec[pst->generic.off]; + u8 *mpls_mask = &pst->generic.mask[pst->generic.off]; + + label_stack_bottom = mpls_spec[2] & 1; + pst->items[pst->layer].spec = (void *) mpls_spec; + pst->items[pst->layer].mask = (void *) mpls_mask; + pst->items[pst->layer].size = sizeof (u32); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_MPLS; + pst->generic.off += sizeof (u32); + pst->layer++; + } + while (label_stack_bottom); + + pst->nxt_proto = 0; + return 0; + } + else if (pst->nxt_proto == ETHERNET_TYPE_IP4) + { + ip4_header_t *ip4_spec = + (ip4_header_t *) &pst->generic.spec[pst->generic.off]; + ip4_header_t *ip4_mask = + (ip4_header_t *) &pst->generic.mask[pst->generic.off]; + pst->items[pst->layer].spec = (void *) ip4_spec; + pst->items[pst->layer].mask = (void *) ip4_mask; + pst->items[pst->layer].size = sizeof (ip4_header_t); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV4; + pst->generic.off += sizeof (ip4_header_t); + pst->layer++; + pst->nxt_proto = ip4_spec->protocol; + } + else if (pst->nxt_proto == ETHERNET_TYPE_IP6) + { + struct roc_npc_flow_item_ipv6 *ip6_spec = + (struct roc_npc_flow_item_ipv6 *) &pst->oct_drv.spec[pst->oct_drv.off]; + struct roc_npc_flow_item_ipv6 *ip6_mask = + (struct roc_npc_flow_item_ipv6 *) &pst->oct_drv.mask[pst->oct_drv.off]; + ip6_header_t *ip6_hdr_mask = + (ip6_header_t *) &pst->generic.mask[pst->generic.off]; + ip6_header_t *ip6_hdr = + (ip6_header_t *) &pst->generic.spec[pst->generic.off]; + u8 nxt_hdr = ip6_hdr->protocol; + + clib_memcpy (ip6_spec, ip6_hdr, sizeof (ip6_header_t)); + clib_memcpy (ip6_mask, ip6_hdr_mask, sizeof (ip6_header_t)); + pst->items[pst->layer].spec = (void *) ip6_spec; + pst->items[pst->layer].mask = (void *) ip6_mask; + pst->items[pst->layer].size = sizeof (ip6_header_t); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6; + pst->generic.off += sizeof (ip6_header_t); + pst->oct_drv.off += sizeof (struct roc_npc_flow_item_ipv6); + pst->layer++; + + while (nxt_hdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS || + nxt_hdr == IP_PROTOCOL_IP6_DESTINATION_OPTIONS || + nxt_hdr == IP_PROTOCOL_IPV6_ROUTE) + { + if (pst->generic.off >= pst->generic.len) + return 0; + + ip6_ext_header_t *ip6_ext_spec = + (ip6_ext_header_t *) &pst->generic.spec[pst->generic.off]; + ip6_ext_header_t *ip6_ext_mask = + (ip6_ext_header_t *) &pst->generic.mask[pst->generic.off]; + nxt_hdr = ip6_ext_spec->next_hdr; + + pst->items[pst->layer].spec = (void *) ip6_ext_spec; + pst->items[pst->layer].mask = (void *) ip6_ext_mask; + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6_EXT; + pst->generic.off += ip6_ext_header_len (ip6_ext_spec); + pst->layer++; + } + + if 
(pst->generic.off >= pst->generic.len) + return 0; + + if (nxt_hdr == IP_PROTOCOL_IPV6_FRAGMENTATION) + { + ip6_frag_hdr_t *ip6_ext_frag_spec = + (ip6_frag_hdr_t *) &pst->generic.spec[pst->generic.off]; + ip6_frag_hdr_t *ip6_ext_frag_mask = + (ip6_frag_hdr_t *) &pst->generic.mask[pst->generic.off]; + + pst->items[pst->layer].spec = (void *) ip6_ext_frag_spec; + pst->items[pst->layer].mask = (void *) ip6_ext_frag_mask; + pst->items[pst->layer].size = sizeof (ip6_frag_hdr_t); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT; + pst->generic.off += sizeof (ip6_frag_hdr_t); + pst->layer++; + } + + pst->nxt_proto = nxt_hdr; + } + /* Unsupported L3. */ + else + return -1; + + return 0; +} + +static int +oct_parse_l4 (oct_flow_parse_state *pst) +{ + + if (pst->generic.off >= pst->generic.len || pst->nxt_proto == 0) + return 0; + +#define _(protocol_t, protocol_value, ltype) \ + if (pst->nxt_proto == protocol_value) \ + \ + { \ + \ + protocol_t *spec = (protocol_t *) &pst->generic.spec[pst->generic.off]; \ + protocol_t *mask = (protocol_t *) &pst->generic.mask[pst->generic.off]; \ + pst->items[pst->layer].spec = spec; \ + pst->items[pst->layer].mask = mask; \ + \ + pst->items[pst->layer].size = sizeof (protocol_t); \ + \ + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_##ltype; \ + pst->generic.off += sizeof (protocol_t); \ + pst->layer++; \ + return 0; \ + } + + _ (esp_header_t, IP_PROTOCOL_IPSEC_ESP, ESP) + _ (udp_header_t, IP_PROTOCOL_UDP, UDP) + _ (tcp_header_t, IP_PROTOCOL_TCP, TCP) + _ (sctp_header_t, IP_PROTOCOL_SCTP, SCTP) + _ (icmp46_header_t, IP_PROTOCOL_ICMP, ICMP) + _ (icmp46_header_t, IP_PROTOCOL_ICMP6, ICMP) + _ (igmp_header_t, IP_PROTOCOL_IGMP, IGMP) + _ (gre_header_t, IP_PROTOCOL_GRE, GRE) + + /* Unsupported L4. */ + return -1; +} + +static int +oct_parse_tunnel (oct_flow_parse_state *pst) +{ + if (pst->generic.off >= pst->generic.len) + return 0; + + if (pst->items[pst->layer - 1].type == ROC_NPC_ITEM_TYPE_GRE) + { + gre_header_t *gre_hdr = (gre_header_t *) pst->items[pst->layer - 1].spec; + pst->nxt_proto = clib_net_to_host_u16 (gre_hdr->protocol); + goto parse_l3; + } + + else if (pst->items[pst->layer - 1].type == ROC_NPC_ITEM_TYPE_UDP) + { + udp_header_t *udp_h = (udp_header_t *) pst->items[pst->layer - 1].spec; + u16 dport = clib_net_to_host_u16 (udp_h->dst_port); + + if (dport == GTPU_PORT) + { + gtpu_header_t *gtpu_spec = + (gtpu_header_t *) &pst->generic.spec[pst->generic.off]; + gtpu_header_t *gtpu_mask = + (gtpu_header_t *) &pst->generic.mask[pst->generic.off]; + pst->items[pst->layer].spec = (void *) gtpu_spec; + pst->items[pst->layer].mask = (void *) gtpu_mask; + pst->items[pst->layer].size = sizeof (gtpu_header_t); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_GTPU; + pst->generic.off += sizeof (gtpu_header_t); + pst->layer++; + pst->nxt_proto = 0; + return 0; + } + else if (dport == VXLAN_PORT) + { + vxlan_header_t *vxlan_spec = + (vxlan_header_t *) &pst->generic.spec[pst->generic.off]; + vxlan_header_t *vxlan_mask = + (vxlan_header_t *) &pst->generic.spec[pst->generic.off]; + pst->items[pst->layer].spec = (void *) vxlan_spec; + pst->items[pst->layer].mask = (void *) vxlan_mask; + pst->items[pst->layer].size = sizeof (vxlan_header_t); + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_VXLAN; + pst->generic.off += sizeof (vxlan_header_t); + pst->layer++; + pst->nxt_proto = 0; + goto parse_l2; + } + } + /* No supported Tunnel detected. 
*/ + else + { + log_err (pst->port->dev, + "Partially parsed till offset %u, not able to parse further", + pst->generic.off); + return 0; + } +parse_l2: + if (oct_parse_l2 (pst)) + return -1; +parse_l3: + if (oct_parse_l3 (pst)) + return -1; + + return oct_parse_l4 (pst); +} + +static vnet_dev_rv_t +oct_flow_generic_pattern_parse (oct_flow_parse_state *pst) +{ + + if (oct_parse_l2 (pst)) + goto err; + + if (oct_parse_l3 (pst)) + goto err; + + if (oct_parse_l4 (pst)) + goto err; + + if (oct_parse_tunnel (pst)) + goto err; + + if (pst->generic.off < pst->generic.len) + { + log_err (pst->port->dev, + "Partially parsed till offset %u, not able to parse further", + pst->generic.off); + goto err; + } + + pst->items[pst->layer].type = ROC_NPC_ITEM_TYPE_END; + return VNET_DEV_OK; + +err: + return VNET_DEV_ERR_NOT_SUPPORTED; +} + static vnet_dev_rv_t oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow, uword *private_data) @@ -196,6 +536,7 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow, struct roc_npc_action_queue conf = {}; struct roc_npc_action_mark mark = {}; struct roc_npc *npc = &oct_port->npc; + u8 *flow_spec = 0, *flow_mask = 0; vnet_dev_rv_t rv = VNET_DEV_OK; int layer = 0, index = 0; u16 *queues = NULL; @@ -203,6 +544,45 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow, u8 proto = 0; u16 action = 0; + if (FLOW_IS_GENERIC_TYPE (flow)) + { + u8 drv_item_spec[1024] = { 0 }, drv_item_mask[1024] = { 0 }; + unformat_input_t input; + int rc; + + unformat_init_string ( + &input, (const char *) flow->generic.pattern.spec, + strlen ((const char *) flow->generic.pattern.spec)); + unformat_user (&input, unformat_hex_string, &flow_spec); + unformat_free (&input); + + unformat_init_string ( + &input, (const char *) flow->generic.pattern.mask, + strlen ((const char *) flow->generic.pattern.mask)); + unformat_user (&input, unformat_hex_string, &flow_mask); + unformat_free (&input); + + oct_flow_parse_state pst = { + .nxt_proto = 0, + .port = port, + .items = item_info, + .oct_drv = { .spec = drv_item_spec, .mask = drv_item_mask }, + .generic = { .spec = flow_spec, + .mask = flow_mask, + .len = vec_len (flow_spec) }, + }; + + rc = oct_flow_generic_pattern_parse (&pst); + if (rc) + { + vec_free (flow_spec); + vec_free (flow_mask); + return VNET_DEV_ERR_NOT_SUPPORTED; + } + + goto parse_flow_actions; + } + if (FLOW_IS_ETHERNET_CLASS (flow)) { ethernet_header_t eth_spec = { .type = clib_host_to_net_u16 ( @@ -357,6 +737,7 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow, end_item_info: item_info[layer].type = ROC_NPC_ITEM_TYPE_END; +parse_flow_actions: if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE) { conf.index = flow->redirect_queue; @@ -422,6 +803,11 @@ end_item_info: if (queues) clib_mem_free (queues); + if (flow_spec) + vec_free (flow_spec); + if (flow_mask) + vec_free (flow_mask); + return rv; } diff --git a/src/plugins/dev_octeon/format.c b/src/plugins/dev_octeon/format.c index e624b84f54e..d0f53013d99 100644 --- a/src/plugins/dev_octeon/format.c +++ b/src/plugins/dev_octeon/format.c @@ -25,7 +25,7 @@ format_oct_nix_rx_cqe_desc (u8 *s, va_list *args) typeof (d->sg0) *sg0 = &d->sg0; typeof (d->sg0) *sg1 = &d->sg1; - s = format (s, "hdr: cqe_type %u nude %u q %u tag 0x%x", h->cqe_type, + s = format (s, "hdr: cqe_type %u nude %u qid %u tag 0x%x", h->cqe_type, h->node, h->q, h->tag); s = format (s, "\n%Uparse:", format_white_space, indent); #define _(n, f) s = format (s, " " #n " " f, p->n) diff --git 
a/src/plugins/dev_octeon/init.c b/src/plugins/dev_octeon/init.c index 97a11e0d0d7..87ed8317277 100644 --- a/src/plugins/dev_octeon/init.c +++ b/src/plugins/dev_octeon/init.c @@ -141,6 +141,7 @@ oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev) .config_change_validate = oct_port_cfg_change_validate, .format_status = format_oct_port_status, .format_flow = format_oct_port_flow, + .clear_counters = oct_port_clear_counters, }, .data_size = sizeof (oct_port_t), .initial_data = &oct_port, @@ -159,6 +160,7 @@ oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev) .alloc = oct_rx_queue_alloc, .free = oct_rx_queue_free, .format_info = format_oct_rxq_info, + .clear_counters = oct_rxq_clear_counters, }, }, .tx_queue = { @@ -173,6 +175,7 @@ oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev) .alloc = oct_tx_queue_alloc, .free = oct_tx_queue_free, .format_info = format_oct_txq_info, + .clear_counters = oct_txq_clear_counters, }, }, }; @@ -245,6 +248,7 @@ oct_init (vlib_main_t *vm, vnet_dev_t *dev) { case OCT_DEVICE_TYPE_RVU_PF: case OCT_DEVICE_TYPE_RVU_VF: + case OCT_DEVICE_TYPE_LBK_VF: case OCT_DEVICE_TYPE_SDP_VF: return oct_init_nix (vm, dev); diff --git a/src/plugins/dev_octeon/octeon.h b/src/plugins/dev_octeon/octeon.h index e43cde0a35f..a87a5e3e1ed 100644 --- a/src/plugins/dev_octeon/octeon.h +++ b/src/plugins/dev_octeon/octeon.h @@ -12,6 +12,12 @@ #include <vnet/flow/flow.h> #include <vnet/udp/udp.h> #include <vnet/ipsec/esp.h> +#include <vnet/ethernet/packet.h> +#include <vnet/ip/ip_packet.h> +#include <vnet/ip/icmp46_packet.h> +#include <vnet/ip/igmp_packet.h> +#include <vnet/gre/packet.h> +#include <vxlan/vxlan.h> #include <base/roc_api.h> #include <dev_octeon/hw_defs.h> @@ -141,6 +147,17 @@ vnet_dev_rv_t oct_flow_validate_params (vlib_main_t *, vnet_dev_port_t *, vnet_dev_rv_t oct_flow_query (vlib_main_t *, vnet_dev_port_t *, u32, uword, u64 *); +/* counter.c */ +void oct_port_add_counters (vlib_main_t *, vnet_dev_port_t *); +void oct_port_clear_counters (vlib_main_t *, vnet_dev_port_t *); +void oct_rxq_clear_counters (vlib_main_t *, vnet_dev_rx_queue_t *); +void oct_txq_clear_counters (vlib_main_t *, vnet_dev_tx_queue_t *); +vnet_dev_rv_t oct_port_get_stats (vlib_main_t *, vnet_dev_port_t *); +vnet_dev_rv_t oct_rxq_get_stats (vlib_main_t *, vnet_dev_port_t *, + vnet_dev_rx_queue_t *); +vnet_dev_rv_t oct_txq_get_stats (vlib_main_t *, vnet_dev_port_t *, + vnet_dev_tx_queue_t *); + #define log_debug(dev, f, ...) 
\ vlib_log (VLIB_LOG_LEVEL_DEBUG, oct_log.class, "%U: " f, \ format_vnet_dev_addr, (dev), ##__VA_ARGS__) diff --git a/src/plugins/dev_octeon/port.c b/src/plugins/dev_octeon/port.c index d5f78301adf..a53fa256884 100644 --- a/src/plugins/dev_octeon/port.c +++ b/src/plugins/dev_octeon/port.c @@ -124,6 +124,8 @@ oct_port_init (vlib_main_t *vm, vnet_dev_port_t *port) return rv; } + oct_port_add_counters (vm, port); + return VNET_DEV_OK; } @@ -172,6 +174,21 @@ oct_port_poll (vlib_main_t *vm, vnet_dev_port_t *port) vnet_dev_port_state_changes_t changes = {}; int rrv; + if (oct_port_get_stats (vm, port)) + return; + + foreach_vnet_dev_port_rx_queue (q, port) + { + if (oct_rxq_get_stats (vm, port, q)) + return; + } + + foreach_vnet_dev_port_tx_queue (q, port) + { + if (oct_txq_get_stats (vm, port, q)) + return; + } + if (roc_nix_is_lbk (nix)) { link_info.status = 1; @@ -203,7 +220,8 @@ oct_port_poll (vlib_main_t *vm, vnet_dev_port_t *port) if (cd->speed != link_info.speed) { changes.change.link_speed = 1; - changes.link_speed = link_info.speed; + /* Convert to Kbps */ + changes.link_speed = link_info.speed * 1000; cd->speed = link_info.speed; } @@ -385,7 +403,7 @@ oct_validate_config_promisc_mode (vnet_dev_port_t *port, int enable) oct_device_t *cd = vnet_dev_get_data (dev); struct roc_nix *nix = cd->nix; - if (roc_nix_is_vf_or_sdp (nix)) + if (roc_nix_is_sdp (nix) || roc_nix_is_lbk (nix)) return VNET_DEV_ERR_UNSUPPORTED_DEVICE; return VNET_DEV_OK; @@ -405,6 +423,9 @@ oct_op_config_promisc_mode (vlib_main_t *vm, vnet_dev_port_t *port, int enable) return oct_roc_err (dev, rv, "roc_nix_npc_promisc_ena_dis failed"); } + if (!roc_nix_is_pf (nix)) + return VNET_DEV_OK; + rv = roc_nix_mac_promisc_mode_enable (nix, enable); if (rv) { @@ -416,6 +437,44 @@ oct_op_config_promisc_mode (vlib_main_t *vm, vnet_dev_port_t *port, int enable) return VNET_DEV_OK; } +static vnet_dev_rv_t +oct_port_add_del_eth_addr (vlib_main_t *vm, vnet_dev_port_t *port, + vnet_dev_hw_addr_t *addr, int is_add, + int is_primary) +{ + vnet_dev_t *dev = port->dev; + oct_device_t *cd = vnet_dev_get_data (dev); + struct roc_nix *nix = cd->nix; + vnet_dev_rv_t rv = VNET_DEV_OK; + + i32 rrv; + + if (is_primary) + { + if (is_add) + { + /* Update mac address at NPC */ + rrv = roc_nix_npc_mac_addr_set (nix, (u8 *) addr); + if (rrv) + rv = oct_roc_err (dev, rrv, "roc_nix_npc_mac_addr_set() failed"); + + /* Update mac address at CGX for PFs only */ + if (!roc_nix_is_vf_or_sdp (nix)) + { + rrv = roc_nix_mac_addr_set (nix, (u8 *) addr); + if (rrv) + { + /* Rollback to previous mac address */ + roc_nix_npc_mac_addr_set (nix, + (u8 *) &port->primary_hw_addr); + rv = oct_roc_err (dev, rrv, "roc_nix_mac_addr_set() failed"); + } + } + } + } + return rv; +} + vnet_dev_rv_t oct_port_cfg_change_validate (vlib_main_t *vm, vnet_dev_port_t *port, vnet_dev_port_cfg_change_req_t *req) @@ -465,6 +524,9 @@ oct_port_cfg_change (vlib_main_t *vm, vnet_dev_port_t *port, break; case VNET_DEV_PORT_CFG_CHANGE_PRIMARY_HW_ADDR: + rv = oct_port_add_del_eth_addr (vm, port, &req->addr, + /* is_add */ 1, + /* is_primary */ 1); break; case VNET_DEV_PORT_CFG_ADD_SECONDARY_HW_ADDR: diff --git a/src/plugins/dev_octeon/roc_helper.c b/src/plugins/dev_octeon/roc_helper.c index f10c2cb578b..16e0a871a9d 100644 --- a/src/plugins/dev_octeon/roc_helper.c +++ b/src/plugins/dev_octeon/roc_helper.c @@ -49,6 +49,12 @@ oct_plt_get_thread_index (void) return __os_thread_index; } +static u64 +oct_plt_get_cache_line_size (void) +{ + return CLIB_CACHE_LINE_BYTES; +} + static void 
oct_drv_physmem_free (vlib_main_t *vm, void *mem) { @@ -178,4 +184,5 @@ oct_plt_init_param_t oct_plt_init_param = { .oct_plt_spinlock_unlock = oct_plt_spinlock_unlock, .oct_plt_spinlock_trylock = oct_plt_spinlock_trylock, .oct_plt_get_thread_index = oct_plt_get_thread_index, + .oct_plt_get_cache_line_size = oct_plt_get_cache_line_size, }; diff --git a/src/plugins/dev_octeon/rx_node.c b/src/plugins/dev_octeon/rx_node.c index 997f1356199..1f8d5d93fa3 100644 --- a/src/plugins/dev_octeon/rx_node.c +++ b/src/plugins/dev_octeon/rx_node.c @@ -165,6 +165,38 @@ oct_rx_batch (vlib_main_t *vm, oct_rx_node_ctx_t *ctx, return n; } +#ifdef PLATFORM_OCTEON9 +static_always_inline u32 +oct_rxq_refill (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq, u16 n_refill) +{ + u32 n_alloc, n_free; + u32 buffer_indices[n_refill]; + vlib_buffer_t *buffers[n_refill]; + u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq); + oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq); + u64 aura = roc_npa_aura_handle_to_aura (crq->aura_handle); + const uint64_t addr = + roc_npa_aura_handle_to_base (crq->aura_handle) + NPA_LF_AURA_OP_FREE0; + + if (n_refill < 256) + return 0; + + n_alloc = vlib_buffer_alloc (vm, buffer_indices, n_refill); + if (PREDICT_FALSE (n_alloc < n_refill)) + goto alloc_fail; + + vlib_get_buffers (vm, buffer_indices, (vlib_buffer_t **) buffers, n_alloc); + + for (n_free = 0; n_free < n_alloc; n_free++) + roc_store_pair ((u64) buffers[n_free], aura, addr); + + return n_alloc; + +alloc_fail: + vlib_buffer_unalloc_to_pool (vm, buffer_indices, n_alloc, bpi); + return 0; +} +#else static_always_inline void oct_rxq_refill_batch (vlib_main_t *vm, u64 lmt_id, u64 addr, oct_npa_lf_aura_batch_free_line_t *lines, u32 *bi, @@ -260,6 +292,7 @@ oct_rxq_refill (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq, u16 n_refill) return n_enq; } +#endif static_always_inline void oct_rx_trace (vlib_main_t *vm, vlib_node_runtime_t *node, diff --git a/src/plugins/dev_octeon/tx_node.c b/src/plugins/dev_octeon/tx_node.c index a2e4b07de8a..0907493814d 100644 --- a/src/plugins/dev_octeon/tx_node.c +++ b/src/plugins/dev_octeon/tx_node.c @@ -32,6 +32,44 @@ typedef struct lmt_line_t *lmt_lines; } oct_tx_ctx_t; +#ifdef PLATFORM_OCTEON9 +static_always_inline u32 +oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq) +{ + oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq); + u16 off = ctq->hdr_off; + u64 ah = ctq->aura_handle; + u32 n_freed = 0, n; + + ah = ctq->aura_handle; + + if ((n = roc_npa_aura_op_available (ah)) >= 32) + { + u64 buffers[n]; + u32 bi[n]; + + n_freed = roc_npa_aura_op_bulk_alloc (ah, buffers, n, 0, 1); + vlib_get_buffer_indices_with_offset (vm, (void **) &buffers, bi, n_freed, + off); + vlib_buffer_free_no_next (vm, bi, n_freed); + } + + return n_freed; +} + +static_always_inline void +oct_lmt_copy (void *lmt_addr, u64 io_addr, void *desc, u64 dwords) +{ + u64 lmt_status; + + do + { + roc_lmt_mov_seg (lmt_addr, desc, dwords); + lmt_status = roc_lmt_submit_ldeor (io_addr); + } + while (lmt_status == 0); +} +#else static_always_inline u32 oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq) { @@ -133,6 +171,7 @@ oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq) return n_freed; } +#endif static_always_inline u8 oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b, @@ -158,6 +197,11 @@ oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b, return 0; } +#ifdef PLATFORM_OCTEON9 + /* Override line for Octeon9 */ + line = ctx->lmt_lines; 
+#endif + if (!simple && flags & VLIB_BUFFER_NEXT_PRESENT) { u8 n_tail_segs = 0; @@ -238,8 +282,12 @@ oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b, t->sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX]; } +#ifdef PLATFORM_OCTEON9 + oct_lmt_copy (line, ctx->lmt_ioaddr, &d, n_dwords); +#else for (u32 i = 0; i < n_dwords; i++) line->dwords[i] = d.as_u128[i]; +#endif *dpl = n_dwords; *n = *n + 1; @@ -252,7 +300,7 @@ oct_tx_enq16 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq, vlib_buffer_t **b, u32 n_pkts, int trace) { u8 dwords_per_line[16], *dpl = dwords_per_line; - u64 lmt_arg, ioaddr, n_lines; + u64 __attribute__ ((unused)) lmt_arg, ioaddr, n_lines; u32 n_left, or_flags_16 = 0, n = 0; const u32 not_simple_flags = VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD; @@ -331,6 +379,7 @@ oct_tx_enq16 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq, if (PREDICT_FALSE (!n_lines)) return n_pkts; +#ifndef PLATFORM_OCTEON9 if (PREDICT_FALSE (or_flags_16 & VLIB_BUFFER_NEXT_PRESENT)) { dpl = dwords_per_line; @@ -359,6 +408,7 @@ oct_tx_enq16 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq, } roc_lmt_submit_steorl (lmt_arg, ioaddr); +#endif return n_pkts; } @@ -375,7 +425,11 @@ VNET_DEV_NODE_FN (oct_tx_node) u32 *from = vlib_frame_vector_args (frame); u32 n, n_enq, n_left, n_pkts = frame->n_vectors; vlib_buffer_t *buffers[VLIB_FRAME_SIZE + 8], **b = buffers; +#ifdef PLATFORM_OCTEON9 + u64 lmt_id = 0; +#else u64 lmt_id = vm->thread_index << ROC_LMT_LINES_PER_CORE_LOG2; +#endif oct_tx_ctx_t ctx = { .node = node, diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c index 421f662efa2..e416efe2e4d 100644 --- a/src/plugins/dpdk/device/init.c +++ b/src/plugins/dpdk/device/init.c @@ -1045,12 +1045,14 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) dpdk_main_t *dm = &dpdk_main; clib_error_t *error = 0; dpdk_config_main_t *conf = &dpdk_config_main; - vlib_thread_main_t *tm = vlib_get_thread_main (); dpdk_device_config_t *devconf; vlib_pci_addr_t pci_addr = { 0 }; vlib_vmbus_addr_t vmbus_addr = { 0 }; unformat_input_t sub_input; +#ifdef __linux + vlib_thread_main_t *tm = vlib_get_thread_main (); uword default_hugepage_sz, x; +#endif /* __linux__ */ u8 *s, *tmp = 0; int ret, i; int num_whitelisted = 0; @@ -1258,6 +1260,11 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) { vec_add1 (conf->eal_init_args, (u8 *) "--in-memory"); +#ifdef __linux__ + /* + * FreeBSD performs huge page prealloc through a dedicated kernel mode + * this process is only required on Linux. 
+ */ default_hugepage_sz = clib_mem_get_default_hugepage_size (); clib_bitmap_foreach (x, tm->cpu_socket_bitmap) @@ -1272,6 +1279,7 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) if ((e = clib_sysfs_prealloc_hugepages(x, 0, n_pages))) clib_error_report (e); } +#endif /* __linux__ */ } /* on/off dpdk's telemetry thread */ diff --git a/src/plugins/hs_apps/http_cli.c b/src/plugins/hs_apps/http_cli.c index 5d4d49c0fba..f42f65342c3 100644 --- a/src/plugins/hs_apps/http_cli.c +++ b/src/plugins/hs_apps/http_cli.c @@ -323,6 +323,13 @@ hcs_ts_rx_callback (session_t *ts) return 0; } + if (msg.data.len == 0) + { + hs->tx_buf = 0; + start_send_data (hs, HTTP_STATUS_BAD_REQUEST); + return 0; + } + /* send the command to a new/recycled vlib process */ vec_validate (args.buf, msg.data.len - 1); rv = svm_fifo_dequeue (ts->rx_fifo, msg.data.len, args.buf); diff --git a/src/plugins/hs_apps/http_client_cli.c b/src/plugins/hs_apps/http_client_cli.c index 1a321bf44a8..a99169bafea 100644 --- a/src/plugins/hs_apps/http_client_cli.c +++ b/src/plugins/hs_apps/http_client_cli.c @@ -67,6 +67,7 @@ typedef enum { HCC_REPLY_RECEIVED = 100, HCC_TRANSPORT_CLOSED, + HCC_CONNECT_FAILED, } hcc_cli_signal_t; static hcc_main_t hcc_main; @@ -135,6 +136,8 @@ hcc_ts_connected_callback (u32 app_index, u32 hc_index, session_t *as, { clib_warning ("connected error: hc_index(%d): %U", hc_index, format_session_error, err); + vlib_process_signal_event_mt (hcm->vlib_main, hcm->cli_node_index, + HCC_CONNECT_FAILED, 0); return -1; } @@ -425,6 +428,9 @@ hcc_run (vlib_main_t *vm, int print_output) case HCC_TRANSPORT_CLOSED: err = clib_error_return (0, "error, transport closed"); break; + case HCC_CONNECT_FAILED: + err = clib_error_return (0, "failed to connect"); + break; default: err = clib_error_return (0, "unexpected event %d", event_type); break; diff --git a/src/plugins/http/http.c b/src/plugins/http/http.c index 855ab8deb3b..893dd877c29 100644 --- a/src/plugins/http/http.c +++ b/src/plugins/http/http.c @@ -267,17 +267,21 @@ http_ts_connected_callback (u32 http_app_index, u32 ho_hc_index, session_t *ts, app_worker_t *app_wrk; int rv; + ho_hc = http_conn_get_w_thread (ho_hc_index, 0); + ASSERT (ho_hc->state == HTTP_CONN_STATE_CONNECTING); + if (err) { - clib_warning ("ERROR: %d", err); + clib_warning ("half-open hc index %d, error: %U", ho_hc_index, + format_session_error, err); + app_wrk = app_worker_get_if_valid (ho_hc->h_pa_wrk_index); + if (app_wrk) + app_worker_connect_notify (app_wrk, 0, err, ho_hc->h_pa_app_api_ctx); return 0; } new_hc_index = http_conn_alloc_w_thread (ts->thread_index); hc = http_conn_get_w_thread (new_hc_index, ts->thread_index); - ho_hc = http_conn_get_w_thread (ho_hc_index, 0); - - ASSERT (ho_hc->state == HTTP_CONN_STATE_CONNECTING); clib_memcpy_fast (hc, ho_hc, sizeof (*hc)); @@ -378,7 +382,7 @@ static const char *http_response_template = "HTTP/1.1 %s\r\n" "Content-Length: %lu\r\n\r\n"; static const char *http_request_template = "GET %s HTTP/1.1\r\n" - "User-Agent: VPP HTTP client\r\n" + "User-Agent: %s\r\n" "Accept: */*\r\n"; static u32 @@ -521,17 +525,19 @@ http_state_wait_server_reply (http_conn_t *hc, transport_send_params_t *sp) http_msg_t msg = {}; app_worker_t *app_wrk; session_t *as; - http_status_code_t ec; rv = http_read_message (hc); /* Nothing yet, wait for data or timer expire */ if (rv) - return HTTP_SM_STOP; + { + HTTP_DBG (1, "no data to deq"); + return HTTP_SM_STOP; + } if (vec_len (hc->rx_buf) < 8) { - ec = HTTP_STATUS_BAD_REQUEST; + clib_warning ("response buffer too short"); 
goto error; } @@ -547,9 +553,7 @@ http_state_wait_server_reply (http_conn_t *hc, transport_send_params_t *sp) if (rv) { clib_warning ("failed to parse http reply"); - session_transport_closing_notify (&hc->connection); - http_disconnect_transport (hc); - return -1; + goto error; } msg.data.len = content_length; u32 dlen = vec_len (hc->rx_buf) - hc->rx_buf_offset; @@ -592,16 +596,14 @@ http_state_wait_server_reply (http_conn_t *hc, transport_send_params_t *sp) } else { - HTTP_DBG (0, "Unknown http method %v", hc->rx_buf); - ec = HTTP_STATUS_METHOD_NOT_ALLOWED; + clib_warning ("Unknown http method %v", hc->rx_buf); goto error; } error: - http_send_error (hc, ec); session_transport_closing_notify (&hc->connection); + session_transport_closed_notify (&hc->connection); http_disconnect_transport (hc); - return HTTP_SM_ERROR; } @@ -742,6 +744,10 @@ http_state_wait_app_reply (http_conn_t *hc, transport_send_params_t *sp) switch (msg.code) { + case HTTP_STATUS_NOT_FOUND: + case HTTP_STATUS_METHOD_NOT_ALLOWED: + case HTTP_STATUS_BAD_REQUEST: + case HTTP_STATUS_INTERNAL_ERROR: case HTTP_STATUS_OK: header = format (0, http_response_template, http_status_code_str[msg.code], @@ -762,6 +768,7 @@ http_state_wait_app_reply (http_conn_t *hc, transport_send_params_t *sp) /* Location: http(s)://new-place already queued up as data */ break; default: + clib_warning ("unsupported status code: %d", msg.code); return HTTP_SM_ERROR; } @@ -817,11 +824,18 @@ http_state_wait_app_method (http_conn_t *hc, transport_send_params_t *sp) goto error; } + /* currently we support only GET method */ + if (msg.method_type != HTTP_REQ_GET) + { + clib_warning ("unsupported method %d", msg.method_type); + goto error; + } + vec_validate (buf, msg.data.len - 1); rv = svm_fifo_dequeue (as->tx_fifo, msg.data.len, buf); ASSERT (rv == msg.data.len); - request = format (0, http_request_template, buf); + request = format (0, http_request_template, buf, hc->app_name); offset = http_send_data (hc, request, vec_len (request), 0); if (offset != vec_len (request)) { @@ -1159,6 +1173,11 @@ http_transport_connect (transport_endpoint_cfg_t *tep) hc->state = HTTP_CONN_STATE_CONNECTING; cargs->api_context = hc_index; + if (vec_len (app->name)) + hc->app_name = vec_dup (app->name); + else + hc->app_name = format (0, "VPP HTTP client"); + HTTP_DBG (1, "hc ho_index %x", hc_index); if ((error = vnet_connect (cargs))) diff --git a/src/plugins/http/http.h b/src/plugins/http/http.h index c9912dd6db8..7fbefd667f4 100644 --- a/src/plugins/http/http.h +++ b/src/plugins/http/http.h @@ -277,6 +277,74 @@ http_state_is_tx_valid (http_conn_t *hc) state == HTTP_STATE_WAIT_APP_METHOD); } +/** + * Remove dot segments from path (RFC3986 section 5.2.4) + * + * @param path Path to sanitize. + * + * @return New vector with sanitized path. + * + * The caller is always responsible to free the returned vector. + */ +always_inline u8 * +http_path_remove_dot_segments (u8 *path) +{ + u32 *segments = 0, *segments_len = 0, segment_len; + u8 *new_path = 0; + int i, ii; + + if (!path) + return vec_new (u8, 0); + + segments = vec_new (u32, 1); + /* first segment */ + segments[0] = 0; + /* find all segments */ + for (i = 1; i < (vec_len (path) - 1); i++) + { + if (path[i] == '/') + vec_add1 (segments, i + 1); + } + /* dummy tail */ + vec_add1 (segments, vec_len (path)); + + /* scan all segments for "." and ".." 
*/ + segments_len = vec_new (u32, vec_len (segments) - 1); + for (i = 0; i < vec_len (segments_len); i++) + { + segment_len = segments[i + 1] - segments[i]; + if (segment_len == 2 && path[segments[i]] == '.') + segment_len = 0; + else if (segment_len == 3 && path[segments[i]] == '.' && + path[segments[i] + 1] == '.') + { + segment_len = 0; + /* remove parent (if any) */ + for (ii = i - 1; ii >= 0; ii--) + { + if (segments_len[ii]) + { + segments_len[ii] = 0; + break; + } + } + } + segments_len[i] = segment_len; + } + + /* we might end with empty path, so return at least empty vector */ + new_path = vec_new (u8, 0); + /* append all valid segments */ + for (i = 0; i < vec_len (segments_len); i++) + { + if (segments_len[i]) + vec_add (new_path, path + segments[i], segments_len[i]); + } + vec_free (segments); + vec_free (segments_len); + return new_path; +} + #endif /* SRC_PLUGINS_HTTP_HTTP_H_ */ /* diff --git a/src/plugins/http_static/static_server.c b/src/plugins/http_static/static_server.c index 040cdca9d7a..f433238dcb1 100644 --- a/src/plugins/http_static/static_server.c +++ b/src/plugins/http_static/static_server.c @@ -357,7 +357,7 @@ try_file_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt, u8 *request) { http_status_code_t sc = HTTP_STATUS_OK; - u8 *path; + u8 *path, *sanitized_path; u32 ce_index; http_content_type_t type; @@ -367,6 +367,9 @@ try_file_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt, type = content_type_from_request (request); + /* Remove dot segments to prevent path traversal */ + sanitized_path = http_path_remove_dot_segments (request); + /* * Construct the file to open * Browsers are capable of sporadically including a leading '/' @@ -374,9 +377,9 @@ try_file_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt, if (!request) path = format (0, "%s%c", hsm->www_root, 0); else if (request[0] == '/') - path = format (0, "%s%s%c", hsm->www_root, request, 0); + path = format (0, "%s%s%c", hsm->www_root, sanitized_path, 0); else - path = format (0, "%s/%s%c", hsm->www_root, request, 0); + path = format (0, "%s/%s%c", hsm->www_root, sanitized_path, 0); if (hsm->debug_level > 0) clib_warning ("%s '%s'", (rt == HTTP_REQ_GET) ? "GET" : "POST", path); @@ -419,7 +422,7 @@ try_file_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt, hs->cache_pool_index = ce_index; done: - + vec_free (sanitized_path); hs->content_type = type; start_send_data (hs, sc); if (!hs->data) diff --git a/src/plugins/marvell/pp2/cli.c b/src/plugins/marvell/pp2/cli.c index f4ecb1873c9..5072a3c035b 100644 --- a/src/plugins/marvell/pp2/cli.c +++ b/src/plugins/marvell/pp2/cli.c @@ -31,7 +31,7 @@ mrvl_pp2_create_command_fn (vlib_main_t * vm, unformat_input_t * input, { unformat_input_t _line_input, *line_input = &_line_input; mrvl_pp2_create_if_args_t args = { 0 }; - uint val; + unsigned int val; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) diff --git a/src/plugins/nat/nat44-ei/nat44_ei_in2out.c b/src/plugins/nat/nat44-ei/nat44_ei_in2out.c index 01b333a5234..3b981d69986 100644 --- a/src/plugins/nat/nat44-ei/nat44_ei_in2out.c +++ b/src/plugins/nat/nat44-ei/nat44_ei_in2out.c @@ -859,7 +859,7 @@ nat44_ei_icmp_in2out (vlib_buffer_t *b0, ip4_header_t *ip0, nat44_ei_main_t *nm = &nat44_ei_main; vlib_main_t *vm = vlib_get_main (); ip4_address_t addr; - u16 port; + u16 port = 0; u32 fib_index; nat_protocol_t proto; icmp_echo_header_t *echo0, *inner_echo0 = 0; diff --git a/src/plugins/netmap/CMakeLists.txt b/src/plugins/netmap/CMakeLists.txt new file mode 100644 index 00000000000..d53a9e0911a --- /dev/null +++ b/src/plugins/netmap/CMakeLists.txt @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2024 Tom Jones <thj@freebsd.org> +# +# This software was developed by Tom Jones <thj@freebsd.org> under sponsorship +# from the FreeBSD Foundation. +# + +if (NOT "${CMAKE_SYSTEM_NAME}" STREQUAL "FreeBSD") + message(WARNING "Netmap is only currently support on FreeBSD - netmap plugin disabled") + return() +endif() + +add_vpp_plugin(netmap + SOURCES + plugin.c + netmap.c + node.c + device.c + cli.c + netmap_api.c + + MULTIARCH_SOURCES + node.c + device.c + + INSTALL_HEADERS + netmap.h + net_netmap.h + + API_FILES + netmap.api +) diff --git a/src/plugins/netmap/FEATURE.yaml b/src/plugins/netmap/FEATURE.yaml new file mode 100644 index 00000000000..a9dfb2163e4 --- /dev/null +++ b/src/plugins/netmap/FEATURE.yaml @@ -0,0 +1,12 @@ +--- +name: Netmap Device +maintainer: Tom Jones <thj@freebsd.org> +features: + - L4 checksum offload +description: "Create a netmap interface, which is a high speed user-space + interface that allows VPP to patch to a physical or virtual NIC + without the use of DPDK" +missing: + - API dump +state: production +properties: [API, CLI, STATS, MULTITHREAD] diff --git a/src/plugins/netmap/cli.c b/src/plugins/netmap/cli.c new file mode 100644 index 00000000000..b54d397ecbe --- /dev/null +++ b/src/plugins/netmap/cli.c @@ -0,0 +1,236 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ +#include <stdint.h> +#include <net/if.h> +#include <sys/ioctl.h> + +#include <vlib/vlib.h> +#include <vlib/unix/unix.h> +#include <vnet/ethernet/ethernet.h> + +#include <netmap/net_netmap.h> +#include <netmap/netmap.h> + +static clib_error_t * +netmap_create_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + u8 *host_if_name = NULL; + u8 hwaddr[6]; + u8 *hw_addr_ptr = 0; + int r; + u8 is_pipe = 0; + u8 is_master = 0; + u32 sw_if_index = ~0; + clib_error_t *error = NULL; + + /* Get a line of input. 
*/ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "name %s", &host_if_name)) + ; + else + if (unformat + (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr)) + hw_addr_ptr = hwaddr; + else if (unformat (line_input, "pipe")) + is_pipe = 1; + else if (unformat (line_input, "master")) + is_master = 1; + else if (unformat (line_input, "slave")) + is_master = 0; + else + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } + } + + if (host_if_name == NULL) + { + error = clib_error_return (0, "missing host interface name"); + goto done; + } + + r = + netmap_create_if (vm, host_if_name, hw_addr_ptr, is_pipe, is_master, + &sw_if_index); + + if (r == VNET_API_ERROR_SYSCALL_ERROR_1) + { + error = clib_error_return (0, "%s (errno %d)", strerror (errno), errno); + goto done; + } + + if (r == VNET_API_ERROR_INVALID_INTERFACE) + { + error = clib_error_return (0, "Invalid interface name"); + goto done; + } + + if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS) + { + error = clib_error_return (0, "Interface already exists"); + goto done; + } + + vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (), + sw_if_index); + +done: + unformat_free (line_input); + + return error; +} + +/*? + * '<em>netmap</em>' is a framework for very fast packet I/O from userspace. + * '<em>VALE</em>' is an equally fast in-kernel software switch using the + * netmap API. '<em>netmap</em>' includes '<em>netmap pipes</em>', a shared + * memory packet transport channel. Together, they provide a high speed + * user-space interface that allows VPP to patch into a linux namespace, a + * linux container, or a physical NIC without the use of DPDK. Netmap/VALE + * generates the '<em>netmap.ko</em>' kernel module that needs to be loaded + * before netmap interfaces can be created. + * - https://github.com/luigirizzo/netmap - Netmap/VALE repo. + * - https://github.com/vpp-dev/netmap - VPP development package for Netmap/VALE, + * which is a snapshot of the Netmap/VALE repo with minor changes to work + * with containers and modified kernel drivers to work with NICs. + * + * Create a netmap interface that will attach to a linux interface. + * The interface must already exist. Once created, a new netmap interface + * will exist in VPP with the name '<em>netmap-<ifname></em>', where + * '<em><ifname></em>' takes one of two forms: + * - <b>ifname</b> - Linux interface to bind too. + * - <b>valeXXX:YYY</b> - + * - Where '<em>valeXXX</em>' is an arbitrary name for a VALE + * interface that must start with '<em>vale</em>' and is less + * than 16 characters. + * - Where '<em>YYY</em>' is an existing linux namespace. + * + * This command has the following optional parameters: + * + * - <b>hw-addr <mac-addr></b> - Optional ethernet address, can be in either + * X:X:X:X:X:X unix or X.X.X cisco format. + * + * - <b>pipe</b> - Optional flag to indicate that a '<em>netmap pipe</em>' + * instance should be created. + * + * - <b>master | slave</b> - Optional flag to indicate whether VPP should + * be the master or slave of the '<em>netmap pipe</em>'. Only considered + * if '<em>pipe</em>' is entered. Defaults to '<em>slave</em>' if not entered. 
+ * + * @cliexpar + * Example of how to create a netmap interface tied to the linux + * namespace '<em>vpp1</em>': + * @cliexstart{create netmap name vale00:vpp1 hw-addr 02:FE:3F:34:15:9B pipe master} + * netmap-vale00:vpp1 + * @cliexend + * Once the netmap interface is created, enable the interface using: + * @cliexcmd{set interface state netmap-vale00:vpp1 up} +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (netmap_create_command, static) = { + .path = "create netmap", + .short_help = "create netmap name <ifname>|valeXXX:YYY " + "[hw-addr <mac-addr>] [pipe] [master|slave]", + .function = netmap_create_command_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +netmap_delete_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + u8 *host_if_name = NULL; + clib_error_t *error = NULL; + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "name %s", &host_if_name)) + ; + else + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } + } + + if (host_if_name == NULL) + { + error = clib_error_return (0, "missing host interface name"); + goto done; + } + + netmap_delete_if (vm, host_if_name); + +done: + unformat_free (line_input); + + return error; +} + +/*? + * Delete a netmap interface. Use the '<em><ifname></em>' to identify + * the netmap interface to be deleted. In VPP, netmap interfaces are + * named as '<em>netmap-<ifname></em>', where '<em><ifname></em>' + * takes one of two forms: + * - <b>ifname</b> - Linux interface to bind too. + * - <b>valeXXX:YYY</b> - + * - Where '<em>valeXXX</em>' is an arbitrary name for a VALE + * interface that must start with '<em>vale</em>' and is less + * than 16 characters. + * - Where '<em>YYY</em>' is an existing linux namespace. + * + * @cliexpar + * Example of how to delete a netmap interface named '<em>netmap-vale00:vpp1</em>': + * @cliexcmd{delete netmap name vale00:vpp1} +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (netmap_delete_command, static) = { + .path = "delete netmap", + .short_help = "delete netmap name <ifname>|valeXXX:YYY", + .function = netmap_delete_command_fn, +}; +/* *INDENT-ON* */ + +clib_error_t * +netmap_cli_init (vlib_main_t * vm) +{ + return 0; +} + +VLIB_INIT_FUNCTION (netmap_cli_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/netmap/device.c b/src/plugins/netmap/device.c new file mode 100644 index 00000000000..505deb988c4 --- /dev/null +++ b/src/plugins/netmap/device.c @@ -0,0 +1,252 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include <stdint.h> +#include <net/if.h> +#include <sys/ioctl.h> + +#include <vlib/vlib.h> +#include <vlib/unix/unix.h> +#include <vnet/ethernet/ethernet.h> + +#include <netmap/net_netmap.h> +#include <netmap/netmap.h> + +#define foreach_netmap_tx_func_error \ +_(NO_FREE_SLOTS, "no free tx slots") \ +_(PENDING_MSGS, "pending msgs in tx ring") + +typedef enum +{ +#define _(f,s) NETMAP_TX_ERROR_##f, + foreach_netmap_tx_func_error +#undef _ + NETMAP_TX_N_ERROR, +} netmap_tx_func_error_t; + +static char *netmap_tx_func_error_strings[] = { +#define _(n,s) s, + foreach_netmap_tx_func_error +#undef _ +}; + + +static u8 * +format_netmap_device_name (u8 * s, va_list * args) +{ + u32 i = va_arg (*args, u32); + netmap_main_t *apm = &netmap_main; + netmap_if_t *nif = pool_elt_at_index (apm->interfaces, i); + + s = format (s, "netmap-%s", nif->host_if_name); + return s; +} + +static u8 * +format_netmap_device (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + int verbose = va_arg (*args, int); + netmap_main_t *nm = &netmap_main; + netmap_if_t *nif = vec_elt_at_index (nm->interfaces, dev_instance); + u32 indent = format_get_indent (s); + + s = format (s, "NETMAP interface"); + if (verbose) + { + s = format (s, "\n%U version %d flags 0x%x" + "\n%U region %u memsize 0x%x offset 0x%x" + "\n%U tx_slots %u rx_slots %u tx_rings %u rx_rings %u", + format_white_space, indent + 2, + nif->req->nr_version, + nif->req->nr_flags, + format_white_space, indent + 2, + nif->mem_region, + nif->req->nr_memsize, + nif->req->nr_offset, + format_white_space, indent + 2, + nif->req->nr_tx_slots, + nif->req->nr_rx_slots, + nif->req->nr_tx_rings, nif->req->nr_rx_rings); + } + return s; +} + +static u8 * +format_netmap_tx_trace (u8 * s, va_list * args) +{ + s = format (s, "Unimplemented..."); + return s; +} + +VNET_DEVICE_CLASS_TX_FN (netmap_device_class) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + netmap_main_t *nm = &netmap_main; + u32 *buffers = vlib_frame_vector_args (frame); + u32 n_left = frame->n_vectors; + f64 const time_constant = 1e3; + vnet_interface_output_runtime_t *rd = (void *) node->runtime_data; + netmap_if_t *nif = pool_elt_at_index (nm->interfaces, rd->dev_instance); + int cur_ring; + + clib_spinlock_lock_if_init (&nif->lockp); + + cur_ring = nif->first_tx_ring; + + while (n_left && cur_ring <= nif->last_tx_ring) + { + struct netmap_ring *ring = NETMAP_TXRING (nif->nifp, cur_ring); + int n_free_slots = nm_ring_space (ring); + uint cur = ring->cur; + + if (nm_tx_pending (ring)) + { + if (ioctl (nif->fd, NIOCTXSYNC, NULL) < 0) + clib_unix_warning ("NIOCTXSYNC"); + clib_cpu_time_wait (time_constant); + + if (nm_tx_pending (ring) && !n_free_slots) + { + cur_ring++; + continue; + } + } + + while (n_left && n_free_slots) + { + vlib_buffer_t *b0 = 0; + u32 bi = buffers[0]; + u32 len; + u32 offset = 0; + buffers++; + + struct netmap_slot *slot = &ring->slot[cur]; + + do + { + b0 = vlib_get_buffer (vm, bi); + len = b0->current_length; + /* memcpy */ + clib_memcpy_fast ((u8 *) NETMAP_BUF (ring, slot->buf_idx) + + offset, vlib_buffer_get_current (b0), len); + offset += len; + } + while ((bi = b0->next_buffer)); + + slot->len = offset; + cur = (cur + 1) % ring->num_slots; + n_free_slots--; + n_left--; + } + CLIB_MEMORY_BARRIER (); + ring->head = ring->cur = cur; + } + + if (n_left < frame->n_vectors) + ioctl (nif->fd, NIOCTXSYNC, NULL); + + clib_spinlock_unlock_if_init (&nif->lockp); + + if 
(n_left) + vlib_error_count (vm, node->node_index, + (n_left == + frame->n_vectors ? NETMAP_TX_ERROR_PENDING_MSGS : + NETMAP_TX_ERROR_NO_FREE_SLOTS), n_left); + + vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors); + return frame->n_vectors; +} + +static void +netmap_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index, + u32 node_index) +{ + netmap_main_t *apm = &netmap_main; + vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index); + netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance); + + /* Shut off redirection */ + if (node_index == ~0) + { + nif->per_interface_next_index = node_index; + return; + } + + nif->per_interface_next_index = + vlib_node_add_next (vlib_get_main (), netmap_input_node.index, + node_index); +} + +static void +netmap_clear_hw_interface_counters (u32 instance) +{ + /* Nothing for now */ +} + +static clib_error_t * +netmap_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) +{ + netmap_main_t *apm = &netmap_main; + vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index); + netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance); + u32 hw_flags; + + nif->is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0; + + if (nif->is_admin_up) + hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP; + else + hw_flags = 0; + + vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags); + + return 0; +} + +static clib_error_t * +netmap_subif_add_del_function (vnet_main_t * vnm, + u32 hw_if_index, + struct vnet_sw_interface_t *st, int is_add) +{ + /* Nothing for now */ + return 0; +} + +/* *INDENT-OFF* */ +VNET_DEVICE_CLASS (netmap_device_class) = { + .name = "netmap", + .format_device_name = format_netmap_device_name, + .format_device = format_netmap_device, + .format_tx_trace = format_netmap_tx_trace, + .tx_function_n_errors = NETMAP_TX_N_ERROR, + .tx_function_error_strings = netmap_tx_func_error_strings, + .rx_redirect_to_node = netmap_set_interface_next_node, + .clear_counters = netmap_clear_hw_interface_counters, + .admin_up_down_function = netmap_interface_admin_up_down, + .subif_add_del_function = netmap_subif_add_del_function, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/netmap/net_netmap.h b/src/plugins/netmap/net_netmap.h new file mode 100644 index 00000000000..ecccedd4484 --- /dev/null +++ b/src/plugins/netmap/net_netmap.h @@ -0,0 +1,650 @@ +/* + * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``S IS''AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * $FreeBSD: head/sys/net/netmap.h 251139 2013-05-30 14:07:14Z luigi $ + * + * Definitions of constants and the structures used by the netmap + * framework, for the part visible to both kernel and userspace. + * Detailed info on netmap is available with "man netmap" or at + * + * http://info.iet.unipi.it/~luigi/netmap/ + * + * This API is also used to communicate with the VALE software switch + */ + +#ifndef _NET_NETMAP_H_ +#define _NET_NETMAP_H_ + +#define NETMAP_API 14 /* current API version */ + +#define NETMAP_MIN_API 14 /* min and max versions accepted */ +#define NETMAP_MAX_API 15 +/* + * Some fields should be cache-aligned to reduce contention. + * The alignment is architecture and OS dependent, but rather than + * digging into OS headers to find the exact value we use an estimate + * that should cover most architectures. + */ +#define NM_CACHE_ALIGN 128 + +/* + * --- Netmap data structures --- + * + * The userspace data structures used by netmap are shown below. + * They are allocated by the kernel and mmap()ed by userspace threads. + * Pointers are implemented as memory offsets or indexes, + * so that they can be easily dereferenced in kernel and userspace. + + KERNEL (opaque, obviously) + + ==================================================================== + | + USERSPACE | struct netmap_ring + +---->+---------------+ + / | head,cur,tail | + struct netmap_if (nifp, 1 per fd) / | buf_ofs | + +---------------+ / | other fields | + | ni_tx_rings | / +===============+ + | ni_rx_rings | / | buf_idx, len | slot[0] + | | / | flags, ptr | + | | / +---------------+ + +===============+ / | buf_idx, len | slot[1] + | txring_ofs[0] | (rel.to nifp)--' | flags, ptr | + | txring_ofs[1] | +---------------+ + (tx+1 entries) (num_slots entries) + | txring_ofs[t] | | buf_idx, len | slot[n-1] + +---------------+ | flags, ptr | + | rxring_ofs[0] | +---------------+ + | rxring_ofs[1] | + (rx+1 entries) + | rxring_ofs[r] | + +---------------+ + + * For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to + * a file descriptor, the mmap()ed region contains a (logically readonly) + * struct netmap_if pointing to struct netmap_ring's. + * + * There is one netmap_ring per physical NIC ring, plus one tx/rx ring + * pair attached to the host stack (this pair is unused for non-NIC ports). + * + * All physical/host stack ports share the same memory region, + * so that zero-copy can be implemented between them. + * VALE switch ports instead have separate memory regions. + * + * The netmap_ring is the userspace-visible replica of the NIC ring. + * Each slot has the index of a buffer (MTU-sized and residing in the + * mmapped region), its length and some flags. An extra 64-bit pointer + * is provided for user-supplied buffers in the tx path. 
+ * + * In user space, the buffer address is computed as + * (char *)ring + buf_ofs + index * NETMAP_BUF_SIZE + * + * Added in NETMAP_API 11: + * + * + NIOCREGIF can request the allocation of extra spare buffers from + * the same memory pool. The desired number of buffers must be in + * nr_arg3. The ioctl may return fewer buffers, depending on memory + * availability. nr_arg3 will return the actual value, and, once + * mapped, nifp->ni_bufs_head will be the index of the first buffer. + * + * The buffers are linked to each other using the first uint32_t + * as the index. On close, ni_bufs_head must point to the list of + * buffers to be released. + * + * + NIOCREGIF can request space for extra rings (and buffers) + * allocated in the same memory space. The number of extra rings + * is in nr_arg1, and is advisory. This is a no-op on NICs where + * the size of the memory space is fixed. + * + * + NIOCREGIF can attach to PIPE rings sharing the same memory + * space with a parent device. The ifname indicates the parent device, + * which must already exist. Flags in nr_flags indicate if we want to + * bind the master or slave side, the index (from nr_ringid) + * is just a cookie and does not need to be sequential. + * + * + NIOCREGIF can also attach to 'monitor' rings that replicate + * the content of specific rings, also from the same memory space. + * + * Extra flags in nr_flags support the above functions. + * Application libraries may use the following naming scheme: + * netmap:foo all NIC ring pairs + * netmap:foo^ only host ring pair + * netmap:foo+ all NIC ring + host ring pairs + * netmap:foo-k the k-th NIC ring pair + * netmap:foo{k PIPE ring pair k, master side + * netmap:foo}k PIPE ring pair k, slave side + */ + +/* + * struct netmap_slot is a buffer descriptor + */ +struct netmap_slot { + uint32_t buf_idx; /* buffer index */ + uint16_t len; /* length for this slot */ + uint16_t flags; /* buf changed, etc. */ + uint64_t ptr; /* pointer for indirect buffers */ +}; + +/* + * The following flags control how the slot is used + */ + +#define NS_BUF_CHANGED 0x0001 /* buf_idx changed */ + /* + * must be set whenever buf_idx is changed (as it might be + * necessary to recompute the physical address and mapping) + * + * It is also set by the kernel whenever the buf_idx is + * changed internally (e.g., by pipes). Applications may + * use this information to know when they can reuse the + * contents of previously prepared buffers. + */ + +#define NS_REPORT 0x0002 /* ask the hardware to report results */ + /* + * Request notification when slot is used by the hardware. + * Normally transmit completions are handled lazily and + * may be unreported. This flag lets us know when a slot + * has been sent (e.g. to terminate the sender). + */ + +#define NS_FORWARD 0x0004 /* pass packet 'forward' */ + /* + * (Only for physical ports, rx rings with NR_FORWARD set). + * Slot released to the kernel (i.e. before ring->head) with + * this flag set are passed to the peer ring (host/NIC), + * thus restoring the host-NIC connection for these slots. + * This supports efficient traffic monitoring or firewalling. + */ + +#define NS_NO_LEARN 0x0008 /* disable bridge learning */ + /* + * On a VALE switch, do not 'learn' the source port for + * this buffer. + */ + +#define NS_INDIRECT 0x0010 /* userspace buffer */ + /* + * (VALE tx rings only) data is in a userspace buffer, + * whose address is in the 'ptr' field in the slot. 
+ */ + +#define NS_MOREFRAG 0x0020 /* packet has more fragments */ + /* + * (VALE ports only) + * Set on all but the last slot of a multi-segment packet. + * The 'len' field refers to the individual fragment. + */ + +#define NS_PORT_SHIFT 8 +#define NS_PORT_MASK (0xff << NS_PORT_SHIFT) + /* + * The high 8 bits of the flag, if not zero, indicate the + * destination port for the VALE switch, overriding + * the lookup table. + */ + +#define NS_RFRAGS(_slot) ( ((_slot)->flags >> 8) & 0xff) + /* + * (VALE rx rings only) the high 8 bits + * are the number of fragments. + */ + + +/* + * struct netmap_ring + * + * Netmap representation of a TX or RX ring (also known as "queue"). + * This is a queue implemented as a fixed-size circular array. + * At the software level the important fields are: head, cur, tail. + * + * In TX rings: + * + * head first slot available for transmission. + * cur wakeup point. select() and poll() will unblock + * when 'tail' moves past 'cur' + * tail (readonly) first slot reserved to the kernel + * + * [head .. tail-1] can be used for new packets to send; + * 'head' and 'cur' must be incremented as slots are filled + * with new packets to be sent; + * 'cur' can be moved further ahead if we need more space + * for new transmissions. XXX todo (2014-03-12) + * + * In RX rings: + * + * head first valid received packet + * cur wakeup point. select() and poll() will unblock + * when 'tail' moves past 'cur' + * tail (readonly) first slot reserved to the kernel + * + * [head .. tail-1] contain received packets; + * 'head' and 'cur' must be incremented as slots are consumed + * and can be returned to the kernel; + * 'cur' can be moved further ahead if we want to wait for + * new packets without returning the previous ones. + * + * DATA OWNERSHIP/LOCKING: + * The netmap_ring, and all slots and buffers in the range + * [head .. tail-1] are owned by the user program; + * the kernel only accesses them during a netmap system call + * and in the user thread context. + * + * Other slots and buffers are reserved for use by the kernel + */ +struct netmap_ring { + /* + * buf_ofs is meant to be used through macros. + * It contains the offset of the buffer region from this + * descriptor. + */ + const int64_t buf_ofs; + const uint32_t num_slots; /* number of slots in the ring. */ + const uint32_t nr_buf_size; + const uint16_t ringid; + const uint16_t dir; /* 0: tx, 1: rx */ + + uint32_t head; /* (u) first user slot */ + uint32_t cur; /* (u) wakeup point */ + uint32_t tail; /* (k) first kernel slot */ + + uint32_t flags; + + struct timeval ts; /* (k) time of last *sync() */ + + /* opaque room for a mutex or similar object */ +#if !defined(_WIN32) || defined(__CYGWIN__) + uint8_t __attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128]; +#else + uint8_t __declspec(align(NM_CACHE_ALIGN)) sem[128]; +#endif + + /* the slots follow. This struct has variable size */ + struct netmap_slot slot[0]; /* array of slots. */ +}; + + +/* + * RING FLAGS + */ +#define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */ + /* + * updates the 'ts' field on each netmap syscall. This saves + * saves a separate gettimeofday(), and is not much worse than + * software timestamps generated in the interrupt handler. + */ + +#define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */ + /* + * Enables the NS_FORWARD slot flag for the ring. + */ + + +/* + * Netmap representation of an interface and its queue(s). 
+ * This is initialized by the kernel when binding a file + * descriptor to a port, and should be considered as readonly + * by user programs. The kernel never uses it. + * + * There is one netmap_if for each file descriptor on which we want + * to select/poll. + * select/poll operates on one or all pairs depending on the value of + * nmr_queueid passed on the ioctl. + */ +struct netmap_if { + char ni_name[IFNAMSIZ]; /* name of the interface. */ + const uint32_t ni_version; /* API version, currently unused */ + const uint32_t ni_flags; /* properties */ +#define NI_PRIV_MEM 0x1 /* private memory region */ + + /* + * The number of packet rings available in netmap mode. + * Physical NICs can have different numbers of tx and rx rings. + * Physical NICs also have a 'host' ring pair. + * Additionally, clients can request additional ring pairs to + * be used for internal communication. + */ + const uint32_t ni_tx_rings; /* number of HW tx rings */ + const uint32_t ni_rx_rings; /* number of HW rx rings */ + + uint32_t ni_bufs_head; /* head index for extra bufs */ + uint32_t ni_spare1[5]; + /* + * The following array contains the offset of each netmap ring + * from this structure, in the following order: + * NIC tx rings (ni_tx_rings); host tx ring (1); extra tx rings; + * NIC rx rings (ni_rx_rings); host tx ring (1); extra rx rings. + * + * The area is filled up by the kernel on NIOCREGIF, + * and then only read by userspace code. + */ + const ssize_t ring_ofs[0]; +}; + + +#ifndef NIOCREGIF +/* + * ioctl names and related fields + * + * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues, + * whose identity is set in NIOCREGIF through nr_ringid. + * These are non blocking and take no argument. + * + * NIOCGINFO takes a struct ifreq, the interface name is the input, + * the outputs are number of queues and number of descriptor + * for each queue (useful to set number of threads etc.). + * The info returned is only advisory and may change before + * the interface is bound to a file descriptor. + * + * NIOCREGIF takes an interface name within a struct nmre, + * and activates netmap mode on the interface (if possible). + * + * The argument to NIOCGINFO/NIOCREGIF overlays struct ifreq so we + * can pass it down to other NIC-related ioctls. + * + * The actual argument (struct nmreq) has a number of options to request + * different functions. + * The following are used in NIOCREGIF when nr_cmd == 0: + * + * nr_name (in) + * The name of the port (em0, valeXXX:YYY, etc.) + * limited to IFNAMSIZ for backward compatibility. + * + * nr_version (in/out) + * Must match NETMAP_API as used in the kernel, error otherwise. + * Always returns the desired value on output. + * + * nr_tx_slots, nr_tx_slots, nr_tx_rings, nr_rx_rings (in/out) + * On input, non-zero values may be used to reconfigure the port + * according to the requested values, but this is not guaranteed. + * On output the actual values in use are reported. + * + * nr_ringid (in) + * Indicates how rings should be bound to the file descriptors. + * If nr_flags != 0, then the low bits (in NETMAP_RING_MASK) + * are used to indicate the ring number, and nr_flags specifies + * the actual rings to bind. NETMAP_NO_TX_POLL is unaffected. 
+ * + * NOTE: THE FOLLOWING (nr_flags == 0) IS DEPRECATED: + * If nr_flags == 0, NETMAP_HW_RING and NETMAP_SW_RING control + * the binding as follows: + * 0 (default) binds all physical rings + * NETMAP_HW_RING | ring number binds a single ring pair + * NETMAP_SW_RING binds only the host tx/rx rings + * + * NETMAP_NO_TX_POLL can be OR-ed to make select()/poll() push + * packets on tx rings only if POLLOUT is set. + * The default is to push any pending packet. + * + * NETMAP_DO_RX_POLL can be OR-ed to make select()/poll() release + * packets on rx rings also when POLLIN is NOT set. + * The default is to touch the rx ring only with POLLIN. + * Note that this is the opposite of TX because it + * reflects the common usage. + * + * NOTE: NETMAP_PRIV_MEM IS DEPRECATED, use nr_arg2 instead. + * NETMAP_PRIV_MEM is set on return for ports that do not use + * the global memory allocator. + * This information is not significant and applications + * should look at the region id in nr_arg2 + * + * nr_flags is the recommended mode to indicate which rings should + * be bound to a file descriptor. Values are NR_REG_* + * + * nr_arg1 (in) The number of extra rings to be reserved. + * Especially when allocating a VALE port the system only + * allocates the amount of memory needed for the port. + * If more shared memory rings are desired (e.g. for pipes), + * the first invocation for the same basename/allocator + * should specify a suitable number. Memory cannot be + * extended after the first allocation without closing + * all ports on the same region. + * + * nr_arg2 (in/out) The identity of the memory region used. + * On input, 0 means the system decides autonomously, + * other values may try to select a specific region. + * On return the actual value is reported. + * Region '1' is the global allocator, normally shared + * by all interfaces. Other values are private regions. + * If two ports the same region zero-copy is possible. + * + * nr_arg3 (in/out) number of extra buffers to be allocated. + * + * + * + * nr_cmd (in) if non-zero indicates a special command: + * NETMAP_BDG_ATTACH and nr_name = vale*:ifname + * attaches the NIC to the switch; nr_ringid specifies + * which rings to use. Used by vale-ctl -a ... + * nr_arg1 = NETMAP_BDG_HOST also attaches the host port + * as in vale-ctl -h ... + * + * NETMAP_BDG_DETACH and nr_name = vale*:ifname + * disconnects a previously attached NIC. + * Used by vale-ctl -d ... + * + * NETMAP_BDG_LIST + * list the configuration of VALE switches. + * + * NETMAP_BDG_VNET_HDR + * Set the virtio-net header length used by the client + * of a VALE switch port. + * + * NETMAP_BDG_NEWIF + * create a persistent VALE port with name nr_name. + * Used by vale-ctl -n ... + * + * NETMAP_BDG_DELIF + * delete a persistent VALE port. Used by vale-ctl -d ... 
+ * + * nr_arg1, nr_arg2, nr_arg3 (in/out) command specific + * + * + * + */ + + +/* + * struct nmreq overlays a struct ifreq (just the name) + */ +struct nmreq { + char nr_name[IFNAMSIZ]; + uint32_t nr_version; /* API version */ + uint32_t nr_offset; /* nifp offset in the shared region */ + uint32_t nr_memsize; /* size of the shared region */ + uint32_t nr_tx_slots; /* slots in tx rings */ + uint32_t nr_rx_slots; /* slots in rx rings */ + uint16_t nr_tx_rings; /* number of tx rings */ + uint16_t nr_rx_rings; /* number of rx rings */ + + uint16_t nr_ringid; /* ring(s) we care about */ +#define NETMAP_HW_RING 0x4000 /* single NIC ring pair */ +#define NETMAP_SW_RING 0x2000 /* only host ring pair */ + +#define NETMAP_RING_MASK 0x0fff /* the ring number */ + +#define NETMAP_NO_TX_POLL 0x1000 /* no automatic txsync on poll */ + +#define NETMAP_DO_RX_POLL 0x8000 /* DO automatic rxsync on poll */ + + uint16_t nr_cmd; +#define NETMAP_BDG_ATTACH 1 /* attach the NIC */ +#define NETMAP_BDG_DETACH 2 /* detach the NIC */ +#define NETMAP_BDG_REGOPS 3 /* register bridge callbacks */ +#define NETMAP_BDG_LIST 4 /* get bridge's info */ +#define NETMAP_BDG_VNET_HDR 5 /* set the port virtio-net-hdr length */ +#define NETMAP_BDG_OFFSET NETMAP_BDG_VNET_HDR /* deprecated alias */ +#define NETMAP_BDG_NEWIF 6 /* create a virtual port */ +#define NETMAP_BDG_DELIF 7 /* destroy a virtual port */ +#define NETMAP_PT_HOST_CREATE 8 /* create ptnetmap kthreads */ +#define NETMAP_PT_HOST_DELETE 9 /* delete ptnetmap kthreads */ +#define NETMAP_BDG_POLLING_ON 10 /* delete polling kthread */ +#define NETMAP_BDG_POLLING_OFF 11 /* delete polling kthread */ +#define NETMAP_VNET_HDR_GET 12 /* get the port virtio-net-hdr length */ + uint16_t nr_arg1; /* reserve extra rings in NIOCREGIF */ +#define NETMAP_BDG_HOST 1 /* attach the host stack on ATTACH */ + + uint16_t nr_arg2; + uint32_t nr_arg3; /* req. extra buffers in NIOCREGIF */ + uint32_t nr_flags; + /* various modes, extends nr_ringid */ + uint32_t spare2[1]; +}; + +#define NR_REG_MASK 0xf /* values for nr_flags */ +enum { NR_REG_DEFAULT = 0, /* backward compat, should not be used. */ + NR_REG_ALL_NIC = 1, + NR_REG_SW = 2, + NR_REG_NIC_SW = 3, + NR_REG_ONE_NIC = 4, + NR_REG_PIPE_MASTER = 5, + NR_REG_PIPE_SLAVE = 6, +}; +/* monitor uses the NR_REG to select the rings to monitor */ +#define NR_MONITOR_TX 0x100 +#define NR_MONITOR_RX 0x200 +#define NR_ZCOPY_MON 0x400 +/* request exclusive access to the selected rings */ +#define NR_EXCLUSIVE 0x800 +/* request ptnetmap host support */ +#define NR_PASSTHROUGH_HOST NR_PTNETMAP_HOST /* deprecated */ +#define NR_PTNETMAP_HOST 0x1000 +#define NR_RX_RINGS_ONLY 0x2000 +#define NR_TX_RINGS_ONLY 0x4000 +/* Applications set this flag if they are able to deal with virtio-net headers, + * that is send/receive frames that start with a virtio-net header. + * If not set, NIOCREGIF will fail with netmap ports that require applications + * to use those headers. If the flag is set, the application can use the + * NETMAP_VNET_HDR_GET command to figure out the header length. */ +#define NR_ACCEPT_VNET_HDR 0x8000 + + +/* + * Windows does not have _IOWR(). _IO(), _IOW() and _IOR() are defined + * in ws2def.h but not sure if they are in the form we need. 
+ * XXX so we redefine them + * in a convenient way to use for DeviceIoControl signatures + */ +#ifdef _WIN32 +#undef _IO // ws2def.h +#define _WIN_NM_IOCTL_TYPE 40000 +#define _IO(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \ + METHOD_BUFFERED, FILE_ANY_ACCESS ) +#define _IO_direct(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \ + METHOD_OUT_DIRECT, FILE_ANY_ACCESS ) + +#define _IOWR(_c, _n, _s) _IO(_c, _n) + +/* We havesome internal sysctl in addition to the externally visible ones */ +#define NETMAP_MMAP _IO_direct('i', 160) // note METHOD_OUT_DIRECT +#define NETMAP_POLL _IO('i', 162) + +/* and also two setsockopt for sysctl emulation */ +#define NETMAP_SETSOCKOPT _IO('i', 140) +#define NETMAP_GETSOCKOPT _IO('i', 141) + + +//These linknames are for the Netmap Core Driver +#define NETMAP_NT_DEVICE_NAME L"\\Device\\NETMAP" +#define NETMAP_DOS_DEVICE_NAME L"\\DosDevices\\netmap" + +//Definition of a structure used to pass a virtual address within an IOCTL +typedef struct _MEMORY_ENTRY { + PVOID pUsermodeVirtualAddress; +} MEMORY_ENTRY, *PMEMORY_ENTRY; + +typedef struct _POLL_REQUEST_DATA { + int events; + int timeout; + int revents; +} POLL_REQUEST_DATA; + +#endif /* _WIN32 */ + +/* + * FreeBSD uses the size value embedded in the _IOWR to determine + * how much to copy in/out. So we need it to match the actual + * data structure we pass. We put some spares in the structure + * to ease compatibility with other versions + */ +#define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */ +#define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */ +#define NIOCTXSYNC _IO('i', 148) /* sync tx queues */ +#define NIOCRXSYNC _IO('i', 149) /* sync rx queues */ +#define NIOCCONFIG _IOWR('i',150, struct nm_ifreq) /* for ext. modules */ +#endif /* !NIOCREGIF */ + + +/* + * Helper functions for kernel and userspace + */ + +/* + * check if space is available in the ring. + */ +static inline int +nm_ring_empty(struct netmap_ring *ring) +{ + return (ring->cur == ring->tail); +} + +/* + * Opaque structure that is passed to an external kernel + * module via ioctl(fd, NIOCCONFIG, req) for a user-owned + * bridge port (at this point ephemeral VALE interface). + */ +#define NM_IFRDATA_LEN 256 +struct nm_ifreq { + char nifr_name[IFNAMSIZ]; + char data[NM_IFRDATA_LEN]; +}; + +/* + * netmap kernel thread configuration + */ +/* bhyve/vmm.ko MSIX parameters for IOCTL */ +struct ptn_vmm_ioctl_msix { + uint64_t msg; + uint64_t addr; +}; + +/* IOCTL parameters */ +struct nm_kth_ioctl { + u_long com; + /* TODO: use union */ + union { + struct ptn_vmm_ioctl_msix msix; + } data; +}; + +/* Configuration of a ptnetmap ring */ +struct ptnet_ring_cfg { + uint64_t ioeventfd; /* eventfd in linux, tsleep() parameter in FreeBSD */ + uint64_t irqfd; /* eventfd in linux, ioctl fd in FreeBSD */ + struct nm_kth_ioctl ioctl; /* ioctl parameter to send irq (only used in bhyve/FreeBSD) */ +}; +#endif /* _NET_NETMAP_H_ */ diff --git a/src/plugins/netmap/netmap.api b/src/plugins/netmap/netmap.api new file mode 100644 index 00000000000..a14753cad9c --- /dev/null +++ b/src/plugins/netmap/netmap.api @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option version = "1.0.0"; + +/** \brief Create netmap + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param netmap_if_name - interface name + @param hw_addr - interface MAC + @param use_random_hw_addr - use random generated MAC + @param is_pipe - is pipe + @param is_master - 0=slave, 1=master +*/ +autoreply define netmap_create +{ + u32 client_index; + u32 context; + + u8 netmap_if_name[64]; + u8 hw_addr[6]; + u8 use_random_hw_addr; + u8 is_pipe; + u8 is_master; +}; + +/** \brief Delete netmap + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param netmap_if_name - interface name +*/ +autoreply define netmap_delete +{ + u32 client_index; + u32 context; + + u8 netmap_if_name[64]; +}; + +/* + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/netmap/netmap.c b/src/plugins/netmap/netmap.c new file mode 100644 index 00000000000..ebef215eb3b --- /dev/null +++ b/src/plugins/netmap/netmap.c @@ -0,0 +1,334 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include <stdint.h> +#include <net/if.h> +#include <sys/ioctl.h> +#include <sys/types.h> +#include <fcntl.h> + +#include <vlib/vlib.h> +#include <vlib/unix/unix.h> +#include <vnet/ethernet/ethernet.h> + +#include <netmap/net_netmap.h> +#include <netmap/netmap.h> +#include <netmap/netmap.api_enum.h> +#include <netmap/netmap.api_types.h> + +netmap_main_t netmap_main; + +static clib_error_t * +netmap_fd_read_ready (clib_file_t * uf) +{ + vlib_main_t *vm = vlib_get_main (); + netmap_main_t *nm = &netmap_main; + u32 idx = uf->private_data; + + nm->pending_input_bitmap = + clib_bitmap_set (nm->pending_input_bitmap, idx, 1); + + /* Schedule the rx node */ + vlib_node_set_interrupt_pending (vm, netmap_input_node.index); + + return 0; +} + +static void +close_netmap_if (netmap_main_t * nm, netmap_if_t * nif) +{ + if (nif->clib_file_index != ~0) + { + clib_file_del (&file_main, file_main.file_pool + nif->clib_file_index); + nif->clib_file_index = ~0; + } + else if (nif->fd > -1) + close (nif->fd); + + if (nif->mem_region) + { + netmap_mem_region_t *reg = &nm->mem_regions[nif->mem_region]; + if (--reg->refcnt == 0) + { + munmap (reg->mem, reg->region_size); + reg->region_size = 0; + } + } + + + mhash_unset (&nm->if_index_by_host_if_name, nif->host_if_name, + &nif->if_index); + vec_free (nif->host_if_name); + vec_free (nif->req); + + clib_memset (nif, 0, sizeof (*nif)); + pool_put (nm->interfaces, nif); +} + +int +netmap_worker_thread_enable () +{ + /* if worker threads are enabled, switch to polling mode */ + foreach_vlib_main () + { + vlib_node_set_state (this_vlib_main, netmap_input_node.index, + VLIB_NODE_STATE_POLLING); + } + + return 0; +} + +int +netmap_worker_thread_disable () +{ + foreach_vlib_main () + { + vlib_node_set_state (this_vlib_main, netmap_input_node.index, + VLIB_NODE_STATE_INTERRUPT); + } + + return 0; +} + +int +netmap_create_if (vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set, + u8 is_pipe, u8 is_master, u32 * sw_if_index) +{ + netmap_main_t *nm = &netmap_main; + int ret = 0; + uint32_t nr_reg; + netmap_if_t *nif = 0; + u8 hw_addr[6]; + vnet_sw_interface_t *sw; + vnet_main_t *vnm = vnet_get_main (); + uword *p; + struct nmreq *req = 0; + netmap_mem_region_t *reg; + vlib_thread_main_t *tm = vlib_get_thread_main (); + int fd; + + p = mhash_get (&nm->if_index_by_host_if_name, if_name); + if (p) + return VNET_API_ERROR_SUBIF_ALREADY_EXISTS; + + fd = open ("/dev/netmap", O_RDWR); + if (fd < 0) + return VNET_API_ERROR_SUBIF_ALREADY_EXISTS; + + pool_get (nm->interfaces, nif); + nif->if_index = nif - nm->interfaces; + nif->fd = fd; + nif->clib_file_index = ~0; + + vec_validate (req, 0); + nif->req = req; + req->nr_version = NETMAP_API; + req->nr_flags = NR_REG_ALL_NIC; + + if (is_pipe) + req->nr_flags = is_master ? 
NR_REG_PIPE_MASTER : NR_REG_PIPE_SLAVE; + else + req->nr_flags = NR_REG_ALL_NIC; + + req->nr_flags |= NR_ACCEPT_VNET_HDR; + snprintf (req->nr_name, IFNAMSIZ, "%s", if_name); + req->nr_name[IFNAMSIZ - 1] = 0; + + if (ioctl (nif->fd, NIOCREGIF, req)) + { + ret = VNET_API_ERROR_NOT_CONNECTED; + goto error; + } + + nif->mem_region = req->nr_arg2; + vec_validate (nm->mem_regions, nif->mem_region); + reg = &nm->mem_regions[nif->mem_region]; + if (reg->region_size == 0) + { + reg->mem = mmap (NULL, req->nr_memsize, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, 0); + clib_warning ("mem %p", reg->mem); + if (reg->mem == MAP_FAILED) + { + ret = VNET_API_ERROR_NOT_CONNECTED; + goto error; + } + reg->region_size = req->nr_memsize; + } + reg->refcnt++; + + nif->nifp = NETMAP_IF (reg->mem, req->nr_offset); + nr_reg = nif->req->nr_flags & NR_REG_MASK; + + if (nr_reg == NR_REG_SW) + { /* host stack */ + nif->first_tx_ring = nif->last_tx_ring = nif->req->nr_tx_rings; + nif->first_rx_ring = nif->last_rx_ring = nif->req->nr_rx_rings; + } + else if (nr_reg == NR_REG_ALL_NIC) + { /* only nic */ + nif->first_tx_ring = 0; + nif->first_rx_ring = 0; + nif->last_tx_ring = nif->req->nr_tx_rings - 1; + nif->last_rx_ring = nif->req->nr_rx_rings - 1; + } + else if (nr_reg == NR_REG_NIC_SW) + { + nif->first_tx_ring = 0; + nif->first_rx_ring = 0; + nif->last_tx_ring = nif->req->nr_tx_rings; + nif->last_rx_ring = nif->req->nr_rx_rings; + } + else if (nr_reg == NR_REG_ONE_NIC) + { + /* XXX check validity */ + nif->first_tx_ring = nif->last_tx_ring = nif->first_rx_ring = + nif->last_rx_ring = nif->req->nr_ringid & NETMAP_RING_MASK; + } + else + { /* pipes */ + nif->first_tx_ring = nif->last_tx_ring = 0; + nif->first_rx_ring = nif->last_rx_ring = 0; + } + + nif->host_if_name = if_name; + nif->per_interface_next_index = ~0; + + if (tm->n_vlib_mains > 1) + clib_spinlock_init (&nif->lockp); + + { + clib_file_t template = { 0 }; + template.read_function = netmap_fd_read_ready; + template.file_descriptor = nif->fd; + template.private_data = nif->if_index; + template.description = format (0, "netmap socket"); + nif->clib_file_index = clib_file_add (&file_main, &template); + } + + /*use configured or generate random MAC address */ + if (hw_addr_set) + memcpy (hw_addr, hw_addr_set, 6); + else + { + f64 now = vlib_time_now (vm); + u32 rnd; + rnd = (u32) (now * 1e6); + rnd = random_u32 (&rnd); + + memcpy (hw_addr + 2, &rnd, sizeof (rnd)); + hw_addr[0] = 2; + hw_addr[1] = 0xfe; + } + + vnet_eth_interface_registration_t eir = {}; + + eir.dev_class_index = netmap_device_class.index; + eir.dev_instance = nif->if_index; + eir.address = hw_addr; + eir.cb.set_max_frame_size = NULL; + + nif->hw_if_index = vnet_eth_register_interface (vnm, &eir); + + sw = vnet_get_hw_sw_interface (vnm, nif->hw_if_index); + nif->sw_if_index = sw->sw_if_index; + + mhash_set_mem (&nm->if_index_by_host_if_name, if_name, &nif->if_index, 0); + + if (sw_if_index) + *sw_if_index = nif->sw_if_index; + + if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 1) + netmap_worker_thread_enable (); + + return 0; + +error: + close_netmap_if (nm, nif); + return ret; +} + +int +netmap_delete_if (vlib_main_t * vm, u8 * host_if_name) +{ + vnet_main_t *vnm = vnet_get_main (); + netmap_main_t *nm = &netmap_main; + netmap_if_t *nif; + uword *p; + vlib_thread_main_t *tm = vlib_get_thread_main (); + + p = mhash_get (&nm->if_index_by_host_if_name, host_if_name); + if (p == NULL) + { + clib_warning ("Host interface %s does not exist", host_if_name); + return 
VNET_API_ERROR_SYSCALL_ERROR_1; + } + nif = pool_elt_at_index (nm->interfaces, p[0]); + + /* bring down the interface */ + vnet_hw_interface_set_flags (vnm, nif->hw_if_index, 0); + + ethernet_delete_interface (vnm, nif->hw_if_index); + + close_netmap_if (nm, nif); + + if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 0) + netmap_worker_thread_disable (); + + return 0; +} + +static clib_error_t * +netmap_init (vlib_main_t * vm) +{ + netmap_main_t *nm = &netmap_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + vlib_thread_registration_t *tr; + uword *p; + + clib_memset (nm, 0, sizeof (netmap_main_t)); + + nm->input_cpu_first_index = 0; + nm->input_cpu_count = 1; + + /* find out which cpus will be used for input */ + p = hash_get_mem (tm->thread_registrations_by_name, "workers"); + tr = p ? (vlib_thread_registration_t *) p[0] : 0; + + if (tr && tr->count > 0) + { + nm->input_cpu_first_index = tr->first_index; + nm->input_cpu_count = tr->count; + } + + mhash_init_vec_string (&nm->if_index_by_host_if_name, sizeof (uword)); + + vec_validate_aligned (nm->rx_buffers, tm->n_vlib_mains - 1, + CLIB_CACHE_LINE_BYTES); + + return 0; +} + +VLIB_INIT_FUNCTION (netmap_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/netmap/netmap.h b/src/plugins/netmap/netmap.h new file mode 100644 index 00000000000..29f855fda8e --- /dev/null +++ b/src/plugins/netmap/netmap.h @@ -0,0 +1,166 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ +/* + * Copyright (C) 2011-2014 Universita` di Pisa. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <vppinfra/lock.h> + +typedef struct +{ + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); + clib_spinlock_t lockp; + u8 *host_if_name; + uword if_index; + u32 hw_if_index; + u32 sw_if_index; + u32 clib_file_index; + + u32 per_interface_next_index; + u8 is_admin_up; + + /* netmap */ + struct nmreq *req; + u16 mem_region; + int fd; + struct netmap_if *nifp; + u16 first_tx_ring; + u16 last_tx_ring; + u16 first_rx_ring; + u16 last_rx_ring; + +} netmap_if_t; + +typedef struct +{ + char *mem; + u32 region_size; + int refcnt; +} netmap_mem_region_t; + +typedef struct +{ + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); + netmap_if_t *interfaces; + + /* bitmap of pending rx interfaces */ + uword *pending_input_bitmap; + + /* rx buffer cache */ + u32 **rx_buffers; + + /* hash of host interface names */ + mhash_t if_index_by_host_if_name; + + /* vector of memory regions */ + netmap_mem_region_t *mem_regions; + + /* first cpu index */ + u32 input_cpu_first_index; + + /* total cpu count */ + u32 input_cpu_count; +} netmap_main_t; + +extern netmap_main_t netmap_main; +extern vnet_device_class_t netmap_device_class; +extern vlib_node_registration_t netmap_input_node; + +int netmap_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set, + u8 is_pipe, u8 is_master, u32 * sw_if_index); +int netmap_delete_if (vlib_main_t * vm, u8 * host_if_name); + + +/* Macros and helper functions from sys/net/netmap_user.h */ + +#ifdef _NET_NETMAP_H_ + +#define _NETMAP_OFFSET(type, ptr, offset) \ + ((type)(void *)((char *)(ptr) + (offset))) + +#define NETMAP_IF(_base, _ofs) _NETMAP_OFFSET(struct netmap_if *, _base, _ofs) + +#define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ + nifp, (nifp)->ring_ofs[index] ) + +#define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ + nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] ) + +#define NETMAP_BUF(ring, index) \ + ((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size)) + +#define NETMAP_BUF_IDX(ring, buf) \ + ( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \ + (ring)->nr_buf_size ) + +static inline uint32_t +nm_ring_next (struct netmap_ring *ring, uint32_t i) +{ + return (PREDICT_FALSE (i + 1 == ring->num_slots) ? 0 : i + 1); +} + + +/* + * Return 1 if we have pending transmissions in the tx ring. 
+ * When everything is complete ring->head = ring->tail + 1 (modulo ring size) + */ +static inline int +nm_tx_pending (struct netmap_ring *ring) +{ + return nm_ring_next (ring, ring->tail) != ring->head; +} + +static inline uint32_t +nm_ring_space (struct netmap_ring *ring) +{ + int ret = ring->tail - ring->cur; + if (ret < 0) + ret += ring->num_slots; + return ret; +} +#endif + + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/netmap/netmap_api.c b/src/plugins/netmap/netmap_api.c new file mode 100644 index 00000000000..51f572a23e6 --- /dev/null +++ b/src/plugins/netmap/netmap_api.c @@ -0,0 +1,95 @@ +/* + *------------------------------------------------------------------ + * netmap_api.c - netmap api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include <vnet/vnet.h> +#include <vlibmemory/api.h> + +#include <vnet/interface.h> +#include <vnet/api_errno.h> +#include <netmap/netmap.h> + +#include <vnet/format_fns.h> +#include <netmap/netmap.api_enum.h> +#include <netmap/netmap.api_types.h> + +#include <vlibapi/api_helper_macros.h> + +#define foreach_vpe_api_msg \ +_(NETMAP_CREATE, netmap_create) \ +_(NETMAP_DELETE, netmap_delete) \ + +static void +vl_api_netmap_create_t_handler (vl_api_netmap_create_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_netmap_create_reply_t *rmp; + int rv = 0; + u8 *if_name = NULL; + + if_name = format (0, "%s", mp->netmap_if_name); + vec_add1 (if_name, 0); + + rv = + netmap_create_if (vm, if_name, mp->use_random_hw_addr ? 0 : mp->hw_addr, + mp->is_pipe, mp->is_master, 0); + + vec_free (if_name); + + REPLY_MACRO (VL_API_NETMAP_CREATE_REPLY); +} + +static void +vl_api_netmap_delete_t_handler (vl_api_netmap_delete_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + vl_api_netmap_delete_reply_t *rmp; + int rv = 0; + u8 *if_name = NULL; + + if_name = format (0, "%s", mp->netmap_if_name); + vec_add1 (if_name, 0); + + rv = netmap_delete_if (vm, if_name); + + vec_free (if_name); + + REPLY_MACRO (VL_API_NETMAP_DELETE_REPLY); +} + +#include <netmap/netmap.api.c> +static clib_error_t * +netmap_api_hookup (vlib_main_t * vm) +{ + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (); + + return 0; +} + +VLIB_API_INIT_FUNCTION (netmap_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/netmap/node.c b/src/plugins/netmap/node.c new file mode 100644 index 00000000000..6169847fa79 --- /dev/null +++ b/src/plugins/netmap/node.c @@ -0,0 +1,295 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include <stdint.h> +#include <net/if.h> +#include <sys/ioctl.h> + +#include <vlib/vlib.h> +#include <vlib/unix/unix.h> +#include <vnet/ethernet/ethernet.h> +#include <vnet/devices/devices.h> +#include <vnet/feature/feature.h> + +#include <netmap/net_netmap.h> +#include <netmap/netmap.h> + +#define foreach_netmap_input_error + +typedef enum +{ +#define _(f,s) NETMAP_INPUT_ERROR_##f, + foreach_netmap_input_error +#undef _ + NETMAP_INPUT_N_ERROR, +} netmap_input_error_t; + +static char *netmap_input_error_strings[] = { +#define _(n,s) s, + foreach_netmap_input_error +#undef _ +}; + +typedef struct +{ + u32 next_index; + u32 hw_if_index; + struct netmap_slot slot; +} netmap_input_trace_t; + +static u8 * +format_netmap_input_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + netmap_input_trace_t *t = va_arg (*args, netmap_input_trace_t *); + u32 indent = format_get_indent (s); + + s = format (s, "netmap: hw_if_index %d next-index %d", + t->hw_if_index, t->next_index); + s = format (s, "\n%Uslot: flags 0x%x len %u buf_idx %u", + format_white_space, indent + 2, + t->slot.flags, t->slot.len, t->slot.buf_idx); + return s; +} + +always_inline void +buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi) +{ + vlib_buffer_t *b = vlib_get_buffer (vm, bi); + vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi); + vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi); + + /* update first buffer */ + first_b->total_length_not_including_first_buffer += b->current_length; + + /* update previous buffer */ + prev_b->next_buffer = bi; + prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT; + + /* update current buffer */ + b->next_buffer = 0; +} + +always_inline uword +netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame, netmap_if_t * nif) +{ + u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT; + uword n_trace = vlib_get_trace_count (vm, node); + netmap_main_t *nm = &netmap_main; + u32 n_rx_packets = 0; + u32 n_rx_bytes = 0; + u32 *to_next = 0; + u32 n_free_bufs; + struct netmap_ring *ring; + int cur_ring; + u32 thread_index = vm->thread_index; + u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm); + + if (nif->per_interface_next_index != ~0) + next_index = nif->per_interface_next_index; + + n_free_bufs = vec_len (nm->rx_buffers[thread_index]); + if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE)) + { + vec_validate (nm->rx_buffers[thread_index], + VLIB_FRAME_SIZE + n_free_bufs - 1); + n_free_bufs += + vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs], + VLIB_FRAME_SIZE); + vec_set_len (nm->rx_buffers[thread_index], n_free_bufs); + } + + cur_ring = nif->first_rx_ring; + while (cur_ring <= nif->last_rx_ring && n_free_bufs) + { + int r = 0; + u32 cur_slot_index; + ring = NETMAP_RXRING (nif->nifp, cur_ring); + r = nm_ring_space (ring); + + if (!r) + { + cur_ring++; + continue; + } + + if (r > n_free_bufs) + r = n_free_bufs; 
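+      /* Copy out at most r slots from this ring (bounded by the free
+       * vlib buffers allocated above); packets larger than one buffer
+       * are spread across a chain of buffers before head/cur advance. */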
+ + cur_slot_index = ring->cur; + while (r) + { + u32 n_left_to_next; + u32 next0 = next_index; + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (r && n_left_to_next) + { + vlib_buffer_t *first_b0 = 0; + u32 offset = 0; + u32 bi0 = 0, first_bi0 = 0, prev_bi0; + u32 next_slot_index = (cur_slot_index + 1) % ring->num_slots; + u32 next2_slot_index = (cur_slot_index + 2) % ring->num_slots; + struct netmap_slot *slot = &ring->slot[cur_slot_index]; + u32 data_len = slot->len; + + /* prefetch 2 slots in advance */ + CLIB_PREFETCH (&ring->slot[next2_slot_index], + CLIB_CACHE_LINE_BYTES, LOAD); + /* prefetch start of next packet */ + CLIB_PREFETCH (NETMAP_BUF + (ring, ring->slot[next_slot_index].buf_idx), + CLIB_CACHE_LINE_BYTES, LOAD); + + while (data_len && n_free_bufs) + { + vlib_buffer_t *b0; + /* grab free buffer */ + u32 last_empty_buffer = + vec_len (nm->rx_buffers[thread_index]) - 1; + prev_bi0 = bi0; + bi0 = nm->rx_buffers[thread_index][last_empty_buffer]; + b0 = vlib_get_buffer (vm, bi0); + vec_set_len (nm->rx_buffers[thread_index], + last_empty_buffer); + n_free_bufs--; + + /* copy data */ + u32 bytes_to_copy = + data_len > n_buffer_bytes ? n_buffer_bytes : data_len; + b0->current_data = 0; + clib_memcpy_fast (vlib_buffer_get_current (b0), + (u8 *) NETMAP_BUF (ring, slot->buf_idx) + + offset, bytes_to_copy); + + /* fill buffer header */ + b0->current_length = bytes_to_copy; + + if (offset == 0) + { + b0->total_length_not_including_first_buffer = 0; + b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID; + vnet_buffer (b0)->sw_if_index[VLIB_RX] = + nif->sw_if_index; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + first_bi0 = bi0; + first_b0 = vlib_get_buffer (vm, first_bi0); + } + else + buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0); + + offset += bytes_to_copy; + data_len -= bytes_to_copy; + } + + /* trace */ + if (PREDICT_FALSE (n_trace > 0)) + { + if (PREDICT_TRUE (first_b0 != 0) && + vlib_trace_buffer (vm, node, next0, first_b0, + /* follow_chain */ 0)) + { + netmap_input_trace_t *tr; + + vlib_set_trace_count (vm, node, --n_trace); + tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr)); + tr->next_index = next0; + tr->hw_if_index = nif->hw_if_index; + memcpy (&tr->slot, slot, sizeof (struct netmap_slot)); + } + } + + /* enque and take next packet */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, first_bi0, + next0); + + /* next packet */ + n_rx_packets++; + n_rx_bytes += slot->len; + to_next[0] = first_bi0; + to_next += 1; + n_left_to_next--; + cur_slot_index = next_slot_index; + + r--; + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + ring->head = ring->cur = cur_slot_index; + cur_ring++; + } + + if (n_rx_packets) + ioctl (nif->fd, NIOCRXSYNC, NULL); + + vlib_increment_combined_counter + (vnet_get_main ()->interface_main.combined_sw_if_counters + + VNET_INTERFACE_COUNTER_RX, + vlib_get_thread_index (), nif->hw_if_index, n_rx_packets, n_rx_bytes); + + vnet_device_increment_rx_packets (thread_index, n_rx_packets); + + return n_rx_packets; +} + +VLIB_NODE_FN (netmap_input_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + int i; + u32 n_rx_packets = 0; + u32 thread_index = vm->thread_index; + netmap_main_t *nm = &netmap_main; + netmap_if_t *nmi; + + for (i = 0; i < vec_len (nm->interfaces); i++) + { + nmi = vec_elt_at_index (nm->interfaces, i); + if (nmi->is_admin_up && + (i % nm->input_cpu_count) == + (thread_index - nm->input_cpu_first_index)) + n_rx_packets 
+= netmap_device_input_fn (vm, node, frame, nmi); + } + + return n_rx_packets; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (netmap_input_node) = { + .name = "netmap-input", + .sibling_of = "device-input", + .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED, + .format_trace = format_netmap_input_trace, + .type = VLIB_NODE_TYPE_INPUT, + /* default state is INTERRUPT mode, switch to POLLING if worker threads are enabled */ + .state = VLIB_NODE_STATE_INTERRUPT, + .n_errors = NETMAP_INPUT_N_ERROR, + .error_strings = netmap_input_error_strings, +}; +/* *INDENT-ON* */ + + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/netmap/plugin.c b/src/plugins/netmap/plugin.c new file mode 100644 index 00000000000..1673225b683 --- /dev/null +++ b/src/plugins/netmap/plugin.c @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: Apache-2.0 + * Copyright (c) 2024 Tom Jones <thj@freebsd.org> + * + * This software was developed by Tom Jones <thj@freebsd.org> under sponsorship + * from the FreeBSD Foundation. + * + */ + +#include <vlib/vlib.h> +#include <vnet/plugin/plugin.h> +#include <vpp/app/version.h> + +VLIB_PLUGIN_REGISTER () = { + .version = VPP_BUILD_VER, + .description = "netmap", +}; diff --git a/src/plugins/quic/quic.c b/src/plugins/quic/quic.c index 60d4ac21c19..3f7a3426069 100644 --- a/src/plugins/quic/quic.c +++ b/src/plugins/quic/quic.c @@ -1058,6 +1058,8 @@ quic_on_stream_open (quicly_stream_open_t * self, quicly_stream_t * stream) svm_fifo_add_want_deq_ntf (stream_session->rx_fifo, SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL | SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY); + svm_fifo_init_ooo_lookup (stream_session->rx_fifo, 0 /* ooo enq */); + svm_fifo_init_ooo_lookup (stream_session->tx_fifo, 1 /* ooo deq */); stream_session->session_state = SESSION_STATE_ACCEPTING; if ((rv = app_worker_accept_notify (app_wrk, stream_session))) @@ -1302,6 +1304,8 @@ quic_connect_stream (session_t * quic_session, session_endpoint_cfg_t * sep) return app_worker_connect_notify (app_wrk, NULL, rv, sep->opaque); } + svm_fifo_init_ooo_lookup (stream_session->rx_fifo, 0 /* ooo enq */); + svm_fifo_init_ooo_lookup (stream_session->tx_fifo, 1 /* ooo deq */); svm_fifo_add_want_deq_ntf (stream_session->rx_fifo, SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL | SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY); @@ -1679,6 +1683,9 @@ quic_on_quic_session_connected (quic_ctx_t * ctx) return; } + svm_fifo_init_ooo_lookup (quic_session->rx_fifo, 0 /* ooo enq */); + svm_fifo_init_ooo_lookup (quic_session->tx_fifo, 1 /* ooo deq */); + quic_session->session_state = SESSION_STATE_CONNECTING; if ((rv = app_worker_connect_notify (app_wrk, quic_session, SESSION_E_NONE, ctx->client_opaque))) @@ -2137,6 +2144,9 @@ quic_accept_connection (quic_rx_packet_ctx_t * pctx) return; } + svm_fifo_init_ooo_lookup (quic_session->rx_fifo, 0 /* ooo enq */); + svm_fifo_init_ooo_lookup (quic_session->tx_fifo, 1 /* ooo deq */); + app_wrk = app_worker_get (quic_session->app_wrk_index); quic_session->session_state = SESSION_STATE_ACCEPTING; if ((rv = app_worker_accept_notify (app_wrk, quic_session))) diff --git a/src/plugins/srmpls/CMakeLists.txt b/src/plugins/srmpls/CMakeLists.txt new file mode 100644 index 00000000000..25905d31e1b --- /dev/null +++ b/src/plugins/srmpls/CMakeLists.txt @@ -0,0 +1,30 @@ +# Copyright (c) 2024 Cisco and/or its affiliates +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_vpp_plugin(srmpls + SOURCES + sr_mpls_policy.c + sr_mpls_steering.c + sr_mpls_api.c + plugin.c + + INSTALL_HEADERS + sr_mpls.h + + API_FILES + sr_mpls.api + + # This might need to be VAT_AUTO_TEST? Not documented + API_TEST_SOURCES + sr_mpls_test.c +) diff --git a/src/plugins/srmpls/FEATURE.yaml b/src/plugins/srmpls/FEATURE.yaml new file mode 100644 index 00000000000..c5b958224c7 --- /dev/null +++ b/src/plugins/srmpls/FEATURE.yaml @@ -0,0 +1,9 @@ +--- +name: Segment Routing for MPLS +maintainer: Pablo Camarillo <pcamaril@cisco.com> +features: + - SR Policy support + - Automated steering (SR steering based on NextHop/Color) +description: "SR-MPLS" +state: production +properties: [API, CLI, MULTITHREAD] diff --git a/src/plugins/srmpls/dir.dox b/src/plugins/srmpls/dir.dox new file mode 100644 index 00000000000..76ec1d6a41b --- /dev/null +++ b/src/plugins/srmpls/dir.dox @@ -0,0 +1,22 @@ +/* + * + * Copyright (c) 2013 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + @dir + @brief Segment Routing MPLS code + + An implementation of Segment Routing for the MPLS dataplane. + +*/
\ No newline at end of file diff --git a/src/plugins/srmpls/plugin.c b/src/plugins/srmpls/plugin.c new file mode 100644 index 00000000000..af87607764f --- /dev/null +++ b/src/plugins/srmpls/plugin.c @@ -0,0 +1,26 @@ +/* + * plugin.c: srmpls + * + * Copyright (c) 2024 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <vlib/vlib.h> +#include <vnet/plugin/plugin.h> +#include <vpp/app/version.h> + +// register a plugin +VLIB_PLUGIN_REGISTER () = { + .version = VPP_BUILD_VER, + .description = "Segment Routing for MPLS plugin", +}; diff --git a/src/plugins/srmpls/sr_doc.rst b/src/plugins/srmpls/sr_doc.rst new file mode 100644 index 00000000000..ed847fa0d42 --- /dev/null +++ b/src/plugins/srmpls/sr_doc.rst @@ -0,0 +1,215 @@ +.. _srmpls_doc: + +SR-MPLS: Segment Routing for MPLS +================================= + +This is a memo intended to contain documentation of the VPP SR-MPLS +implementation. Everything that is not directly obvious should come +here. For any feedback on content that should be explained please +mailto:pcamaril@cisco.com + +Segment Routing +--------------- + +Segment routing is a network technology focused on addressing the +limitations of existing IP and Multiprotocol Label Switching (MPLS) +networks in terms of simplicity, scale, and ease of operation. It is a +foundation for application engineered routing as it prepares the +networks for new business models where applications can control the +network behavior. + +Segment routing seeks the right balance between distributed intelligence +and centralized optimization and programming. It was built for the +software-defined networking (SDN) era. + +Segment routing enhances packet forwarding behavior by enabling a +network to transport unicast packets through a specific forwarding path, +different from the normal path that a packet usually takes (IGP shortest +path or BGP best path). This capability benefits many use cases, and one +can build those specific paths based on application requirements. + +Segment routing uses the source routing paradigm. A node, usually a +router but also a switch, a trusted server, or a virtual forwarder +running on a hypervisor, steers a packet through an ordered list of +instructions, called segments. A segment can represent any instruction, +topological or service-based. A segment can have a local semantic to a +segment-routing node or global within a segment-routing network. Segment +routing allows an operator to enforce a flow through any topological +path and service chain while maintaining per-flow state only at the +ingress node to the segment-routing network. Segment routing also +supports equal-cost multipath (ECMP) by design. + +Segment routing can operate with either an MPLS or an IPv6 data plane. 
+All the currently available MPLS services, such as Layer 3 VPN (L3VPN), +L2VPN (Virtual Private Wire Service [VPWS], Virtual Private LAN Services +[VPLS], Ethernet VPN [E-VPN], and Provider Backbone Bridging Ethernet +VPN [PBB-EVPN]), can run on top of a segment-routing transport network. + +**The implementation of Segment Routing in VPP covers both the IPv6 data +plane (SRv6) as well as the MPLS data plane (SR-MPLS). This page +contains the SR-MPLS documentation.** + +Segment Routing terminology +--------------------------- + +- SegmentID (SID): is an MPLS label. +- Segment List (SL) (SID List): is the sequence of SIDs that the packet + will traverse. +- SR Policy: is a set of candidate paths (SID list+weight). An SR + policy is uniquely identified by its Binding SID and associated with + a weighted set of Segment Lists. In case several SID lists are + defined, traffic steered into the policy is unevenly load-balanced + among them according to their respective weights. +- BindingSID: a BindingSID is a SID (only one) associated one-one with + an SR Policy. If a packet arrives with MPLS label corresponding to a + BindingSID, then the SR policy will be applied to such packet. + (BindingSID is popped first.) + +SR-MPLS features in VPP +----------------------- + +The SR-MPLS implementation is focused on the SR policies, as well on its +steering. Others SR-MPLS features, such as for example AdjSIDs, can be +achieved using the regular VPP MPLS implementation. + +The Segment Routing Policy +(*draft-filsfils-spring-segment-routing-policy*) defines SR Policies. + +Creating a SR Policy +-------------------- + +An SR Policy is defined by a Binding SID and a weighted set of Segment +Lists. + +A new SR policy is created with a first SID list using: + +:: + + sr mpls policy add bsid 40001 next 16001 next 16002 next 16003 (weight 5) + +- The weight parameter is only used if more than one SID list is + associated with the policy. + +An SR policy is deleted with: + +:: + + sr mpls policy del bsid 40001 + +The existing SR policies are listed with: + +:: + + show sr mpls policies + +Adding/Removing SID Lists from an SR policy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +An additional SID list is associated with an existing SR policy with: + +:: + + sr mpls policy mod bsid 40001 add sl next 16001 next 16002 next 16003 (weight 3) + +Conversely, a SID list can be removed from an SR policy with: + +:: + + sr mpls policy mod bsid 4001 del sl index 1 + +Note that this CLI cannot be used to remove the last SID list of a +policy. Instead the SR policy delete CLI must be used. + +The weight of a SID list can also be modified with: + +:: + + sr mpls policy mod bsid 40001 mod sl index 1 weight 4 + +SR Policies: Spray policies +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Spray policies are a specific type of SR policies where the packet is +replicated on all the SID lists, rather than load-balanced among them. + +SID list weights are ignored with this type of policies. + +A Spray policy is instantiated by appending the keyword **spray** to a +regular SR-MPLS policy command, as in: + +:: + + sr mpls policy add bsid 40002 next 16001 next 16002 next 16003 spray + +Spray policies are used for removing multicast state from a network core +domain, and instead send a linear unicast copy to every access node. The +last SID in each list accesses the multicast tree within the access +node. 
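
The CLI above maps onto the plugin's C entry points declared in sr_mpls.h.
As a minimal sketch (not part of this patch; the helper name and the label
values are only illustrative), the spray policy from the previous example
could be created programmatically as follows::

    #include "sr_mpls.h"

    static int
    add_example_spray_policy (void)
    {
      mpls_label_t *segments = 0;   /* vppinfra vector of SIDs */
      int rv;

      vec_add1 (segments, 16001);
      vec_add1 (segments, 16002);
      vec_add1 (segments, 16003);

      /* BSID 40002, spray behavior; weight ~0 makes the plugin fall back
       * to SR_SEGMENT_LIST_WEIGHT_DEFAULT */
      rv = sr_mpls_policy_add (40002, segments,
                               SR_POLICY_TYPE_SPRAY, (u32) ~0);

      /* the policy keeps its own copy of the segment list */
      vec_free (segments);
      return rv;
    }

These same entry points are used by the binary API handlers in
sr_mpls_api.c, so a control-plane agent can manage spray and default
policies without going through the CLI.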
+ +Steering packets into a SR Policy +--------------------------------- + +Segment Routing supports three methods of steering traffic into an SR +policy. + +Local steering +~~~~~~~~~~~~~~ + +In this variant incoming packets match a routing policy which directs +them on a local SR policy. + +In order to achieve this behavior the user needs to create an ‘sr +steering policy via sr policy bsid’. + +:: + + sr mpls steer l3 2001::/64 via sr policy bsid 40001 + sr mpls steer l3 2001::/64 via sr policy bsid 40001 fib-table 3 + sr mpls steer l3 10.0.0.0/16 via sr policy bsid 40001 + sr mpls steer l3 10.0.0.0/16 via sr policy bsid 40001 vpn-label 500 + +Remote steering +~~~~~~~~~~~~~~~ + +In this variant incoming packets have an active SID matching a local +BSID at the head-end. + +In order to achieve this behavior the packets should simply arrive with +an active SID equal to the Binding SID of a locally instantiated SR +policy. + +Automated steering +~~~~~~~~~~~~~~~~~~ + +In this variant incoming packets match a BGP/Service route which +recurses on the BSID of a local policy. + +In order to achieve this behavior the user first needs to color the SR +policies. He can do so by using the CLI: + +:: + + sr mpls policy te bsid xxxxx endpoint x.x.x.x color 12341234 + +Notice that an SR policy can have a single endpoint and a single color. +Notice that the *endpoint* value is an IP46 address and the color a u32. + +Then, for any BGP/Service route the user has to use the API to steer +prefixes: + +:: + + sr steer l3 2001::/64 via next-hop 2001::1 color 1234 co 2 + sr steer l3 2001::/64 via next-hop 2001::1 color 1234 co 2 vpn-label 500 + +Notice that *co* refers to the CO-bits (values [0|1|2|3]). + +Notice also that a given prefix might be steered over several colors +(same next-hop and same co-bit value). In order to add new colors just +execute the API several times (or with the del parameter to delete the +color). + +This variant is meant to be used in conjunction with a control plane +agent that uses the underlying binary API bindings of +*sr_mpls_steering_policy_add*/*sr_mpls_steering_policy_del* for any BGP +service route received. diff --git a/src/plugins/srmpls/sr_mpls.api b/src/plugins/srmpls/sr_mpls.api new file mode 100644 index 00000000000..742f135d493 --- /dev/null +++ b/src/plugins/srmpls/sr_mpls.api @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. Licensed under the + * Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the + * License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +option version = "3.0.0"; + +import "vnet/interface_types.api"; +import "vnet/ip/ip_types.api"; +import "vnet/srv6/sr_types.api"; + +/** \brief MPLS SR policy add + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bsid - is the bindingSID of the SR Policy. MPLS label (20bit) + @param weight - is the weight of the sid list. optional. + @param is_spray - is the type of the SR policy. 
(0.Default // 1.Spray) + @param segments - vector of labels (20bit) composing the segment list +*/ +autoreply define sr_mpls_policy_add +{ + u32 client_index; + u32 context; + u32 bsid; + u32 weight; + bool is_spray; + u8 n_segments; + u32 segments[n_segments]; +}; + +/** \brief MPLS SR policy modification + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bsid is the bindingSID of the SR Policy. MPLS label (20bit) + @param sr_policy_index is the index of the SR policy + @param fib_table is the VRF where to install the FIB entry for the BSID + @param operation is the operation to perform (among the top ones) + @param segments is a vector of MPLS labels composing the segment list + @param sl_index is the index of the Segment List to modify/delete + @param weight is the weight of the sid list. optional. + @param is_encap Mode. Encapsulation or SRH insertion. +*/ +autoreply define sr_mpls_policy_mod +{ + u32 client_index; + u32 context; + u32 bsid; + vl_api_sr_policy_op_t operation; + u32 sl_index; + u32 weight; + u8 n_segments; + u32 segments[n_segments]; +}; + +/** \brief MPLS SR policy deletion + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bsid is the bindingSID of the SR Policy. MPLS label (20bit) +*/ +autoreply define sr_mpls_policy_del +{ + u32 client_index; + u32 context; + u32 bsid; +}; + +/** \brief MPLS SR steering add/del + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_del + @param bsid - is the bindingSID of the SR Policy (~0 is no bsid) + @param table_id - is the VRF where to install the FIB entry for the BSID + @param prefix - is the IPv4/v6 address for L3 traffic type. + @param mask_width - is the mask for L3 traffic type + @param next_hop - describes the next_hop (in case no BSID) + @param color - describes the color + @param co_bits - are the CO_bits of the steering policy + @param vpn_label - is an additonal last VPN label. (~0 is no label) +*/ +autoreply define sr_mpls_steering_add_del +{ + u32 client_index; + u32 context; + bool is_del[default = false]; + u32 bsid; + u32 table_id; + vl_api_prefix_t prefix; + u32 mask_width; + vl_api_address_t next_hop; + u32 color; + u8 co_bits; + u32 vpn_label; +}; + +/** \brief MPLS SR steering add/del + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bsid is the bindingSID of the SR Policy + @param endpoint is the endpoint of the SR policy + @param color is the color of the sr policy +*/ +autoreply define sr_mpls_policy_assign_endpoint_color +{ + u32 client_index; + u32 context; + u32 bsid; + vl_api_address_t endpoint; + u32 color; +}; + +/* + * fd.io coding-style-patch-verification: ON Local Variables: eval: + * (c-set-style "gnu") End: + */ diff --git a/src/plugins/srmpls/sr_mpls.h b/src/plugins/srmpls/sr_mpls.h new file mode 100644 index 00000000000..a8f9494428f --- /dev/null +++ b/src/plugins/srmpls/sr_mpls.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. Licensed under the Apache + * License, Version 2.0 (the "License"); you may not use this file except in + * compliance with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * @file + * @brief Segment Routing MPLS data structures definitions + * + */ + +#ifndef included_vnet_srmpls_h +#define included_vnet_srmpls_h + +#include <vnet/vnet.h> +#include <vnet/mpls/packet.h> +#include <vnet/fib/mpls_fib.h> +#include <vnet/ip/ip.h> +#include <vnet/ip/lookup.h> +#include <vnet/dpo/dpo.h> +#include <vnet/dpo/replicate_dpo.h> + +#include <stdlib.h> +#include <string.h> + +/* SR policy types */ +#define SR_POLICY_TYPE_DEFAULT 0 +#define SR_POLICY_TYPE_SPRAY 1 + +#define SR_SEGMENT_LIST_WEIGHT_DEFAULT 1 + +#define SR_STEER_IPV4 4 +#define SR_STEER_IPV6 6 + +#define SR_TE_CO_BITS_00 0 +#define SR_TE_CO_BITS_01 1 +#define SR_TE_CO_BITS_10 2 +#define SR_TE_CO_BITS_11 3 + +/** + * @brief SR Segment List (SID list) + */ +typedef struct +{ + /* SIDs (key) */ + mpls_label_t *segments; + + /* SID list weight (wECMP / UCMP) */ + u32 weight; + +} mpls_sr_sl_t; + +typedef struct +{ + u32 *segments_lists; /**< Pool of SID lists indexes */ + + mpls_label_t bsid; /**< BindingSID (key) */ + + u8 type; /**< Type (default is 0) */ + /* SR Policy specific DPO */ + /* IF Type = DEFAULT Then Load-Balancer DPO among SID lists */ + /* IF Type = SPRAY then Spray DPO with all SID lists */ + + ip46_address_t endpoint; /**< Optional NH for SR TE */ + u8 endpoint_type; + u32 color; /**< Optional color for SR TE */ +} mpls_sr_policy_t; + +/** + * @brief Steering db key + * + * L3 is IPv4/IPv6 + mask + */ +typedef struct +{ + ip46_address_t prefix; /**< IP address of the prefix */ + u32 mask_width; /**< Mask width of the prefix */ + u32 fib_table; /**< VRF of the prefix */ + u8 traffic_type; /**< Traffic type (IPv4, IPv6, L2) */ + u8 padding[3]; +} sr_mpls_steering_key_t; + +typedef struct +{ + sr_mpls_steering_key_t classify; /**< Traffic classification */ + mpls_label_t bsid; /**< SR Policy index */ + ip46_address_t next_hop; /**< SR TE NH */ + char nh_type; + u32 *color; /**< Vector of SR TE colors */ + char co_bits; /**< Color-Only bits */ + mpls_label_t vpn_label; +} mpls_sr_steering_policy_t; + +/** + * @brief Segment Routing main datastructure + */ +typedef struct +{ + /* SR SID lists */ + mpls_sr_sl_t *sid_lists; + + /* SR MPLS policies */ + mpls_sr_policy_t *sr_policies; + + /* Hash table mapping BindingSID to SR MPLS policy */ + uword *sr_policies_index_hash; + + /* Pool of SR steer policies instances */ + mpls_sr_steering_policy_t *steer_policies; + + /* MHash table mapping steering rules to SR steer instance */ + mhash_t sr_steer_policies_hash; + + /** SR TE **/ + /* Hash table mapping (Color->Endpoint->BSID) for SR policies */ + mhash_t sr_policies_c2e2eclabel_hash; + /* SR TE (internal) fib table (Endpoint, Color) */ + u32 fib_table_EC; + /* Pool of (Endpoint, Color) hidden labels */ + u32 *ec_labels; + + /* convenience */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; +} mpls_sr_main_t; + +extern mpls_sr_main_t sr_mpls_main; + +extern int +sr_mpls_policy_add (mpls_label_t bsid, mpls_label_t * segments, + u8 behavior, u32 weight); + +extern int +sr_mpls_policy_mod (mpls_label_t bsid, u8 operation, + mpls_label_t * segments, u32 sl_index, u32 weight); + 
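/*
 * Usage sketch (derived from the sr_mpls_policy_mod () definition in
 * sr_mpls_policy.c; the call below is only illustrative): 'operation'
 * selects the change applied to the policy identified by 'bsid':
 *   1 - add 'segments' as a new SID list (returns its index),
 *   2 - delete the SID list at 'sl_index',
 *   3 - set the weight of the SID list at 'sl_index' to 'weight'.
 * For example, the CLI "sr mpls policy mod bsid 40001 mod sl index 1
 * weight 4" is roughly equivalent to:
 *   sr_mpls_policy_mod (40001, 3, NULL, 1, 4);
 */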
+extern int sr_mpls_policy_del (mpls_label_t bsid); + +extern int +sr_mpls_policy_assign_endpoint_color (mpls_label_t bsid, + ip46_address_t * endpoint, + u8 endpoint_type, u32 color); + +extern int +sr_mpls_steering_policy_add (mpls_label_t bsid, u32 table_id, + ip46_address_t * prefix, u32 mask_width, + u8 traffic_type, ip46_address_t * next_hop, + u8 nh_type, u32 color, char co_bits, + mpls_label_t vpn_label); + +extern int +sr_mpls_steering_policy_del (ip46_address_t * prefix, + u32 mask_width, u8 traffic_type, u32 table_id, + u32 color); + +extern u32 find_or_create_internal_label (ip46_address_t endpoint, u32 color); + +extern void internal_label_lock (ip46_address_t endpoint, u32 color); + +extern void internal_label_unlock (ip46_address_t endpoint, u32 color); + +#endif /* included_vnet_sr_mpls_h */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: eval: (c-set-style "gnu") End: + */ diff --git a/src/plugins/srmpls/sr_mpls_api.c b/src/plugins/srmpls/sr_mpls_api.c new file mode 100644 index 00000000000..3e89017dbc1 --- /dev/null +++ b/src/plugins/srmpls/sr_mpls_api.c @@ -0,0 +1,257 @@ +/* + * ------------------------------------------------------------------ + * sr_api.c - ipv6 segment routing api + * + * Copyright (c) 2016 Cisco and/or its affiliates. Licensed under the Apache + * License, Version 2.0 (the "License"); you may not use this file except in + * compliance with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * ------------------------------------------------------------------ + */ + +#include <vnet/vnet.h> +#include "sr_mpls.h" +#include <vlibmemory/api.h> + +#include <vnet/interface.h> +#include <vnet/api_errno.h> +#include <vnet/feature/feature.h> +#include <vnet/ip/ip_types_api.h> + +#include <vnet/format_fns.h> +#include <plugins/srmpls/sr_mpls.api_enum.h> +#include <plugins/srmpls/sr_mpls.api_types.h> + +#define vl_api_version(n, v) static u32 api_version = v; +#include <plugins/srmpls/sr_mpls.api.h> +#undef vl_api_version + +#define vl_endianfun +#include <plugins/srmpls/sr_mpls.api.h> +#undef vl_endianfun + +#define vl_calcsizefun +#include <plugins/srmpls/sr_mpls.api.h> +#undef vl_calcsizefun + +#define vl_printfun +#include <plugins/srmpls/sr_mpls.api.h> +#undef vl_printfun + +#define vl_msg_name_crc_list +#include <plugins/srmpls/sr_mpls.api.h> +#undef vl_msg_name_crc_list + +#define REPLY_MSG_ID_BASE msg_id_base +#include <vlibapi/api_helper_macros.h> + +#define foreach_vpe_api_msg \ +_(SR_MPLS_POLICY_DEL, sr_mpls_policy_del) \ +_(SR_MPLS_STEERING_ADD_DEL, sr_mpls_steering_add_del) \ +_(SR_MPLS_POLICY_ASSIGN_ENDPOINT_COLOR, sr_mpls_policy_assign_endpoint_color) + +static u16 msg_id_base; + +static void +vl_api_sr_mpls_policy_add_t_handler (vl_api_sr_mpls_policy_add_t * mp) +{ + vl_api_sr_mpls_policy_add_reply_t *rmp; + + mpls_label_t *segments = 0, *seg; + mpls_label_t this_address = 0; + + int i; + for (i = 0; i < mp->n_segments; i++) + { + vec_add2 (segments, seg, 1); + this_address = ntohl (mp->segments[i]); + clib_memcpy (seg, &this_address, sizeof (this_address)); + } + + int rv = 0; + rv = sr_mpls_policy_add (ntohl (mp->bsid), + segments, mp->is_spray, ntohl (mp->weight)); + vec_free (segments); + + REPLY_MACRO (VL_API_SR_MPLS_POLICY_ADD_REPLY); +} + +static void +vl_api_sr_mpls_policy_mod_t_handler (vl_api_sr_mpls_policy_mod_t * mp) +{ + vl_api_sr_mpls_policy_mod_reply_t *rmp; + + mpls_label_t *segments = 0, *seg; + mpls_label_t this_address = 0; + + int i; + for (i = 0; i < mp->n_segments; i++) + { + vec_add2 (segments, seg, 1); + this_address = ntohl (mp->segments[i]); + clib_memcpy (seg, &this_address, sizeof (this_address)); + } + + int rv = 0; + rv = sr_mpls_policy_mod (ntohl (mp->bsid), + ntohl (mp->operation), segments, + ntohl (mp->sl_index), ntohl (mp->weight)); + vec_free (segments); + + REPLY_MACRO (VL_API_SR_MPLS_POLICY_MOD_REPLY); +} + +static void +vl_api_sr_mpls_policy_del_t_handler (vl_api_sr_mpls_policy_del_t * mp) +{ + vl_api_sr_mpls_policy_del_reply_t *rmp; + int rv = 0; + rv = sr_mpls_policy_del (ntohl (mp->bsid)); + + REPLY_MACRO (VL_API_SR_MPLS_POLICY_DEL_REPLY); +} + +static void vl_api_sr_mpls_steering_add_del_t_handler + (vl_api_sr_mpls_steering_add_del_t * mp) +{ + vl_api_sr_mpls_steering_add_del_reply_t *rmp; + fib_prefix_t prefix; + ip46_address_t next_hop; + clib_memset (&prefix, 0, sizeof (ip46_address_t)); + + ip_prefix_decode (&mp->prefix, &prefix); + ip_address_decode (&mp->next_hop, &next_hop); + + int rv = 0; + if (mp->is_del) + rv = sr_mpls_steering_policy_del (&prefix.fp_addr, + prefix.fp_len, + ip46_address_is_ip4 (&prefix.fp_addr) ? + SR_STEER_IPV4 : SR_STEER_IPV6, + ntohl (mp->table_id), + ntohl (mp->color)); + else + rv = sr_mpls_steering_policy_add (ntohl (mp->bsid), + ntohl (mp->table_id), + &prefix.fp_addr, + prefix.fp_len, + ip46_address_is_ip4 (&prefix.fp_addr) ? + SR_STEER_IPV4 : SR_STEER_IPV6, + &next_hop, + ip46_address_is_ip4 (&next_hop) ? 
+ SR_STEER_IPV4 : SR_STEER_IPV6, + ntohl (mp->color), mp->co_bits, + ntohl (mp->vpn_label)); + + REPLY_MACRO (VL_API_SR_MPLS_STEERING_ADD_DEL_REPLY); +} + +static void vl_api_sr_mpls_policy_assign_endpoint_color_t_handler + (vl_api_sr_mpls_policy_assign_endpoint_color_t * mp) +{ + vl_api_sr_mpls_policy_assign_endpoint_color_reply_t *rmp; + int rv = 0; + + ip46_address_t endpoint; + clib_memset (&endpoint, 0, sizeof (ip46_address_t)); + ip_address_decode (&mp->endpoint, &endpoint); + + rv = sr_mpls_policy_assign_endpoint_color (ntohl (mp->bsid), + &endpoint, + ip46_address_is_ip4 (&endpoint) ? + SR_STEER_IPV4 : SR_STEER_IPV6, + ntohl (mp->color)); + + REPLY_MACRO (VL_API_SR_MPLS_POLICY_ASSIGN_ENDPOINT_COLOR_REPLY); +} + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id, n, crc) \ + vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id + REPLY_MSG_ID_BASE); + foreach_vl_msg_name_crc_sr_mpls; +#undef _ +} + +static clib_error_t * +sr_mpls_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = vlibapi_get_main (); + + u8 *name = format (0, "sr_mpls_%08x%c", api_version, 0); + REPLY_MSG_ID_BASE = + vl_msg_api_get_msg_ids ((char *) name, VL_MSG_SR_MPLS_LAST); + vec_free (name); + +#define _(N, n) \ + vl_msg_api_config (&(vl_msg_api_msg_config_t){ \ + .id = REPLY_MSG_ID_BASE + VL_API_##N, \ + .name = #n, \ + .handler = vl_api_##n##_t_handler, \ + .endian = vl_api_##n##_t_endian, \ + .format_fn = vl_api_##n##_t_format, \ + .size = sizeof (vl_api_##n##_t), \ + .traced = 1, \ + .tojson = vl_api_##n##_t_tojson, \ + .fromjson = vl_api_##n##_t_fromjson, \ + .calc_size = vl_api_##n##_t_calc_size, \ + }); + foreach_vpe_api_msg; +#undef _ + + /* + * Manually register the sr policy add msg, so we trace enough bytes + * to capture a typical segment list + */ + vl_msg_api_config (&(vl_msg_api_msg_config_t){ + .id = REPLY_MSG_ID_BASE + VL_API_SR_MPLS_POLICY_ADD, + .name = "sr_mpls_policy_add", + .handler = vl_api_sr_mpls_policy_add_t_handler, + .endian = vl_api_sr_mpls_policy_add_t_endian, + .format_fn = vl_api_sr_mpls_policy_add_t_format, + .size = 256, + .traced = 1, + .tojson = vl_api_sr_mpls_policy_add_t_tojson, + .fromjson = vl_api_sr_mpls_policy_add_t_fromjson, + .calc_size = vl_api_sr_mpls_policy_add_t_calc_size, + }); + /* + * Manually register the sr policy mod msg, so we trace enough bytes + * to capture a typical segment list + */ + vl_msg_api_config (&(vl_msg_api_msg_config_t){ + .id = REPLY_MSG_ID_BASE + VL_API_SR_MPLS_POLICY_MOD, + .name = "sr_mpls_policy_mod", + .handler = vl_api_sr_mpls_policy_mod_t_handler, + .endian = vl_api_sr_mpls_policy_mod_t_endian, + .format_fn = vl_api_sr_mpls_policy_mod_t_format, + .size = 256, + .traced = 1, + .tojson = vl_api_sr_mpls_policy_mod_t_tojson, + .fromjson = vl_api_sr_mpls_policy_mod_t_fromjson, + .calc_size = vl_api_sr_mpls_policy_mod_t_calc_size, + }); + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (sr_mpls_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: eval: (c-set-style "gnu") End: + */ diff --git a/src/plugins/srmpls/sr_mpls_policy.c b/src/plugins/srmpls/sr_mpls_policy.c new file mode 100644 index 00000000000..af24acd8cf6 --- /dev/null +++ b/src/plugins/srmpls/sr_mpls_policy.c @@ -0,0 +1,903 @@ +/* + * sr_mpls_policy.c: SR-MPLS policies + * + * Copyright (c) 2016 Cisco and/or its affiliates. 
Licensed under the Apache + * License, Version 2.0 (the "License"); you may not use this file except in + * compliance with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * @file + * @brief SR MPLS policy creation and application + * + * Create an SR policy. + * An SR policy can be either of 'default' type or 'spray' type + * An SR policy has attached a list of SID lists. + * In case the SR policy is a default one it will load balance among them. + * An SR policy has associated a BindingSID. + * In case any packet arrives with MPLS_label == BindingSID then the SR policy + * associated to such bindingSID will be applied to such packet. + * Also, a BSID can be associated with a (Next-Hop, Color) + * + */ + +#include <vlib/vlib.h> +#include <vnet/vnet.h> +#include "sr_mpls.h" +#include <vnet/fib/mpls_fib.h> +#include <vnet/dpo/dpo.h> +#include <vnet/ip/ip.h> + +#include <vppinfra/error.h> +#include <vppinfra/elog.h> + +mpls_sr_main_t sr_mpls_main; + +/*************************** SR LB helper functions **************************/ +/** + * @brief Creates a Segment List and adds it to an SR policy + * + * Creates a Segment List and adds it to the SR policy. Notice that the SL are + * not necessarily unique. Hence there might be two Segment List within the + * same SR Policy with exactly the same segments and same weight. + * + * @param sr_policy is the SR policy where the SL will be added + * @param sl is a vector of IPv6 addresses composing the Segment List + * @param weight is the weight of the SegmentList (for load-balancing purposes) + * @param is_encap represents the mode (SRH insertion vs Encapsulation) + * + * @return pointer to the just created segment list + */ +static inline mpls_sr_sl_t * +create_sl (mpls_sr_policy_t * sr_policy, mpls_label_t * sl, u32 weight) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + mpls_sr_sl_t *segment_list; + u32 ii; + + pool_get (sm->sid_lists, segment_list); + clib_memset (segment_list, 0, sizeof (*segment_list)); + + vec_add1 (sr_policy->segments_lists, segment_list - sm->sid_lists); + + /* Fill in segment list */ + segment_list->weight = + (weight != (u32) ~ 0 ? 
weight : SR_SEGMENT_LIST_WEIGHT_DEFAULT); + segment_list->segments = vec_dup (sl); + + mpls_eos_bit_t eos; + FOR_EACH_MPLS_EOS_BIT (eos) + { + fib_route_path_t path = { + .frp_proto = DPO_PROTO_MPLS, + .frp_sw_if_index = ~0, + .frp_fib_index = 0, + .frp_weight = segment_list->weight, + .frp_flags = FIB_ROUTE_PATH_FLAG_NONE, + .frp_label_stack = NULL, + .frp_local_label = sl[0], + }; + + if (vec_len (sl) > 1) + { + vec_validate (path.frp_label_stack, vec_len (sl) - 2); + for (ii = 1; ii < vec_len (sl); ii++) + { + path.frp_label_stack[ii - 1].fml_value = sl[ii]; + } + } + else + { + /* + * add an impliciet NULL label to allow non-eos recursion + */ + fib_mpls_label_t lbl = { + .fml_value = MPLS_IETF_IMPLICIT_NULL_LABEL, + }; + vec_add1 (path.frp_label_stack, lbl); + } + + fib_route_path_t *paths = NULL; + vec_add1 (paths, path); + + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_label = sr_policy->bsid, + .fp_eos = eos, + .fp_payload_proto = DPO_PROTO_MPLS, + }; + + fib_table_entry_path_add2 (0, + &pfx, + FIB_SOURCE_SR, + (sr_policy->type == SR_POLICY_TYPE_DEFAULT ? + FIB_ENTRY_FLAG_NONE : + FIB_ENTRY_FLAG_MULTICAST), paths); + vec_free (paths); + } + + return segment_list; +} + +/******************************* SR rewrite API *******************************/ +/* + * Three functions for handling sr policies: -> sr_mpls_policy_add -> + * sr_mpls_policy_del -> sr_mpls_policy_mod All of them are API. CLI function + * on sr_policy_command_fn + */ + +/** + * @brief Create a new SR policy + * + * @param bsid is the bindingSID of the SR Policy + * @param segments is a vector of MPLS labels composing the segment list + * @param behavior is the behavior of the SR policy. (default//spray) + * @param fib_table is the VRF where to install the FIB entry for the BSID + * @param weight is the weight of this specific SID list + * + * @return 0 if correct, else error + */ +int +sr_mpls_policy_add (mpls_label_t bsid, mpls_label_t * segments, + u8 behavior, u32 weight) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + mpls_sr_policy_t *sr_policy = 0; + uword *p; + + if (!sm->sr_policies_index_hash) + sm->sr_policies_index_hash = hash_create (0, sizeof (mpls_label_t)); + + /* MPLS SR policies cannot be created unless the MPLS table is present */ + if (~0 == fib_table_find (FIB_PROTOCOL_MPLS, MPLS_FIB_DEFAULT_TABLE_ID)) + return (VNET_API_ERROR_NO_SUCH_TABLE); + + /* Search for existing keys (BSID) */ + p = hash_get (sm->sr_policies_index_hash, bsid); + if (p) + { + /* Add SR policy that already exists; complain */ + return -12; + } + /* Add an SR policy object */ + pool_get (sm->sr_policies, sr_policy); + clib_memset (sr_policy, 0, sizeof (*sr_policy)); + + /* the first policy needs to lock the MPLS table so it doesn't + * disappear with policies in it */ + if (1 == pool_elts (sm->sr_policies)) + fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, + MPLS_FIB_DEFAULT_TABLE_ID, + FIB_SOURCE_SR); + sr_policy->bsid = bsid; + sr_policy->type = behavior; + sr_policy->endpoint_type = 0; + ip6_address_set_zero (&sr_policy->endpoint.ip6); + sr_policy->color = (u32) ~ 0; + + /* Copy the key */ + hash_set (sm->sr_policies_index_hash, bsid, sr_policy - sm->sr_policies); + + /* Create a segment list and add the index to the SR policy */ + create_sl (sr_policy, segments, weight); + + return 0; +} + +/** + * @brief Delete a SR policy + * + * @param bsid is the bindingSID of the SR Policy + * @param index is the index of the SR policy + * + * @return 0 if correct, else error + */ +int 
+sr_mpls_policy_del (mpls_label_t bsid) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + mpls_sr_policy_t *sr_policy = 0; + mpls_sr_sl_t *segment_list; + mpls_eos_bit_t eos; + u32 *sl_index; + uword *p; + + if (!sm->sr_policies_index_hash) + sm->sr_policies_index_hash = hash_create (0, sizeof (mpls_label_t)); + + p = hash_get (sm->sr_policies_index_hash, bsid); + if (p) + sr_policy = pool_elt_at_index (sm->sr_policies, p[0]); + else + return -1; + + /* Clean SID Lists */ + vec_foreach (sl_index, sr_policy->segments_lists) + { + segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); + + fib_route_path_t path = { + .frp_proto = DPO_PROTO_MPLS, + .frp_sw_if_index = ~0, + .frp_fib_index = 0, + .frp_weight = segment_list->weight, + .frp_flags = FIB_ROUTE_PATH_FLAG_NONE, + .frp_local_label = segment_list->segments[0], + }; + + vec_add (path.frp_label_stack, segment_list + 1, + vec_len (segment_list) - 1); + + fib_route_path_t *paths = NULL; + vec_add1 (paths, path); + + /* remove each of the MPLS routes */ + FOR_EACH_MPLS_EOS_BIT (eos) + { + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_label = sr_policy->bsid, + .fp_eos = eos, + .fp_payload_proto = DPO_PROTO_MPLS, + }; + + fib_table_entry_path_remove2 (0, &pfx, FIB_SOURCE_SR, paths); + } + vec_free (paths); + vec_free (segment_list->segments); + pool_put_index (sm->sid_lists, *sl_index); + } + + /* If there is still traces of TE, make sure locks are released */ + if (sr_policy->endpoint_type != 0 && sr_policy->color != (u32) ~ 0) + { + sr_mpls_policy_assign_endpoint_color (bsid, NULL, 0, (u32) ~ 0); + } + + /* Remove SR policy entry */ + hash_unset (sm->sr_policies_index_hash, sr_policy->bsid); + pool_put (sm->sr_policies, sr_policy); + + if (0 == pool_elts (sm->sr_policies)) + fib_table_unlock (MPLS_FIB_DEFAULT_TABLE_ID, + FIB_PROTOCOL_MPLS, FIB_SOURCE_SR); + + return 0; +} + +/** + * @brief Modify an existing SR policy + * + * The possible modifications are adding a new Segment List, modifying an + * existing Segment List (modify the weight only) and delete a given + * Segment List from the SR Policy. + * + * @param bsid is the bindingSID of the SR Policy + * @param fib_table is the VRF where to install the FIB entry for the BSID + * @param operation is the operation to perform (among the top ones) + * @param segments is a vector of IPv6 address composing the segment list + * @param sl_index is the index of the Segment List to modify/delete + * @param weight is the weight of the sid list. optional. 
+ * + * @return 0 ok, >0 index of SL, <0 error + */ +int +sr_mpls_policy_mod (mpls_label_t bsid, u8 operation, + mpls_label_t * segments, u32 sl_index, u32 weight) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + mpls_sr_policy_t *sr_policy = 0; + mpls_sr_sl_t *segment_list; + u32 *sl_index_iterate; + uword *p; + + if (!sm->sr_policies_index_hash) + sm->sr_policies_index_hash = hash_create (0, sizeof (mpls_label_t)); + + p = hash_get (sm->sr_policies_index_hash, bsid); + if (p) + sr_policy = pool_elt_at_index (sm->sr_policies, p[0]); + else + return -1; + + if (operation == 1) + { /* Add SR List to an existing SR policy */ + /* Create the new SL */ + segment_list = create_sl (sr_policy, segments, weight); + return segment_list - sm->sid_lists; + } + else if (operation == 2) + { /* Delete SR List from an existing SR + * policy */ + /* Check that currently there are more than one SID list */ + if (vec_len (sr_policy->segments_lists) == 1) + return -21; + + /* + * Check that the SR list does exist and is assigned to the + * sr policy + */ + vec_foreach (sl_index_iterate, sr_policy->segments_lists) + if (*sl_index_iterate == sl_index) + break; + + if (*sl_index_iterate != sl_index) + return -22; + + /* Remove the lucky SR list that is being kicked out */ + segment_list = pool_elt_at_index (sm->sid_lists, sl_index); + + mpls_eos_bit_t eos; + fib_route_path_t path = { + .frp_proto = DPO_PROTO_MPLS, + .frp_sw_if_index = ~0, + .frp_fib_index = 0, + .frp_weight = segment_list->weight, + .frp_flags = FIB_ROUTE_PATH_FLAG_NONE, + .frp_local_label = segment_list->segments[0], + }; + + vec_add (path.frp_label_stack, segment_list + 1, + vec_len (segment_list) - 1); + + fib_route_path_t *paths = NULL; + vec_add1 (paths, path); + + FOR_EACH_MPLS_EOS_BIT (eos) + { + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_label = sr_policy->bsid, + .fp_eos = eos, + .fp_payload_proto = DPO_PROTO_MPLS, + }; + + fib_table_entry_path_remove2 (0, &pfx, FIB_SOURCE_SR, paths); + } + + vec_free (paths); + vec_free (segment_list->segments); + pool_put_index (sm->sid_lists, sl_index); + vec_del1 (sr_policy->segments_lists, + sl_index_iterate - sr_policy->segments_lists); + } + else if (operation == 3) + { /* Modify the weight of an existing + * SR List */ + /* Find the corresponding SL */ + vec_foreach (sl_index_iterate, sr_policy->segments_lists) + if (*sl_index_iterate == sl_index) + break; + + if (*sl_index_iterate != sl_index) + return -32; + + /* Change the weight */ + segment_list = pool_elt_at_index (sm->sid_lists, sl_index); + + /* Update LB */ + mpls_eos_bit_t eos; + fib_route_path_t path = { + .frp_proto = DPO_PROTO_MPLS, + .frp_sw_if_index = ~0, + .frp_fib_index = 0, + .frp_weight = segment_list->weight, + .frp_flags = FIB_ROUTE_PATH_FLAG_NONE, + .frp_local_label = segment_list->segments[0], + }; + + vec_add (path.frp_label_stack, segment_list + 1, + vec_len (segment_list) - 1); + + fib_route_path_t *paths = NULL; + vec_add1 (paths, path); + + FOR_EACH_MPLS_EOS_BIT (eos) + { + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_label = sr_policy->bsid, + .fp_eos = eos, + .fp_payload_proto = DPO_PROTO_MPLS, + }; + + fib_table_entry_path_remove2 (0, &pfx, FIB_SOURCE_SR, paths); + } + + segment_list->weight = weight; + + path.frp_weight = segment_list->weight; + + vec_free (paths); + paths = NULL; + vec_add1 (paths, path); + + FOR_EACH_MPLS_EOS_BIT (eos) + { + fib_prefix_t pfx = { + .fp_len = 21, + .fp_proto = FIB_PROTOCOL_MPLS, + .fp_label = sr_policy->bsid, + .fp_eos = 
eos, + .fp_payload_proto = DPO_PROTO_MPLS, + }; + + fib_table_entry_path_add2 (0, + &pfx, + FIB_SOURCE_SR, + (sr_policy->type == + SR_POLICY_TYPE_DEFAULT ? + FIB_ENTRY_FLAG_NONE : + FIB_ENTRY_FLAG_MULTICAST), paths); + } + } + return 0; +} + +/** + * @brief CLI for 'sr mpls policies' command family + */ +static clib_error_t * +sr_mpls_policy_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + int rv = -1; + char is_del = 0, is_add = 0, is_mod = 0; + char policy_set = 0; + mpls_label_t bsid, next_label; + u32 sl_index = (u32) ~ 0; + u32 weight = (u32) ~ 0; + mpls_label_t *segments = 0; + u8 operation = 0; + u8 is_spray = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (!is_add && !is_mod && !is_del && unformat (input, "add")) + is_add = 1; + else if (!is_add && !is_mod && !is_del && unformat (input, "del")) + is_del = 1; + else if (!is_add && !is_mod && !is_del && unformat (input, "mod")) + is_mod = 1; + else if (!policy_set + && unformat (input, "bsid %U", unformat_mpls_unicast_label, + &bsid)) + policy_set = 1; + else if (unformat (input, "weight %d", &weight)); + else if (unformat + (input, "next %U", unformat_mpls_unicast_label, &next_label)) + { + vec_add (segments, &next_label, 1); + } + else if (unformat (input, "add sl")) + operation = 1; + else if (unformat (input, "del sl index %d", &sl_index)) + operation = 2; + else if (unformat (input, "mod sl index %d", &sl_index)) + operation = 3; + else if (unformat (input, "spray")) + is_spray = 1; + else + break; + } + + if (!is_add && !is_mod && !is_del) + return clib_error_return (0, "Incorrect CLI"); + + if (!policy_set) + return clib_error_return (0, "No SR policy BSID or index specified"); + + if (is_add) + { + if (vec_len (segments) == 0) + return clib_error_return (0, "No Segment List specified"); + + rv = sr_mpls_policy_add (bsid, segments, + (is_spray ? SR_POLICY_TYPE_SPRAY : + SR_POLICY_TYPE_DEFAULT), weight); + vec_free (segments); + } + else if (is_del) + rv = sr_mpls_policy_del (bsid); + else if (is_mod) + { + if (!operation) + return clib_error_return (0, "No SL modification specified"); + if (operation != 1 && sl_index == (u32) ~ 0) + return clib_error_return (0, "No Segment List index specified"); + if (operation == 1 && vec_len (segments) == 0) + return clib_error_return (0, "No Segment List specified"); + if (operation == 3 && weight == (u32) ~ 0) + return clib_error_return (0, "No new weight for the SL specified"); + rv = sr_mpls_policy_mod (bsid, operation, segments, sl_index, weight); + vec_free (segments); + } + switch (rv) + { + case 0: + break; + case 1: + return 0; + case -12: + return clib_error_return (0, + "There is already a FIB entry for the BindingSID address.\n" + "The SR policy could not be created."); + case -21: + return clib_error_return (0, + "The selected SR policy only contains ONE segment list. " + "Please remove the SR policy instead"); + case -22: + return clib_error_return (0, + "Could not delete the segment list. " + "It is not associated with that SR policy."); + case -23: + return clib_error_return (0, + "Could not delete the segment list. " + "It is not associated with that SR policy."); + case -32: + return clib_error_return (0, + "Could not modify the segment list. 
" + "The given SL is not associated with such SR policy."); + case VNET_API_ERROR_NO_SUCH_TABLE: + return clib_error_return (0, "the Default MPLS table is not present"); + default: + return clib_error_return (0, "BUG: sr policy returns %d", rv); + } + return 0; +} + +VLIB_CLI_COMMAND(sr_mpls_policy_command, static)= +{ + .path = "sr mpls policy", + .short_help = "sr mpls policy [add||del||mod] bsid 2999 " + "next 10 next 20 next 30 (weight 1) (spray)", + .long_help = "TBD.\n", + .function = sr_mpls_policy_command_fn, +}; + +/** + * @brief CLI to display onscreen all the SR MPLS policies + */ +static clib_error_t * +show_sr_mpls_policies_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + mpls_sr_sl_t *segment_list = 0; + mpls_sr_policy_t *sr_policy = 0; + mpls_sr_policy_t **vec_policies = 0; + mpls_label_t *label; + u32 *sl_index; + u8 *s; + int i = 0; + + vlib_cli_output (vm, "SR MPLS policies:"); + + pool_foreach (sr_policy, sm->sr_policies) { + vec_add1(vec_policies, sr_policy); + } + + vec_foreach_index (i, vec_policies) + { + sr_policy = vec_policies[i]; + vlib_cli_output (vm, "[%u].-\tBSID: %U", + (u32) (sr_policy - sm->sr_policies), + format_mpls_unicast_label, sr_policy->bsid); + switch (sr_policy->endpoint_type) + { + case SR_STEER_IPV6: + vlib_cli_output (vm, "\tEndpoint: %U", format_ip6_address, + &sr_policy->endpoint.ip6); + vlib_cli_output (vm, "\tColor: %u", sr_policy->color); + break; + case SR_STEER_IPV4: + vlib_cli_output (vm, "\tEndpoint: %U", format_ip4_address, + &sr_policy->endpoint.ip4); + vlib_cli_output (vm, "\tColor: %u", sr_policy->color); + break; + default: + vlib_cli_output (vm, "\tTE disabled"); + } + vlib_cli_output (vm, "\tType: %s", + (sr_policy->type == + SR_POLICY_TYPE_DEFAULT ? "Default" : "Spray")); + vlib_cli_output (vm, "\tSegment Lists:"); + vec_foreach (sl_index, sr_policy->segments_lists) + { + s = NULL; + segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); + s = format (s, "\t[%u].- ", *sl_index); + s = format (s, "< "); + vec_foreach (label, segment_list->segments) + { + s = format (s, "%U, ", format_mpls_unicast_label, *label); + } + s = format (s, "\b\b > "); + vlib_cli_output (vm, " %s", s); + } + vlib_cli_output (vm, "-----------"); + } + vec_free (vec_policies); + return 0; +} + +VLIB_CLI_COMMAND(show_sr_mpls_policies_command, static)= +{ + .path = "show sr mpls policies", + .short_help = "show sr mpls policies", + .function = show_sr_mpls_policies_command_fn, +}; + +/** + * @brief Update the Endpoint,Color tuple of an SR policy + * + * @param bsid is the bindingSID of the SR Policy + * @param endpoint represents the IP46 of the endpoint + * @param color represents the color (u32) + * + * To reset to NULL use ~0 as parameters. 
+ * + * @return 0 if correct, else error + */ +int +sr_mpls_policy_assign_endpoint_color (mpls_label_t bsid, + ip46_address_t * endpoint, + u8 endpoint_type, u32 color) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + mpls_sr_policy_t *sr_policy = 0; + uword *endpoint_table, *p, *old_value; + + ip46_address_t any; + any.as_u64[0] = any.as_u64[1] = (u64) ~ 0; + + if (!sm->sr_policies_index_hash) + sm->sr_policies_index_hash = hash_create (0, sizeof (mpls_label_t)); + + p = hash_get (sm->sr_policies_index_hash, bsid); + if (p) + sr_policy = pool_elt_at_index (sm->sr_policies, p[0]); + else + return -1; + + /* If previous Endpoint, color existed, remove (NH,C) and (ANY,C) */ + if (sr_policy->endpoint_type) + { + endpoint_table = + mhash_get (&sm->sr_policies_c2e2eclabel_hash, &sr_policy->color); + if (!endpoint_table) + return -2; + old_value = + mhash_get ((mhash_t *) endpoint_table, &sr_policy->endpoint); + + /* CID 180995 This should never be NULL unless the two hash tables + * get out of sync */ + ALWAYS_ASSERT (old_value != NULL); + + fib_prefix_t pfx = { 0 }; + pfx.fp_proto = FIB_PROTOCOL_MPLS; + pfx.fp_len = 21; + pfx.fp_label = (u32) * old_value; + + mpls_eos_bit_t eos; + FOR_EACH_MPLS_EOS_BIT (eos) + { + pfx.fp_eos = eos; + fib_table_entry_path_remove (sm->fib_table_EC, + &pfx, + FIB_SOURCE_SR, + DPO_PROTO_MPLS, + NULL, + ~0, 0, 1, FIB_ROUTE_PATH_FLAG_NONE); + } + + old_value = mhash_get ((mhash_t *) endpoint_table, &any); + pfx.fp_label = (u32) * old_value; + + FOR_EACH_MPLS_EOS_BIT (eos) + { + pfx.fp_eos = eos; + fib_table_entry_path_remove (sm->fib_table_EC, + &pfx, + FIB_SOURCE_SR, + DPO_PROTO_MPLS, + NULL, + ~0, 0, 1, FIB_ROUTE_PATH_FLAG_NONE); + } + + /* Release the lock on (NH, Color) and (ANY, Color) */ + internal_label_unlock (sr_policy->endpoint, sr_policy->color); + internal_label_unlock (any, sr_policy->color); + + /* Reset the values on the SR policy */ + sr_policy->endpoint_type = 0; + sr_policy->endpoint.as_u64[0] = sr_policy->endpoint.as_u64[1] = + (u64) ~ 0; + sr_policy->color = (u32) ~ 0; + } + + if (endpoint_type) + { + sr_policy->endpoint_type = endpoint_type; + sr_policy->endpoint.as_u64[0] = endpoint->as_u64[0]; + sr_policy->endpoint.as_u64[1] = endpoint->as_u64[1]; + sr_policy->color = color; + + u32 label = find_or_create_internal_label (*endpoint, color); + internal_label_lock (*endpoint, sr_policy->color); + + /* If FIB doesnt exist, create them */ + if (sm->fib_table_EC == (u32) ~ 0) + { + sm->fib_table_EC = fib_table_create_and_lock (FIB_PROTOCOL_MPLS, + FIB_SOURCE_SR, + "SR-MPLS Traffic Engineering (NextHop,Color)"); + + fib_table_flush (sm->fib_table_EC, FIB_PROTOCOL_MPLS, + FIB_SOURCE_SPECIAL); + } + + fib_prefix_t pfx = { 0 }; + pfx.fp_proto = FIB_PROTOCOL_MPLS; + pfx.fp_len = 21; + + fib_route_path_t path = { + .frp_proto = DPO_PROTO_MPLS, + .frp_sw_if_index = ~0, + .frp_fib_index = 0, + .frp_weight = 1, + .frp_flags = FIB_ROUTE_PATH_FLAG_NONE, + .frp_label_stack = 0 + }; + path.frp_local_label = sr_policy->bsid; + + //Add the entry to ANY,Color + u32 any_label = find_or_create_internal_label (any, color); + internal_label_lock (any, sr_policy->color); + + pfx.fp_eos = MPLS_EOS; + path.frp_eos = MPLS_EOS; + + fib_route_path_t *paths = NULL; + vec_add1 (paths, path); + + pfx.fp_label = label; + fib_table_entry_update (sm->fib_table_EC, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + + pfx.fp_label = any_label; + fib_table_entry_update (sm->fib_table_EC, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + + 
fib_mpls_label_t fml = { + .fml_value = MPLS_IETF_IMPLICIT_NULL_LABEL, + }; + + vec_add1 (path.frp_label_stack, fml); + pfx.fp_eos = MPLS_NON_EOS; + path.frp_eos = MPLS_NON_EOS; + + paths = NULL; + vec_add1 (paths, path); + + pfx.fp_label = label; + fib_table_entry_update (sm->fib_table_EC, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + + pfx.fp_label = any_label; + fib_table_entry_update (sm->fib_table_EC, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + } + return 0; +} + +/** + * @brief CLI to modify the Endpoint,Color of an SR policy + */ +static clib_error_t * +cli_sr_mpls_policy_ec_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + ip46_address_t endpoint; + u32 color = (u32) ~ 0; + mpls_label_t bsid; + u8 endpoint_type = 0; + char clear = 0, color_set = 0, bsid_set = 0; + + clib_memset (&endpoint, 0, sizeof (ip46_address_t)); + + int rv; + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (!endpoint_type + && unformat (input, "endpoint %U", unformat_ip6_address, + &endpoint.ip6)) + endpoint_type = SR_STEER_IPV6; + else if (!endpoint_type + && unformat (input, "endpoint %U", unformat_ip4_address, + &endpoint.ip4)) + endpoint_type = SR_STEER_IPV4; + else if (!color_set && unformat (input, "color %u", &color)) + color_set = 1; + else if (!bsid_set + && unformat (input, "bsid %U", unformat_mpls_unicast_label, + &bsid)) + bsid_set = 1; + else if (!clear && unformat (input, "clear")) + clear = 1; + else + break; + } + + if (!bsid_set) + return clib_error_return (0, "No BSID specified"); + if (!endpoint_type && !clear) + return clib_error_return (0, "No Endpoint specified"); + if (!color_set && !clear) + return clib_error_return (0, "No Color set"); + + /* In case its a cleanup */ + if (clear) + { + ip6_address_set_zero (&endpoint.ip6); + color = (u32) ~ 0; + } + rv = + sr_mpls_policy_assign_endpoint_color (bsid, &endpoint, endpoint_type, + color); + + if (rv) + clib_error_return (0, "Error on Endpoint,Color"); + + return 0; +} + +VLIB_CLI_COMMAND(cli_sr_mpls_policy_ec_command, static)= +{ + .path = "sr mpls policy te", + .short_help = "sr mpls policy te bsid xxxxx endpoint x.x.x.x color 12341234", + .function = cli_sr_mpls_policy_ec_command_fn, +}; + +/********************* SR MPLS Policy initialization ***********************/ +/** + * @brief SR MPLS Policy initialization + */ +clib_error_t * +sr_mpls_policy_rewrite_init (vlib_main_t * vm) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + + /* Init memory for sr policy keys (bsid <-> ip6_address_t) */ + sm->sr_policies_index_hash = NULL; + sm->sr_policies_c2e2eclabel_hash.hash = NULL; + return 0; +} + +VLIB_INIT_FUNCTION (sr_mpls_policy_rewrite_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: eval: (c-set-style "gnu") End: + */ diff --git a/src/plugins/srmpls/sr_mpls_steering.c b/src/plugins/srmpls/sr_mpls_steering.c new file mode 100644 index 00000000000..24c8b0e2d9f --- /dev/null +++ b/src/plugins/srmpls/sr_mpls_steering.c @@ -0,0 +1,897 @@ +/* + * sr_steering.c: ipv6 segment routing steering into SR policy + * + * Copyright (c) 2016 Cisco and/or its affiliates. Licensed under the Apache + * License, Version 2.0 (the "License"); you may not use this file except in + * compliance with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * @file + * @brief Packet steering into SR-MPLS Policies + * + * This file is in charge of handling the FIB appropiatly to steer packets + * through SR Policies as defined in 'sr_mpls_policy.c'. Notice that here + * we are only doing steering. SR policy application is done in + * sr_policy_rewrite.c + * + * Supports: + * - Steering of IPv6 traffic Destination Address based through BSID + * - Steering of IPv4 traffic Destination Address based through BSID + * - Steering of IPv4 and IPv6 traffic through N,C (SR CP) + */ + +#include <vlib/vlib.h> +#include <vnet/vnet.h> +#include "sr_mpls.h" +#include <vnet/ip/ip4_packet.h> +#include <vnet/ip/ip6_packet.h> +#include <vnet/fib/mpls_fib.h> + +#include <vppinfra/error.h> +#include <vppinfra/elog.h> + +#define SRMPLS_TE_OFFSET 50 + +/** + * @brief function to sort the colors in descending order + */ +int +sort_color_descent (const u32 * x, u32 * y) +{ + return *y - *x; +} + +/********************* Internal (NH, C) labels *******************************/ +/** + * @brief find the corresponding label for (endpoint, color) and lock it + * endpoint might be NULL or ANY + * NULL = 0, ANY=~0 + */ +u32 +find_or_create_internal_label (ip46_address_t endpoint, u32 color) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + uword *color_table, *result_label; + + if (!sm->sr_policies_c2e2eclabel_hash.hash) + mhash_init (&sm->sr_policies_c2e2eclabel_hash, sizeof (mhash_t), + sizeof (u32)); + + color_table = mhash_get (&sm->sr_policies_c2e2eclabel_hash, &color); + if (!color_table) + { + mhash_t color_t; + clib_memset (&color_t, 0, sizeof (mhash_t)); + mhash_init (&color_t, sizeof (u32), sizeof (ip46_address_t)); + mhash_set_mem (&sm->sr_policies_c2e2eclabel_hash, &color, + (uword *) & color_t, NULL); + color_table = mhash_get (&sm->sr_policies_c2e2eclabel_hash, &color); + } + + result_label = mhash_get ((mhash_t *) color_table, &endpoint); + + if (result_label) + return (u32) * result_label; + + /* Create and set a new internal label */ + u32 *new_internal_label = 0; + pool_get (sm->ec_labels, new_internal_label); + *new_internal_label = 0; + mhash_set ((mhash_t *) color_table, &endpoint, + (new_internal_label - sm->ec_labels) + SRMPLS_TE_OFFSET, NULL); + + return (new_internal_label - sm->ec_labels) + SRMPLS_TE_OFFSET; +} + +always_inline void +internal_label_lock_co (ip46_address_t endpoint, u32 color, char co_bits) +{ + ip46_address_t zero, any; + ip46_address_reset (&zero); + any.as_u64[0] = any.as_u64[1] = (u64) ~ 0; + switch (co_bits) + { + case SR_TE_CO_BITS_10: + internal_label_lock (endpoint, color); + internal_label_lock (zero, color); + internal_label_lock (any, color); + break; + case SR_TE_CO_BITS_01: + internal_label_lock (endpoint, color); + internal_label_lock (zero, color); + break; + case SR_TE_CO_BITS_00: + case SR_TE_CO_BITS_11: + internal_label_lock (endpoint, color); + break; + } +} + +/** + * @brief lock the label for (NH, C) + * endpoint might be NULL or ANY + * NULL = 0, ANY=~0 + */ +void +internal_label_lock (ip46_address_t endpoint, u32 color) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + uword *color_table, 
*result_label; + + if (!sm->sr_policies_c2e2eclabel_hash.hash) + return; + + color_table = mhash_get (&sm->sr_policies_c2e2eclabel_hash, &color); + if (!color_table) + return; + + result_label = mhash_get ((mhash_t *) color_table, &endpoint); + + if (!result_label) + return; + + /* Lock it */ + u32 *label_lock = + pool_elt_at_index (sm->ec_labels, *result_label - SRMPLS_TE_OFFSET); + (*label_lock)++; +} + + +always_inline void +internal_label_unlock_co (ip46_address_t endpoint, u32 color, char co_bits) +{ + ip46_address_t zero, any; + ip46_address_reset (&zero); + any.as_u64[0] = any.as_u64[1] = (u64) ~ 0; + switch (co_bits) + { + case SR_TE_CO_BITS_10: + internal_label_unlock (endpoint, color); + internal_label_unlock (zero, color); + internal_label_unlock (any, color); + break; + case SR_TE_CO_BITS_01: + internal_label_unlock (endpoint, color); + internal_label_unlock (zero, color); + break; + case SR_TE_CO_BITS_00: + case SR_TE_CO_BITS_11: + internal_label_unlock (endpoint, color); + break; + } +} + +/** + * @brief Release lock on label for (endpoint, color) + * endpoint might be NULL or ANY + * NULL = 0, ANY=~0 + */ +void +internal_label_unlock (ip46_address_t endpoint, u32 color) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + uword *color_table, *result_label; + + if (!sm->sr_policies_c2e2eclabel_hash.hash) + return; + + color_table = mhash_get (&sm->sr_policies_c2e2eclabel_hash, &color); + if (!color_table) + return; + + result_label = mhash_get ((mhash_t *) color_table, &endpoint); + + if (!result_label) + return; + + u32 *label_lock = + pool_elt_at_index (sm->ec_labels, *result_label - SRMPLS_TE_OFFSET); + (*label_lock)--; + + if (*label_lock == 0) + { + pool_put (sm->ec_labels, label_lock); + mhash_unset ((mhash_t *) color_table, &endpoint, NULL); + if (mhash_elts ((mhash_t *) color_table) == 0) + { + mhash_free ((mhash_t *) color_table); + mhash_unset (&sm->sr_policies_c2e2eclabel_hash, &color, NULL); + if (mhash_elts (&sm->sr_policies_c2e2eclabel_hash) == 0) + { + mhash_free (&sm->sr_policies_c2e2eclabel_hash); + sm->sr_policies_c2e2eclabel_hash.hash = NULL; + fib_table_unlock (sm->fib_table_EC, FIB_PROTOCOL_MPLS, + FIB_SOURCE_SR); + sm->fib_table_EC = (u32) ~ 0; + } + } + } +} + +/********************* steering computation *********************************/ +/** + * @brief function to update the FIB + */ +void +compute_sr_te_automated_steering_fib_entry (mpls_sr_steering_policy_t * + steer_pl) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + fib_prefix_t pfx = { 0 }; + + u32 *internal_labels = 0; + ip46_address_t zero, any; + ip46_address_reset (&zero); + any.as_u64[0] = any.as_u64[1] = (u64) ~ 0; + + u32 *color_i = NULL; + vec_foreach (color_i, steer_pl->color) + { + switch (steer_pl->co_bits) + { + case SR_TE_CO_BITS_10: + vec_add1 (internal_labels, + find_or_create_internal_label (steer_pl->next_hop, + *color_i)); + vec_add1 (internal_labels, + find_or_create_internal_label (zero, *color_i)); + vec_add1 (internal_labels, + find_or_create_internal_label (any, *color_i)); + break; + case SR_TE_CO_BITS_01: + vec_add1 (internal_labels, + find_or_create_internal_label (steer_pl->next_hop, + *color_i)); + vec_add1 (internal_labels, + find_or_create_internal_label (zero, *color_i)); + break; + case SR_TE_CO_BITS_00: + case SR_TE_CO_BITS_11: + vec_add1 (internal_labels, + find_or_create_internal_label (steer_pl->next_hop, + *color_i)); + break; + } + } + + /* Does hidden FIB already exist? 
*/ + if (sm->fib_table_EC == (u32) ~ 0) + { + sm->fib_table_EC = fib_table_create_and_lock (FIB_PROTOCOL_MPLS, + FIB_SOURCE_SR, + "SR-MPLS Traffic Engineering (NextHop,Color)"); + + fib_table_flush (sm->fib_table_EC, FIB_PROTOCOL_MPLS, + FIB_SOURCE_SPECIAL); + } + + /* Add the corresponding FIB entries */ + fib_route_path_t path = { + .frp_proto = DPO_PROTO_MPLS, + .frp_eos = MPLS_EOS, + .frp_sw_if_index = ~0, + .frp_fib_index = sm->fib_table_EC, + .frp_weight = 1, + .frp_flags = FIB_ROUTE_PATH_FLAG_NONE, + .frp_label_stack = 0 + }; + fib_route_path_t *paths = NULL; + + if (steer_pl->classify.traffic_type == SR_STEER_IPV6) + { + pfx.fp_proto = FIB_PROTOCOL_IP6; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip6 = steer_pl->classify.prefix.ip6; + } + else if (steer_pl->classify.traffic_type == SR_STEER_IPV4) + { + pfx.fp_proto = FIB_PROTOCOL_IP4; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip4 = steer_pl->classify.prefix.ip4; + } + + if (steer_pl->vpn_label != (u32) ~ 0) + { + fib_mpls_label_t fml = { + .fml_value = steer_pl->vpn_label, + }; + vec_add1 (path.frp_label_stack, fml); + path.frp_eos = MPLS_NON_EOS; + } + + u32 label_i; + vec_foreach_index (label_i, internal_labels) + { + path.frp_local_label = internal_labels[label_i]; + path.frp_preference = label_i; + vec_add1 (paths, path); + } + + /* Finally we must add to FIB IGP to N */ + clib_memcpy (&path.frp_addr, &steer_pl->next_hop, + sizeof (steer_pl->next_hop)); + path.frp_preference = vec_len (internal_labels); + path.frp_label_stack = NULL; + + if (steer_pl->nh_type == SR_STEER_IPV6) + { + path.frp_proto = DPO_PROTO_IP6; + path.frp_fib_index = + fib_table_find (FIB_PROTOCOL_IP6, + (steer_pl->classify.fib_table != + (u32) ~ 0 ? steer_pl->classify.fib_table : 0)); + } + else if (steer_pl->nh_type == SR_STEER_IPV4) + { + path.frp_proto = DPO_PROTO_IP4; + path.frp_fib_index = + fib_table_find (FIB_PROTOCOL_IP4, + (steer_pl->classify.fib_table != + (u32) ~ 0 ? steer_pl->classify.fib_table : 0)); + } + + vec_add1 (paths, path); + if (steer_pl->classify.traffic_type == SR_STEER_IPV6) + fib_table_entry_update (fib_table_find + (FIB_PROTOCOL_IP6, + (steer_pl->classify.fib_table != + (u32) ~ 0 ? steer_pl->classify.fib_table : 0)), + &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + else if (steer_pl->classify.traffic_type == SR_STEER_IPV4) + fib_table_entry_update (fib_table_find + (FIB_PROTOCOL_IP4, + (steer_pl->classify.fib_table != + (u32) ~ 0 ? 
steer_pl->classify.fib_table : 0)), + &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + + vec_free (paths); + paths = NULL; +} + +/** + * @brief Steer L3 traffic through a given SR-MPLS policy + * + * @param is_del + * @param bsid is the bindingSID of the SR Policy (alt to sr_policy_index) + * @param sr_policy is the index of the SR Policy (alt to bsid) + * @param table_id is the VRF where to install the FIB entry for the BSID + * @param prefix is the IPv4/v6 address for L3 traffic type + * @param mask_width is the mask for L3 traffic type + * @param traffic_type describes the type of traffic + * @param next_hop SR TE Next-Hop + * @param nh_type is the AF of Next-Hop + * @param color SR TE color + * @param co_bits SR TE color-only bits + * + * @return 0 if correct, else error + */ +int +sr_mpls_steering_policy_add (mpls_label_t bsid, u32 table_id, + ip46_address_t * prefix, u32 mask_width, + u8 traffic_type, ip46_address_t * next_hop, + u8 nh_type, u32 color, char co_bits, + mpls_label_t vpn_label) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + sr_mpls_steering_key_t key; + mpls_sr_steering_policy_t *steer_pl; + fib_prefix_t pfx = { 0 }; + + mpls_sr_policy_t *sr_policy = 0; + uword *p = 0; + + clib_memset (&key, 0, sizeof (sr_mpls_steering_key_t)); + + if (traffic_type != SR_STEER_IPV4 && traffic_type != SR_STEER_IPV6) + return -1; + + /* Compute the steer policy key */ + key.prefix.as_u64[0] = prefix->as_u64[0]; + key.prefix.as_u64[1] = prefix->as_u64[1]; + key.mask_width = mask_width; + key.fib_table = (table_id != (u32) ~ 0 ? table_id : 0); + key.traffic_type = traffic_type; + + /* + * Search for steering policy. If already exists we are adding a new + * color. + */ + if (!sm->sr_steer_policies_hash.hash) + mhash_init (&sm->sr_steer_policies_hash, sizeof (uword), + sizeof (sr_mpls_steering_key_t)); + + p = mhash_get (&sm->sr_steer_policies_hash, &key); + if (p) + { + steer_pl = pool_elt_at_index (sm->steer_policies, p[0]); + if (steer_pl->bsid != (u32) ~ 0) + return -1; //Means we are rewriting the steering. Not allowed. + + /* Means we are adding a color. Check that NH match. */ + if (ip46_address_cmp (&steer_pl->next_hop, next_hop)) + return -2; + if (vec_search (steer_pl->color, color) != ~0) + return -3; + if (steer_pl->co_bits != co_bits) + return -4; /* CO colors should be the same */ + if (steer_pl->vpn_label != vpn_label) + return -5; /* VPN label should be the same */ + + /* Remove the steering and ReDo it */ + vec_add1 (steer_pl->color, color); + vec_sort_with_function (steer_pl->color, sort_color_descent); + compute_sr_te_automated_steering_fib_entry (steer_pl); + internal_label_lock_co (steer_pl->next_hop, color, steer_pl->co_bits); + return 0; + } + + /* Create a new steering policy */ + pool_get (sm->steer_policies, steer_pl); + clib_memset (steer_pl, 0, sizeof (*steer_pl)); + clib_memcpy (&steer_pl->classify.prefix, prefix, sizeof (ip46_address_t)); + clib_memcpy (&steer_pl->next_hop, next_hop, sizeof (ip46_address_t)); + steer_pl->nh_type = nh_type; + steer_pl->co_bits = co_bits; + steer_pl->classify.mask_width = mask_width; + steer_pl->classify.fib_table = (table_id != (u32) ~ 0 ?
table_id : 0); + steer_pl->classify.traffic_type = traffic_type; + steer_pl->color = NULL; + steer_pl->vpn_label = vpn_label; + + /* Create and store key */ + mhash_set (&sm->sr_steer_policies_hash, &key, steer_pl - sm->steer_policies, + NULL); + + /* Local steering */ + if (bsid != (u32) ~ 0) + { + if (!sm->sr_policies_index_hash) + sm->sr_policies_index_hash = hash_create (0, sizeof (mpls_label_t)); + steer_pl->bsid = bsid; + p = hash_get (sm->sr_policies_index_hash, bsid); + if (!p) + return -1; + sr_policy = pool_elt_at_index (sm->sr_policies, p[0]); + + fib_route_path_t path = { + .frp_proto = DPO_PROTO_MPLS, + .frp_local_label = sr_policy->bsid, + .frp_eos = MPLS_EOS, + .frp_sw_if_index = ~0, + .frp_fib_index = 0, + .frp_weight = 1, + .frp_flags = FIB_ROUTE_PATH_FLAG_NONE, + .frp_label_stack = 0 + }; + fib_route_path_t *paths = NULL; + + if (steer_pl->vpn_label != (u32) ~ 0) + { + fib_mpls_label_t fml = { + .fml_value = steer_pl->vpn_label, + }; + vec_add1 (path.frp_label_stack, fml); + } + + /* FIB API calls - Recursive route through the BindingSID */ + if (traffic_type == SR_STEER_IPV6) + { + pfx.fp_proto = FIB_PROTOCOL_IP6; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip6 = steer_pl->classify.prefix.ip6; + path.frp_fib_index = 0; + path.frp_preference = 0; + vec_add1 (paths, path); + fib_table_entry_path_add2 (fib_table_find + (FIB_PROTOCOL_IP6, + (table_id != (u32) ~ 0 ? table_id : 0)), + &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + vec_free (paths); + } + else if (traffic_type == SR_STEER_IPV4) + { + pfx.fp_proto = FIB_PROTOCOL_IP4; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip4 = steer_pl->classify.prefix.ip4; + path.frp_fib_index = 0; + path.frp_preference = 0; + vec_add1 (paths, path); + fib_table_entry_path_add2 (fib_table_find + (FIB_PROTOCOL_IP4, + (table_id != (u32) ~ 0 ? table_id : 0)), + &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths); + vec_free (paths); + } + } + /* Automated steering */ + else + { + steer_pl->bsid = (u32) ~ 0; + vec_add1 (steer_pl->color, color); + compute_sr_te_automated_steering_fib_entry (steer_pl); + internal_label_lock_co (steer_pl->next_hop, color, steer_pl->co_bits); + } + return 0; +} + +/** + * @brief Delete steering rule for an SR-MPLS policy + * + * @param is_del + * @param bsid is the bindingSID of the SR Policy (alt to sr_policy_index) + * @param sr_policy is the index of the SR Policy (alt to bsid) + * @param table_id is the VRF where to install the FIB entry for the BSID + * @param prefix is the IPv4/v6 address for L3 traffic type + * @param mask_width is the mask for L3 traffic type + * @param traffic_type describes the type of traffic + * @param next_hop SR TE Next-HOP + * @param nh_type is the AF of Next-Hop + * @param color SR TE color + * + * @return 0 if correct, else error + */ +int +sr_mpls_steering_policy_del (ip46_address_t * prefix, u32 mask_width, + u8 traffic_type, u32 table_id, u32 color) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + sr_mpls_steering_key_t key; + mpls_sr_steering_policy_t *steer_pl; + fib_prefix_t pfx = { 0 }; + uword *p = 0; + + clib_memset (&key, 0, sizeof (sr_mpls_steering_key_t)); + + /* Compute the steer policy key */ + if (traffic_type != SR_STEER_IPV4 && traffic_type != SR_STEER_IPV6) + return -1; + + key.prefix.as_u64[0] = prefix->as_u64[0]; + key.prefix.as_u64[1] = prefix->as_u64[1]; + key.mask_width = mask_width; + key.fib_table = (table_id != (u32) ~ 0 ? 
table_id : 0); + key.traffic_type = traffic_type; + + if (!sm->sr_steer_policies_hash.hash) + mhash_init (&sm->sr_steer_policies_hash, sizeof (uword), + sizeof (sr_mpls_steering_key_t)); + + /* Search for the item */ + p = mhash_get (&sm->sr_steer_policies_hash, &key); + + if (!p) + return -1; + + /* Retrieve Steer Policy function */ + steer_pl = pool_elt_at_index (sm->steer_policies, p[0]); + + if (steer_pl->bsid == (u32) ~ 0) + { + /* Remove the color from the color vector */ + vec_del1 (steer_pl->color, vec_search (steer_pl->color, color)); + + if (vec_len (steer_pl->color)) + { + /* Reorder Colors */ + vec_sort_with_function (steer_pl->color, sort_color_descent); + compute_sr_te_automated_steering_fib_entry (steer_pl); + /* Remove all the locks for this ones... */ + internal_label_unlock_co (steer_pl->next_hop, color, + steer_pl->co_bits); + return 0; + } + else + { + vec_free (steer_pl->color); + /* Remove FIB entry */ + if (steer_pl->classify.traffic_type == SR_STEER_IPV6) + { + pfx.fp_proto = FIB_PROTOCOL_IP6; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip6 = steer_pl->classify.prefix.ip6; + fib_table_entry_delete (fib_table_find + (FIB_PROTOCOL_IP6, + steer_pl->classify.fib_table), &pfx, + FIB_SOURCE_SR); + } + else if (steer_pl->classify.traffic_type == SR_STEER_IPV4) + { + pfx.fp_proto = FIB_PROTOCOL_IP4; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip4 = steer_pl->classify.prefix.ip4; + fib_table_entry_delete (fib_table_find + (FIB_PROTOCOL_IP4, + steer_pl->classify.fib_table), &pfx, + FIB_SOURCE_SR); + } + /* Remove all the locks for this ones... */ + internal_label_unlock_co (steer_pl->next_hop, color, + steer_pl->co_bits); + } + } + else //Remove by BSID + { + if (steer_pl->classify.traffic_type == SR_STEER_IPV6) + { + pfx.fp_proto = FIB_PROTOCOL_IP6; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip6 = steer_pl->classify.prefix.ip6; + fib_table_entry_delete (fib_table_find + (FIB_PROTOCOL_IP6, + steer_pl->classify.fib_table), &pfx, + FIB_SOURCE_SR); + } + else if (steer_pl->classify.traffic_type == SR_STEER_IPV4) + { + pfx.fp_proto = FIB_PROTOCOL_IP4; + pfx.fp_len = steer_pl->classify.mask_width; + pfx.fp_addr.ip4 = steer_pl->classify.prefix.ip4; + fib_table_entry_delete (fib_table_find + (FIB_PROTOCOL_IP4, + steer_pl->classify.fib_table), &pfx, + FIB_SOURCE_SR); + } + } + /* Delete SR steering policy entry */ + pool_put (sm->steer_policies, steer_pl); + mhash_unset (&sm->sr_steer_policies_hash, &key, NULL); + if (mhash_elts (&sm->sr_steer_policies_hash) == 0) + { + mhash_free (&sm->sr_steer_policies_hash); + sm->sr_steer_policies_hash.hash = NULL; + } + return 0; +} + +static clib_error_t * +sr_mpls_steer_policy_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + int is_del = 0; + + ip46_address_t prefix, nh; + u32 dst_mask_width = 0; + u8 traffic_type = 0; + u8 nh_type = 0; + u32 fib_table = (u32) ~ 0, color = (u32) ~ 0; + u32 co_bits = 0; + + mpls_label_t bsid, vpn_label = (u32) ~ 0; + + u8 sr_policy_set = 0; + + clib_memset (&prefix, 0, sizeof (ip46_address_t)); + clib_memset (&nh, 0, sizeof (ip46_address_t)); + + int rv; + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "del")) + is_del = 1; + else if (!traffic_type + && unformat (input, "l3 %U/%d", unformat_ip6_address, + &prefix.ip6, &dst_mask_width)) + traffic_type = SR_STEER_IPV6; + else if (!traffic_type + && unformat (input, "l3 %U/%d", unformat_ip4_address, + &prefix.ip4, &dst_mask_width)) + 
traffic_type = SR_STEER_IPV4; + else if (!sr_policy_set + && unformat (input, "via sr policy bsid %U", + unformat_mpls_unicast_label, &bsid)) + sr_policy_set = 1; + else if (!sr_policy_set + && unformat (input, "via next-hop %U color %d co %d", + unformat_ip4_address, &nh.ip4, &color, &co_bits)) + { + sr_policy_set = 1; + nh_type = SR_STEER_IPV4; + } + else if (!sr_policy_set + && unformat (input, "via next-hop %U color %d co %d", + unformat_ip6_address, &nh.ip6, &color, &co_bits)) + { + sr_policy_set = 1; + nh_type = SR_STEER_IPV6; + } + else if (fib_table == (u32) ~ 0 + && unformat (input, "fib-table %d", &fib_table)); + else if (unformat (input, "vpn-label %U", + unformat_mpls_unicast_label, &vpn_label)); + else + break; + } + + if (!traffic_type) + return clib_error_return (0, "No L3 traffic specified"); + if (!sr_policy_set) + return clib_error_return (0, "No SR policy specified"); + + /* Make sure that the prefixes are clean */ + if (traffic_type == SR_STEER_IPV4) + { + u32 mask = + (dst_mask_width ? (0xFFFFFFFFu >> (32 - dst_mask_width)) : 0); + prefix.ip4.as_u32 &= mask; + } + else if (traffic_type == SR_STEER_IPV6) + { + ip6_address_t mask; + ip6_address_mask_from_width (&mask, dst_mask_width); + ip6_address_mask (&prefix.ip6, &mask); + } + + if (nh_type) + bsid = (u32) ~ 0; + + if (is_del) + rv = + sr_mpls_steering_policy_del (&prefix, dst_mask_width, + traffic_type, fib_table, color); + + else + rv = + sr_mpls_steering_policy_add (bsid, fib_table, &prefix, dst_mask_width, + traffic_type, &nh, nh_type, color, co_bits, + vpn_label); + + switch (rv) + { + case 0: + break; + case 1: + return 0; + case -1: + return clib_error_return (0, "Incorrect API usage."); + case -2: + return clib_error_return (0, "The Next-Hop does not match."); + case -3: + return clib_error_return (0, "The color already exists."); + case -4: + return clib_error_return (0, "The co-bits do not match."); + case -5: + return clib_error_return (0, "The VPN-labels do not match."); + default: + return clib_error_return (0, "BUG: sr steer policy returns %d", rv); + } + return 0; +} + +VLIB_CLI_COMMAND(sr_mpls_steer_policy_command, static)= +{ + .path = "sr mpls steer", + .short_help = "sr mpls steer (del) l3 <ip_addr/mask> " + "via [sr policy bsid <mpls_label> || next-hop <ip46_addr> color <u32> co <0|1|2|3> ](fib-table <fib_table_index>)(vpn-label 500)", + .long_help = + "\tSteer L3 traffic through an existing SR policy.\n" + "\tExamples:\n" + "\t\tsr steer l3 2001::/64 via sr_policy bsid 29999\n" + "\t\tsr steer del l3 2001::/64 via sr_policy bsid 29999\n" + "\t\tsr steer l3 2001::/64 via next-hop 1.1.1.1 color 1234 co 0\n" + "\t\tsr steer l3 2001::/64 via next-hop 2001::1 color 1234 co 2 vpn-label 500\n", + .function = sr_mpls_steer_policy_command_fn, +}; + +static clib_error_t * +show_sr_mpls_steering_policies_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + mpls_sr_steering_policy_t **steer_policies = 0; + mpls_sr_steering_policy_t *steer_pl; + + int i; + + vlib_cli_output (vm, "SR MPLS steering policies:"); + pool_foreach (steer_pl, sm->steer_policies) { + vec_add1(steer_policies, steer_pl); + } + for (i = 0; i < vec_len (steer_policies); i++) + { + vlib_cli_output (vm, "=========================="); + steer_pl = steer_policies[i]; + if (steer_pl->classify.traffic_type == SR_STEER_IPV4) + { + vlib_cli_output (vm, "Prefix: %U/%d via:", + format_ip4_address, + &steer_pl->classify.prefix.ip4, + steer_pl->classify.mask_width); + } + 
else if (steer_pl->classify.traffic_type == SR_STEER_IPV6) + { + vlib_cli_output (vm, "Prefix: %U/%d via:", + format_ip6_address, + &steer_pl->classify.prefix.ip6, + steer_pl->classify.mask_width); + } + + if (steer_pl->bsid != (u32) ~ 0) + { + vlib_cli_output (vm, "· BSID %U", + format_mpls_unicast_label, steer_pl->bsid); + } + else + { + if (steer_pl->nh_type == SR_STEER_IPV4) + { + vlib_cli_output (vm, "· Next-hop %U", + format_ip4_address, &steer_pl->next_hop.ip4); + } + else if (steer_pl->nh_type == SR_STEER_IPV6) + { + vlib_cli_output (vm, "· Next-hop %U", + format_ip6_address, &steer_pl->next_hop.ip6); + } + + u32 *color_i = 0; + u8 *s = NULL; + s = format (s, "[ "); + vec_foreach (color_i, steer_pl->color) + { + s = format (s, "%d, ", *color_i); + } + s = format (s, "\b\b ]"); + vlib_cli_output (vm, "· Color %s", s); + + switch (steer_pl->co_bits) + { + case SR_TE_CO_BITS_00: + vlib_cli_output (vm, "· CO-bits: 00"); + break; + case SR_TE_CO_BITS_01: + vlib_cli_output (vm, "· CO-bits: 01"); + break; + case SR_TE_CO_BITS_10: + vlib_cli_output (vm, "· CO-bits: 10"); + break; + case SR_TE_CO_BITS_11: + vlib_cli_output (vm, "· CO-bits: 11"); + break; + } + } + } + return 0; +} + +VLIB_CLI_COMMAND(show_sr_mpls_steering_policies_command, static)= +{ + .path = "show sr mpls steering policies", + .short_help = "show sr mpls steering policies", + .function = show_sr_mpls_steering_policies_command_fn, +}; + +clib_error_t * +sr_mpls_steering_init (vlib_main_t * vm) +{ + mpls_sr_main_t *sm = &sr_mpls_main; + + /* Init memory for function keys */ + sm->sr_steer_policies_hash.hash = NULL; + + sm->fib_table_EC = (u32) ~ 0; + sm->ec_labels = 0; + + return 0; +} + +VLIB_INIT_FUNCTION(sr_mpls_steering_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: eval: (c-set-style "gnu") End: + */ diff --git a/src/plugins/srmpls/sr_mpls_test.c b/src/plugins/srmpls/sr_mpls_test.c new file mode 100644 index 00000000000..7aff4c32b06 --- /dev/null +++ b/src/plugins/srmpls/sr_mpls_test.c @@ -0,0 +1,174 @@ +/* + *------------------------------------------------------------------ + * Copyright (c) 2021 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ +#include <vat/vat.h> +#include <vlibapi/api.h> +#include <vlibmemory/api.h> +#include <vppinfra/error.h> +#include <vpp/api/types.h> + +#define __plugin_msg_base sr_mpls_test_main.msg_id_base +#include <vlibapi/vat_helper_macros.h> + +/* Declare message IDs */ +#include <vnet/format_fns.h> +#include <plugins/srmpls/sr_mpls.api_enum.h> +#include <plugins/srmpls/sr_mpls.api_types.h> + +#define vl_endianfun /* define message structures */ +#include <plugins/srmpls/sr_mpls.api.h> +#undef vl_endianfun + +typedef struct +{ + /* API message ID base */ + u16 msg_id_base; + u32 ping_id; + vat_main_t *vat_main; +} sr_mpls_test_main_t; + +static sr_mpls_test_main_t sr_mpls_test_main; + +static int +api_sr_mpls_policy_mod (vat_main_t *vam) +{ + return -1; +} + +static int +api_sr_mpls_steering_add_del (vat_main_t *vam) +{ + return -1; +} + +static int +api_sr_mpls_policy_assign_endpoint_color (vat_main_t *vam) +{ + return -1; +} + +static int +api_sr_mpls_policy_add (vat_main_t *vam) +{ + unformat_input_t *i = vam->input; + vl_api_sr_mpls_policy_add_t *mp; + u32 bsid = 0; + u32 weight = 1; + u8 type = 0; + u8 n_segments = 0; + u32 sid; + u32 *segments = NULL; + int ret; + + /* Parse args required to build the message */ + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) + { + if (unformat (i, "bsid %d", &bsid)) + ; + else if (unformat (i, "weight %d", &weight)) + ; + else if (unformat (i, "spray")) + type = 1; + else if (unformat (i, "next %d", &sid)) + { + n_segments += 1; + vec_add1 (segments, htonl (sid)); + } + else + { + clib_warning ("parse error '%U'", format_unformat_error, i); + return -99; + } + } + + if (bsid == 0) + { + errmsg ("bsid not set"); + return -99; + } + + if (n_segments == 0) + { + errmsg ("no sid in segment stack"); + return -99; + } + + /* Construct the API message */ + M2 (SR_MPLS_POLICY_ADD, mp, sizeof (u32) * n_segments); + + mp->bsid = htonl (bsid); + mp->weight = htonl (weight); + mp->is_spray = type; + mp->n_segments = n_segments; + memcpy (mp->segments, segments, sizeof (u32) * n_segments); + vec_free (segments); + + /* send it... */ + S (mp); + + /* Wait for a reply... */ + W (ret); + return ret; +} + +static int +api_sr_mpls_policy_del (vat_main_t *vam) +{ + unformat_input_t *i = vam->input; + vl_api_sr_mpls_policy_del_t *mp; + u32 bsid = 0; + int ret; + + /* Parse args required to build the message */ + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) + { + if (unformat (i, "bsid %d", &bsid)) + ; + else + { + clib_warning ("parse error '%U'", format_unformat_error, i); + return -99; + } + } + + if (bsid == 0) + { + errmsg ("bsid not set"); + return -99; + } + + /* Construct the API message */ + M (SR_MPLS_POLICY_DEL, mp); + + mp->bsid = htonl (bsid); + + /* send it... */ + S (mp); + + /* Wait for a reply... 
*/ + W (ret); + return ret; +} + +#include <plugins/srmpls/sr_mpls.api_test.c> + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/urpf/CMakeLists.txt b/src/plugins/urpf/CMakeLists.txt index 2f44e3b2344..f665d30b0bb 100644 --- a/src/plugins/urpf/CMakeLists.txt +++ b/src/plugins/urpf/CMakeLists.txt @@ -22,6 +22,10 @@ add_vpp_plugin(urpf ip4_urpf.c ip6_urpf.c + INSTALL_HEADERS + urpf_dp.h + urpf.h + API_FILES urpf.api ) diff --git a/src/plugins/urpf/urpf.c b/src/plugins/urpf/urpf.c index e5209caafb4..1e7d6c0fb91 100644 --- a/src/plugins/urpf/urpf.c +++ b/src/plugins/urpf/urpf.c @@ -60,7 +60,17 @@ static const char *urpf_feats[N_AF][VLIB_N_DIR][URPF_N_MODES] = urpf_data_t *urpf_cfgs[N_AF][VLIB_N_DIR]; u8 * -format_urpf_mode (u8 * s, va_list * a) +format_urpf_trace (u8 *s, va_list *va) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *); + urpf_trace_t *t = va_arg (*va, urpf_trace_t *); + + return format (s, "uRPF:%d fib:%d", t->urpf, t->fib_index); +} + +__clib_export u8 * +format_urpf_mode (u8 *s, va_list *a) { urpf_mode_t mode = va_arg (*a, int); @@ -76,8 +86,8 @@ format_urpf_mode (u8 * s, va_list * a) return (format (s, "unknown")); } -static uword -unformat_urpf_mode (unformat_input_t * input, va_list * args) +__clib_export uword +unformat_urpf_mode (unformat_input_t *input, va_list *args) { urpf_mode_t *mode = va_arg (*args, urpf_mode_t *); @@ -94,7 +104,16 @@ unformat_urpf_mode (unformat_input_t * input, va_list * args) return 0; } -int +__clib_export int +urpf_feature_enable_disable (ip_address_family_t af, vlib_dir_t dir, + urpf_mode_t mode, u32 sw_if_index, int enable) +{ + return vnet_feature_enable_disable (urpf_feat_arcs[af][dir], + urpf_feats[af][dir][mode], sw_if_index, + enable, 0, 0); +} + +__clib_export int urpf_update (urpf_mode_t mode, u32 sw_if_index, ip_address_family_t af, vlib_dir_t dir, u32 table_id) { diff --git a/src/plugins/urpf/urpf.h b/src/plugins/urpf/urpf.h index 6983a2b440c..a40a25df16b 100644 --- a/src/plugins/urpf/urpf.h +++ b/src/plugins/urpf/urpf.h @@ -32,7 +32,15 @@ typedef enum urpf_mode_t_ #define URPF_N_MODES (URPF_MODE_STRICT+1) -extern u8 *format_urpf_mode (u8 * s, va_list * a); +typedef struct +{ + index_t urpf; + u32 fib_index; +} urpf_trace_t; + +u8 *format_urpf_trace (u8 *s, va_list *va); +u8 *format_urpf_mode (u8 *s, va_list *a); +uword unformat_urpf_mode (unformat_input_t *input, va_list *args); typedef struct { @@ -43,8 +51,8 @@ typedef struct extern urpf_data_t *urpf_cfgs[N_AF][VLIB_N_DIR]; -extern int urpf_update (urpf_mode_t mode, u32 sw_if_index, - ip_address_family_t af, vlib_dir_t dir, u32 table_id); +int urpf_update (urpf_mode_t mode, u32 sw_if_index, ip_address_family_t af, + vlib_dir_t dir, u32 table_id); #endif diff --git a/src/plugins/urpf/urpf_dp.h b/src/plugins/urpf/urpf_dp.h index 816d8b70b90..b17fed7e04b 100644 --- a/src/plugins/urpf/urpf_dp.h +++ b/src/plugins/urpf/urpf_dp.h @@ -53,22 +53,6 @@ * * This file contains the interface unicast source check. 
*/ -typedef struct -{ - index_t urpf; -} urpf_trace_t; - -static u8 * -format_urpf_trace (u8 * s, va_list * va) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *); - urpf_trace_t *t = va_arg (*va, urpf_trace_t *); - - s = format (s, "uRPF:%d", t->urpf); - - return s; -} #define foreach_urpf_error \ _(DROP, "uRPF Drop") \ @@ -87,10 +71,157 @@ typedef enum URPF_N_NEXT, } urpf_next_t; +static_always_inline u32 +urpf_get_fib_index (vlib_buffer_t *b, ip_address_family_t af, vlib_dir_t dir) +{ + u32 sw_if_index = vnet_buffer (b)->sw_if_index[dir]; + return vec_elt (urpf_cfgs[af][dir], sw_if_index).fib_index; +} + +static_always_inline void +urpf_perform_check_x1 (ip_address_family_t af, vlib_dir_t dir, + urpf_mode_t mode, vlib_buffer_t *b, const u8 *h, + u32 fib_index, load_balance_t **lb, u32 *pass) +{ + load_balance_t *llb; + u32 lpass; + u32 lb_index; + + ASSERT (fib_index != ~0); + + if (AF_IP4 == af) + { + const ip4_header_t *ip; + + ip = (ip4_header_t *) h; + + lb_index = ip4_fib_forwarding_lookup (fib_index, &ip->src_address); + + /* Pass multicast. */ + lpass = (ip4_address_is_multicast (&ip->src_address) || + ip4_address_is_global_broadcast (&ip->src_address)); + } + else + { + const ip6_header_t *ip; + + ip = (ip6_header_t *) h; + + lb_index = ip6_fib_table_fwding_lookup (fib_index, &ip->src_address); + lpass = ip6_address_is_multicast (&ip->src_address); + } + + llb = load_balance_get (lb_index); + + if (URPF_MODE_STRICT == mode) + { + int res; + + res = fib_urpf_check (llb->lb_urpf, vnet_buffer (b)->sw_if_index[dir]); + if (VLIB_RX == dir) + lpass |= res; + else + { + lpass |= !res && fib_urpf_check_size (llb->lb_urpf); + lpass |= b->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED; + } + } + else + lpass |= fib_urpf_check_size (llb->lb_urpf); + + *lb = llb; + *pass = lpass; +} + +static_always_inline void +urpf_perform_check_x2 (ip_address_family_t af, vlib_dir_t dir, + urpf_mode_t mode, vlib_buffer_t *b0, vlib_buffer_t *b1, + const u8 *h0, const u8 *h1, u32 fib_index0, + u32 fib_index1, load_balance_t **lb0, + load_balance_t **lb1, u32 *pass0, u32 *pass1) +{ + load_balance_t *llb0, *llb1; + u32 lpass0, lpass1; + u32 lb_index0, lb_index1; + + ASSERT (fib_index0 != ~0); + ASSERT (fib_index1 != ~0); + + if (AF_IP4 == af) + { + const ip4_header_t *ip0, *ip1; + + ip0 = (ip4_header_t *) h0; + ip1 = (ip4_header_t *) h1; + + ip4_fib_forwarding_lookup_x2 (fib_index0, fib_index1, &ip0->src_address, + &ip1->src_address, &lb_index0, &lb_index1); + /* Pass multicast. */ + lpass0 = (ip4_address_is_multicast (&ip0->src_address) || + ip4_address_is_global_broadcast (&ip0->src_address)); + lpass1 = (ip4_address_is_multicast (&ip1->src_address) || + ip4_address_is_global_broadcast (&ip1->src_address)); + } + else + { + const ip6_header_t *ip0, *ip1; + + ip0 = (ip6_header_t *) h0; + ip1 = (ip6_header_t *) h1; + + lb_index0 = ip6_fib_table_fwding_lookup (fib_index0, &ip0->src_address); + lb_index1 = ip6_fib_table_fwding_lookup (fib_index1, &ip1->src_address); + lpass0 = ip6_address_is_multicast (&ip0->src_address); + lpass1 = ip6_address_is_multicast (&ip1->src_address); + } + + llb0 = load_balance_get (lb_index0); + llb1 = load_balance_get (lb_index1); + + if (URPF_MODE_STRICT == mode) + { + /* for RX the check is: would this source adddress be + * forwarded out of the interface on which it was recieved, + * if yes allow. 
For TX it's; would this source address be + * forwarded out of the interface through which it is being + * sent, if yes drop. + */ + int res0, res1; + + res0 = + fib_urpf_check (llb0->lb_urpf, vnet_buffer (b0)->sw_if_index[dir]); + res1 = + fib_urpf_check (llb1->lb_urpf, vnet_buffer (b1)->sw_if_index[dir]); + + if (VLIB_RX == dir) + { + lpass0 |= res0; + lpass1 |= res1; + } + else + { + lpass0 |= !res0 && fib_urpf_check_size (llb0->lb_urpf); + lpass1 |= !res1 && fib_urpf_check_size (llb1->lb_urpf); + + /* allow locally generated */ + lpass0 |= b0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED; + lpass1 |= b1->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED; + } + } + else + { + lpass0 |= fib_urpf_check_size (llb0->lb_urpf); + lpass1 |= fib_urpf_check_size (llb1->lb_urpf); + } + + *lb0 = llb0; + *lb1 = llb1; + *pass0 = lpass0; + *pass1 = lpass1; +} + static_always_inline uword -urpf_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, +urpf_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, ip_address_family_t af, vlib_dir_t dir, urpf_mode_t mode) { vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b; @@ -106,8 +237,8 @@ urpf_inline (vlib_main_t * vm, while (n_left >= 4) { - u32 pass0, lb_index0, pass1, lb_index1; - const load_balance_t *lb0, *lb1; + u32 pass0, pass1; + load_balance_t *lb0 = 0, *lb1 = 0; u32 fib_index0, fib_index1; const u8 *h0, *h1; @@ -121,87 +252,32 @@ urpf_inline (vlib_main_t * vm, h0 = (u8 *) vlib_buffer_get_current (b[0]); h1 = (u8 *) vlib_buffer_get_current (b[1]); - if (VLIB_TX == dir) { h0 += vnet_buffer (b[0])->ip.save_rewrite_length; h1 += vnet_buffer (b[1])->ip.save_rewrite_length; } - fib_index0 = - urpf_cfgs[af][dir][vnet_buffer (b[0])->sw_if_index[dir]].fib_index; - fib_index1 = - urpf_cfgs[af][dir][vnet_buffer (b[1])->sw_if_index[dir]].fib_index; + fib_index0 = urpf_get_fib_index (b[0], af, dir); + fib_index1 = urpf_get_fib_index (b[1], af, dir); + urpf_perform_check_x2 (af, dir, mode, b[0], b[1], h0, h1, fib_index0, + fib_index1, &lb0, &lb1, &pass0, &pass1); - if (AF_IP4 == af) - { - const ip4_header_t *ip0, *ip1; - - ip0 = (ip4_header_t *) h0; - ip1 = (ip4_header_t *) h1; - - ip4_fib_forwarding_lookup_x2 (fib_index0, - fib_index1, - &ip0->src_address, - &ip1->src_address, - &lb_index0, &lb_index1); - /* Pass multicast. */ - pass0 = (ip4_address_is_multicast (&ip0->src_address) || - ip4_address_is_global_broadcast (&ip0->src_address)); - pass1 = (ip4_address_is_multicast (&ip1->src_address) || - ip4_address_is_global_broadcast (&ip1->src_address)); - } - else + if (b[0]->flags & VLIB_BUFFER_IS_TRACED) { - const ip6_header_t *ip0, *ip1; - - ip0 = (ip6_header_t *) h0; - ip1 = (ip6_header_t *) h1; - - lb_index0 = ip6_fib_table_fwding_lookup (fib_index0, - &ip0->src_address); - lb_index1 = ip6_fib_table_fwding_lookup (fib_index1, - &ip1->src_address); - pass0 = ip6_address_is_multicast (&ip0->src_address); - pass1 = ip6_address_is_multicast (&ip1->src_address); - } - - lb0 = load_balance_get (lb_index0); - lb1 = load_balance_get (lb_index1); + urpf_trace_t *t; - if (URPF_MODE_STRICT == mode) - { - /* for RX the check is: would this source adddress be forwarded - * out of the interface on which it was recieved, if yes allow. - * For TX it's; would this source address be forwarded out of the - * interface through which it is being sent, if yes drop. 
- */ - int res0, res1; - - res0 = fib_urpf_check (lb0->lb_urpf, - vnet_buffer (b[0])->sw_if_index[dir]); - res1 = fib_urpf_check (lb1->lb_urpf, - vnet_buffer (b[1])->sw_if_index[dir]); - - if (VLIB_RX == dir) - { - pass0 |= res0; - pass1 |= res1; - } - else - { - pass0 |= !res0 && fib_urpf_check_size (lb0->lb_urpf); - pass1 |= !res1 && fib_urpf_check_size (lb1->lb_urpf); - - /* allow locally generated */ - pass0 |= b[0]->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED; - pass1 |= b[1]->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED; - } + t = vlib_add_trace (vm, node, b[0], sizeof (*t)); + t->urpf = lb0 ? lb0->lb_urpf : ~0; + t->fib_index = fib_index0; } - else + if (b[1]->flags & VLIB_BUFFER_IS_TRACED) { - pass0 |= fib_urpf_check_size (lb0->lb_urpf); - pass1 |= fib_urpf_check_size (lb1->lb_urpf); + urpf_trace_t *t; + + t = vlib_add_trace (vm, node, b[1], sizeof (*t)); + t->urpf = lb1 ? lb1->lb_urpf : ~0; + t->fib_index = fib_index1; } if (PREDICT_TRUE (pass0)) @@ -218,22 +294,6 @@ urpf_inline (vlib_main_t * vm, next[1] = URPF_NEXT_DROP; b[1]->error = node->errors[URPF_ERROR_DROP]; } - - if (b[0]->flags & VLIB_BUFFER_IS_TRACED) - { - urpf_trace_t *t; - - t = vlib_add_trace (vm, node, b[0], sizeof (*t)); - t->urpf = lb0->lb_urpf; - } - if (b[1]->flags & VLIB_BUFFER_IS_TRACED) - { - urpf_trace_t *t; - - t = vlib_add_trace (vm, node, b[1], sizeof (*t)); - t->urpf = lb1->lb_urpf; - } - b += 2; next += 2; n_left -= 2; @@ -241,8 +301,8 @@ urpf_inline (vlib_main_t * vm, while (n_left) { - u32 pass0, lb_index0, fib_index0; - const load_balance_t *lb0; + u32 pass0, fib_index0; + load_balance_t *lb0 = 0; const u8 *h0; h0 = (u8 *) vlib_buffer_get_current (b[0]); @@ -250,51 +310,18 @@ urpf_inline (vlib_main_t * vm, if (VLIB_TX == dir) h0 += vnet_buffer (b[0])->ip.save_rewrite_length; - fib_index0 = - urpf_cfgs[af][dir][vnet_buffer (b[0])->sw_if_index[dir]].fib_index; - - if (AF_IP4 == af) - { - const ip4_header_t *ip0; - - ip0 = (ip4_header_t *) h0; - - lb_index0 = ip4_fib_forwarding_lookup (fib_index0, - &ip0->src_address); + fib_index0 = urpf_get_fib_index (b[0], af, dir); + urpf_perform_check_x1 (af, dir, mode, b[0], h0, fib_index0, &lb0, + &pass0); - /* Pass multicast. */ - pass0 = (ip4_address_is_multicast (&ip0->src_address) || - ip4_address_is_global_broadcast (&ip0->src_address)); - } - else + if (b[0]->flags & VLIB_BUFFER_IS_TRACED) { - const ip6_header_t *ip0; - - ip0 = (ip6_header_t *) h0; - - lb_index0 = ip6_fib_table_fwding_lookup (fib_index0, - &ip0->src_address); - pass0 = ip6_address_is_multicast (&ip0->src_address); - } - - lb0 = load_balance_get (lb_index0); + urpf_trace_t *t; - if (URPF_MODE_STRICT == mode) - { - int res0; - - res0 = fib_urpf_check (lb0->lb_urpf, - vnet_buffer (b[0])->sw_if_index[dir]); - if (VLIB_RX == dir) - pass0 |= res0; - else - { - pass0 |= !res0 && fib_urpf_check_size (lb0->lb_urpf); - pass0 |= b[0]->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED; - } + t = vlib_add_trace (vm, node, b[0], sizeof (*t)); + t->urpf = lb0 ? 
lb0->lb_urpf : ~0; + t->fib_index = fib_index0; } - else - pass0 |= fib_urpf_check_size (lb0->lb_urpf); if (PREDICT_TRUE (pass0)) vnet_feature_next_u16 (&next[0], b[0]); @@ -303,14 +330,6 @@ urpf_inline (vlib_main_t * vm, next[0] = URPF_NEXT_DROP; b[0]->error = node->errors[URPF_ERROR_DROP]; } - - if (b[0]->flags & VLIB_BUFFER_IS_TRACED) - { - urpf_trace_t *t; - - t = vlib_add_trace (vm, node, b[0], sizeof (*t)); - t->urpf = lb0->lb_urpf; - } b++; next++; n_left--; diff --git a/src/plugins/wireguard/wireguard_chachapoly.c b/src/plugins/wireguard/wireguard_chachapoly.c index 0dd7908d2e2..ad644ff6cb8 100644 --- a/src/plugins/wireguard/wireguard_chachapoly.c +++ b/src/plugins/wireguard/wireguard_chachapoly.c @@ -72,11 +72,11 @@ wg_xchacha20poly1305_encrypt (vlib_main_t *vm, u8 *src, u32 src_len, u8 *dst, u64 h_nonce; clib_memcpy (&h_nonce, nonce + 16, sizeof (h_nonce)); - h_nonce = le64toh (h_nonce); + h_nonce = clib_little_to_host_u64 (h_nonce); hchacha20 (derived_key, nonce, key); for (i = 0; i < (sizeof (derived_key) / sizeof (derived_key[0])); i++) - (derived_key[i]) = htole32 ((derived_key[i])); + (derived_key[i]) = clib_host_to_little_u32 ((derived_key[i])); uint32_t key_idx; @@ -102,11 +102,11 @@ wg_xchacha20poly1305_decrypt (vlib_main_t *vm, u8 *src, u32 src_len, u8 *dst, u64 h_nonce; clib_memcpy (&h_nonce, nonce + 16, sizeof (h_nonce)); - h_nonce = le64toh (h_nonce); + h_nonce = clib_little_to_host_u64 (h_nonce); hchacha20 (derived_key, nonce, key); for (i = 0; i < (sizeof (derived_key) / sizeof (derived_key[0])); i++) - (derived_key[i]) = htole32 ((derived_key[i])); + (derived_key[i]) = clib_host_to_little_u32 ((derived_key[i])); uint32_t key_idx; diff --git a/src/plugins/wireguard/wireguard_noise.c b/src/plugins/wireguard/wireguard_noise.c index 5fe2e44b03b..c3f28f442f5 100644 --- a/src/plugins/wireguard/wireguard_noise.c +++ b/src/plugins/wireguard/wireguard_noise.c @@ -751,8 +751,8 @@ noise_tai64n_now (uint8_t output[NOISE_TIMESTAMP_LEN]) unix_nanosec &= REJECT_INTERVAL_MASK; /* https://cr.yp.to/libtai/tai64.html */ - sec = htobe64 (0x400000000000000aULL + unix_sec); - nsec = htobe32 (unix_nanosec); + sec = clib_host_to_big_u64 (0x400000000000000aULL + unix_sec); + nsec = clib_host_to_big_u32 (unix_nanosec); /* memcpy to output buffer, assuming output could be unaligned. */ clib_memcpy (output, &sec, sizeof (sec)); |