/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include

/**
 * @file
 * @brief GENEVE.
 *
 * GENEVE provides the features needed to allow L2 bridge domains (BDs)
 * to span multiple servers. This is done by building an L2 overlay on
 * top of an L3 network underlay using GENEVE tunnels.
 *
 * This makes it possible for servers to be co-located in the same data
 * center or be separated geographically as long as they are reachable
 * through the underlay L3 network.
 */

geneve_main_t geneve_main;

u8 *
format_geneve_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  geneve_encap_trace_t *t = va_arg (*args, geneve_encap_trace_t *);

  s = format (s, "GENEVE encap to geneve_tunnel%d vni %d",
	      t->tunnel_index, t->vni);
  return s;
}

static u8 *
format_decap_next (u8 * s, va_list * args)
{
  u32 next_index = va_arg (*args, u32);

  switch (next_index)
    {
    case GENEVE_INPUT_NEXT_DROP:
      return format (s, "drop");
    case GENEVE_INPUT_NEXT_L2_INPUT:
      return format (s, "l2");
    default:
      return format (s, "index %d", next_index);
    }
  return s;
}

u8 *
format_geneve_tunnel (u8 * s, va_list * args)
{
  geneve_tunnel_t *t = va_arg (*args, geneve_tunnel_t *);
  geneve_main_t *ngm = &geneve_main;

  s = format (s, "[%d] lcl %U rmt %U vni %d fib-idx %d sw-if-idx %d ",
	      t - ngm->tunnels,
	      format_ip46_address, &t->local, IP46_TYPE_ANY,
	      format_ip46_address, &t->remote, IP46_TYPE_ANY,
	      t->vni, t->encap_fib_index, t->sw_if_index);

  s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
  s = format (s, "decap-next-%U ", format_decap_next, t->decap_next_index);

  if (PREDICT_FALSE (ip46_address_is_multicast (&t->remote)))
    s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);

  return s;
}

static u8 *
format_geneve_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  return format (s, "geneve_tunnel%d", dev_instance);
}

static clib_error_t *
geneve_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
    VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
  vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (geneve_device_class, static) = {
  .name = "GENEVE",
  .format_device_name = format_geneve_name,
  .format_tx_trace = format_geneve_encap_trace,
  .admin_up_down_function = geneve_interface_admin_up_down,
};
/* *INDENT-ON* */

static u8 *
format_geneve_header_with_length (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  s = format (s, "unimplemented dev %u", dev_instance);
  return s;
}

/* *INDENT-OFF* */
VNET_HW_INTERFACE_CLASS (geneve_hw_class) = {
  .name = "GENEVE",
  .format_header = format_geneve_header_with_length,
  .build_rewrite = default_build_rewrite,
};
/* *INDENT-ON* */

static void
geneve_tunnel_restack_dpo (geneve_tunnel_t * t)
{
  dpo_id_t dpo = DPO_INVALID;
  u32 encap_index = ip46_address_is_ip4 (&t->remote) ?
    geneve4_encap_node.index : geneve6_encap_node.index;
  fib_forward_chain_type_t forw_type = ip46_address_is_ip4 (&t->remote) ?
    FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6;

  fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo);
  dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
  dpo_reset (&dpo);
}

static geneve_tunnel_t *
geneve_tunnel_from_fib_node (fib_node_t * node)
{
  ASSERT (FIB_NODE_TYPE_GENEVE_TUNNEL == node->fn_type);
  return ((geneve_tunnel_t *) (((char *) node) -
			       STRUCT_OFFSET_OF (geneve_tunnel_t, node)));
}

/**
 * Function definition to backwalk a FIB node -
 * Here we will restack the new dpo of GENEVE DIP to encap node.
 */
static fib_node_back_walk_rc_t
geneve_tunnel_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
{
  geneve_tunnel_restack_dpo (geneve_tunnel_from_fib_node (node));
  return (FIB_NODE_BACK_WALK_CONTINUE);
}

/**
 * Function definition to get a FIB node from its index
 */
static fib_node_t *
geneve_tunnel_fib_node_get (fib_node_index_t index)
{
  geneve_tunnel_t *t;
  geneve_main_t *vxm = &geneve_main;

  t = pool_elt_at_index (vxm->tunnels, index);
  return (&t->node);
}

/**
 * Function definition to inform the FIB node that its last lock has gone.
 */
static void
geneve_tunnel_last_lock_gone (fib_node_t * node)
{
  /*
   * The GENEVE tunnel is a root of the graph. As such
   * it never has children and thus is never locked.
   */
  ASSERT (0);
}

/*
 * Virtual function table registered by GENEVE tunnels
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t geneve_vft = {
  .fnv_get = geneve_tunnel_fib_node_get,
  .fnv_last_lock = geneve_tunnel_last_lock_gone,
  .fnv_back_walk = geneve_tunnel_back_walk,
};

#define foreach_copy_field      \
_(vni)                          \
_(mcast_sw_if_index)            \
_(encap_fib_index)              \
_(decap_next_index)             \
_(local)                        \
_(remote)

static int
geneve_rewrite (geneve_tunnel_t * t, bool is_ip6)
{
  union
  {
    ip4_geneve_header_t *h4;
    ip6_geneve_header_t *h6;
    u8 *rw;
  } r = { .rw = 0 };

  int len = is_ip6 ?
    sizeof *r.h6 : sizeof *r.h4;
#if SUPPORT_OPTIONS_HEADER==1
  len += t->options_len;
#endif

  vec_validate_aligned (r.rw, len - 1, CLIB_CACHE_LINE_BYTES);

  udp_header_t *udp;
  geneve_header_t *geneve;

  /* Fixed portion of the (outer) ip header */
  if (!is_ip6)
    {
      ip4_header_t *ip = &r.h4->ip4;
      udp = &r.h4->udp, geneve = &r.h4->geneve;
      ip->ip_version_and_header_length = 0x45;
      ip->ttl = 254;
      ip->protocol = IP_PROTOCOL_UDP;

      ip->src_address = t->local.ip4;
      ip->dst_address = t->remote.ip4;

      /* we fix up the ip4 header length and checksum after-the-fact */
      ip->checksum = ip4_header_checksum (ip);
    }
  else
    {
      ip6_header_t *ip = &r.h6->ip6;
      udp = &r.h6->udp, geneve = &r.h6->geneve;
      ip->ip_ver
/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Copyright (c) 2015 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */

#ifndef _BNX2X_RXTX_H_
#define _BNX2X_RXTX_H_

#define DEFAULT_TX_FREE_THRESH   512
#define RTE_PMD_BNX2X_TX_MAX_BURST 1

/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct bnx2x_rx_entry {
	struct rte_mbuf     *mbuf;                /**< mbuf associated with RX descriptor. */
};

/**
 * Structure associated with each RX queue.
 */
struct bnx2x_rx_queue {
	struct rte_mempool         *mb_pool;             /**< mbuf pool to populate RX ring. */
	union eth_rx_cqe           *cq_ring;             /**< RCQ ring virtual address. */
	uint64_t                   cq_ring_phys_addr;    /**< RCQ ring DMA address. */
	uint64_t                   *rx_ring;             /**< RX ring virtual address. */
	uint64_t                   rx_ring_phys_addr;    /**< RX ring DMA address. */
	struct rte_mbuf            **sw_ring;            /**< address of RX software ring. */
	struct rte_mbuf            *pkt_first_seg;       /**< First segment of current packet. */
	struct rte_mbuf            *pkt_last_seg;        /**< Last segment of current packet. */
	uint16_t                   nb_cq_pages;          /**< number of RCQ pages. */
	uint16_t                   nb_rx_desc;           /**< number of RX descriptors. */
	uint16_t                   nb_rx_pages;          /**< number of RX pages. */
	uint16_t                   rx_bd_head;           /**< Index of current rx bd. */
	uint16_t                   rx_bd_tail;           /**< Index of last rx bd. */
	uint16_t                   rx_cq_head;           /**< Index of current rcq bd. */
	uint16_t                   rx_cq_tail;           /**< Index of last rcq bd. */
	uint16_t                   queue_id;             /**< RX queue index. */
	uint8_t                    port_id;              /**< Device port identifier. */
	struct bnx2x_softc           *sc;                  /**< Ptr to dev_private data. */
};
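
/*
 * Rough sketch (assumed behaviour, not the driver code): at RX init each
 * sw_ring slot is typically filled with an mbuf taken from mb_pool, and the
 * matching rx_ring BD is pointed at that mbuf's buffer, along the lines of:
 *
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(rxq->mb_pool);
 *	rxq->sw_ring[idx] = m;
 *	rxq->rx_ring[idx] = rte_cpu_to_le_64(m->buf_physaddr);
 */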

/**
 * Structure associated with each TX queue.
 */
struct bnx2x_tx_queue {
	union eth_tx_bd_types      *tx_ring;             /**< TX ring virtual address. */
	uint64_t                   tx_ring_phys_addr;    /**< TX ring DMA address. */
	struct rte_mbuf            **sw_ring;            /**< virtual address of SW ring. */
	uint16_t                   tx_pkt_tail;          /**< Index of current tx pkt. */
	uint16_t                   tx_pkt_head;          /**< Index of last pkt counted by txeof. */
	uint16_t                   tx_bd_tail;           /**< Index of current tx bd. */
	uint16_t                   tx_bd_head;           /**< Index of last bd counted by txeof. */
	uint16_t                   nb_tx_desc;           /**< number of TX descriptors. */
	uint16_t                   tx_free_thresh;       /**< minimum TX before freeing. */
	uint16_t                   nb_tx_avail;          /**< Number of TX descriptors available. */
	uint16_t                   nb_tx_pages;          /**< number of TX pages */
	uint16_t                   queue_id;             /**< TX queue index. */
	uint8_t                    port_id;              /**< Device port identifier. */
	struct bnx2x_softc           *sc;                  /**< Ptr to dev_private data */
};
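
/*
 * Illustrative sketch only (not part of the driver): one plausible way the
 * nb_tx_avail / tx_free_thresh fields above interact.  The helper name is
 * hypothetical; the real reclaim logic lives in the PMD's TX path in
 * bnx2x_rxtx.c.
 */
static inline int
bnx2x_txq_needs_completion_scan(const struct bnx2x_tx_queue *txq)
{
	/* Once the count of free TX descriptors falls below the configured
	 * threshold, completed descriptors between tx_bd_head and tx_bd_tail
	 * should be reclaimed before more packets are queued. */
	return txq->nb_tx_avail < txq->tx_free_thresh;
}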

int bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      uint16_t nb_rx_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mb_pool);

int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      uint16_t nb_tx_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);

void bnx2x_dev_rx_queue_release(void *rxq);
void bnx2x_dev_tx_queue_release(void *txq);
int bnx2x_dev_rx_init(struct rte_eth_dev *dev);
void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
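
/*
 * For orientation only: a hedged sketch of how these entry points are
 * typically hooked up by the driver.  The field names below follow the
 * generic DPDK eth_dev_ops layout, and this wiring would normally live in
 * bnx2x_ethdev.c rather than in this header:
 *
 *	static const struct eth_dev_ops dev_ops = {
 *		.rx_queue_setup   = bnx2x_dev_rx_queue_setup,
 *		.tx_queue_setup   = bnx2x_dev_tx_queue_setup,
 *		.rx_queue_release = bnx2x_dev_rx_queue_release,
 *		.tx_queue_release = bnx2x_dev_tx_queue_release,
 *	};
 */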

#endif /* _BNX2X_RXTX_H_ */
}

static clib_error_t *
set_ip4_geneve_bypass (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  return set_ip_geneve_bypass (0, input, cmd);
}

/*?
 * This command adds the 'ip4-geneve-bypass' graph node for a given interface.
 * By adding the IPv4 geneve-bypass graph node to an interface, the node
 * checks for and validates incoming GENEVE packets and bypasses the
 * ip4-lookup, ip4-local and ip4-udp-lookup nodes to speed up GENEVE packet
 * forwarding. The extra overhead this node adds for non-GENEVE packets is
 * kept to a minimum.
 *
 * @cliexpar
 * @parblock
 * Example of graph node before ip4-geneve-bypass is enabled:
 * @cliexstart{show vlib graph ip4-geneve-bypass}
 *            Name                      Next                    Previous
 * ip4-geneve-bypass                error-drop [0]
 *                                geneve4-input [1]
 *                                 ip4-lookup [2]
 * @cliexend
 *
 * Example of how to enable ip4-geneve-bypass on an interface:
 * @cliexcmd{set interface ip geneve-bypass GigabitEthernet2/0/0}
 *
 * Example of graph node after ip4-geneve-bypass is enabled:
 * @cliexstart{show vlib graph ip4-geneve-bypass}
 *            Name                      Next                    Previous
 * ip4-geneve-bypass                error-drop [0]               ip4-input
 *                                geneve4-input [1]       ip4-input-no-checksum
 *                                 ip4-lookup [2]
 * @cliexend
 *
 * Example of how to display the feature enabled on an interface:
 * @cliexstart{show ip interface features GigabitEthernet2/0/0}
 * IP feature paths configured on GigabitEthernet2/0/0...
 * ...
 * ipv4 unicast:
 *   ip4-geneve-bypass
 *   ip4-lookup
 * ...
 * @cliexend
 *
 * Example of how to disable ip4-geneve-bypass on an interface:
 * @cliexcmd{set interface ip geneve-bypass GigabitEthernet2/0/0 del}
 * @endparblock
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (set_interface_ip_geneve_bypass_command, static) = {
  .path = "set interface ip geneve-bypass",
  .function = set_ip4_geneve_bypass,
  .short_help = "set interface ip geneve-bypass <interface> [del]",
};
/* *INDENT-ON* */

static clib_error_t *
set_ip6_geneve_bypass (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  return set_ip_geneve_bypass (1, input, cmd);
}

/*?
 * This command adds the 'ip6-geneve-bypass' graph node for a given interface.
 * By adding the IPv6 geneve-bypass graph node to an interface, the node
 * checks for and validates incoming GENEVE packets and bypasses the
 * ip6-lookup, ip6-local and ip6-udp-lookup nodes to speed up GENEVE packet
 * forwarding. The extra overhead this node adds for non-GENEVE packets is
 * kept to a minimum.
 *
 * @cliexpar
 * @parblock
 * Example of graph node before ip6-geneve-bypass is enabled:
 * @cliexstart{show vlib graph ip6-geneve-bypass}
 *            Name                      Next                    Previous
 * ip6-geneve-bypass                error-drop [0]
 *                                geneve6-input [1]
 *                                 ip6-lookup [2]
 * @cliexend
 *
 * Example of how to enable ip6-geneve-bypass on an interface:
 * @cliexcmd{set interface ip6 geneve-bypass GigabitEthernet2/0/0}
 *
 * Example of graph node after ip6-geneve-bypass is enabled:
 * @cliexstart{show vlib graph ip6-geneve-bypass}
 *            Name                      Next                    Previous
 * ip6-geneve-bypass                error-drop [0]               ip6-input
 *                                geneve6-input [1]       ip4-input-no-checksum
 *                                 ip6-lookup [2]
 * @cliexend
 *
 * Example of how to display the feature enabled on an interface:
 * @cliexstart{show ip interface features GigabitEthernet2/0/0}
 * IP feature paths configured on GigabitEthernet2/0/0...
 * ...
 * ipv6 unicast:
 *   ip6-geneve-bypass
 *   ip6-lookup
 * ...
 * @cliexend
 *
 * Example of how to disable ip6-geneve-bypass on an interface:
 * @cliexcmd{set interface ip6 geneve-bypass GigabitEthernet2/0/0 del}
 * @endparblock
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (set_interface_ip6_geneve_bypass_command, static) = {
  .path = "set interface ip6 geneve-bypass",
  .function = set_ip6_geneve_bypass,
  .short_help = "set interface ip6 geneve-bypass <interface> [del]",
};
/* *INDENT-ON* */

clib_error_t *
geneve_init (vlib_main_t * vm)
{
  geneve_main_t *vxm = &geneve_main;

  vxm->vnet_main = vnet_get_main ();
  vxm->vlib_main = vm;

  /* initialize the ip6 hash */
  vxm->geneve6_tunnel_by_key =
    hash_create_mem (0, sizeof (geneve6_tunnel_key_t), sizeof (uword));
  vxm->vtep6 = hash_create_mem (0, sizeof (ip6_address_t), sizeof (uword));
  vxm->mcast_shared =
    hash_create_mem (0, sizeof (ip46_address_t), sizeof (mcast_shared_t));

  fib_node_register_type (FIB_NODE_TYPE_GENEVE_TUNNEL, &geneve_vft);

  return 0;
}

VLIB_INIT_FUNCTION (geneve_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */