# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/shared/default.robot
|
| Force Tags | 2_NODE_SINGLE_LINK_TOPO | DEVICETEST | HW_ENV | DCR_ENV | SCAPY
| ... | NIC_Virtual | ETH | L2XCFWD | BASE | VHOST | 1VM | DRV_VFIO_PCI
| ... | ethipv4-l2xcbase-eth-2vhost-1vm
|
| Suite Setup | Setup suite single link | scapy
| Test Setup | Setup test
| Test Teardown | Tear down test | packet_trace | vhost
|
| Test Template | Local Template
|
| Documentation | *L2 cross-connect test cases with vhost user interface*
|
| ... | *[Top] Network Topologies:* TG-DUT1-TG 2-node circular topology with \
| ... | VM and single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 switching of IPv4.
| ... | *[Cfg] DUT configuration:* DUT1 is configured with L2 cross-connect \
| ... | (L2XC) switching. Qemu Guest is connected to VPP via vhost-user \
| ... | interfaces. Guest is configured with VPP l2 cross-connect \
| ... | interconnecting vhost-user interfaces.
| ... | *[Ver] TG verification:* Test IPv4 packets with IP protocol=61 are \
| ... | sent in both directions by TG on links to DUT1 via VM; on receive TG \
| ... | verifies packets for correctness and their IPv4 src-addr, dst-addr \
| ... | and MAC addresses.
| ... | *[Ref] Applicable standard specifications:* RFC792
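#
# TG-side packet generation and verification live in a separate Scapy traffic
# script, not in this suite. A minimal sketch of the kind of packet assumed to
# be sent and checked (addresses below are illustrative placeholders, not the
# CSIT traffic-script API):
#
#   from scapy.layers.l2 import Ether
#   from scapy.layers.inet import IP
#   # IP protocol 61 ("any host internal protocol") carries no L4 header,
#   # so only the Ethernet and IPv4 fields are generated and later verified.
#   pkt = (Ether(src="02:00:00:00:00:01", dst="02:00:00:00:00:02") /
#          IP(src="192.168.0.1", dst="192.168.0.2", proto=61))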

*** Variables ***
| @{plugins_to_enable}= | dpdk_plugin.so
| ${crypto_type}= | ${None}
| ${nic_name}= | virtual
| ${nic_driver}= | vfio-pci
| ${overhead}= | ${0}
| ${nf_chains}= | ${1}
| ${nf_nodes}= | ${1}
| ${nf_dtc}= | ${1}
| ${nf_dtcr}= | ${1}

*** Keywords ***
| Local Template
| | [Documentation]
| | ... | [Top] TG=DUT=VM. [Enc] Eth-IPv4. [Cfg] On DUT configure \
| | ... | two L2 cross-connects (L2XC), each with one untagged interface \
| | ... | to TG and untagged i/f to local VM over vhost-user. [Ver] Make \
| | ... | TG send IPv4 packets in both directions between two of its \
| | ... | i/fs to be switched by DUT to and from VM; verify all packets \
| | ... | are received. [Ref]
| |
| | ... | *Arguments:*
| | ... | - frame_size - Framesize in Bytes in integer. Type: integer
| | ... | - phy_cores - Number of physical cores. Type: integer
| | ... | - rxq - Number of RX queues, default value: ${None}. Type: integer
| |
| | [Arguments] | ${frame_size} | ${phy_cores} | ${rxq}=${None}
| |
| | Set Test Variable | \${frame_size}
| |
| | Given Set Max Rate And Jumbo
| | And Add worker threads to all DUTs | ${phy_cores} | ${rxq}
| | And Pre-initialize layer driver | ${nic_driver}
| | And Apply startup configuration on all VPP DUTs | with_trace=${True}
| | When Initialize layer driver | ${nic_driver}
| | And Initialize layer interface
| | And Initialize L2 xconnect with Vhost-User | nf_nodes=${nf_nodes}
| | And Configure chains of NFs connected via vhost-user
| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | vnf=vpp_chain_l2xc
| | ... | pinning=${False}
| | Then Send IPv4 bidirectionally and verify received packets | ${tg}
| | ... | ${tg_if1} | ${tg_if2}
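#
# On DUT1 the keyword sequence above boils down to cross-connecting each
# TG-facing interface with one vhost-user interface (and similarly inside the
# guest VPP). A hedged sketch of the equivalent VPP debug CLI, using
# placeholder interface names rather than the ones actually created here;
# note that an L2 xconnect is unidirectional, so each pair needs two entries:
#
#   set interface l2 xconnect GigabitEthernet0/8/0 VirtualEthernet0/0/0
#   set interface l2 xconnect VirtualEthernet0/0/0 GigabitEthernet0/8/0
#   set interface l2 xconnect GigabitEthernet0/9/0 VirtualEthernet0/0/1
#   set interface l2 xconnect VirtualEthernet0/0/1 GigabitEthernet0/9/0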

*** Test Cases ***
| tc01-64B-ethipv4-l2xcbase-eth-2vhost-1vm-dev
| | [Tags] | 64B
| | frame_size=${64} | phy_cores=${0}
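#
# With "Test Template | Local Template" set in Settings, Robot Framework
# expands the data row above into an ordinary keyword call, roughly:
#
#   | | Local Template | frame_size=${64} | phy_cores=${0}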