/*
* Copyright (c) 2018 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __included_vxlan_gbp_packet_h__
#define __included_vxlan_gbp_packet_h__ 1
#include <vlib/vlib.h>
/*
* From draft-smith-vxlan-group-policy-04.txt
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |G|R|R|R|I|R|R|R|R|D|E|S|A|R|R|R| Group Policy ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VXLAN Network Identifier (VNI) | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* G bit: Bit 0 of the initial word is defined as the G (Group Based
* Policy Extension) bit.
*
* I bit: Bit 4 of the initial word is defined as the I (VNI valid)
* bit. The I flag MUST be set to 1 for a valid VXLAN Network ID (VNI).
*
* D bit: Bit 9 of the initial word is defined as the Don't Learn bit.
* When set, this bit indicates that the egress VTEP MUST NOT learn the
* source address of the encapsulated frame.
*
* E bit: Bit 10 of the initial word is defined as the bounce packet
* (E) bit. When set, this bit indicates that the packet has been
* bounced and MUST be dropped.
*
* S bit: Bit 11 of the initial word is defined as the source policy
* applied bit.
*
* A bit: Bit 12 of the initial word is defined as the A (Policy
* Applied) bit. This bit is only defined as the A bit when the G bit
* is set to 1.
*
* A = 1 indicates that the group policy has already been applied to
* this packet. Policies MUST NOT be applied by devices when the A
* bit is set.
*
* A = 0 indicates that the group policy has not been applied to this
* packet. Group policies MUST be applied by devices when the A bit
* is set to 0 and the destination Group has been determined.
* Devices that apply the Group policy MUST set the A bit to 1 after
* the policy has been applied.
*
* Group Policy ID: 16-bit identifier that indicates the source TSI
* Group membership being encapsulated by VXLAN. Its value is the
* source class id (sclass).
*
* FOR INTERNAL USE ONLY
* R bit: Bit 12 of the initial word is defined as the reflection bit.
* Set on packet rx, checked on tx and dropped if set; this prevents
* packets received on an iVXLAN tunnel from being reflected back to
* another.
*/
typedef struct
{
union
{
struct
{
union
{
struct
{
u8 flag_g_i;
u8 gpflags;
};
u16 flags;
};
u16 sclass;
};
u32 flags_sclass_as_u32;
};
u32 vni_reserved;
} vxlan_gbp_header_t;
#define foreach_vxlan_gbp_flags \
_ (0x80, G) \
_ (0x08, I)
typedef enum
{
VXLAN_GBP_FLAGS_NONE = 0,
#define _(n,f) VXLAN_GBP_FLAGS_##f = n,
foreach_vxlan_gbp_flags
#undef _
} vxlan_gbp_flags_t;
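/*
 * Illustrative sketch, not part of the original header: one plausible
 * way to populate the header on encap, mirroring the layout described
 * above. Assumes the vppinfra byte-order helpers
 * (clib_host_to_net_u16/u32); the VNI occupies the upper 24 bits of
 * the second word.
 */
static inline void
vxlan_gbp_encap_sketch (vxlan_gbp_header_t * h, u16 sclass, u32 vni)
{
  /* G and I must both be set for a GBP-tagged packet with a valid VNI */
  h->flag_g_i = VXLAN_GBP_FLAGS_G | VXLAN_GBP_FLAGS_I;
  h->gpflags = 0;		/* D, E, S and A all clear */
  h->sclass = clib_host_to_net_u16 (sclass);
  h->vni_reserved = clib_host_to_net_u32 (vni << 8);
}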
#endif /* __included_vxlan_gbp_packet_h__ */

/*
* Copyright (c) 2015 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>
#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls/packet.h>
#include <vnet/handoff.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <dpdk/device/dpdk_priv.h>
static char *dpdk_error_strings[] = {
#define _(n,s) s,
foreach_dpdk_error
#undef _
};
/* make sure all flags we need are stored in lower 8 bits */
STATIC_ASSERT ((PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
256, "dpdk flags not un lower byte, fix needed");
static_always_inline uword
dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
struct rte_mbuf *mb, vlib_buffer_t * bt)
{
u8 nb_seg = 1;
struct rte_mbuf *mb_seg = 0;
vlib_buffer_t *b_seg, *b_chain = 0;
mb_seg = mb->next;
b_chain = b;
if (mb->nb_segs < 2)
return 0;
b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
b->total_length_not_including_first_buffer = 0;
while (nb_seg < mb->nb_segs)
{
ASSERT (mb_seg != 0);
b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
vlib_buffer_copy_template (b_seg, bt);
/*
* The driver (e.g. virtio) may not put the packet data at the start
* of the segment, so don't assume b_seg->current_data == 0 is correct.
*/
b_seg->current_data =
(mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
b_seg->current_length = mb_seg->data_len;
b->total_length_not_including_first_buffer += mb_seg->data_len;
b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
b_chain = b_seg;
mb_seg = mb_seg->next;
nb_seg++;
}
return b->total_length_not_including_first_buffer;
}
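/*
 * Illustrative sketch, not part of the original file: a downstream
 * consumer walks the chain built above by following next_buffer while
 * VLIB_BUFFER_NEXT_PRESENT is set; vlib_buffer_length_in_chain() does
 * effectively this.
 */
static_always_inline uword
dpdk_chain_bytes_sketch (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword n_bytes = b->current_length;
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      n_bytes += b->current_length;
    }
  return n_bytes;
}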
static_always_inline void
dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
{
CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
}
static_always_inline void
dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
{
vlib_buffer_t *b;
b = vlib_buffer_from_rte_mbuf (mb[0]);
CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
b = vlib_buffer_from_rte_mbuf (mb[1]);
CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
b = vlib_buffer_from_rte_mbuf (mb[2]);
CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
b = vlib_buffer_from_rte_mbuf (mb[3]);
CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}
/** \brief Main DPDK input node
@node dpdk-input
This is the main DPDK input node: across each assigned interface,
call rte_eth_rx_burst(...) or similar to obtain a vector of
packets to process. Derive @c vlib_buffer_t metadata from
<code>struct rte_mbuf</code> metadata,
Depending on the resulting metadata: adjust <code>b->current_data,
b->current_length </code> and dispatch directly to
ip4-input-no-checksum, or ip6-input. Trace the packet if required.
@param vm vlib_main_t corresponding to the current thread
@param node vlib_node_runtime_t
@param f vlib_frame_t input-node, not used.
@par Graph mechanics: buffer metadata, next index usage
@em Uses:
- <code>struct rte_mbuf mb->ol_flags</code>
- PKT_RX_IP_CKSUM_BAD
@em Sets:
- <code>b->error</code> if the packet is to be dropped immediately
- <code>b->current_data, b->current_length</code>
- adjusted as needed to skip the L2 header in direct-dispatch cases
- <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
- rx interface sw_if_index
- <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
- required by ipX-lookup
- <code>b->flags</code>
- to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.
<em>Next Nodes:</em>
- Static arcs to: error-drop, ethernet-input,
ip4-input-no-checksum, ip6-input, mpls-input
- per-interface redirection, controlled by
<code>xd->per_interface_next_index</code>
*/
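/*
 * Illustrative sketch, not part of the original file: the
 * per-interface redirection mentioned above is typically armed from a
 * control-plane path via vnet_hw_interface_rx_redirect_to_node(),
 * which asks the device class to steer all rx packets from one
 * interface to a single next node; passing ~0 restores the default
 * arcs. The "error-drop" target below is only an example.
 */
static inline void
dpdk_redirect_rx_sketch (vlib_main_t * vm, u32 hw_if_index)
{
  vnet_hw_interface_rx_redirect_to_node
    (vnet_get_main (), hw_if_index,
     vlib_get_node_by_name (vm, (u8 *) "error-drop")->index);
}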
static_always_inline u16
dpdk_ol_flags_extract (struct rte_mbuf **mb, u16 * flags, int count)
{
u16 rv = 0;
int i;
for (i = 0; i < count; i++)
{
/* all flags we are interested in are in the lower 8 bits, but
that might change */
flags[i] = (u16) mb[i]->ol_flags;
rv |= flags[i];
}
return rv;
}
static_always_inline uword
dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
uword n_rx_packets, int maybe_multiseg,
u16 * or_flagsp)
{
u32 n_left = n_rx_packets;
vlib_buffer_t *b[4];
struct rte_mbuf **mb = ptd->mbufs;
uword n_bytes = 0;
u16 *flags, or_flags = 0;
vlib_buffer_t bt;
mb = ptd->mbufs;
flags = ptd->flags;
/* copy template into local variable - saves a per-packet load */
vlib_buffer_copy_template (&bt, &ptd->buffer_template);
while (n_left >= 8)
{
dpdk_prefetch_buffer_x4 (mb + 4);
b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
vlib_buffer_copy_template (b[0], &bt);
vlib_buffer_copy_template (b[1], &bt);
vlib_buffer_copy_template (b[2], &bt);
vlib_buffer_copy_template (b[3], &bt);
dpdk_prefetch_mbuf_x4 (mb + 4);
or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
flags += 4;
b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
n_bytes += b[0]->current_length = mb[0]->data_len;
b[1]->current_data = mb[1]->data_off - RTE_PKTMBUF_HEADROOM;
n_bytes += b[1]->current_length = mb[1]->data_len;
b[2]->current_data = mb[2]->data_off - RTE_PKTMBUF_HEADROOM;
n_bytes += b[2]->current_length = mb[2]->data_len;
b[3]->current_data = mb[3]->data_off - RTE_PKTMBUF_HEADROOM;
n_bytes += b[3]->current_length = mb[3]->data_len;
if (maybe_multiseg)
{
n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);
n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], &bt);
n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], &bt);
n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], &bt);
}
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);
/* next */
mb += 4;
n_left -= 4;
}
while (n_left)
{
b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
vlib_buffer_copy_template (b[0], &bt);
or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
flags += 1;
b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
n_bytes += b[0]->current_length = mb[0]->data_len;
if (maybe_multiseg)
n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
/* next */
mb += 1;
n_left -= 1;
}
*or_flagsp = or_flags;
return n_bytes;
}
static_always_inline void
dpdk_process_flow_offload (dpdk_device_t * xd, dpdk_per_thread_data_t * ptd,
uword n_rx_packets)
{
uword n;
dpdk_flow_lookup_entry_t *fle;
vlib_buffer_t *b0;
/* TODO prefetch and quad-loop */
for (n = 0; n < n_rx_packets; n++)
{
if ((ptd->flags[n] & PKT_RX_FDIR_ID) == 0)
continue;
fle = pool_elt_at_index (xd->flow_lookup_entries,
ptd->mbufs[n]->hash.fdir.hi);
if (fle->next_index != (u16) ~ 0)
ptd->next[n] = fle->next_index;
if (fle->flow_id != ~0)
{
b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
b0->flow_id = fle->flow_id;
}
if (fle->buffer_advance != ~0)
{
b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
vlib_buffer_advance (b0, fle->buffer_advance);
}
}
}
static_always_inline u32
dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
{
uword n_rx_packets = 0, n_rx_bytes;
u32 n_left, n_trace;
u32 *buffers;
u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
struct rte_mbuf **mb;
vlib_buffer_t *b0;
u16 *next;
u16 or_flags;
u32 n;
int single_next = 0;
dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
thread_index);
vlib_buffer_t *bt = &ptd->buffer_template;
if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
return 0;
/* get up to DPDK_RX_BURST_SZ buffers from PMD */
while (n_rx_packets < DPDK_RX_BURST_SZ)
{
n = rte_eth_rx_burst (xd->port_id, queue_id,
ptd->mbufs + n_rx_packets,
DPDK_RX_BURST_SZ - n_rx_packets);
n_rx_packets += n;
if (n < 32)
break;
}
if (n_rx_packets == 0)
return 0;
/* Update buffer template */
vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
bt->error = node->errors[DPDK_ERROR_NONE];
/* DPDK allocates empty buffers for each queue from the mempool
provided before interface start, so it is safe to store this in
the template */
bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];
bt->ref_count = 1;
vnet_buffer (bt)->feature_arc_index = 0;
bt->current_config_index = 0;
/* honour per-interface redirection of the next node, if configured */
if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
next_index = xd->per_interface_next_index;
/* as all packets belong to the same interface, the feature arc lookup
can be done once and the result stored in the buffer template */
if (PREDICT_FALSE (vnet_device_input_have_features (xd->sw_if_index)))
vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
else
n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);
if (PREDICT_FALSE (or_flags & PKT_RX_FDIR))
{
/* some packets will need to go to different next nodes */
for (n = 0; n < n_rx_packets; n++)
ptd->next[n] = next_index;
/* flow offload - process if rx flow offload enabled and at least one
packet is marked */
if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
(or_flags & PKT_RX_FDIR)))
dpdk_process_flow_offload (xd, ptd, n_rx_packets);
/* enqueue buffers to the next node */
vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
ptd->buffers, n_rx_packets,
sizeof (struct rte_mbuf));
vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
n_rx_packets);
}
else
{
u32 *to_next, n_left_to_next;
vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, to_next,
n_rx_packets,
sizeof (struct rte_mbuf));
if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
{
vlib_next_frame_t *nf;
vlib_frame_t *f;
ethernet_input_frame_t *ef;
nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
f = vlib_get_frame (vm, nf->frame);
f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
ef = vlib_frame_scalar_args (f);
ef->sw_if_index = xd->sw_if_index;
ef->hw_if_index = xd->hw_if_index;
/* if the PMD supports the ip4 checksum check and no packets are
marked as ip4 checksum bad, we can notify ethernet-input so it
can send packets to the ip4-input-no-checksum node */
if (xd->flags & DPDK_DEVICE_FLAG_RX_IP4_CKSUM &&
(or_flags & PKT_RX_IP_CKSUM_BAD) == 0)
f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
vlib_frame_no_append (f);
}
n_left_to_next -= n_rx_packets;
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
single_next = 1;
}
/* packet trace if enabled */
if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
{
if (single_next)
vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
ptd->buffers, n_rx_packets,
sizeof (struct rte_mbuf));
n_left = n_rx_packets;
buffers = ptd->buffers;
mb = ptd->mbufs;
next = ptd->next;
while (n_trace && n_left)
{
b0 = vlib_get_buffer (vm, buffers[0]);
if (single_next == 0)
next_index = next[0];
vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);
dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
t0->queue_index = queue_id;
t0->device_index = xd->device_index;
t0->buffer_index = vlib_get_buffer_index (vm, b0);
clib_memcpy_fast (&t0->mb, mb[0], sizeof t0->mb);
clib_memcpy_fast (&t0->buffer, b0,
sizeof b0[0] - sizeof b0->pre_data);
clib_memcpy_fast (t0->buffer.pre_data, b0->data,
sizeof t0->buffer.pre_data);
clib_memcpy_fast (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
sizeof t0->data);
n_trace--;
n_left--;
buffers++;
mb++;
next++;
}
vlib_set_trace_count (vm, node, n_trace);
}
vlib_increment_combined_counter
(vnet_get_main ()->interface_main.combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
n_rx_packets, n_rx_bytes);
vnet_device_increment_rx_packets (thread_index, n_rx_packets);
return n_rx_packets;
}
VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * f)
{
dpdk_main_t *dm = &dpdk_main;
dpdk_device_t *xd;
uword n_rx_packets = 0;
vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
vnet_device_and_queue_t *dq;
u32 thread_index = node->thread_index;
/*
* Poll all devices on this cpu for input/interrupts.
*/
/* *INDENT-OFF* */
foreach_device_and_queue (dq, rt->devices_and_queues)
{
xd = vec_elt_at_index(dm->devices, dq->dev_instance);
n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
dq->queue_id);
}
/* *INDENT-ON* */
return n_rx_packets;
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_input_node) = {
.type = VLIB_NODE_TYPE_INPUT,
.name = "dpdk-input",
.sibling_of = "device-input",
/* Will be enabled if/when hardware is detected. */
.state = VLIB_NODE_STATE_DISABLED,
.format_buffer = format_ethernet_header_with_length,
.format_trace = format_dpdk_rx_trace,
.n_errors = DPDK_N_ERROR,
.error_strings = dpdk_error_strings,
};
/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/