/*
 * NOTE(review): this chunk has been mangled by extraction: original newlines
 * were collapsed onto a few physical lines, every #include directive has lost
 * its header-name argument, and the text is cut off mid-expression inside
 * dpdk_hqos_validate_mask (after "__builtin_popcount (n -").  The code below
 * is preserved byte-for-byte; it defines the default HQoS configuration
 * (device params, subport params, pipe params) for what appears to be the
 * fd.io VPP DPDK plugin -- restore the formatting and the missing #include
 * arguments from the upstream source before attempting to compile.
 */
/* * Copyright(c) 2016 Intel Corporation. All rights reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* enumerate all vlib messages */ #define vl_typedefs /* define message structures */ #include #undef vl_typedefs /* instantiate all the print functions we know about */ #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) #define vl_printfun #include #undef vl_printfun #include /*** * * HQoS default configuration values * ***/ static dpdk_device_config_hqos_t hqos_params_default = { .hqos_thread_valid = 0, .swq_size = 4096, .burst_enq = 256, .burst_deq = 220, /* * Packet field to identify the subport. * * Default value: Since only one subport is defined by default (see below: * n_subports_per_port = 1), the subport ID is hardcoded to 0. */ .pktfield0_slabpos = 0, .pktfield0_slabmask = 0, /* * Packet field to identify the pipe. * * Default value: Assuming Ethernet/IPv4/UDP packets, UDP payload bits 12 .. 23 */ .pktfield1_slabpos = 40, .pktfield1_slabmask = 0x0000000FFF000000LLU, /* Packet field used as index into TC translation table to identify the traffic * class and queue. 
* * Default value: Assuming Ethernet/IPv4 packets, IPv4 DSCP field */ .pktfield2_slabpos = 8, .pktfield2_slabmask = 0x00000000000000FCLLU, .tc_table = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }, /* port */ .port = { .name = NULL, /* Set at init */ .socket = 0, /* Set at init */ .rate = 1250000000, /* Assuming 10GbE port */ .mtu = 14 + 1500, /* Assuming Ethernet/IPv4 pkt (Ethernet FCS not included) */ .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT, .n_subports_per_port = 1, .n_pipes_per_subport = 4096, .qsize = {64, 64, 64, 64}, .pipe_profiles = NULL, /* Set at config */ .n_pipe_profiles = 1, #ifdef RTE_SCHED_RED .red_params = { /* Traffic Class 0 Colors Green / Yellow / Red */ [0][0] = {.min_th = 48,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [0][1] = {.min_th = 40,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [0][2] = {.min_th = 32,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, /* Traffic Class 1 - Colors Green / Yellow / Red */ [1][0] = {.min_th = 48,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [1][1] = {.min_th = 40,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [1][2] = {.min_th = 32,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, /* Traffic Class 2 - Colors Green / Yellow / Red */ [2][0] = {.min_th = 48,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [2][1] = {.min_th = 40,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [2][2] = {.min_th = 32,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, /* Traffic Class 3 - Colors Green / Yellow / Red */ [3][0] = {.min_th = 48,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [3][1] = {.min_th = 40,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9}, [3][2] = {.min_th = 32,.max_th = 64,.maxp_inv = 10,.wq_log2 = 9} }, #endif /* RTE_SCHED_RED */ }, }; static struct rte_sched_subport_params hqos_subport_params_default = { .tb_rate = 1250000000, /* 10GbE line rate (measured in bytes/second) */ 
.tb_size = 1000000, .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000}, .tc_period = 10, }; static struct rte_sched_pipe_params hqos_pipe_params_default = { .tb_rate = 305175, /* 10GbE line rate divided by 4K pipes */ .tb_size = 1000000, .tc_rate = {305175, 305175, 305175, 305175}, .tc_period = 40, #ifdef RTE_SCHED_SUBPORT_TC_OV .tc_ov_weight = 1, #endif .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, }; /*** * * HQoS configuration * ***/ int dpdk_hqos_validate_mask (u64 mask, u32 n) { int count = __builtin_popcountll (mask); int pos_lead = sizeof (u64) * 8 - count_leading_zeros (mask); int pos_trail = count_trailing_zeros (mask); int count_expected = __builtin_popcount (n - /* NOTE(review): source truncated mid-expression here; the remainder of dpdk_hqos_validate_mask is missing from this chunk */
from scapy.fields import BitField, XByteField, X3BytesField
from scapy.packet import Packet, bind_layers
from scapy.layers.l2 import Ether
from scapy.layers.inet import UDP


class VXLAN(Packet):
    """Scapy layer for the 8-byte VXLAN encapsulation header (RFC 7348):
    32 flag bits (default 0x08000000, i.e. only the I-flag set), a
    24-bit VXLAN Network Identifier, and one trailing reserved byte."""

    name = "VXLAN"

    fields_desc = [
        BitField("flags", 0x08000000, 32),
        X3BytesField("vni", 0),
        XByteField("reserved", 0x00),
    ]

    def mysummary(self):
        # Compact one-line rendering showing only the network identifier.
        fmt = "VXLAN (vni=%VXLAN.vni%)"
        return self.sprintf(fmt)

# Dissect VXLAN on its IANA-assigned UDP destination port (4789), and parse
# the encapsulated payload as an inner Ethernet frame.
bind_layers(UDP, VXLAN, dport=4789)
bind_layers(VXLAN, Ether)
, slab_shr) \ ({ \ u64 slab = *((u64 *) &byte_array[slab_pos]); \ u64 val = (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \ val; \ }) #define RTE_SCHED_PORT_HIERARCHY(subport, pipe, traffic_class, queue, color) \ ((((u64) (queue)) & 0x3) | \ ((((u64) (traffic_class)) & 0x3) << 2) | \ ((((u64) (color)) & 0x3) << 4) | \ ((((u64) (subport)) & 0xFFFF) << 16) | \ ((((u64) (pipe)) & 0xFFFFFFFF) << 32)) void dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos, struct rte_mbuf **pkts, u32 n_pkts) { u32 i; for (i = 0; i < (n_pkts & (~0x3)); i += 4) { struct rte_mbuf *pkt0 = pkts[i]; struct rte_mbuf *pkt1 = pkts[i + 1]; struct rte_mbuf *pkt2 = pkts[i + 2]; struct rte_mbuf *pkt3 = pkts[i + 3]; u8 *pkt0_data = rte_pktmbuf_mtod (pkt0, u8 *); u8 *pkt1_data = rte_pktmbuf_mtod (pkt1, u8 *); u8 *pkt2_data = rte_pktmbuf_mtod (pkt2, u8 *); u8 *pkt3_data = rte_pktmbuf_mtod (pkt3, u8 *); u64 pkt0_subport = BITFIELD (pkt0_data, hqos->hqos_field0_slabpos, hqos->hqos_field0_slabmask, hqos->hqos_field0_slabshr); u64 pkt0_pipe = BITFIELD (pkt0_data, hqos->hqos_field1_slabpos, hqos->hqos_field1_slabmask, hqos->hqos_field1_slabshr); u64 pkt0_dscp = BITFIELD (pkt0_data, hqos->hqos_field2_slabpos, hqos->hqos_field2_slabmask, hqos->hqos_field2_slabshr); u32 pkt0_tc = hqos->hqos_tc_table[pkt0_dscp & 0x3F] >> 2; u32 pkt0_tc_q = hqos->hqos_tc_table[pkt0_dscp & 0x3F] & 0x3; u64 pkt1_subport = BITFIELD (pkt1_data, hqos->hqos_field0_slabpos, hqos->hqos_field0_slabmask, hqos->hqos_field0_slabshr); u64 pkt1_pipe = BITFIELD (pkt1_data, hqos->hqos_field1_slabpos, hqos->hqos_field1_slabmask, hqos->hqos_field1_slabshr); u64 pkt1_dscp = BITFIELD (pkt1_data, hqos->hqos_field2_slabpos, hqos->hqos_field2_slabmask, hqos->hqos_field2_slabshr); u32 pkt1_tc = hqos->hqos_tc_table[pkt1_dscp & 0x3F] >> 2; u32 pkt1_tc_q = hqos->hqos_tc_table[pkt1_dscp & 0x3F] & 0x3; u64 pkt2_subport = BITFIELD (pkt2_data, hqos->hqos_field0_slabpos, hqos->hqos_field0_slabmask, hqos->hqos_field0_slabshr); u64 
pkt2_pipe = BITFIELD (pkt2_data, hqos->hqos_field1_slabpos, hqos->hqos_field1_slabmask, hqos->hqos_field1_slabshr); u64 pkt2_dscp = BITFIELD (pkt2_data, hqos->hqos_field2_slabpos, hqos->hqos_field2_slabmask, hqos->hqos_field2_slabshr); u32 pkt2_tc = hqos->hqos_tc_table[pkt2_dscp & 0x3F] >> 2; u32 pkt2_tc_q = hqos->hqos_tc_table[pkt2_dscp & 0x3F] & 0x3; u64 pkt3_subport = BITFIELD (pkt3_data, hqos->hqos_field0_slabpos, hqos->hqos_field0_slabmask, hqos->hqos_field0_slabshr); u64 pkt3_pipe = BITFIELD (pkt3_data, hqos->hqos_field1_slabpos, hqos->hqos_field1_slabmask, hqos->hqos_field1_slabshr); u64 pkt3_dscp = BITFIELD (pkt3_data, hqos->hqos_field2_slabpos, hqos->hqos_field2_slabmask, hqos->hqos_field2_slabshr); u32 pkt3_tc = hqos->hqos_tc_table[pkt3_dscp & 0x3F] >> 2; u32 pkt3_tc_q = hqos->hqos_tc_table[pkt3_dscp & 0x3F] & 0x3; u64 pkt0_sched = RTE_SCHED_PORT_HIERARCHY (pkt0_subport, pkt0_pipe, pkt0_tc, pkt0_tc_q, 0); u64 pkt1_sched = RTE_SCHED_PORT_HIERARCHY (pkt1_subport, pkt1_pipe, pkt1_tc, pkt1_tc_q, 0); u64 pkt2_sched = RTE_SCHED_PORT_HIERARCHY (pkt2_subport, pkt2_pipe, pkt2_tc, pkt2_tc_q, 0); u64 pkt3_sched = RTE_SCHED_PORT_HIERARCHY (pkt3_subport, pkt3_pipe, pkt3_tc, pkt3_tc_q, 0); pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF; pkt0->hash.sched.hi = pkt0_sched >> 32; pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF; pkt1->hash.sched.hi = pkt1_sched >> 32; pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF; pkt2->hash.sched.hi = pkt2_sched >> 32; pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF; pkt3->hash.sched.hi = pkt3_sched >> 32; } for (; i < n_pkts; i++) { struct rte_mbuf *pkt = pkts[i]; u8 *pkt_data = rte_pktmbuf_mtod (pkt, u8 *); u64 pkt_subport = BITFIELD (pkt_data, hqos->hqos_field0_slabpos, hqos->hqos_field0_slabmask, hqos->hqos_field0_slabshr); u64 pkt_pipe = BITFIELD (pkt_data, hqos->hqos_field1_slabpos, hqos->hqos_field1_slabmask, hqos->hqos_field1_slabshr); u64 pkt_dscp = BITFIELD (pkt_data, hqos->hqos_field2_slabpos, hqos->hqos_field2_slabmask, 
hqos->hqos_field2_slabshr); u32 pkt_tc = hqos->hqos_tc_table[pkt_dscp & 0x3F] >> 2; u32 pkt_tc_q = hqos->hqos_tc_table[pkt_dscp & 0x3F] & 0x3; u64 pkt_sched = RTE_SCHED_PORT_HIERARCHY (pkt_subport, pkt_pipe, pkt_tc, pkt_tc_q, 0); pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF; pkt->hash.sched.hi = pkt_sched >> 32; } } /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */