# app/test/Makefile
#   BSD LICENSE
#
#   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
#   All rights reserved.
#
#   Redistribution and use in source and binary forms, with or without
#   modification, are permitted provided that the following conditions
#   are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in
#       the documentation and/or other materials provided with the
#       distribution.
#     * Neither the name of Intel Corporation nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

include $(RTE_SDK)/mk/rte.vars.mk

ifeq ($(CONFIG_RTE_APP_TEST),y)

# default rule
all:

# Define an externally linked resource. A linked resource is an arbitrary
# file that is linked into the test binary. The application refers to the
# resource by name; the link step generates the symbols beg_<name>,
# end_<name> and siz_<name> for referencing from C code (see the
# illustrative example after the define below).
#
# Parameters: <unique name>, <file to be linked>
define linked_resource
SRCS-y += $(1).res.o
$(1).res.o: $(2)
	@  echo '  MKRES $$@'
	$Q [ "$$(<D)" = . ] || ln -fs $$<
	$Q $(OBJCOPY) -I binary -B $(RTE_OBJCOPY_ARCH) -O $(RTE_OBJCOPY_TARGET) \
		--rename-section                                         \
			.data=.rodata,alloc,load,data,contents,readonly  \
		--redefine-sym _binary_$$(subst .,_,$$(<F))_start=beg_$(1) \
		--redefine-sym _binary_$$(subst .,_,$$(<F))_end=end_$(1)   \
		--redefine-sym _binary_$$(subst .,_,$$(<F))_size=siz_$(1)  \
		$$(<F) $$@
endef
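
# Illustrative use of a linked resource (hypothetical name "foo"): after
# $(eval $(call linked_resource,foo,foo.bin)), the embedded data could be
# accessed from C roughly as:
#
#   extern const char beg_foo[], end_foo[];
#   size_t foo_len = (size_t) (end_foo - beg_foo);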

ifeq ($(CONFIG_RTE_APP_TEST_RESOURCE_TAR),y)
define linked_tar_resource
$(1).tar: $(2)
	@  echo '  TAR $$@'
	$Q tar -C $$(dir $$<) -cf $$@ $$(notdir $$<)
$(call linked_resource,$(1),$(1).tar)
endef
else # ! CONFIG_RTE_APP_TEST_RESOURCE_TAR
linked_tar_resource =
endif # CONFIG_RTE_APP_TEST_RESOURCE_TAR

#
# application name
#
APP = test

#
# all sources are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) := commands.c
SRCS-y += test.c
SRCS-y += resource.c
SRCS-y += test_resource.c
test_resource.res: test_resource.c
	@ cp $< $@
$(eval $(call linked_resource,test_resource_c,test_resource.res))
$(eval $(call linked_tar_resource,test_resource_tar,test_resource.c))
SRCS-$(CONFIG_RTE_APP_TEST_RESOURCE_TAR) += test_pci.c
$(eval $(call linked_tar_resource,test_pci_sysfs,test_pci_sysfs))
SRCS-y += test_prefetch.c
SRCS-y += test_byteorder.c
SRCS-y += test_per_lcore.c
SRCS-y += test_atomic.c
SRCS-y += test_malloc.c
SRCS-y += test_cycles.c
SRCS-y += test_spinlock.c
SRCS-y += test_memory.c
SRCS-y += test_memzone.c

SRCS-y += test_ring.c
SRCS-y += test_ring_perf.c
SRCS-y += test_pmd_perf.c

ifeq ($(CONFIG_RTE_LIBRTE_TABLE),y)
SRCS-y += test_table.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test_table_pipeline.c
SRCS-y += test_table_tables.c
SRCS-y += test_table_ports.c
SRCS-y += test_table_combined.c
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += test_table_acl.c
endif

SRCS-y += test_rwlock.c

SRCS-$(CONFIG_RTE_LIBRTE_TIMER) += test_timer.c
SRCS-$(CONFIG_RTE_LIBRTE_TIMER) += test_timer_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_TIMER) += test_timer_racecond.c

SRCS-y += test_mempool.c
SRCS-y += test_mempool_perf.c

SRCS-y += test_mbuf.c
SRCS-y += test_logs.c

SRCS-y += test_memcpy.c
SRCS-y += test_memcpy_perf.c

SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_thash.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_functions.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_scaling.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_multiwriter.c

SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6_perf.c

SRCS-y += test_debug.c
SRCS-y += test_errno.c
SRCS-y += test_tailq.c
SRCS-y += test_string_fns.c
SRCS-y += test_cpuflags.c
SRCS-y += test_mp_secondary.c
SRCS-y += test_eal_flags.c
SRCS-y += test_eal_fs.c
SRCS-y += test_alarm.c
SRCS-y += test_interrupts.c
SRCS-y += test_version.c
SRCS-y += test_func_reentrancy.c

SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_num.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_etheraddr.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_portlist.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_ipaddr.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_cirbuf.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_string.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_lib.c

ifeq ($(CONFIG_RTE_LIBRTE_SCHED),y)
SRCS-y += test_red.c
SRCS-y += test_sched.c
endif

SRCS-$(CONFIG_RTE_LIBRTE_METER) += test_meter.c
SRCS-$(CONFIG_RTE_LIBRTE_KNI) += test_kni.c
SRCS-$(CONFIG_RTE_LIBRTE_POWER) += test_power.c test_power_acpi_cpufreq.c
SRCS-$(CONFIG_RTE_LIBRTE_POWER) += test_power_kvm_vm.c
SRCS-y += test_common.c
SRCS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += test_ivshmem.c

SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += test_distributor.c
SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += test_distributor_perf.c

SRCS-$(CONFIG_RTE_LIBRTE_REORDER) += test_reorder.c

SRCS-y += test_devargs.c
SRCS-y += virtual_pmd.c
SRCS-y += packet_burst_generator.c
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += test_acl.c

ifeq ($(CONFIG_RTE_LIBRTE_PMD_RING),y)
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += test_link_bonding.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += test_link_bonding_mode4.c
endif

ifeq ($(CONFIG_RTE_LIBRTE_PMD_NULL),y)
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += test_link_bonding_rssconf.c
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
LDLIBS += -lrte_pmd_null
endif
endif

SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring_perf.c

SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_aes.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c

SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) += test_kvargs.c

CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)

CFLAGS += -D_GNU_SOURCE

LDLIBS += -lm

# Disable VTA for memcpy test
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS_test_memcpy.o += -fno-var-tracking-assignments
CFLAGS_test_memcpy_perf.o += -fno-var-tracking-assignments
endif
endif

# this application needs the libraries and drivers to be built first
DEPDIRS-y += lib drivers

# Link against shared libraries when needed
ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
ifneq ($(CONFIG_RTE_LIBRTE_PMD_RING),y)
$(error Link bonding tests require CONFIG_RTE_LIBRTE_PMD_RING=y)
else
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
LDLIBS += -lrte_pmd_ring
endif
endif
endif

ifeq ($(CONFIG_RTE_APP_TEST_RESOURCE_TAR),y)
LDLIBS += -larchive
endif

include $(RTE_SDK)/mk/rte.app.mk

endif
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vppinfra/mem.h>
#include "avf_advanced_flow.h"

#define AVF_FDIR_IPV6_TC_OFFSET	  20
#define AVF_IPV6_TC_MASK	  (0xFF << AVF_FDIR_IPV6_TC_OFFSET)
#define AVF_FDIR_MAX_QREGION_SIZE 128

/*
 * Return the 1-based position of the most-significant bit set in x
 * ("find last set"); return 0 when x is 0.
 */
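/* Illustrative values for the intended semantics (assuming the leading-zero
 * count is taken over a 32-bit operand): fls_u32 (0x1) == 1,
 * fls_u32 (0x80000000) == 32, fls_u32 (0) == 0. */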
static inline int
fls_u32 (u32 x)
{
  return (x == 0) ? 0 : 32 - count_leading_zeros (x);
}

static inline int
ether_addr_is_zero (const struct avf_ether_addr *ea)
{
  const u16 *w = (const u16 *) ea;

  return (w[0] | w[1] | w[2]) == 0;
}

int
avf_fdir_rcfg_create (struct avf_fdir_conf **rcfg, int tunnel_level, u16 vsi,
		      u16 nrxq)
{
  (*rcfg) = clib_mem_alloc (sizeof (**rcfg));
  if ((*rcfg) == NULL)
    {
      return -1;
    }

  clib_memset (*rcfg, 0, sizeof (**rcfg));

  (*rcfg)->add_fltr.rule_cfg.proto_hdrs.tunnel_level = tunnel_level;
  (*rcfg)->vsi = vsi;
  (*rcfg)->nb_rx_queues = nrxq;

  return 0;
}

int
avf_fdir_rcfg_destroy (struct avf_fdir_conf *rcfg)
{
  clib_mem_free (rcfg);

  return 0;
}

int
avf_fdir_rcfg_set_hdr (struct avf_fdir_conf *rcfg, int layer,
		       enum virtchnl_proto_hdr_type hdr)
{
  struct virtchnl_proto_hdrs *hdrs;

  hdrs = &rcfg->add_fltr.rule_cfg.proto_hdrs;
  if (layer >= VIRTCHNL_MAX_NUM_PROTO_HDRS)
    return -1;

  hdrs->proto_hdr[layer].type = hdr;

  return 0;
}

int
avf_fdir_rcfg_set_field (struct avf_fdir_conf *rcfg, int layer,
			 struct avf_flow_item *item,
			 struct avf_flow_error *error)
{
  const struct avf_ipv4_hdr *ipv4_spec, *ipv4_mask;
  const struct avf_ipv6_hdr *ipv6_spec, *ipv6_mask;
  const struct avf_udp_hdr *udp_spec, *udp_mask;
  const struct avf_tcp_hdr *tcp_spec, *tcp_mask;
  const struct avf_sctp_hdr *sctp_spec, *sctp_mask;
  const struct avf_gtp_hdr *gtp_spec, *gtp_mask;
  const struct avf_gtp_psc_hdr *gtp_psc_spec, *gtp_psc_mask;
  const struct avf_l2tpv3oip_hdr *l2tpv3oip_spec, *l2tpv3oip_mask;
  const struct avf_esp_hdr *esp_spec, *esp_mask;
  const struct avf_ah_hdr *ah_spec, *ah_mask;
  const struct avf_pfcp_hdr *pfcp_spec, *pfcp_mask;
  const struct avf_flow_eth_hdr *eth_spec, *eth_mask;

  struct virtchnl_proto_hdr *hdr;
  enum virtchnl_proto_hdr_type type;
  u16 ether_type;
  int ret = 0;

  u8 ipv6_addr_mask[16] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

  hdr = &rcfg->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
  type = item->type;

  switch (type)
    {
    case VIRTCHNL_PROTO_HDR_ETH:
      eth_spec = item->spec;
      eth_mask = item->mask;

      hdr->type = VIRTCHNL_PROTO_HDR_ETH;

      if (eth_spec && eth_mask)
	{
	  if (!ether_addr_is_zero (&eth_mask->src) ||
	      !ether_addr_is_zero (&eth_mask->dst))
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid MAC_addr mask.");
	      return ret;
	    }

	  if (eth_mask->type)
	    {
	      if (eth_mask->type != 0xffff)
		{
		  ret = avf_flow_error_set (error, AVF_FAILURE,
					    AVF_FLOW_ERROR_TYPE_ITEM, item,
					    "Invalid type mask.");
		  return ret;
		}
	    }
	}

      if (eth_spec && eth_mask && eth_mask->type)
	{
	  ether_type = clib_net_to_host_u16 (eth_spec->type);
	  if (ether_type == AVF_ETHER_TYPE_IPV4 ||
	      ether_type == AVF_ETHER_TYPE_IPV6)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Unsupported ether_type.");
	      return ret;
	    }

	  rcfg->input_set |= AVF_INSET_ETHERTYPE;
	  VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ETH, ETHERTYPE);

	  clib_memcpy (hdr->buffer, eth_spec, sizeof (*eth_spec));
	}
      break;

    case VIRTCHNL_PROTO_HDR_IPV4:
      ipv4_spec = item->spec;
      ipv4_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_IPV4;

      if (ipv4_spec && ipv4_mask)
	{
	  if (ipv4_mask->version_ihl || ipv4_mask->total_length ||
	      ipv4_mask->packet_id || ipv4_mask->fragment_offset ||
	      ipv4_mask->hdr_checksum)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid IPv4 mask.");
	      return ret;
	    }

	  if (ipv4_mask->type_of_service == 0xff)
	    {
	      rcfg->input_set |= AVF_INSET_IPV4_TOS;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DSCP);
	    }

	  if (ipv4_mask->next_proto_id == 0xff)
	    {
	      rcfg->input_set |= AVF_INSET_IPV4_PROTO;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, PROT);
	    }

	  if (ipv4_mask->time_to_live == 0xff)
	    {
	      rcfg->input_set |= AVF_INSET_IPV4_TTL;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, TTL);
	    }

	  if (ipv4_mask->src_addr == 0xffffffff)
	    {
	      rcfg->input_set |= AVF_INSET_IPV4_SRC;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, SRC);
	    }

	  if (ipv4_mask->dst_addr == 0xffffffff)
	    {
	      rcfg->input_set |= AVF_INSET_IPV4_DST;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DST);
	    }

	  clib_memcpy (hdr->buffer, ipv4_spec, sizeof (*ipv4_spec));
	}
      break;

    case VIRTCHNL_PROTO_HDR_IPV6:
      ipv6_spec = item->spec;
      ipv6_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_IPV6;

      if (ipv6_spec && ipv6_mask)
	{
	  if (ipv6_mask->payload_len)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid IPv6 mask");
	      return ret;
	    }

	  if ((ipv6_mask->vtc_flow &
	       clib_host_to_net_u32 (AVF_IPV6_TC_MASK)) ==
	      (clib_host_to_net_u32 (AVF_IPV6_TC_MASK)))
	    {
	      rcfg->input_set |= AVF_INSET_IPV6_TC;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, TC);
	    }

	  if (ipv6_mask->proto == 0xff)
	    {
	      rcfg->input_set |= AVF_INSET_IPV6_NEXT_HDR;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, PROT);
	    }

	  if (ipv6_mask->hop_limits == 0xff)
	    {
	      rcfg->input_set |= AVF_INSET_IPV6_HOP_LIMIT;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, HOP_LIMIT);
	    }

	  if (!clib_memcmp (ipv6_mask->src_addr, ipv6_addr_mask,
			    sizeof (ipv6_mask->src_addr)))
	    {
	      rcfg->input_set |= AVF_INSET_IPV6_SRC;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, SRC);
	    }
	  if (!clib_memcmp (ipv6_mask->dst_addr, ipv6_addr_mask,
			    sizeof (ipv6_mask->dst_addr)))
	    {
	      rcfg->input_set |= AVF_INSET_IPV6_DST;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, DST);
	    }

	  clib_memcpy (hdr->buffer, ipv6_spec, sizeof (*ipv6_spec));
	}

      break;

    case VIRTCHNL_PROTO_HDR_UDP:
      udp_spec = item->spec;
      udp_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_UDP;

      if (udp_spec && udp_mask)
	{
	  if (udp_mask->dgram_len || udp_mask->dgram_cksum)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid UDP mask");
	      return ret;
	    }

	  if (udp_mask->src_port == 0xffff)
	    {
	      rcfg->input_set |= AVF_INSET_UDP_SRC_PORT;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, SRC_PORT);
	    }

	  if (udp_mask->dst_port == 0xffff)
	    {
	      rcfg->input_set |= AVF_INSET_UDP_DST_PORT;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, DST_PORT);
	    }

	  clib_memcpy (hdr->buffer, udp_spec, sizeof (*udp_spec));
	}
      break;

    case VIRTCHNL_PROTO_HDR_TCP:
      tcp_spec = item->spec;
      tcp_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_TCP;

      if (tcp_spec && tcp_mask)
	{
	  if (tcp_mask->sent_seq || tcp_mask->recv_ack || tcp_mask->data_off ||
	      tcp_mask->tcp_flags || tcp_mask->rx_win || tcp_mask->cksum ||
	      tcp_mask->tcp_urp)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid TCP mask");
	      return ret;
	    }

	  if (tcp_mask->src_port == 0xffff)
	    {
	      rcfg->input_set |= AVF_INSET_TCP_SRC_PORT;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, SRC_PORT);
	    }

	  if (tcp_mask->dst_port == 0xffff)
	    {
	      rcfg->input_set |= AVF_INSET_TCP_DST_PORT;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, DST_PORT);
	    }

	  clib_memcpy (hdr->buffer, tcp_spec, sizeof (*tcp_spec));
	}

      break;

    case VIRTCHNL_PROTO_HDR_SCTP:
      sctp_spec = item->spec;
      sctp_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_SCTP;

      if (sctp_spec && sctp_mask)
	{
	  if (sctp_mask->cksum)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid UDP mask");
	      return ret;
	    }

	  if (sctp_mask->src_port == 0xffff)
	    {
	      rcfg->input_set |= AVF_INSET_SCTP_SRC_PORT;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, SRC_PORT);
	    }

	  if (sctp_mask->dst_port == 0xffff)
	    {
	      rcfg->input_set |= AVF_INSET_SCTP_DST_PORT;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, DST_PORT);
	    }

	  clib_memcpy (hdr->buffer, sctp_spec, sizeof (*sctp_spec));
	}
      break;

    case VIRTCHNL_PROTO_HDR_GTPU_IP:
      gtp_spec = item->spec;
      gtp_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_GTPU_IP;

      if (gtp_spec && gtp_mask)
	{
	  if (gtp_mask->v_pt_rsv_flags || gtp_mask->msg_type ||
	      gtp_mask->msg_len)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid GTP mask");
	      return ret;
	    }

	  if (gtp_mask->teid == 0xffffffff)
	    {
	      rcfg->input_set |= AVF_INSET_GTPU_TEID;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_IP, TEID);
	    }

	  clib_memcpy (hdr->buffer, gtp_spec, sizeof (*gtp_spec));
	}

      break;

    case VIRTCHNL_PROTO_HDR_GTPU_EH:
      gtp_psc_spec = item->spec;
      gtp_psc_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_GTPU_EH;

      if (gtp_psc_spec && gtp_psc_mask)
	{
	  if (gtp_psc_mask->qfi == 0xff)
	    {
	      rcfg->input_set |= AVF_INSET_GTPU_QFI;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_EH, QFI);
	    }

	  clib_memcpy (hdr->buffer, gtp_psc_spec, sizeof (*gtp_psc_spec));
	}

      break;

    case VIRTCHNL_PROTO_HDR_L2TPV3:
      l2tpv3oip_spec = item->spec;
      l2tpv3oip_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_L2TPV3;

      if (l2tpv3oip_spec && l2tpv3oip_mask)
	{
	  if (l2tpv3oip_mask->session_id == 0xffffffff)
	    {
	      rcfg->input_set |= AVF_L2TPV3OIP_SESSION_ID;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, L2TPV3, SESS_ID);
	    }

	  clib_memcpy (hdr->buffer, l2tpv3oip_spec, sizeof (*l2tpv3oip_spec));
	}
      break;

    case VIRTCHNL_PROTO_HDR_ESP:
      esp_spec = item->spec;
      esp_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_ESP;

      if (esp_spec && esp_mask)
	{
	  if (esp_mask->spi == 0xffffffff)
	    {
	      rcfg->input_set |= AVF_INSET_ESP_SPI;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ESP, SPI);
	    }

	  clib_memcpy (hdr->buffer, esp_spec, sizeof (*esp_spec));
	}
      break;

    case VIRTCHNL_PROTO_HDR_AH:
      ah_spec = item->spec;
      ah_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_AH;

      if (ah_spec && ah_mask)
	{
	  if (ah_mask->spi == 0xffffffff)
	    {
	      rcfg->input_set |= AVF_INSET_AH_SPI;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, AH, SPI);
	    }

	  clib_memcpy (hdr->buffer, ah_spec, sizeof (*ah_spec));
	}
      break;

    case VIRTCHNL_PROTO_HDR_PFCP:
      pfcp_spec = item->spec;
      pfcp_mask = item->mask;
      hdr->type = VIRTCHNL_PROTO_HDR_PFCP;

      if (pfcp_spec && pfcp_mask)
	{
	  if (pfcp_mask->s_field == 0xff)
	    {
	      rcfg->input_set |= AVF_INSET_PFCP_S_FIELD;
	      VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, PFCP, S_FIELD);
	    }

	  clib_memcpy (hdr->buffer, pfcp_spec, sizeof (*pfcp_spec));
	}
      break;

    default:
      ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid pattern item.");
      return ret;
    }

  return 0;
}

int
avf_fdir_rcfg_act_queue (struct avf_fdir_conf *rcfg, int queue, int size,
			 int act_idx)
{
  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
    return -AVF_FAILURE;

  struct virtchnl_filter_action *filter_action;

  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
  filter_action->type = VIRTCHNL_ACTION_QUEUE;
  filter_action->act_conf.queue.index = queue;

  if (size == 1)
    return 0;
  else if (is_pow2 (size))
    filter_action->act_conf.queue.region = fls_u32 (size) - 1;

  return 0;
}

int
avf_fdir_parse_action_qregion (struct avf_fdir_conf *rcfg,
			       const struct avf_flow_action *act, int act_idx,
			       struct avf_flow_error *error)
{
  const struct avf_flow_action_rss *rss = act->conf;
  struct virtchnl_filter_action *filter_action;
  u32 i;
  int ret;

  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;

  if (rss->queue_num <= 1)
    {
      ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				act, "Queue region size can't be 0 or 1.");
      return ret;
    }

  /* check that the queue indices of the queue region are contiguous */
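  /* e.g. queue indices {4, 5, 6, 7} form a valid region, whereas
   * {4, 6, 7, 8} would be rejected below (illustrative values) */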
  for (i = 0; i < rss->queue_num - 1; i++)
    {
      if (rss->queue[i + 1] != rss->queue[i] + 1)
	{
	  ret =
	    avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				act, "Discontinuous queue region");
	  return ret;
	}
    }

  if (rss->queue[rss->queue_num - 1] >= rcfg->nb_rx_queues)
    {
      ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				act, "Invalid queue region indexes.");
      return ret;
    }

  if (!(is_pow2 (rss->queue_num) &&
	rss->queue_num <= AVF_FDIR_MAX_QREGION_SIZE))
    {
      ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				act,
				"The region size should be any of the"
				"following values: 1, 2, 4, 8, 16, 32"
				", 64, 128 as long as the total number of"
				"queues do not exceed the VSI allocation");
      return ret;
    }

  filter_action->type = VIRTCHNL_ACTION_Q_REGION;
  filter_action->act_conf.queue.index = rss->queue[0];
  filter_action->act_conf.queue.region = fls_u32 (rss->queue_num) - 1;

  return 0;
}

int
avf_fdir_rcfg_act_drop (struct avf_fdir_conf *rcfg, int act_idx)
{
  struct virtchnl_filter_action *filter_action;

  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
    return -AVF_FAILURE;

  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
  filter_action->type = VIRTCHNL_ACTION_DROP;

  return 0;
}

int
avf_fdir_rcfg_act_mark (struct avf_fdir_conf *rcfg, const u32 mark,
			int act_idx)
{
  struct virtchnl_filter_action *filter_action;
  if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
    return -AVF_FAILURE;

  filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;

  filter_action->type = VIRTCHNL_ACTION_MARK;
  filter_action->act_conf.mark_id = mark;

  return 0;
}

int
avf_fdir_rcfg_validate (struct avf_fdir_vc_ctx *ctx,
			struct avf_fdir_conf *rcfg)
{
  int ret;
  rcfg->add_fltr.vsi_id = rcfg->vsi;
  rcfg->add_fltr.validate_only = 1;
  struct virtchnl_fdir_add fdir_ret;

  ret =
    ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_ADD_FDIR_FILTER, &rcfg->add_fltr,
		sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));

  if (ret != 0)
    {
      return ret;
    }

  if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
    {
      ret = -fdir_ret.status;
    }

  return ret;
}

int
avf_fdir_rule_create (struct avf_fdir_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
{
  int ret;
  rcfg->add_fltr.vsi_id = rcfg->vsi;
  rcfg->add_fltr.validate_only = 0;
  struct virtchnl_fdir_add fdir_ret;

  ret =
    ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_ADD_FDIR_FILTER, &rcfg->add_fltr,
		sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));

  if (ret != 0)
    {
      return ret;
    }

  rcfg->flow_id = fdir_ret.flow_id;

  if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
    {
      ret = -fdir_ret.status;
    }

  return ret;
}

int
avf_fdir_rule_destroy (struct avf_fdir_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
{
  int ret;
  struct virtchnl_fdir_del fdir_ret;
  rcfg->del_fltr.vsi_id = rcfg->vsi;
  rcfg->del_fltr.flow_id = rcfg->flow_id;

  ret =
    ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_DEL_FDIR_FILTER, &rcfg->del_fltr,
		sizeof (rcfg->del_fltr), &fdir_ret, sizeof (fdir_ret));

  if (ret != 0)
    {
      return ret;
    }

  if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
    {
      ret = -fdir_ret.status;
    }

  return ret;
}

int
avf_fdir_parse_action (const struct avf_flow_action actions[],
		       struct avf_fdir_conf *rcfg,
		       struct avf_flow_error *error)
{
  int act_idx = 0, ret = 0;
  u32 dest_num = 0;
  u32 mark_num = 0;
  u32 act_num;
  struct virtchnl_filter_action *filter_action;
  const struct avf_flow_action_queue *act_q;
  const struct avf_flow_action_mark *act_msk;

  struct virtchnl_fdir_rule *rule_cfg = &rcfg->add_fltr.rule_cfg;

  for (; actions->type != VIRTCHNL_ACTION_NONE; actions++, act_idx++)
    {
      switch (actions->type)
	{
	case VIRTCHNL_ACTION_PASSTHRU:
	  dest_num++;
	  filter_action = &rule_cfg->action_set.actions[act_idx];
	  filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
	  rule_cfg->action_set.count++;
	  break;

	case VIRTCHNL_ACTION_DROP:
	  dest_num++;
	  ret = avf_fdir_rcfg_act_drop (rcfg, act_idx);
	  if (ret)
	    return ret;

	  rule_cfg->action_set.count++;
	  break;

	case VIRTCHNL_ACTION_QUEUE:
	  dest_num++;
	  act_q = actions->conf;

	  if (act_q->index >= rcfg->nb_rx_queues)
	    {
	      ret = avf_flow_error_set (error, AVF_FAILURE,
					AVF_FLOW_ERROR_TYPE_ACTION, actions,
					"Invalid queue for FDIR.");
	      return -AVF_FAILURE;
	    }

	  ret = avf_fdir_rcfg_act_queue (rcfg, act_q->index, 1, act_idx);
	  if (ret)
	    return ret;

	  rule_cfg->action_set.count++;
	  break;

	case VIRTCHNL_ACTION_Q_REGION:
	  dest_num++;
	  filter_action = &rule_cfg->action_set.actions[act_idx];
	  ret = avf_fdir_parse_action_qregion (rcfg, actions, act_idx, error);
	  if (ret)
	    return ret;

	  rule_cfg->action_set.count++;
	  break;

	case VIRTCHNL_ACTION_MARK:
	  mark_num++;
	  act_msk = actions->conf;
	  rcfg->mark_flag = 1;

	  ret = avf_fdir_rcfg_act_mark (rcfg, act_msk->id, act_idx);
	  if (ret)
	    return ret;

	  rule_cfg->action_set.count++;
	  break;

	default:
	  ret =
	    avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				actions, "Invalid action.");
	  return ret;
	}
    }

  if (dest_num >= 2)
    {
      ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				actions, "Unsupported action combination");
      return ret;
    }

  if (mark_num >= 2)
    {
      ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				actions, "Too many mark actions");
      return ret;
    }

  if (dest_num + mark_num == 0)
    {
      ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
				actions, "Empty action");
      return ret;
    }

  /* A mark-only rule is treated as mark + passthru. */
  act_num = rule_cfg->action_set.count;
  if (dest_num == 0)
    {
      filter_action = &rule_cfg->action_set.actions[act_num];
      filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
      rule_cfg->action_set.count = ++act_num;
    }

  return ret;
}

int
avf_fdir_parse_pattern (struct avf_fdir_conf *rcfg,
			struct avf_flow_item avf_items[],
			struct avf_flow_error *error)
{
  int layer = 0;
  int ret = 0;
  struct avf_flow_item *item;

  for (item = avf_items; item->type != VIRTCHNL_PROTO_HDR_NONE; item++)
    {
      ret = avf_fdir_rcfg_set_field (rcfg, layer, item, error);
      if (ret)
	return ret;

      rcfg->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
    }

  return ret;
}

int
avf_flow_error_set (struct avf_flow_error *error, int code,
		    enum avf_flow_error_type type, const void *cause,
		    const char *message)
{
  if (error)
    {
      *error = (struct avf_flow_error){
	.type = type,
	.cause = cause,
	.message = message,
      };
    }

  return code;
}

char *
avf_fdir_prgm_error_decode (int err_no)
{
  enum virtchnl_fdir_prgm_status status;
  char *s = NULL;

  err_no = -err_no;

  if (err_no >= VIRTCHNL_FDIR_FAILURE_MAX)
    return "Failed to program the rule due to other reasons";

  status = (enum virtchnl_fdir_prgm_status) err_no;
  switch (status)
    {
    case VIRTCHNL_FDIR_SUCCESS:
      s = "Rule request successfully programmed by the PF";
      break;
    case VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE:
      s = "Failed to add the rule: no hardware resources available";
      break;
    case VIRTCHNL_FDIR_FAILURE_RULE_EXIST:
      s = "Failed to add the rule: the rule already exists";
      break;
    case VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT:
      s = "Failed to add the rule: it conflicts with an existing rule";
      break;
    case VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST:
      s = "Failed to delete the rule: the rule does not exist";
      break;
    case VIRTCHNL_FDIR_FAILURE_RULE_INVALID:
      s = "Failed to add the rule: not supported by the hardware";
      break;
    case VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT:
      s = "Failed to add the rule: programming timed out";
      break;
    case VIRTCHNL_FDIR_FAILURE_QUERY_INVALID:
      s = "Failed to query the rule: invalid query request";
      break;
    default:
      s = "Failed to program the rule due to other reasons";
      break;
    }

  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */