DPDK patches and discussions
From: "Su, Simei" <simei.su@intel.com>
To: "Cao, Yahui" <yahui.cao@intel.com>,
	"Zhang, Qi Z" <qi.z.zhang@intel.com>,
	 "Ye, Xiaolong" <xiaolong.ye@intel.com>,
	"Wu, Jingjing" <jingjing.wu@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
Date: Fri, 10 Apr 2020 08:00:34 +0000	[thread overview]
Message-ID: <BL0PR11MB3380E7D781B23EDF9A06EBE19CDE0@BL0PR11MB3380.namprd11.prod.outlook.com> (raw)
In-Reply-To: <761b11f51a654631a9b874b76c9e1568@intel.com>

Hi, Yahui

> -----Original Message-----
> From: Cao, Yahui <yahui.cao@intel.com>
> Sent: Friday, April 10, 2020 3:40 PM
> To: Su, Simei <simei.su@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Ye,
> Xiaolong <xiaolong.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
> 
> 
> 
> > -----Original Message-----
> > From: Su, Simei <simei.su@intel.com>
> > Sent: Thursday, April 2, 2020 9:33 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong
> > <xiaolong.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Su, Simei
> > <simei.su@intel.com>
> > Subject: [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
> >
> > This patch adds FDIR create/destroy/validate function in AVF.
> > Common pattern and queue/qgroup/passthru/drop actions are supported.
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > ---
> >  drivers/net/iavf/Makefile     |   1 +
> >  drivers/net/iavf/iavf.h       |  17 +
> >  drivers/net/iavf/iavf_fdir.c  | 749 ++++++++++++++++++++++++++++++++++++++++++
> >  drivers/net/iavf/iavf_vchnl.c | 152 ++++++++-
> >  drivers/net/iavf/meson.build  |   1 +
> >  5 files changed, 919 insertions(+), 1 deletion(-)
> >  create mode 100644 drivers/net/iavf/iavf_fdir.c
> >
> > diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
> > index 7b0093a..b2b75d7 100644
> > --- a/drivers/net/iavf/Makefile
> > +++ b/drivers/net/iavf/Makefile
> > @@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
> > +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
> >  ifeq ($(CONFIG_RTE_ARCH_X86), y)
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
> >  endif
> > diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> > index afec8b2..e2b1d5f 100644
> > --- a/drivers/net/iavf/iavf.h
> > +++ b/drivers/net/iavf/iavf.h
> > @@ -106,6 +106,17 @@ struct iavf_vsi {
> >  struct iavf_flow_parser_node;
> >  TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
> >
> > +struct iavf_fdir_conf {
> > +	struct virtchnl_fdir_add add_fltr;
> > +	struct virtchnl_fdir_del del_fltr;
> > +	uint64_t input_set;
> > +	uint32_t flow_id;
> > +};
> > +
> > +struct iavf_fdir_info {
> > +	struct iavf_fdir_conf conf;
> > +};
> > +
> >  /* TODO: is that correct to assume the max number to be 16 ?*/
> >  #define IAVF_MAX_MSIX_VECTORS   16
> >
> > @@ -145,6 +156,8 @@ struct iavf_info {
> >  	struct iavf_flow_list flow_list;
> >  	struct iavf_parser_list rss_parser_list;
> >  	struct iavf_parser_list dist_parser_list;
> > +
> > +	struct iavf_fdir_info fdir; /* flow director info */
> >  };
> >
> >  #define IAVF_MAX_PKT_TYPE 1024
> > @@ -270,4 +283,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
> >  int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
> >  int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
> >  			 struct virtchnl_rss_cfg *rss_cfg, bool add);
> > +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
> > +int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
> > +int iavf_fdir_check(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
> >  #endif /* _IAVF_ETHDEV_H_ */
> > diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
> > new file mode 100644
> > index 0000000..ea529b6
> > --- /dev/null
> > +++ b/drivers/net/iavf/iavf_fdir.c
> > @@ -0,0 +1,749 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2019 Intel Corporation
> > + */
> > +
> > +#include <sys/queue.h>
> > +#include <stdio.h>
> > +#include <errno.h>
> > +#include <stdint.h>
> > +#include <string.h>
> > +#include <unistd.h>
> > +#include <stdarg.h>
> > +
> > +#include <rte_ether.h>
> > +#include <rte_ethdev_driver.h>
> > +#include <rte_malloc.h>
> > +#include <rte_tailq.h>
> > +
> > +#include "iavf.h"
> > +#include "iavf_generic_flow.h"
> > +#include "virtchnl.h"
> > +
> > +#define IAVF_FDIR_MAX_QREGION_SIZE 128
> > +
> > +#define IAVF_FDIR_IPV6_TC_OFFSET 20
> > +#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
> > +
> > +#define IAVF_FDIR_INSET_ETH (\
> > +IAVF_INSET_ETHERTYPE)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4 (\
> > +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> > +	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
> > +	IAVF_INSET_IPV4_TTL)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
> > +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> > +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> > +	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
> > +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> > +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> > +	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
> > +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> > +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> > +	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6 (\
> > +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> > +	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
> > +	IAVF_INSET_IPV6_HOP_LIMIT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
> > +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> > +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> > +	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
> > +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> > +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> > +	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
> > +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> > +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> > +	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> > +
> > +static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
> > +	{iavf_pattern_ethertype,	IAVF_FDIR_INSET_ETH,		IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv4,		IAVF_FDIR_INSET_ETH_IPV4,	IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv4_udp,	IAVF_FDIR_INSET_ETH_IPV4_UDP,	IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv4_tcp,	IAVF_FDIR_INSET_ETH_IPV4_TCP,	IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv4_sctp,	IAVF_FDIR_INSET_ETH_IPV4_SCTP,	IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv6,		IAVF_FDIR_INSET_ETH_IPV6,	IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv6_udp,	IAVF_FDIR_INSET_ETH_IPV6_UDP,	IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv6_tcp,	IAVF_FDIR_INSET_ETH_IPV6_TCP,	IAVF_INSET_NONE},
> > +	{iavf_pattern_eth_ipv6_sctp,	IAVF_FDIR_INSET_ETH_IPV6_SCTP,	IAVF_INSET_NONE},
> > +};
> > +
> > +static struct iavf_flow_parser iavf_fdir_parser;
> > +
> > +static int
> > +iavf_fdir_init(struct iavf_adapter *ad)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> > +	struct iavf_flow_parser *parser;
> > +
> > +	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
> > +		parser = &iavf_fdir_parser;
> > +	else
> > +		return -ENOTSUP;
> > +
> > +	return iavf_register_parser(parser, ad);
> > +}
> > +
> > +static void
> > +iavf_fdir_uninit(struct iavf_adapter *ad)
> > +{
> > +	struct iavf_flow_parser *parser;
> > +
> > +	parser = &iavf_fdir_parser;
> > +
> > +	iavf_unregister_parser(parser, ad);
> > +}
> > +
> > +static int
> > +iavf_fdir_create(struct iavf_adapter *ad,
> > +		struct rte_flow *flow,
> > +		void *meta,
> > +		struct rte_flow_error *error)
> > +{
> > +	struct iavf_fdir_conf *filter = meta;
> > +	struct iavf_fdir_conf *rule;
> > +	int ret;
> > +
> > +	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
> > +	if (!rule) {
> > +		rte_flow_error_set(error, ENOMEM,
> > +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +				"Failed to allocate memory");
> > +		return -rte_errno;
> > +	}
> > +
> > +	ret = iavf_fdir_add(ad, filter);
> > +	if (ret) {
> > +		rte_flow_error_set(error, -ret,
> > +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +				"Add filter rule failed.");
> > +		goto free_entry;
> > +	}
> > +
> > +	rte_memcpy(rule, filter, sizeof(*rule));
> > +	flow->rule = rule;
> > +
> > +	return 0;
> > +
> > +free_entry:
> > +	rte_free(rule);
> > +	return -rte_errno;
> > +}
> > +
> > +static int
> > +iavf_fdir_destroy(struct iavf_adapter *ad,
> > +		struct rte_flow *flow,
> > +		struct rte_flow_error *error)
> > +{
> > +	struct iavf_fdir_conf *filter;
> > +	int ret;
> > +
> > +	filter = (struct iavf_fdir_conf *)flow->rule;
> > +
> > +	ret = iavf_fdir_del(ad, filter);
> > +	if (ret) {
> > +		rte_flow_error_set(error, -ret,
> > +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +				"Del filter rule failed.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	flow->rule = NULL;
> > +	rte_free(filter);
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_validation(struct iavf_adapter *ad,
> > +		__rte_unused struct rte_flow *flow,
> > +		void *meta,
> > +		struct rte_flow_error *error)
> > +{
> > +	struct iavf_fdir_conf *filter = meta;
> > +	int ret;
> > +
> > +	ret = iavf_fdir_check(ad, filter);
> > +	if (ret) {
> > +		rte_flow_error_set(error, -ret,
> > +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +				"Validate filter rule failed.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	return 0;
> > +};
> > +
> > +static struct iavf_flow_engine iavf_fdir_engine = {
> > +	.init = iavf_fdir_init,
> > +	.uninit = iavf_fdir_uninit,
> > +	.create = iavf_fdir_create,
> > +	.destroy = iavf_fdir_destroy,
> > +	.validation = iavf_fdir_validation,
> > +	.type = IAVF_FLOW_ENGINE_FDIR,
> > +};
> > +
> > +static int
> > +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad, struct
> > +rte_flow_error *error, const struct rte_flow_action *act, struct
> > +virtchnl_filter_action *filter_action) { const struct
> > +rte_flow_action_rss *rss = act->conf; uint32_t i;
> > +
> > +if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> > +rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Invalid action."); return -rte_errno; }
> > +
> > +if (rss->queue_num <= 1) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Queue region size can't be 0 or 1."); return -rte_errno; }
> > +
> > +/* check if queue index for queue region is continuous */ for (i = 0;
> > +i < rss->queue_num - 1; i++) { if (rss->queue[i + 1] != rss->queue[i]
> > ++ 1) { rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> > +act, "Discontinuous queue region"); return -rte_errno; } }
> > +
> > +if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data-
> > >nb_rx_queues) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Invalid queue region indexes.");
> > +return -rte_errno;
> > +}
> > +
> > +if (!(rte_is_power_of_2(rss->queue_num) &&
> > +rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"The region size should be any of the following
> > values:"
> > +"1, 2, 4, 8, 16, 32, 64, 128 as long as the total
> > number "
> > +"of queues do not exceed the VSI allocation."); return -rte_errno; }
> > +
> > +filter_action->q_index = rss->queue[0]; filter_action->q_region =
> > +rte_fls_u32(rss->queue_num) - 1;
> > +
> > +return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse_action(struct iavf_adapter *ad,
> > +			const struct rte_flow_action actions[],
> > +			struct rte_flow_error *error,
> > +			struct iavf_fdir_conf *filter)
> > +{
> > +	const struct rte_flow_action_queue *act_q;
> > +	uint32_t dest_num = 0;
> > +	int ret;
> > +
> > +	int number = 0;
> > +	struct virtchnl_filter_action *filter_action;
> > +
> > +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> > +		switch (actions->type) {
> > +		case RTE_FLOW_ACTION_TYPE_VOID:
> > +			break;
> > +
> > +		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
> > +			dest_num++;
> > +
> > +			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
> > +
> > +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +			break;
> > +
> > +		case RTE_FLOW_ACTION_TYPE_DROP:
> > +			dest_num++;
> > +
> > +			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +			filter_action->type = VIRTCHNL_ACTION_DROP;
> > +
> > +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +			break;
> > +
> > +		case RTE_FLOW_ACTION_TYPE_QUEUE:
> > +			dest_num++;
> > +
> > +			act_q = actions->conf;
> > +			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +			filter_action->type = VIRTCHNL_ACTION_QUEUE;
> > +			filter_action->q_index = act_q->index;
> > +
> > +			if (filter_action->q_index >= ad->eth_dev->data->nb_rx_queues) {
> > +				rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ACTION,
> > +					actions, "Invalid queue for FDIR.");
> > +				return -rte_errno;
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +			break;
> > +
> > +		case RTE_FLOW_ACTION_TYPE_RSS:
> > +			dest_num++;
> > +
> > +			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
> > +
> > +			ret = iavf_fdir_parse_action_qregion(ad,
> > +						error, actions, filter_action);
> > +			if (ret)
> > +				return ret;
> > +
> > +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +			break;
> > +
> > +		default:
> > +			rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ACTION, actions,
> > +					"Invalid action.");
> > +			return -rte_errno;
> > +		}
> > +	}
> > +
> > +	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
> > +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> > +			actions, "Action numbers exceed the maximum value");
> > +		return -rte_errno;
> > +	}
> > +
> > +	if (dest_num == 0 || dest_num >= 2) {
> > +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> > +			actions, "Unsupported action combination");
> > +		return -rte_errno;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
> > +			const struct rte_flow_item pattern[],
> > +			struct rte_flow_error *error,
> > +			struct iavf_fdir_conf *filter)
> > +{
> > +	const struct rte_flow_item *item = pattern;
> > +	enum rte_flow_item_type item_type;
> > +	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
> > +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> > +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> > +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> > +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> > +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> > +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> > +	uint64_t input_set = IAVF_INSET_NONE;
> > +
> > +	enum rte_flow_item_type next_type;
> > +	uint16_t ether_type;
> > +
> > +	int layer = 0;
> > +	struct virtchnl_proto_hdr *hdr;
> > +
> > +	uint8_t  ipv6_addr_mask[16] = {
> > +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> > +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
> > +	};
> > +
> > +	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +					"Not support range");
> > +		}
> > +
> > +		item_type = item->type;
> > +
> > +		switch (item_type) {
> > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > +			eth_spec = item->spec;
> > +			eth_mask = item->mask;
> > +			next_type = (item + 1)->type;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
> > +
> > +			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
> > +				(!eth_spec || !eth_mask)) {
> > +				rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item, "NULL eth spec/mask.");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (eth_spec && eth_mask) {
> > +				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
> > +				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid MAC_addr mask.");
> > +					return -rte_errno;
> > +				}
> > +			}
> > +
> > +			if (eth_spec && eth_mask && eth_mask->type) {
> > +				if (eth_mask->type != RTE_BE16(0xffff)) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item, "Invalid type mask.");
> > +					return -rte_errno;
> > +				}
> > +
> > +				ether_type = rte_be_to_cpu_16(eth_spec->type);
> > +				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
> > +					ether_type == RTE_ETHER_TYPE_IPV6) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item,
> > +						"Unsupported ether_type.");
> > +					return -rte_errno;
> > +				}
> > +
> > +				input_set |= IAVF_INSET_ETHERTYPE;
> > +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
> > +
> > +				rte_memcpy(hdr->buffer,
> > +					eth_spec, sizeof(*eth_spec));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_IPV4:
> > +			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
> > +			ipv4_spec = item->spec;
> > +			ipv4_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
> > +
> > +			if (ipv4_spec && ipv4_mask) {
> > +				if (ipv4_mask->hdr.version_ihl ||
> > +					ipv4_mask->hdr.total_length ||
> > +					ipv4_mask->hdr.packet_id ||
> > +					ipv4_mask->hdr.fragment_offset ||
> > +					ipv4_mask->hdr.hdr_checksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item, "Invalid IPv4 mask.");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_TOS;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
> > +				}
> > +				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_PROTO;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
> > +				}
> > +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_TTL;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
> > +				}
> > +				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_SRC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
> > +				}
> > +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_DST;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
> > +				}
> > +
> > +				rte_memcpy(hdr->buffer,
> > +					&ipv4_spec->hdr,
> > +					sizeof(ipv4_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_IPV6:
> > +			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
> > +			ipv6_spec = item->spec;
> > +			ipv6_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
> > +
> > +			if (ipv6_spec && ipv6_mask) {
> > +				if (ipv6_mask->hdr.payload_len) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item, "Invalid IPv6 mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if ((ipv6_mask->hdr.vtc_flow &
> > +					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
> > +					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
> > +					input_set |= IAVF_INSET_IPV6_TC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
> > +				}
> > +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
> > +				}
> > +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
> > +				}
> > +				if (!memcmp(ipv6_mask->hdr.src_addr,
> > +					ipv6_addr_mask,
> > +					RTE_DIM(ipv6_mask->hdr.src_addr))) {
> > +					input_set |= IAVF_INSET_IPV6_SRC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
> > +				}
> > +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> > +					ipv6_addr_mask,
> > +					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> > +					input_set |= IAVF_INSET_IPV6_DST;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
> > +				}
> > +
> > +				rte_memcpy(hdr->buffer,
> > +					&ipv6_spec->hdr,
> > +					sizeof(ipv6_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_UDP:
> > +			udp_spec = item->spec;
> > +			udp_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
> > +
> > +			if (udp_spec && udp_mask) {
> > +				if (udp_mask->hdr.dgram_len ||
> > +					udp_mask->hdr.dgram_cksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid UDP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (udp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_UDP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
> > +				}
> > +				if (udp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_UDP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&udp_spec->hdr,
> > +						sizeof(udp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&udp_spec->hdr,
> > +						sizeof(udp_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_TCP:
> > +			tcp_spec = item->spec;
> > +			tcp_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
> > +
> > +			if (tcp_spec && tcp_mask) {
> > +				if (tcp_mask->hdr.sent_seq ||
> > +					tcp_mask->hdr.recv_ack ||
> > +					tcp_mask->hdr.data_off ||
> > +					tcp_mask->hdr.tcp_flags ||
> > +					tcp_mask->hdr.rx_win ||
> > +					tcp_mask->hdr.cksum ||
> > +					tcp_mask->hdr.tcp_urp) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid TCP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_TCP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
> > +				}
> > +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_TCP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&tcp_spec->hdr,
> > +						sizeof(tcp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&tcp_spec->hdr,
> > +						sizeof(tcp_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_SCTP:
> > +			sctp_spec = item->spec;
> > +			sctp_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
> > +
> > +			if (sctp_spec && sctp_mask) {
> > +				if (sctp_mask->hdr.cksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid UDP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (sctp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_SCTP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
> > +				}
> > +				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_SCTP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&sctp_spec->hdr,
> > +						sizeof(sctp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&sctp_spec->hdr,
> > +						sizeof(sctp_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_VOID:
> > +			break;
> > +
> > +		default:
> > +			rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +					"Invalid pattern item.");
> > +			return -rte_errno;
> > +		}
> > +	}
> > +
> > +	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
> > +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Protocol header layers exceed the maximum value");
> > +		return -rte_errno;
> > +	}
> > +
> > +	filter->input_set = input_set;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse(struct iavf_adapter *ad,
> > +		struct iavf_pattern_match_item *array,
> > +		uint32_t array_len,
> > +		const struct rte_flow_item pattern[],
> > +		const struct rte_flow_action actions[],
> > +		void **meta,
> > +		struct rte_flow_error *error)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> > +	struct iavf_fdir_conf *filter = &vf->fdir.conf;
> > +	struct iavf_pattern_match_item *item = NULL;
> > +	uint64_t input_set;
> > +	int ret;
> > +
> > +	memset(filter, 0, sizeof(*filter));
> > +
> > +	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
> > +	if (!item)
> > +		return -rte_errno;
> > +
> > +	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
> > +	if (ret)
> > +		goto error;
> > +
> > +	input_set = filter->input_set;
> > +	if (!input_set || input_set & ~item->input_set_mask) {
> > +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
> > +				pattern, "Invalid input set");
> > +		ret = -rte_errno;
> > +		goto error;
> > +	}
> > +
> > +	ret = iavf_fdir_parse_action(ad, actions, error, filter);
> > +	if (ret)
> > +		goto error;
> > +
> > +	if (meta)
> > +		*meta = filter;
> > +
> > +error:
> > +	rte_free(item);
> > +	return ret;
> > +}
> > +
> > +static struct iavf_flow_parser iavf_fdir_parser = {
> > +	.engine = &iavf_fdir_engine,
> > +	.array = iavf_fdir_pattern,
> > +	.array_len = RTE_DIM(iavf_fdir_pattern),
> > +	.parse_pattern_action = iavf_fdir_parse,
> > +	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
> > +};
> > +
> > +RTE_INIT(iavf_fdir_engine_register)
> > +{
> > +	iavf_register_flow_engine(&iavf_fdir_engine);
> > +}
> > diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> > index 2307969..133e81c 100644
> > --- a/drivers/net/iavf/iavf_vchnl.c
> > +++ b/drivers/net/iavf/iavf_vchnl.c
> > @@ -339,7 +339,8 @@
> >  	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
> >  		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
> >  		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
> > -		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
> > +		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
> > +		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
> >
> >  	args.in_args = (uint8_t *)&caps;
> >  	args.in_args_size = sizeof(caps);
> > @@ -906,3 +907,152 @@
> >
> >  return err;
> >  }
> > +
> > +int
> > +iavf_fdir_add(struct iavf_adapter *adapter,
> > +	struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_add *fdir_ret;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->add_fltr.validate_only = 0;
> > +
> > +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->add_fltr);
> > +	args.in_args_size = sizeof(*(&filter->add_fltr));
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
> > +		return err;
> > +	}
> > +
> > +	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
> > +	filter->flow_id = fdir_ret->flow_id;
> > +
> > +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> > +		PMD_DRV_LOG(INFO,
> > +			"add rule request is successfully done by PF");
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to no hw resource");
> > +		return -1;
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to the rule is already existed");
> > +		return -1;
> [Cao, Yahui]
> The logic here prints "already existed" when VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT is found.
> But according to the virtchnl definitions:
>  * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
>  * OP_ADD_FDIR_FILTER request is failed due to the rule is already existed.
>  *
>  * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
>  * OP_ADD_FDIR_FILTER request is failed due to conflict with existing rule.
> RULE_CONFLICT means conflict with an existing rule, while RULE_EXIST means
> the rule already exists. So I think you missed the
> VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition and the error log may be
> mismatched.
> 

 Yes, you are right. I missed the VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition here and mismatched the corresponding error log. I will add the missing case in the next version.
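
 To make sure we are aligned, below is a rough sketch of what I have in mind for v3 (only a sketch, the exact log wording is not final):

	/* Sketch: give VIRTCHNL_FDIR_FAILURE_RULE_EXIST its own branch and
	 * keep a separate "conflict" message for
	 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT.
	 */
	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
		PMD_DRV_LOG(INFO,
			"add rule request is successfully done by PF");
	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to no hw resource");
		return -1;
	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to the rule is already existed");
		return -1;
	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to the rule is conflict with existing rule");
		return -1;
	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to the hw doesn't support");
		return -1;
	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to time out for programming");
		return -1;
	} else {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to other reasons");
		return -1;
	}

 Does that match what you expected?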
 Thanks.

Br
Simei

> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to the hw doesn't support");
> > +		return -1;
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to time out for programming");
> > +		return -1;
> > +	} else {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to other reasons");
> > +		return -1;
> > +	}
> > +
> > +	return 0;
> > +};
> > +
> > +int
> > +iavf_fdir_del(struct iavf_adapter *adapter,
> > +	struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_del *fdir_ret;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->del_fltr.flow_id = filter->flow_id;
> > +
> > +	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->del_fltr);
> > +	args.in_args_size = sizeof(filter->del_fltr);
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
> > +		return err;
> > +	}
> > +
> > +	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
> > +
> > +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> > +		PMD_DRV_LOG(INFO,
> > +			"delete rule request is successfully done by PF");
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
> > +		PMD_DRV_LOG(ERR,
> > +			"delete rule request is failed due to this rule doesn't exist");
> > +		return -1;
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
> > +		PMD_DRV_LOG(ERR,
> > +			"delete rule request is failed due to time out for programming");
> > +		return -1;
> > +	} else {
> > +		PMD_DRV_LOG(ERR,
> > +			"delete rule request is failed due to other reasons");
> > +		return -1;
> > +	}
> > +
> > +	return 0;
> > +};
> > +
> > +int
> > +iavf_fdir_check(struct iavf_adapter *adapter,
> > +	struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_add *fdir_ret;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->add_fltr.validate_only = 1;
> > +
> > +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->add_fltr);
> > +	args.in_args_size = sizeof(*(&filter->add_fltr));
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
> > +		return err;
> > +	}
> > +
> > +	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
> > +
> > +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> > +		PMD_DRV_LOG(INFO,
> > +			"check rule request is successfully done by PF");
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
> > +		PMD_DRV_LOG(ERR,
> > +			"check rule request is failed due to parameters validation"
> > +			" or HW doesn't support");
> > +		return -1;
> > +	} else {
> > +		PMD_DRV_LOG(ERR,
> > +			"check rule request is failed due to other reasons");
> > +		return -1;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +
> > diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
> > index 5a5cdd5..f875b72 100644
> > --- a/drivers/net/iavf/meson.build
> > +++ b/drivers/net/iavf/meson.build
> > @@ -14,6 +14,7 @@ sources = files(
> >  'iavf_vchnl.c',
> >  'iavf_generic_flow.c',
> >  'iavf_hash.c',
> > +'iavf_fdir.c',
> >  )
> >
> >  if arch_subdir == 'x86'
> > --
> > 1.8.3.1
> 



Thread overview: 43+ messages
2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capabiltiy Simei Su
2020-03-18  5:41 ` [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-03-31  5:20   ` Cao, Yahui
2020-03-31  7:12     ` Su, Simei
2020-03-18  5:41 ` [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-03-19  1:46   ` Zhang, Qi Z
2020-03-18  5:41 ` [dpdk-dev] [PATCH 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-03-18  5:42 ` [dpdk-dev] [PATCH 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-03-18  5:42 ` [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-03-31  5:20   ` Cao, Yahui
2020-03-31  7:05     ` Su, Simei
2020-03-18  5:56 ` [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capabiltiy Stephen Hemminger
2020-03-19  8:48   ` Su, Simei
2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-10  7:40     ` Cao, Yahui
2020-04-10  8:00       ` Su, Simei [this message]
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capabiltiy Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-14  7:37       ` Ye Xiaolong
2020-04-14  8:31         ` Su, Simei
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capabiltiy Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-15  3:17       ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capabiltiy Zhang, Qi Z
2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-21  6:40         ` [dpdk-dev] [PATCH v5 0/5] net/iavf: support FDIR capabiltiy Ye Xiaolong
