* [dpdk-dev] [PATCH v1 0/3] net/ice: support DCF ACL capability
@ 2020-09-10 7:37 Simei Su
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 1/3] net/ice: get PF VSI map Simei Su
` (3 more replies)
0 siblings, 4 replies; 25+ messages in thread
From: Simei Su @ 2020-09-10 7:37 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
[PATCH v1 1/3] get PF VSI map for DCF ACL rule.
[PATCH v1 2/3] add devargs support for DCF ACL IPV4 rule number.
[PATCH v1 3/3] support IPV4/IPV4_UDP/IPV4_TCP pattern and DROP action
for DCF ACL.
Simei Su (3):
net/ice: get PF VSI map
net/ice: add devarg for ACL ipv4 rule number
net/ice: support ACL filter in DCF
drivers/net/ice/ice_acl_filter.c | 1081 ++++++++++++++++++++++++++++++++++++
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_ethdev.c | 102 +++-
drivers/net/ice/ice_dcf_parent.c | 33 ++
drivers/net/ice/ice_ethdev.h | 10 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
8 files changed, 1209 insertions(+), 24 deletions(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
--
1.8.3.1
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v1 1/3] net/ice: get PF VSI map
2020-09-10 7:37 [dpdk-dev] [PATCH v1 0/3] net/ice: support DCF ACL capability Simei Su
@ 2020-09-10 7:37 ` Simei Su
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 2/3] net/ice: add devarg for ACL ipv4 rule number Simei Su
` (2 subsequent siblings)
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-09-10 7:37 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
This patch gets the PF VSI number, which is needed when issuing an ACL rule in DCF.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 33 +++++++++++++++++++++++++++++++++
3 files changed, 35 insertions(+)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 2d803c5..d20e2b3 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -318,6 +318,7 @@
}
hw->num_vfs = vsi_map->num_vfs;
+ hw->pf_vsi_id = vsi_map->pf_vsi;
}
if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index a44a01e..ff02996 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -43,6 +43,7 @@ struct ice_dcf_hw {
uint16_t num_vfs;
uint16_t *vf_vsi_map;
+ uint16_t pf_vsi_id;
struct virtchnl_version_info virtchnl_version;
struct virtchnl_vf_resource *vf_res; /* VF resource */
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c5dfdd3..bbe533d 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -78,6 +78,35 @@
ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
}
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+ uint16_t pf_vsi_num)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+ PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+ if (!vsi_ctx)
+ vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+ if (!vsi_ctx) {
+ PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+ pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx->vsi_num = pf_vsi_num;
+ hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+ PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+ pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
static void*
ice_dcf_vsi_update_service_handler(void *param)
{
@@ -368,6 +397,10 @@ static void ice_dcf_uninit_parent_hw(struct ice_hw *hw)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ parent_adapter->pf.main_vsi->idx = hw->num_vfs;
+ ice_dcf_update_pf_vsi_map(parent_hw,
+ parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
+
err = ice_flow_init(parent_adapter);
if (err) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
--
1.8.3.1
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v1 2/3] net/ice: add devarg for ACL ipv4 rule number
2020-09-10 7:37 [dpdk-dev] [PATCH v1 0/3] net/ice: support DCF ACL capability Simei Su
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 1/3] net/ice: get PF VSI map Simei Su
@ 2020-09-10 7:37 ` Simei Su
2020-09-10 7:53 ` Wang, Haiyue
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 3/3] net/ice: support ACL filter in DCF Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capability Simei Su
3 siblings, 1 reply; 25+ messages in thread
From: Simei Su @ 2020-09-10 7:37 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
This patch enables a devarg for the ACL IPv4 rule number and refactors
the DCF capability selection API to be more flexible.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_dcf_ethdev.c | 102 ++++++++++++++++++++++++++++++---------
drivers/net/ice/ice_ethdev.h | 1 +
2 files changed, 80 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 2faed3c..3238ce2 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -26,6 +26,16 @@
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"
+/* devargs */
+#define ICE_DCF_CAP "cap"
+#define ICE_DCF_ACL_IPV4_RULES_NUM "acl_ipv4_nums"
+
+static const char * const ice_dcf_valid_args[] = {
+ ICE_DCF_CAP,
+ ICE_DCF_ACL_IPV4_RULES_NUM,
+ NULL,
+};
+
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **bufs,
@@ -895,9 +905,51 @@
};
static int
+parse_int(__rte_unused const char *key, const char *value, void *args)
+{
+ int *i = (int *)args;
+ char *end;
+ int num;
+
+ num = strtoul(value, &end, 10);
+ *i = num;
+
+ return 0;
+}
+
+static int ice_dcf_parse_devargs(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct ice_adapter *parent_adapter = &adapter->parent;
+
+ struct rte_devargs *devargs = dev->device->devargs;
+ struct rte_kvargs *kvlist;
+ int ret;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, ice_dcf_valid_args);
+ if (kvlist == NULL) {
+ PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
+ return -EINVAL;
+ }
+
+ ret = rte_kvargs_process(kvlist, ICE_DCF_ACL_IPV4_RULES_NUM,
+ &parse_int, &parent_adapter->devargs.acl_ipv4_rules_num);
+ if (ret)
+ goto bail;
+
+bail:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
+ int ret;
eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
@@ -908,6 +960,12 @@
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+ ret = ice_dcf_parse_devargs(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to parse devargs");
+ return -EINVAL;
+ }
+
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
@@ -932,49 +990,47 @@
}
static int
-ice_dcf_cap_check_handler(__rte_unused const char *key,
- const char *value, __rte_unused void *opaque)
+handle_dcf_arg(__rte_unused const char *key, const char *value,
+ __rte_unused void *arg)
{
- if (strcmp(value, "dcf"))
- return -1;
+ bool *dcf = arg;
+
+ if (arg == NULL || value == NULL)
+ return -EINVAL;
+
+ if (strcmp(value, "dcf") == 0)
+ *dcf = true;
+ else
+ *dcf = false;
return 0;
}
-static int
-ice_dcf_cap_selected(struct rte_devargs *devargs)
+static bool
+check_cap_dcf_enable(struct rte_devargs *devargs)
{
struct rte_kvargs *kvlist;
- const char *key = "cap";
- int ret = 0;
+ bool enable = false;
if (devargs == NULL)
- return 0;
+ return false;
kvlist = rte_kvargs_parse(devargs->args, NULL);
if (kvlist == NULL)
- return 0;
-
- if (!rte_kvargs_count(kvlist, key))
- goto exit;
-
- /* dcf capability selected when there's a key-value pair: cap=dcf */
- if (rte_kvargs_process(kvlist, key,
- ice_dcf_cap_check_handler, NULL) < 0)
- goto exit;
+ return false;
- ret = 1;
+ rte_kvargs_process(kvlist, ICE_DCF_CAP, handle_dcf_arg, &enable);
-exit:
rte_kvargs_free(kvlist);
- return ret;
+
+ return enable;
}
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev)
{
- if (!ice_dcf_cap_selected(pci_dev->device.devargs))
- return 1;
+ if (!check_cap_dcf_enable(pci_dev->device.devargs))
+ return 1; /* continue to probe */
return rte_eth_dev_pci_generic_probe(pci_dev,
sizeof(struct ice_dcf_adapter),
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 758caa8..13f4167 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -447,6 +447,7 @@ struct ice_devargs {
int pipe_mode_support;
int flow_mark_support;
uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
+ int acl_ipv4_rules_num;
};
/**
--
1.8.3.1
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v1 3/3] net/ice: support ACL filter in DCF
2020-09-10 7:37 [dpdk-dev] [PATCH v1 0/3] net/ice: support DCF ACL capability Simei Su
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 1/3] net/ice: get PF VSI map Simei Su
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 2/3] net/ice: add devarg for ACL ipv4 rule number Simei Su
@ 2020-09-10 7:37 ` Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capability Simei Su
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-09-10 7:37 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter
to destroy a rule. If a flow is matched by the ACL filter, the filter
rule will be programmed to HW. Currently, the IPV4/IPV4_UDP/IPV4_TCP
patterns and the drop action are supported.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_acl_filter.c | 1081 ++++++++++++++++++++++++++++++++++++
drivers/net/ice/ice_ethdev.h | 9 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
4 files changed, 1094 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
new file mode 100644
index 0000000..a4a8528
--- /dev/null
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -0,0 +1,1081 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include "base/ice_type.h"
+#include "base/ice_acl.h"
+#include "ice_logs.h"
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "base/ice_flow.h"
+
+#define ICE_ACL_INSET_ETH_IPV4 ( \
+ ICE_INSET_SMAC | ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
+ ICE_INSET_SMAC | ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
+ ICE_INSET_SMAC | ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+
+enum ice_fd_stat_idx {
+ ICE_FD_STAT_SB,
+};
+
+#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
+#define ICE_FD_STAT_PF_IDX(base_idx) \
+ ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
+#define ICE_FD_SB_STAT_IDX(base_idx) \
+ (ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_SB)
+
+static struct ice_flow_parser ice_acl_parser;
+
+static struct
+ice_pattern_match_item ice_acl_pattern[] = {
+ {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
+};
+
+static int
+ice_acl_prof_alloc(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype, fltr_ptype;
+
+ if (!hw->acl_prof) {
+ hw->acl_prof = (struct ice_fd_hw_prof **)
+ ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+ sizeof(*hw->acl_prof));
+ if (!hw->acl_prof)
+ return -ENOMEM;
+ }
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+ if (!hw->acl_prof[ptype]) {
+ hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
+ ice_malloc(hw, sizeof(**hw->acl_prof));
+ if (!hw->acl_prof[ptype])
+ goto fail_mem;
+ }
+ }
+
+ return 0;
+
+fail_mem:
+ for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ fltr_ptype < ptype; fltr_ptype++) {
+ rte_free(hw->acl_prof[fltr_ptype]);
+ hw->acl_prof[fltr_ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ return -ENOMEM;
+}
+
+/**
+ * ice_acl_setup - Reserve and initialize the ACL resources
+ * @pf: board private structure
+ */
+static int
+ice_acl_setup(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->adapter->hw;
+ u32 pf_num = hw->dev_caps.num_funcs;
+ struct ice_acl_tbl_params params;
+ int acl_ipv4_rules = 0;
+ u16 scen_id;
+ int err = 0;
+
+ if (pf->adapter->devargs.acl_ipv4_rules_num)
+ acl_ipv4_rules = pf->adapter->devargs.acl_ipv4_rules_num;
+ else
+ acl_ipv4_rules = ICE_AQC_ACL_TCAM_DEPTH;
+
+ memset(¶ms, 0, sizeof(params));
+
+ /* create for IPV4 table */
+ if (acl_ipv4_rules) {
+ if (pf_num < 4)
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 5;
+ else
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
+
+ params.depth = acl_ipv4_rules;
+ params.entry_act_pairs = 1;
+ params.concurr = false;
+
+ err = ice_acl_create_tbl(hw, ¶ms);
+ if (err)
+ return err;
+
+ err = ice_acl_create_scen(hw, params.width, params.depth,
+ &scen_id);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_deinit_acl - Unroll the initialization of the ACL block
+ * @pf: ptr to PF device
+ *
+ * returns 0 on success, negative on error
+ */
+static void ice_deinit_acl(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->adapter->hw;
+
+ ice_acl_destroy_tbl(hw);
+}
+
+/**
+ * ice_del_acl_entry - Delete an ACL rule entry
+ * @hw: pointer to HW instance
+ * @fltr: filter structure
+ *
+ * returns 0 on success and negative value on error
+ */
+static int
+ice_del_acl_entry(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
+{
+ uint64_t entry;
+
+ entry = ice_flow_find_entry(hw, ICE_BLK_ACL, fltr->fltr_id);
+ return ice_flow_rem_entry(hw, ICE_BLK_ACL, entry);
+}
+
+/**
+ * ice_acl_erase_flow_from_hw - Remove a flow from the HW profile tables
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ */
+static void
+ice_acl_erase_flow_from_hw(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ int tun;
+
+ if (!hw->acl_prof || !hw->acl_prof[flow_type])
+ return;
+
+ struct ice_fd_hw_prof *prof = hw->acl_prof[flow_type];
+ for (tun = 0; tun < ICE_FD_HW_SEG_TUN; tun++) {
+ uint64_t prof_id;
+ int j;
+
+ prof_id = flow_type + tun * ICE_FLTR_PTYPE_MAX;
+ for (j = 0; j < prof->cnt; j++) {
+ uint16_t vsi_num;
+
+ if (!prof->entry_h[j][tun] && !prof->vsi_h[j])
+ continue;
+ vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
+ ice_rem_prof_id_flow(hw, ICE_BLK_ACL, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ prof->entry_h[j][tun]);
+ prof->entry_h[j][tun] = 0;
+ }
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, prof_id);
+ }
+}
+
+/**
+ * ice_acl_rem_flow - Release the ice_flow structures for a filter type
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ */
+static void ice_acl_rem_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ struct ice_fd_hw_prof *prof;
+ int tun, i;
+
+ if (!hw->acl_prof || !hw->acl_prof[flow_type])
+ return;
+
+ prof = hw->acl_prof[flow_type];
+
+ ice_acl_erase_flow_from_hw(hw, flow_type);
+ for (i = 0; i < prof->cnt; i++)
+ prof->vsi_h[i] = 0;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ if (!prof->fdir_seg[tun])
+ continue;
+ ice_free(hw, prof->fdir_seg[tun]);
+ prof->fdir_seg[tun] = NULL;
+ }
+ prof->cnt = 0;
+}
+
+/**
+ * ice_acl_update_list_entry - Add or delete a filter from the filter list
+ * @pf: PF structure
+ * @input: filter structure
+ * @fltr_idx: index of filter to modify
+ *
+ * returns 0 on success and negative on errors
+ */
+static int
+ice_acl_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ int fltr_idx)
+{
+ struct ice_fdir_fltr *old_fltr;
+ int err = 0;
+ struct ice_hw *hw;
+
+ hw = &pf->adapter->hw;
+ old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
+ if (old_fltr) {
+ if (old_fltr->acl_fltr) {
+ /* ACL filter */
+ if (!input) {
+ err = ice_del_acl_entry(hw, old_fltr);
+ if (err)
+ return err;
+ }
+ }
+ ice_fdir_update_cntrs(hw, old_fltr->flow_type,
+ old_fltr->acl_fltr, false);
+ /* we just deleted the last filter of flow_type so we
+ * should also delete the HW filter info.
+ */
+ if (old_fltr->acl_fltr && !input &&
+ !hw->acl_fltr_cnt[old_fltr->flow_type])
+ ice_acl_rem_flow(hw, old_fltr->flow_type);
+
+ LIST_DEL(&old_fltr->fltr_node);
+ ice_free(hw, old_fltr);
+ }
+
+ if (!input)
+ return err;
+
+ ice_fdir_list_add_fltr(hw, input);
+ ice_fdir_update_cntrs(hw, input->flow_type, input->acl_fltr, true);
+
+ return 0;
+}
+
+/**
+ * ice_acl_check_input_set - Check that a given ACL input set is valid
+ * @pf: ice PF structure
+ * @filter: pointer to ACL info
+ *
+ * Returns 0 on success.
+ */
+static int
+ice_acl_check_input_set(struct ice_pf *pf, struct ice_acl_info *filter)
+{
+ struct ice_fd_hw_prof *hw_prof = NULL;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_flow_seg_info *old_seg;
+ struct ice_flow_seg_info *seg;
+ enum ice_fltr_ptype fltr_type = filter->input.flow_type;
+ struct ice_hw *hw = &pf->adapter->hw;
+ enum ice_status status;
+ u16 val_loc, mask_loc;
+ struct ice_fdir_v4 *ip4_mask;
+ uint64_t prof_id;
+
+ seg = (struct ice_flow_seg_info *)
+ ice_malloc(hw, sizeof(*seg));
+ if (!seg) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ switch (fltr_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ip4_mask = &filter->input.mask.v4;
+
+ /* make sure we don't have any empty rule */
+ if (!ip4_mask->src_ip && !ip4_mask->src_port &&
+ !ip4_mask->dst_ip && !ip4_mask->dst_port) {
+ goto err_exit;
+
+ /* filtering on TOS not supported */
+ if (ip4_mask->tos)
+ goto err_exit;
+ }
+
+ if (hw->dev_caps.num_funcs < 4) {
+ /* mac source address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.src_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.src_mac);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* mac destination address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.dst_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.dst_mac);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ /* IP source address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ip4_mask = &filter->input.mask.v4;
+
+ if (ip4_mask->tos)
+ goto err_exit;
+ if (ip4_mask->ip_ver)
+ goto err_exit;
+ /* Filtering on Layer 4 protocol not supported */
+ if (ip4_mask->proto)
+ goto err_exit;
+ /* empty rules are not valid */
+ if (!ip4_mask->src_ip && !ip4_mask->dst_ip)
+ goto err_exit;
+
+ if (hw->dev_caps.num_funcs < 4) {
+ /* mac source address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.src_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.src_mac);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* mac destination address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.dst_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.dst_mac);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ if (ip4_mask->src_ip != 0) {
+ /* IP source address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ip.v4.src_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ mask.v4.src_ip);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ if (ip4_mask->dst_ip != 0) {
+ /* IP destination address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ mask.v4.dst_ip);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+ break;
+ default:
+ goto err_exit;
+ }
+
+ switch (fltr_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_ETH);
+ ip4_mask = &filter->input.mask.v4;
+
+ /* Layer 4 source port */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ip.v4.src_port);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ mask.v4.src_port);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ip.v4.dst_port);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ mask.v4.dst_port);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_ETH);
+ ip4_mask = &filter->input.mask.v4;
+
+ /* Layer 4 source port */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ip.v4.src_port);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ mask.v4.src_port);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ip.v4.dst_port);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ mask.v4.dst_port);
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_ETH);
+ break;
+ default:
+ goto err_exit;
+ }
+
+ hw_prof = hw->acl_prof[fltr_type];
+ old_seg = hw_prof->fdir_seg[0];
+ if (old_seg) {
+ /* This flow_type already has an input set.
+ * If it matches the requested input set then we are
+ * done. If it's different then it's an error.
+ */
+ if (!memcmp(old_seg, seg, sizeof(*seg))) {
+ PMD_DRV_LOG(ERR, "Rule already exists!");
+ goto err_exit;
+ }
+
+ /* remove HW filter definition */
+ ice_acl_rem_flow(hw, fltr_type);
+
+ goto err_exit;
+ }
+
+ /* Adding a profile for the given flow specification with no
+ * actions (NULL) and zero actions 0.
+ */
+ prof_id = fltr_type;
+ status = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX, prof_id,
+ seg, 1, NULL, 0, &prof);
+ if (status)
+ goto err_exit;
+
+ hw_prof->fdir_seg[0] = seg;
+ return 0;
+
+err_exit:
+ ice_free(hw, seg);
+ return -EINVAL;
+}
+
+/**
+ * ice_acl_set_input_set - Helper function to set the input set for ACL
+ * @hw: pointer to HW instance
+ * @filter: pointer to ACL info
+ * @input: filter structure
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_acl_set_input_set(struct ice_hw *hw, struct ice_acl_info *filter,
+ struct ice_fdir_fltr *input)
+{
+ if (!input)
+ return ICE_ERR_BAD_PTR;
+
+ input->fltr_id = filter->input.fltr_id;
+ input->q_index = filter->input.q_index;
+ input->dest_vsi = filter->input.dest_vsi;
+ input->dest_ctl = filter->input.dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
+ input->flow_type = filter->input.flow_type;
+
+ switch (input->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
+ input->ip.v4.src_port = filter->input.ip.v4.src_port;
+ input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
+ input->ip.v4.src_ip = filter->input.ip.v4.src_ip;
+
+ input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
+ input->mask.v4.src_port = filter->input.mask.v4.src_port;
+ input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
+ input->mask.v4.src_ip = filter->input.mask.v4.src_ip;
+
+ rte_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ rte_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ rte_memcpy(&input->ip.v4, &filter->input.ip.v4,
+ sizeof(struct ice_fdir_v4));
+ rte_memcpy(&input->mask.v4, &filter->input.mask.v4,
+ sizeof(struct ice_fdir_v4));
+
+ rte_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ rte_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ice_acl_create_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = &ad->hw;
+ struct ice_acl_info *filter = meta;
+ struct ice_fdir_fltr *input;
+ struct ice_flow_action acts[1];
+ int act_cnt;
+ struct ice_fd_hw_prof *hw_prof = NULL;
+ enum ice_block blk = ICE_BLK_ACL;
+ enum ice_fltr_ptype flow_type = filter->input.flow_type;
+ int ret;
+ u64 entry_h = 0;
+ uint64_t prof_id;
+ int i;
+
+ input = rte_zmalloc("acl_entry", sizeof(*input), 0);
+ if (!input) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return -rte_errno;
+ }
+
+ ret = ice_acl_check_input_set(pf, filter);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Profile configure failed.");
+ goto free_input;
+ }
+
+ ret = ice_acl_set_input_set(hw, filter, input);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to set input set.");
+ goto free_input;
+ }
+
+ act_cnt = 1;
+ if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+ acts[0].type = ICE_FLOW_ACT_DROP;
+ acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
+ acts[0].data.acl_act.prio = 0x3;
+ acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+ }
+
+ hw_prof = hw->acl_prof[flow_type];
+ prof_id = flow_type;
+
+ for (i = 0; i < pf->main_vsi->idx; i++) {
+ ret = ice_flow_add_entry(hw, blk, prof_id, input->fltr_id, i,
+ ICE_FLOW_PRIO_NORMAL, input, acts,
+ act_cnt, &entry_h);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Flow add entry error.");
+ goto free_input;
+ }
+ }
+
+ if (!hw_prof->cnt || 0 != hw_prof->vsi_h[hw_prof->cnt - 1]) {
+ hw_prof->vsi_h[hw_prof->cnt] = 0;
+ hw_prof->entry_h[hw_prof->cnt++][0] = entry_h;
+ }
+
+ input->acl_fltr = true;
+ /* input struct is added to the HW filter list */
+ ice_acl_update_list_entry(pf, input, input->fltr_id);
+
+ flow->rule = input;
+
+ return 0;
+
+free_input:
+ rte_free(input);
+ return -rte_errno;
+}
+
+static int
+ice_acl_destroy_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct ice_fdir_fltr *filter = (struct ice_fdir_fltr *)flow->rule;
+ int ret;
+
+ ret = ice_acl_update_list_entry(&ad->pf, NULL, filter->fltr_id);
+
+ flow->rule = NULL;
+ rte_free(filter);
+
+ return ret;
+}
+
+static void
+ice_acl_filter_free(struct rte_flow *flow)
+{
+ rte_free(flow->rule);
+}
+
+static int
+ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct ice_acl_info *filter)
+{
+ uint32_t dest_num = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ if (dest_num == 0 || dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct ice_acl_info *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ uint64_t input_set = ICE_INSET_NONE;
+ uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (eth_spec && eth_mask) {
+ if (rte_is_broadcast_ether_addr(ð_mask->dst) ||
+ rte_is_broadcast_ether_addr(ð_mask->src)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid mac addr mask");
+ return -rte_errno;
+ }
+
+ if (!rte_is_zero_ether_addr(ð_spec->src) &&
+ !rte_is_zero_ether_addr(ð_mask->src)) {
+ input_set |= ICE_INSET_SMAC;
+ rte_memcpy(&filter->input.ext_data.src_mac,
+ ð_spec->src,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&filter->input.ext_mask.src_mac,
+ ð_mask->src,
+ RTE_ETHER_ADDR_LEN);
+ }
+
+ if (!rte_is_zero_ether_addr(ð_spec->dst) &&
+ !rte_is_zero_ether_addr(ð_mask->dst)) {
+ input_set |= ICE_INSET_DMAC;
+ rte_memcpy(&filter->input.ext_data.dst_mac,
+ ð_spec->dst,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&filter->input.ext_mask.dst_mac,
+ ð_mask->dst,
+ RTE_ETHER_ADDR_LEN);
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if ((ipv4_mask->hdr.src_addr == UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr == UINT32_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if ((ipv4_spec->hdr.src_addr != 0) &&
+ (ipv4_mask->hdr.src_addr != 0)) {
+ filter->input.ip.v4.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.mask.v4.src_ip =
+ ipv4_mask->hdr.src_addr;
+
+ input_set |= ICE_INSET_IPV4_SRC;
+ }
+
+ if ((ipv4_spec->hdr.dst_addr != 0) &&
+ (ipv4_mask->hdr.dst_addr != 0)) {
+ filter->input.ip.v4.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.mask.v4.dst_ip =
+ ipv4_mask->hdr.dst_addr;
+
+ input_set |= ICE_INSET_IPV4_DST;
+ }
+ }
+
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if ((tcp_mask->hdr.src_port == UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port == UINT16_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if ((tcp_mask->hdr.src_port == UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port == UINT16_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+ (tcp_spec->hdr.src_port != 0)) {
+ input_set |= ICE_INSET_TCP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ tcp_mask->hdr.src_port;
+ }
+
+ if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+ (tcp_spec->hdr.dst_port != 0)) {
+ input_set |= ICE_INSET_TCP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ tcp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ tcp_mask->hdr.dst_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if ((udp_mask->hdr.src_port == UINT16_MAX) ||
+ (udp_mask->hdr.dst_port == UINT16_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ /* Get filter info */
+ if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+ (udp_spec->hdr.src_port != 0)) {
+ input_set |= ICE_INSET_UDP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ udp_mask->hdr.src_port;
+ }
+
+ if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+ (udp_spec->hdr.dst_port != 0)) {
+ input_set |= ICE_INSET_UDP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ udp_mask->hdr.dst_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
+ filter->input.flow_type = flow_type;
+ filter->input_set = input_set;
+
+ return 0;
+}
+
+/**
+ * ice_acl_parse - parse an rte_flow pattern/action pair into an ACL filter
+ * @ad: ice adapter the flow belongs to
+ * @array: pattern match item table to search (ice_acl_pattern)
+ * @array_len: number of entries in @array
+ * @pattern: flow pattern items supplied by the application
+ * @actions: flow actions supplied by the application
+ * @meta: out parameter; on success points at the parsed filter (&pf->acl)
+ * @error: rte_flow error reporting structure
+ *
+ * Returns 0 on success, -rte_errno (negative) on failure.
+ */
+static int
+ice_acl_parse(struct ice_adapter *ad,
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_acl_info *filter = &pf->acl;
+ struct ice_pattern_match_item *item = NULL;
+ uint64_t input_set;
+ int ret;
+
+ /* Start from a clean filter; previous parse results are discarded. */
+ memset(filter, 0, sizeof(*filter));
+ item = ice_search_pattern_match_item(pattern, array, array_len, error);
+ if (!item)
+  return -rte_errno;
+
+ ret = ice_acl_parse_pattern(ad, pattern, error, filter);
+ if (ret)
+  return ret;
+ input_set = filter->input_set;
+ /* Reject an empty input set or fields the matched pattern forbids. */
+ if (!input_set || input_set & ~item->input_set_mask) {
+  rte_flow_error_set(error, EINVAL,
+       RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+       pattern,
+       "Invalid input set");
+  return -rte_errno;
+ }
+
+ ret = ice_acl_parse_action(ad, actions, error, filter);
+ if (ret)
+  return ret;
+
+ *meta = filter;
+
+ return 0;
+}
+
+/**
+ * ice_acl_init - engine init hook for the ACL flow engine
+ * @ad: ice adapter
+ *
+ * Allocates the ACL profile array, reserves the ACL table/scenario in HW
+ * and registers the ACL flow parser.
+ *
+ * Returns 0 on success, negative errno on failure. Fix vs. original: an
+ * allocation failure previously only set ret = -ENOMEM and fell through
+ * into ice_acl_setup(), overwriting and losing the error.
+ */
+static int
+ice_acl_init(struct ice_adapter *ad)
+{
+ int ret;
+ struct ice_pf *pf = &ad->pf;
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ret = ice_acl_prof_alloc(&ad->hw);
+ if (ret) {
+  PMD_DRV_LOG(ERR, "Cannot allocate memory for "
+   "ACL profile.");
+  /* Bail out: continuing would operate on NULL profile state. */
+  return -ENOMEM;
+ }
+
+ ret = ice_acl_setup(pf);
+ if (ret)
+  return ret;
+
+ return ice_register_parser(parser, ad);
+}
+
+/* Release every per-flow-type ACL profile, then the profile array itself. */
+static void
+ice_acl_prof_free(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype pt = ICE_FLTR_PTYPE_NONF_NONE + 1;
+
+ while (pt < ICE_FLTR_PTYPE_MAX) {
+  rte_free(hw->acl_prof[pt]);
+  hw->acl_prof[pt] = NULL;
+  pt++;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+}
+
+/**
+ * ice_acl_uninit - engine uninit hook: unregister the ACL parser, then
+ * tear down the ACL table state and the profile array.
+ * @ad: ice adapter
+ */
+static void
+ice_acl_uninit(struct ice_adapter *ad)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ice_unregister_parser(parser, ad);
+
+ ice_deinit_acl(pf);
+ ice_acl_prof_free(&ad->hw);
+}
+
+/* ACL flow engine: plugs the ACL init/uninit/create/destroy paths into
+ * the generic ice flow framework.
+ */
+static struct
+ice_flow_engine ice_acl_engine = {
+ .init = ice_acl_init,
+ .uninit = ice_acl_uninit,
+ .create = ice_acl_create_filter,
+ .destroy = ice_acl_destroy_filter,
+ .free = ice_acl_filter_free,
+ .type = ICE_FLOW_ENGINE_ACL,
+};
+
+/* Parser feeding the ACL engine; runs at the distributor stage and
+ * recognizes the patterns listed in ice_acl_pattern.
+ */
+static struct
+ice_flow_parser ice_acl_parser = {
+ .engine = &ice_acl_engine,
+ .array = ice_acl_pattern,
+ .array_len = RTE_DIM(ice_acl_pattern),
+ .parse_pattern_action = ice_acl_parse,
+ .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
+/* Register the ACL engine with the flow framework at DSO load time. */
+RTE_INIT(ice_acl_engine_init)
+{
+ struct ice_flow_engine *engine = &ice_acl_engine;
+ ice_register_flow_engine(engine);
+}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 13f4167..63fe716 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -393,6 +393,14 @@ struct ice_hash_gtpu_ctx {
struct ice_hash_cfg ipv6_tcp;
};
+/**
+ * A structure used to define fields of ACL related info.
+ */
+struct ice_acl_info {
+ struct ice_fdir_fltr input;
+ uint64_t input_set;
+};
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -416,6 +424,7 @@ struct ice_pf {
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
uint16_t fdir_qp_offset;
struct ice_fdir_info fdir; /* flow director info */
+ struct ice_acl_info acl; /* ACL info */
struct ice_hash_gtpu_ctx gtpu_hash_ctx;
uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 54b0316..1429cbc 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1896,6 +1896,8 @@ enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = {
TAILQ_INSERT_TAIL(list, parser_node, node);
else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
TAILQ_INSERT_HEAD(list, parser_node, node);
+ else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
+ TAILQ_INSERT_HEAD(list, parser_node, node);
else
return -EINVAL;
}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index e6fe744..3217463 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -10,7 +10,8 @@ sources = files(
'ice_switch_filter.c',
'ice_generic_flow.c',
'ice_fdir_filter.c',
- 'ice_hash.c'
+ 'ice_hash.c',
+ 'ice_acl_filter.c'
)
deps += ['hash', 'net', 'common_iavf']
--
1.8.3.1
^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v1 2/3] net/ice: add devarg for ACL ipv4 rule number
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 2/3] net/ice: add devarg for ACL ipv4 rule number Simei Su
@ 2020-09-10 7:53 ` Wang, Haiyue
0 siblings, 0 replies; 25+ messages in thread
From: Wang, Haiyue @ 2020-09-10 7:53 UTC (permalink / raw)
To: Su, Simei, Zhang, Qi Z, Yang, Qiming; +Cc: dev, Xing, Beilei
> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Thursday, September 10, 2020 15:38
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Su,
> Simei <simei.su@intel.com>
> Subject: [PATCH v1 2/3] net/ice: add devarg for ACL ipv4 rule number
>
> This patch enables devargs for ACL ipv4 rule number and refactor
> DCF capability selection API to be more flexible.
>
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
>
> static int
> -ice_dcf_cap_check_handler(__rte_unused const char *key,
> - const char *value, __rte_unused void *opaque)
> +handle_dcf_arg(__rte_unused const char *key, const char *value,
> + __rte_unused void *arg)
> {
> - if (strcmp(value, "dcf"))
> - return -1;
> + bool *dcf = arg;
> +
> + if (arg == NULL || value == NULL)
> + return -EINVAL;
> +
> + if (strcmp(value, "dcf") == 0)
> + *dcf = true;
> + else
> + *dcf = false;
>
> return 0;
> }
>
> -static int
> -ice_dcf_cap_selected(struct rte_devargs *devargs)
> +static bool
> +check_cap_dcf_enable(struct rte_devargs *devargs)
> {
> struct rte_kvargs *kvlist;
> - const char *key = "cap";
> - int ret = 0;
> + bool enable = false;
>
> if (devargs == NULL)
> - return 0;
> + return false;
>
> kvlist = rte_kvargs_parse(devargs->args, NULL);
> if (kvlist == NULL)
> - return 0;
> -
> - if (!rte_kvargs_count(kvlist, key))
> - goto exit;
> -
> - /* dcf capability selected when there's a key-value pair: cap=dcf */
> - if (rte_kvargs_process(kvlist, key,
> - ice_dcf_cap_check_handler, NULL) < 0)
> - goto exit;
> + return false;
>
> - ret = 1;
> + rte_kvargs_process(kvlist, ICE_DCF_CAP, handle_dcf_arg, &enable);
>
> -exit:
> rte_kvargs_free(kvlist);
> - return ret;
> +
> + return enable;
> }
>
> static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
> struct rte_pci_device *pci_dev)
> {
> - if (!ice_dcf_cap_selected(pci_dev->device.devargs))
> - return 1;
> + if (!check_cap_dcf_enable(pci_dev->device.devargs))
> + return 1; /* continue to probe */
>
I didn't see how this makes the DCF capability selection more flexible. ;-)
And you add the unneeded comment "/* continue to probe */" that I did before.
> return rte_eth_dev_pci_generic_probe(pci_dev,
> sizeof(struct ice_dcf_adapter),
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> index 758caa8..13f4167 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -447,6 +447,7 @@ struct ice_devargs {
> int pipe_mode_support;
> int flow_mark_support;
> uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
> + int acl_ipv4_rules_num;
> };
>
> /**
> --
> 1.8.3.1
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capability
2020-09-10 7:37 [dpdk-dev] [PATCH v1 0/3] net/ice: support DCF ACL capability Simei Su
` (2 preceding siblings ...)
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 3/3] net/ice: support ACL filter in DCF Simei Su
@ 2020-09-29 1:56 ` Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 1/4] net/ice/base: change API from static to non-static Simei Su
` (4 more replies)
3 siblings, 5 replies; 25+ messages in thread
From: Simei Su @ 2020-09-29 1:56 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
[PATCH v2 1/4] change API from static to non-static.
[PATCH v2 2/4] get PF VSI map for DCF ACL rule.
[PATCH v2 3/4] support IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and DROP action for DCF ACL.
[PATCH v2 4/4] add devargs support for DCF ACL IPV4 rule number.
v2:
* Add release notes.
* Adjust patch sequence.
* Refactor ACL design and related structure.
* Add bitmap mechanism to allocate entry dynamically.
Simei Su (4):
net/ice/base: change API from static to non-static
net/ice: get PF VSI map
net/ice: support ACL filter in DCF
net/ice: add devarg for ACL ipv4 rule number
doc/guides/rel_notes/release_20_11.rst | 5 +
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +
drivers/net/ice/ice_acl_filter.c | 1096 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_ethdev.c | 100 ++-
drivers/net/ice/ice_dcf_parent.c | 37 +-
drivers/net/ice/ice_ethdev.h | 18 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
11 files changed, 1241 insertions(+), 27 deletions(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 1/4] net/ice/base: change API from static to non-static
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capabiltiy Simei Su
@ 2020-09-29 1:56 ` Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 2/4] net/ice: get PF VSI map Simei Su
` (3 subsequent siblings)
4 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-09-29 1:56 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
This patch changes static API "ice_flow_assoc_prof" to non-static
API in order to let it be used by other files.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index de5dfb2..80ac0b6 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -2125,7 +2125,7 @@ ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 0a52409..698a230 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -499,6 +499,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle);
+enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
enum ice_status
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 2/4] net/ice: get PF VSI map
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capabiltiy Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 1/4] net/ice/base: change API from static to non-static Simei Su
@ 2020-09-29 1:56 ` Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 3/4] net/ice: support ACL filter in DCF Simei Su
` (2 subsequent siblings)
4 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-09-29 1:56 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
This patch gets PF vsi number when issuing ACL rule in DCF.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 +++++++++++++++++++++++++++++++++++--
3 files changed, 37 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 2d803c5..d20e2b3 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -318,6 +318,7 @@ ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
}
hw->num_vfs = vsi_map->num_vfs;
+ hw->pf_vsi_id = vsi_map->pf_vsi;
}
if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index a44a01e..ff02996 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -43,6 +43,7 @@ struct ice_dcf_hw {
uint16_t num_vfs;
uint16_t *vf_vsi_map;
+ uint16_t pf_vsi_id;
struct virtchnl_version_info virtchnl_version;
struct virtchnl_vf_resource *vf_res; /* VF resource */
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c5dfdd3..30ead4c 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -78,6 +78,35 @@ ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
}
+/**
+ * ice_dcf_update_pf_vsi_map - record the PF VSI number in the VSI context
+ * @hw: ice HW structure owning the vsi_ctx array
+ * @pf_vsi_idx: software VSI handle (index into hw->vsi_ctx)
+ * @pf_vsi_num: hardware VSI number reported by the PF
+ *
+ * Allocates the context entry on first use; on allocation failure the map
+ * is simply left un-updated (logged, not propagated).
+ */
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+ uint16_t pf_vsi_num)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+  PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+  return;
+ }
+
+ vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+ if (!vsi_ctx)
+  vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+ if (!vsi_ctx) {
+  PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+   pf_vsi_idx);
+  return;
+ }
+
+ vsi_ctx->vsi_num = pf_vsi_num;
+ hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+ /* NOTE(review): message says "VF%u" but pf_vsi_idx is the PF main
+  * VSI index here — wording looks copy-pasted from the VF path.
+  */
+ PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+  pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
static void*
ice_dcf_vsi_update_service_handler(void *param)
{
@@ -368,14 +397,18 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ parent_adapter->pf.main_vsi->idx = hw->num_vfs;
+ ice_dcf_update_pf_vsi_map(parent_hw,
+ parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
+
+ ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
err = ice_flow_init(parent_adapter);
if (err) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
goto uninit_hw;
}
- ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
-
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
if (rte_is_valid_assigned_ether_addr(mac))
rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 3/4] net/ice: support ACL filter in DCF
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capabiltiy Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 1/4] net/ice/base: change API from static to non-static Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 2/4] net/ice: get PF VSI map Simei Su
@ 2020-09-29 1:56 ` Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 4/4] net/ice: add devarg for ACL ipv4 rule number Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capability Simei Su
4 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-09-29 1:56 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, Simei Su, Xuan Ding
Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter
to destroy a rule. If a flow is matched by ACL filter, filter rule
will be set to HW. Currently IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and drop action are supported.
Signed-off-by: Simei Su <simei.su@intel.com>
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
doc/guides/rel_notes/release_20_11.rst | 5 +
drivers/net/ice/ice_acl_filter.c | 1088 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_ethdev.h | 17 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
5 files changed, 1114 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 96d8c14..1d3afb2 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -90,6 +90,11 @@ New Features
* Added support for flexible descriptor metadata extraction.
+* **Updated the Intel ice driver.**
+
+ Updated the Intel ice driver with new features and improvements, including:
+
+ * Added acl filter support for Intel DCF.
Removed Items
-------------
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
new file mode 100644
index 0000000..758362a
--- /dev/null
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -0,0 +1,1088 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include "base/ice_type.h"
+#include "base/ice_acl.h"
+#include "ice_logs.h"
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "base/ice_flow.h"
+
+#define MAX_ACL_SLOTS_ID 2048
+
+#define ICE_ACL_INSET_ETH_IPV4 ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+static struct ice_flow_parser ice_acl_parser;
+
+static struct
+ice_pattern_match_item ice_acl_pattern[] = {
+ {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
+};
+
+/**
+ * ice_acl_prof_alloc - allocate the per-flow-type ACL profile array
+ * @hw: pointer to the HW struct
+ *
+ * Lazily allocates hw->acl_prof plus one ice_fd_hw_prof per flow type.
+ * On partial failure, every profile allocated so far is released again.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int
+ice_acl_prof_alloc(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype, fltr_ptype;
+
+ if (!hw->acl_prof) {
+  hw->acl_prof = (struct ice_fd_hw_prof **)
+   ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+   sizeof(*hw->acl_prof));
+  if (!hw->acl_prof)
+   return -ENOMEM;
+ }
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+  ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+  if (!hw->acl_prof[ptype]) {
+   hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
+    ice_malloc(hw, sizeof(**hw->acl_prof));
+   if (!hw->acl_prof[ptype])
+    goto fail_mem;
+  }
+ }
+
+ return 0;
+
+fail_mem:
+ /* Unwind: free only the entries allocated before the failure. */
+ for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+  fltr_ptype < ptype; fltr_ptype++) {
+  rte_free(hw->acl_prof[fltr_ptype]);
+  hw->acl_prof[fltr_ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_acl_setup - Reserve and initialize the ACL resources
+ * @pf: board private structure
+ *
+ * Creates the ACL TCAM table and one scenario spanning it. The key width
+ * is doubled when fewer than four functions are present (presumably more
+ * TCAM resources per function are available then — confirm against HW
+ * documentation).
+ *
+ * Returns 0 on success, negative on error. Fix vs. original: restored
+ * "&params" at both call sites, which had been corrupted by HTML-entity
+ * mangling into "¶ms" ("&para;" + "ms").
+ */
+static int
+ice_acl_setup(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ u32 pf_num = hw->dev_caps.num_funcs;
+ struct ice_acl_tbl_params params;
+ u16 scen_id;
+ int err;
+
+ memset(&params, 0, sizeof(params));
+
+ /* create for IPV4 table */
+ if (pf_num < 4)
+  params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
+ else
+  params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
+
+ params.depth = ICE_AQC_ACL_TCAM_DEPTH;
+ params.entry_act_pairs = 1;
+ params.concurr = false;
+
+ err = ice_acl_create_tbl(hw, &params);
+ if (err)
+  return err;
+
+ err = ice_acl_create_scen(hw, params.width, params.depth,
+  &scen_id);
+ if (err)
+  return err;
+
+ return 0;
+}
+
+/**
+ * ice_deinit_acl - Unroll the initialization of the ACL block
+ * @pf: ptr to PF device
+ *
+ * Destroys the ACL table in HW, then releases the host-side table state.
+ * (Original header claimed "returns 0 on success" — the function is void.)
+ */
+static void ice_deinit_acl(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ ice_acl_destroy_tbl(hw);
+
+ rte_free(hw->acl_tbl);
+ hw->acl_tbl = NULL;
+}
+
+/**
+ * ice_acl_erase_flow_from_hw - Remove a flow from the HW profile tables
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ *
+ * For every tunnel segment and every recorded VSI entry, removes the HW
+ * flow entry and profile association, then removes the profile itself
+ * from the ACL block.
+ */
+static void
+ice_acl_erase_flow_from_hw(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ int tun;
+
+ /* Nothing programmed for this flow type. */
+ if (!hw->acl_prof || !hw->acl_prof[flow_type])
+  return;
+
+ struct ice_fd_hw_prof *prof = hw->acl_prof[flow_type];
+ for (tun = 0; tun < ICE_FD_HW_SEG_TUN; tun++) {
+  uint64_t prof_id;
+  int j;
+
+  /* Profile id is unique per (flow type, tunnel segment) pair. */
+  prof_id = flow_type + tun * ICE_FLTR_PTYPE_MAX;
+  for (j = 0; j < prof->cnt; j++) {
+   uint16_t vsi_num;
+
+   /* Skip slots with neither an entry nor a VSI recorded. */
+   if (!prof->entry_h[j][tun] && !prof->vsi_h[j])
+    continue;
+   vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
+   ice_rem_prof_id_flow(hw, ICE_BLK_ACL, vsi_num, prof_id);
+   ice_flow_rem_entry(hw, ICE_BLK_ACL,
+      prof->entry_h[j][tun]);
+   prof->entry_h[j][tun] = 0;
+  }
+  ice_flow_rem_prof(hw, ICE_BLK_ACL, prof_id);
+ }
+}
+
+/**
+ * ice_acl_rem_flow - Release the ice_flow structures for a filter type
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ *
+ * Erases the flow from HW first, then resets the host-side bookkeeping:
+ * VSI handles, cached flow segments and the entry counter.
+ */
+static void ice_acl_rem_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ struct ice_fd_hw_prof *prof;
+ int tun, i;
+
+ if (!hw->acl_prof || !hw->acl_prof[flow_type])
+  return;
+
+ prof = hw->acl_prof[flow_type];
+
+ ice_acl_erase_flow_from_hw(hw, flow_type);
+ for (i = 0; i < prof->cnt; i++)
+  prof->vsi_h[i] = 0;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+  if (!prof->fdir_seg[tun])
+   continue;
+  rte_free(prof->fdir_seg[tun]);
+  prof->fdir_seg[tun] = NULL;
+ }
+ prof->cnt = 0;
+}
+
+/**
+ * acl_prof_helper_function - program the extraction fields of one segment
+ * @hw: pointer to the HW struct (reads dev_caps.num_funcs)
+ * @seg: flow segment info to fill
+ * @is_l4: true when L4 source/destination port fields must be added
+ * @src_port: ICE_FLOW_FIELD_IDX_* index for the L4 source port (used when
+ *            @is_l4 is true)
+ * @dst_port: ICE_FLOW_FIELD_IDX_* index for the L4 destination port
+ *
+ * MAC source/destination fields are only added when fewer than four
+ * functions are present, mirroring the wider key size chosen in
+ * ice_acl_setup().
+ *
+ * Always returns 0. Fix vs. original: dropped the stray ';' that
+ * followed the function's closing brace.
+ */
+static int
+acl_prof_helper_function(struct ice_hw *hw, struct ice_flow_seg_info *seg,
+ bool is_l4, uint16_t src_port, uint16_t dst_port)
+{
+ uint16_t val_loc, mask_loc;
+
+ if (hw->dev_caps.num_funcs < 4) {
+  /* mac source address */
+  val_loc = offsetof(struct ice_fdir_fltr,
+   ext_data.src_mac);
+  mask_loc = offsetof(struct ice_fdir_fltr,
+   ext_mask.src_mac);
+  ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+   val_loc, mask_loc,
+   ICE_FLOW_FLD_OFF_INVAL, false);
+
+  /* mac destination address */
+  val_loc = offsetof(struct ice_fdir_fltr,
+   ext_data.dst_mac);
+  mask_loc = offsetof(struct ice_fdir_fltr,
+   ext_mask.dst_mac);
+  ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+   val_loc, mask_loc,
+   ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ /* IP source address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
+  mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
+  mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ if (is_l4) {
+  /* Layer 4 source port */
+  val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
+  mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
+  ice_flow_set_fld(seg, src_port, val_loc,
+   mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+  /* Layer 4 destination port */
+  val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
+  mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
+  ice_flow_set_fld(seg, dst_port, val_loc,
+   mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_acl_prof_init - Initialize ACL profiles
+ * @pf: ice PF structure
+ *
+ * Adds one ACL flow profile per supported flow type (IPv4 other, UDP,
+ * TCP, SCTP) and associates each profile with every VSI index below the
+ * PF main VSI index.
+ *
+ * Fixes vs. original: the four segment allocations and the return codes
+ * of ice_flow_add_prof() are now checked instead of being ignored.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+ice_acl_prof_init(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_prof *prof_1 = NULL;
+ struct ice_flow_prof *prof_2 = NULL;
+ struct ice_flow_prof *prof_3 = NULL;
+ struct ice_flow_prof *prof_4 = NULL;
+ struct ice_flow_seg_info *seg_1 = NULL;
+ struct ice_flow_seg_info *seg_2 = NULL;
+ struct ice_flow_seg_info *seg_3 = NULL;
+ struct ice_flow_seg_info *seg_4 = NULL;
+ int i, ret;
+
+ seg_1 = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*seg_1));
+ seg_2 = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*seg_2));
+ seg_3 = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*seg_3));
+ seg_4 = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*seg_4));
+ /* Original dereferenced these unchecked; bail out cleanly on OOM. */
+ if (!seg_1 || !seg_2 || !seg_3 || !seg_4) {
+  ret = -ENOMEM;
+  goto err;
+ }
+
+ ICE_FLOW_SET_HDRS(seg_1, ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg_1, false, 0, 0);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+   ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+   seg_1, 1, NULL, 0, &prof_1);
+ if (ret)
+  goto err;
+ seg_1 = NULL; /* may be retained by prof_1; do not free below */
+
+ ICE_FLOW_SET_HDRS(seg_2, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg_2, true,
+   ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+   ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+   ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+   seg_2, 1, NULL, 0, &prof_2);
+ if (ret)
+  goto err;
+ seg_2 = NULL;
+
+ ICE_FLOW_SET_HDRS(seg_3, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg_3, true,
+   ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+   ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+   ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+   seg_3, 1, NULL, 0, &prof_3);
+ if (ret)
+  goto err;
+ seg_3 = NULL;
+
+ ICE_FLOW_SET_HDRS(seg_4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg_4, true,
+   ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+   ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+   ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+   seg_4, 1, NULL, 0, &prof_4);
+ if (ret)
+  goto err;
+ seg_4 = NULL;
+
+ for (i = 0; i < pf->main_vsi->idx; i++) {
+  /* NOTE(review): association status is still ignored, as in the
+   * original — confirm whether failures here should unwind.
+   */
+  ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_1, i);
+  ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_2, i);
+  ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_3, i);
+  ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_4, i);
+ }
+
+ return 0;
+
+err:
+ /* Free only segments not yet handed to a profile (rte_free(NULL) is
+  * a no-op).
+  */
+ rte_free(seg_1);
+ rte_free(seg_2);
+ rte_free(seg_3);
+ rte_free(seg_4);
+ return ret;
+}
+
+/**
+ * ice_acl_set_input_set - copy the parsed ACL filter into an fdir input
+ * @filter: parsed ACL configuration (source)
+ * @input: fdir filter structure to fill in (destination)
+ *
+ * Return error value or 0 on success.
+ * (Original header documented a "@hw" parameter the function never had.)
+ */
+static int
+ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
+{
+ if (!input)
+  return ICE_ERR_BAD_PTR;
+
+ input->q_index = filter->input.q_index;
+ input->dest_vsi = filter->input.dest_vsi;
+ input->dest_ctl = filter->input.dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->flow_type = filter->input.flow_type;
+
+ switch (input->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+  /* L4 flow types: copy addresses and ports field by field. */
+  input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
+  input->ip.v4.src_port = filter->input.ip.v4.src_port;
+  input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
+  input->ip.v4.src_ip = filter->input.ip.v4.src_ip;
+
+  input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
+  input->mask.v4.src_port = filter->input.mask.v4.src_port;
+  input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
+  input->mask.v4.src_ip = filter->input.mask.v4.src_ip;
+
+  rte_memcpy(&input->ext_data.src_mac,
+   &filter->input.ext_data.src_mac,
+   RTE_ETHER_ADDR_LEN);
+  rte_memcpy(&input->ext_mask.src_mac,
+   &filter->input.ext_mask.src_mac,
+   RTE_ETHER_ADDR_LEN);
+
+  rte_memcpy(&input->ext_data.dst_mac,
+   &filter->input.ext_data.dst_mac,
+   RTE_ETHER_ADDR_LEN);
+  rte_memcpy(&input->ext_mask.dst_mac,
+   &filter->input.ext_mask.dst_mac,
+   RTE_ETHER_ADDR_LEN);
+
+  break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+  /* IPv4-other: the whole v4 value/mask structs copy wholesale. */
+  rte_memcpy(&input->ip.v4, &filter->input.ip.v4,
+   sizeof(struct ice_fdir_v4));
+  rte_memcpy(&input->mask.v4, &filter->input.mask.v4,
+   sizeof(struct ice_fdir_v4));
+
+  rte_memcpy(&input->ext_data.src_mac,
+   &filter->input.ext_data.src_mac,
+   RTE_ETHER_ADDR_LEN);
+  rte_memcpy(&input->ext_mask.src_mac,
+   &filter->input.ext_mask.src_mac,
+   RTE_ETHER_ADDR_LEN);
+
+  rte_memcpy(&input->ext_data.dst_mac,
+   &filter->input.ext_data.dst_mac,
+   RTE_ETHER_ADDR_LEN);
+  rte_memcpy(&input->ext_mask.dst_mac,
+   &filter->input.ext_mask.dst_mac,
+   RTE_ETHER_ADDR_LEN);
+
+  break;
+ default:
+  return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Host-side bookkeeping for one installed ACL rule: the flow type it was
+ * programmed with, up to four bitmap slot ids (an IPV4_OTHER rule installs
+ * an entry for every flow type — see ice_acl_create_filter) and the fdir
+ * input used to build it.
+ */
+struct acl_rule {
+ enum ice_fltr_ptype flow_type;
+ uint32_t entry_id[4];
+ struct ice_fdir_fltr *input;
+};
+
+/* Allocate slot_id from bitmap table.
+ *
+ * NOTE(review): on scan failure this returns -rte_errno through a uint32_t
+ * return type, which wraps to a large positive value that callers cannot
+ * distinguish from a valid slot id — the error contract is worth reworking.
+ */
+static inline uint32_t
+ice_acl_alloc_slot_id(struct rte_bitmap *slots, struct rte_flow_error *error)
+{
+ uint32_t pos = 0;
+ uint64_t slab = 0;
+ uint32_t i = 0;
+
+ /* Restart the scan from bit 0 before searching for a set bit. */
+ __rte_bitmap_scan_init(slots);
+ if (!rte_bitmap_scan(slots, &pos, &slab)) {
+  rte_flow_error_set(error, ENOMEM,
+    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+    "Failed with init acl bitmap.");
+  return -rte_errno;
+ }
+ /* pos is the slab base; add the offset of the first set bit. */
+ i = rte_bsf64(slab);
+ pos += i;
+ rte_bitmap_clear(slots, pos);
+
+ return pos;
+}
+
+/* Program one HW ACL entry for @ptype and record its slot in @rule at
+ * position @idx. The HW entry handle is stashed in pf->hw_entry_id[]
+ * keyed by the allocated slot so destroy can find it again.
+ */
+static int
+ice_acl_hw_add_entry(struct ice_pf *pf, enum ice_fltr_ptype ptype,
+		     struct ice_fdir_fltr *input,
+		     struct ice_flow_action *acts, int act_cnt,
+		     struct acl_rule *rule, int idx,
+		     struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	uint64_t hw_entry;
+	uint64_t entry_id;
+	uint32_t slot_id;
+
+	slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+	/* Entry id encodes the ptype in the upper 32 bits, slot below. */
+	entry_id = ((uint64_t)ptype << 32) | slot_id;
+	/* NOTE(review): ice_flow_add_entry() status is not checked here,
+	 * matching the original behavior; consider handling failures.
+	 */
+	ice_flow_add_entry(hw, ICE_BLK_ACL, ptype, entry_id,
+			   pf->main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+			   input, acts, act_cnt, &hw_entry);
+	rule->entry_id[idx] = slot_id;
+	pf->hw_entry_id[slot_id] = hw_entry;
+	return 0;
+}
+
+/**
+ * Create an ACL rule: translate the parsed filter config into an
+ * FDIR-style input, program the HW entries (four for IPV4_OTHER, one
+ * otherwise) and attach the bookkeeping acl_rule to the rte_flow.
+ *
+ * Fixes vs. original: both allocations are checked, the function
+ * bails out (instead of continuing) when ice_acl_set_input_set()
+ * fails, the unsupported-ptype branch returns an error (EINVAL, not
+ * ENOMEM) instead of reporting success, and all error paths release
+ * the partially built rule/input instead of leaking them.
+ *
+ * @return 0 on success, -rte_errno on failure (@error is populated).
+ */
+static int
+ice_acl_create_filter(struct ice_adapter *ad,
+		      struct rte_flow *flow,
+		      void *meta,
+		      struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_acl_conf *filter = meta;
+	struct acl_rule *rule;
+	struct ice_fdir_fltr *input;
+	struct ice_flow_action acts[1];
+	int act_cnt, ret;
+	enum ice_fltr_ptype flow_type = filter->input.flow_type;
+
+	rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
+	input = rte_zmalloc("acl_entry", sizeof(*input), 0);
+	if (!rule || !input) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		ret = -rte_errno;
+		goto err_free;
+	}
+
+	ret = ice_acl_set_input_set(filter, input);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "failed to set input set.");
+		ret = -rte_errno;
+		goto err_free;
+	}
+
+	/* Only DROP is produced by ice_acl_parse_action, so acts[0] is
+	 * always initialized before use.
+	 */
+	act_cnt = 1;
+	if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+		acts[0].type = ICE_FLOW_ACT_DROP;
+		acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
+		acts[0].data.acl_act.prio = 0x3;
+		acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+	}
+
+	input->acl_fltr = true;
+
+	switch (flow_type) {
+	/* For IPV4_OTHER type, should add entry for all types. */
+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+		ice_acl_hw_add_entry(pf, ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+				     input, acts, act_cnt, rule, 0, error);
+		ice_acl_hw_add_entry(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+				     input, acts, act_cnt, rule, 1, error);
+		ice_acl_hw_add_entry(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+				     input, acts, act_cnt, rule, 2, error);
+		ice_acl_hw_add_entry(pf, ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+				     input, acts, act_cnt, rule, 3, error);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+		ice_acl_hw_add_entry(pf, flow_type, input, acts, act_cnt,
+				     rule, 0, error);
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "error flow type");
+		ret = -rte_errno;
+		goto err_free;
+	}
+	rule->flow_type = flow_type;
+	rule->input = input;
+	flow->rule = (void *)rule;
+
+	return 0;
+
+err_free:
+	/* rte_free(NULL) is a no-op, so both frees are always safe. */
+	rte_free(input);
+	rte_free(rule);
+	return ret;
+}
+
+/**
+ * Destroy an ACL rule: return its slot ids to the free bitmap, remove
+ * the HW entries (all four for IPV4_OTHER) and the per-ptype flow,
+ * then release the rule's memory.
+ *
+ * Fixes vs. original: rule->input was never freed (leak), and one
+ * branch used '&ad->hw' while the rest used the equivalent
+ * ICE_PF_TO_HW(pf) handle -- now consistent.
+ */
+static int
+ice_acl_destroy_filter(struct ice_adapter *ad,
+		       struct rte_flow *flow,
+		       struct rte_flow_error *error __rte_unused)
+{
+	struct acl_rule *rule = (struct acl_rule *)flow->rule;
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	uint32_t slot_id;
+	int ret = 0;
+	int i;
+
+	switch (rule->flow_type) {
+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+		/* IPV4_OTHER programmed one entry per L4 ptype. */
+		for (i = 0; i < 4; i++) {
+			slot_id = rule->entry_id[i];
+			rte_bitmap_set(pf->slots, slot_id);
+			ice_flow_rem_entry(hw, ICE_BLK_ACL,
+					   pf->hw_entry_id[slot_id]);
+		}
+		ice_acl_rem_flow(hw, rule->flow_type);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+		slot_id = rule->entry_id[0];
+		rte_bitmap_set(pf->slots, slot_id);
+		ice_flow_rem_entry(hw, ICE_BLK_ACL,
+				   pf->hw_entry_id[slot_id]);
+		ice_acl_rem_flow(hw, rule->flow_type);
+		break;
+	default:
+		break;
+	}
+
+	/* The rule owns its input copy; freeing only flow->rule leaked it. */
+	rte_free(rule->input);
+	rte_free(flow->rule);
+	flow->rule = NULL;
+	return ret;
+}
+
+/* Release a rule's memory without touching HW state (used on teardown
+ * paths where the entries are torn down elsewhere).
+ * Fix vs. original: also free the rule-owned input (leak) and clear
+ * flow->rule so a later destroy/free cannot double-free it.
+ */
+static void
+ice_acl_filter_free(struct rte_flow *flow)
+{
+	struct acl_rule *rule = (struct acl_rule *)flow->rule;
+
+	/* destroy may already have released the rule (flow->rule == NULL) */
+	if (rule)
+		rte_free(rule->input);
+	rte_free(rule);
+	flow->rule = NULL;
+}
+
+/* Parse the action list for an ACL rule. Only a single DROP action
+ * (plus any number of VOIDs) is accepted; anything else is rejected.
+ *
+ * @return 0 on success, -rte_errno on failure (@error is populated).
+ */
+static int
+ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
+		     const struct rte_flow_action actions[],
+		     struct rte_flow_error *error,
+		     struct ice_acl_conf *filter)
+{
+	uint32_t dest_num = 0;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
+			continue;
+
+		if (actions->type != RTE_FLOW_ACTION_TYPE_DROP) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+					   "Invalid action.");
+			return -rte_errno;
+		}
+
+		dest_num++;
+		filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+	}
+
+	/* Exactly one destination action must have been seen. */
+	if (dest_num != 1) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/**
+ * Parse a flow pattern (ETH / IPV4 / TCP / UDP / SCTP items) into the
+ * ACL filter configuration and the matched input-set bitmap.
+ *
+ * All-ones (exact-match) address/port masks are rejected: such rules
+ * belong to other engines, ACL only takes partial masks. The resulting
+ * flow ptype is IPV4_OTHER unless an L4 item refines it.
+ *
+ * Fix vs. original: an identical TCP src/dst port-mask check was
+ * duplicated back to back; the second copy is removed. The archive-
+ * garbled '&eth_*' references are also restored.
+ *
+ * @return 0 on success, -rte_errno on failure (@error is populated).
+ */
+static int
+ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
+		      const struct rte_flow_item pattern[],
+		      struct rte_flow_error *error,
+		      struct ice_acl_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = ICE_INSET_NONE;
+	enum ice_fltr_ptype flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				/* Full-mask MAC match is not an ACL rule. */
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst) ||
+				    rte_is_broadcast_ether_addr(&eth_mask->src)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid mac addr mask");
+					return -rte_errno;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_spec->src) &&
+				    !rte_is_zero_ether_addr(&eth_mask->src)) {
+					input_set |= ICE_INSET_SMAC;
+					rte_memcpy(&filter->input.ext_data.src_mac,
+						   &eth_spec->src,
+						   RTE_ETHER_ADDR_LEN);
+					rte_memcpy(&filter->input.ext_mask.src_mac,
+						   &eth_mask->src,
+						   RTE_ETHER_ADDR_LEN);
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_spec->dst) &&
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
+					input_set |= ICE_INSET_DMAC;
+					rte_memcpy(&filter->input.ext_data.dst_mac,
+						   &eth_spec->dst,
+						   RTE_ETHER_ADDR_LEN);
+					rte_memcpy(&filter->input.ext_mask.dst_mac,
+						   &eth_mask->dst,
+						   RTE_ETHER_ADDR_LEN);
+				}
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (ipv4_spec && ipv4_mask) {
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+				    ipv4_mask->hdr.total_length ||
+				    ipv4_mask->hdr.packet_id ||
+				    ipv4_mask->hdr.fragment_offset ||
+				    ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				/* Exact-match addresses are not ACL rules. */
+				if ((ipv4_mask->hdr.src_addr == UINT32_MAX) ||
+				    (ipv4_mask->hdr.dst_addr == UINT32_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if ((ipv4_spec->hdr.src_addr != 0) &&
+				    (ipv4_mask->hdr.src_addr != 0)) {
+					filter->input.ip.v4.src_ip =
+						ipv4_spec->hdr.src_addr;
+					filter->input.mask.v4.src_ip =
+						ipv4_mask->hdr.src_addr;
+
+					input_set |= ICE_INSET_IPV4_SRC;
+				}
+
+				if ((ipv4_spec->hdr.dst_addr != 0) &&
+				    (ipv4_mask->hdr.dst_addr != 0)) {
+					filter->input.ip.v4.dst_ip =
+						ipv4_spec->hdr.dst_addr;
+					filter->input.mask.v4.dst_ip =
+						ipv4_mask->hdr.dst_addr;
+
+					input_set |= ICE_INSET_IPV4_DST;
+				}
+			}
+
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (tcp_spec && tcp_mask) {
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+				    tcp_mask->hdr.recv_ack ||
+				    tcp_mask->hdr.data_off ||
+				    tcp_mask->hdr.tcp_flags ||
+				    tcp_mask->hdr.rx_win ||
+				    tcp_mask->hdr.cksum ||
+				    tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				/* Exact-match ports are not ACL rules.
+				 * (An identical second copy of this check
+				 * was removed.)
+				 */
+				if ((tcp_mask->hdr.src_port == UINT16_MAX) ||
+				    (tcp_mask->hdr.dst_port == UINT16_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (tcp_spec->hdr.src_port != 0)) {
+					input_set |= ICE_INSET_TCP_SRC_PORT;
+					filter->input.ip.v4.src_port =
+						tcp_spec->hdr.src_port;
+					filter->input.mask.v4.src_port =
+						tcp_mask->hdr.src_port;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (tcp_spec->hdr.dst_port != 0)) {
+					input_set |= ICE_INSET_TCP_DST_PORT;
+					filter->input.ip.v4.dst_port =
+						tcp_spec->hdr.dst_port;
+					filter->input.mask.v4.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (udp_spec && udp_mask) {
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if ((udp_mask->hdr.src_port == UINT16_MAX) ||
+				    (udp_mask->hdr.dst_port == UINT16_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (udp_spec->hdr.src_port != 0)) {
+					input_set |= ICE_INSET_UDP_SRC_PORT;
+					filter->input.ip.v4.src_port =
+						udp_spec->hdr.src_port;
+					filter->input.mask.v4.src_port =
+						udp_mask->hdr.src_port;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (udp_spec->hdr.dst_port != 0)) {
+					input_set |= ICE_INSET_UDP_DST_PORT;
+					filter->input.ip.v4.dst_port =
+						udp_spec->hdr.dst_port;
+					filter->input.mask.v4.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (sctp_spec && sctp_mask) {
+				if ((sctp_mask->hdr.src_port == UINT16_MAX) ||
+				    (sctp_mask->hdr.dst_port == UINT16_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (sctp_spec->hdr.src_port != 0)) {
+					input_set |= ICE_INSET_SCTP_SRC_PORT;
+					filter->input.ip.v4.src_port =
+						sctp_spec->hdr.src_port;
+					filter->input.mask.v4.src_port =
+						sctp_mask->hdr.src_port;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (sctp_spec->hdr.dst_port != 0)) {
+					input_set |= ICE_INSET_SCTP_DST_PORT;
+					filter->input.ip.v4.dst_port =
+						sctp_spec->hdr.dst_port;
+					filter->input.mask.v4.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	filter->input.flow_type = flow_type;
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+/**
+ * Top-level ACL parse entry: match the pattern against the supported
+ * pattern array, fill the PF-embedded filter config from the pattern
+ * and actions, and hand the config back through @meta.
+ *
+ * @return 0 on success, -rte_errno on failure (@error is populated).
+ */
+static int
+ice_acl_parse(struct ice_adapter *ad,
+	      struct ice_pattern_match_item *array,
+	      uint32_t array_len,
+	      const struct rte_flow_item pattern[],
+	      const struct rte_flow_action actions[],
+	      void **meta,
+	      struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_acl_conf *filter = &pf->acl.conf;
+	struct ice_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+	item = ice_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = ice_acl_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+	input_set = filter->input_set;
+	/* Reject an empty input set or fields outside the matched
+	 * pattern's supported mask.
+	 */
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = ice_acl_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+/* The success path intentionally falls through here too: 'item' is
+ * always freed and ret (0 on success) is returned.
+ */
+error:
+	rte_free(item);
+	return ret;
+}
+
+/* Allocate and initialize the ACL slot bitmap; all bits start set,
+ * i.e. every slot free.
+ * Fix vs. original: the backing memory was leaked when
+ * rte_bitmap_init_with_all_set() failed.
+ *
+ * @return 0 on success, -rte_errno on failure.
+ */
+static int
+ice_acl_bitmap_init(struct ice_pf *pf)
+{
+	uint32_t bmp_size;
+	void *mem = NULL;
+	struct rte_bitmap *slots;
+
+	bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
+	mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+	if (mem == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
+		return -rte_errno;
+	}
+
+	slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
+	if (slots == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
+		rte_free(mem);	/* was leaked on this path */
+		return -rte_errno;
+	}
+	pf->slots = slots;
+
+	return 0;
+}
+
+/* One-time ACL engine init: allocate SW profile storage, create the
+ * HW ACL table/scenario, set up the slot bitmap, build the profiles
+ * and register the flow parser.
+ *
+ * NOTE(review): resources acquired by earlier steps are not rolled
+ * back when a later step fails -- confirm whether uninit or device
+ * teardown covers this.
+ */
+static int
+ice_acl_init(struct ice_adapter *ad)
+{
+	int ret = 0;
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_flow_parser *parser = &ice_acl_parser;
+
+	ret = ice_acl_prof_alloc(hw);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
+			    "ACL profile.");
+		return -ENOMEM;
+	}
+
+	ret = ice_acl_setup(pf);
+	if (ret)
+		return ret;
+
+	ret = ice_acl_bitmap_init(pf);
+	if (ret)
+		return ret;
+
+	ret = ice_acl_prof_init(pf);
+	if (ret)
+		return ret;
+
+	return ice_register_parser(parser, ad);
+}
+
+/* Release every per-ptype ACL profile, then the profile array itself.
+ * Pointers are nulled after free so repeated calls are harmless.
+ */
+static void
+ice_acl_prof_free(struct ice_hw *hw)
+{
+	enum ice_fltr_ptype ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+
+	while (ptype < ICE_FLTR_PTYPE_MAX) {
+		rte_free(hw->acl_prof[ptype]);
+		hw->acl_prof[ptype] = NULL;
+		ptype++;
+	}
+
+	rte_free(hw->acl_prof);
+	hw->acl_prof = NULL;
+}
+
+/* Engine teardown: unregister the parser, then release the HW ACL
+ * resources and the SW profile storage.
+ */
+static void
+ice_acl_uninit(struct ice_adapter *ad)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_flow_parser *parser = &ice_acl_parser;
+
+	ice_unregister_parser(parser, ad);
+
+	ice_deinit_acl(pf);
+	ice_acl_prof_free(hw);
+}
+
+/* ACL flow engine ops registered with the generic flow framework. */
+static struct
+ice_flow_engine ice_acl_engine = {
+	.init = ice_acl_init,
+	.uninit = ice_acl_uninit,
+	.create = ice_acl_create_filter,
+	.destroy = ice_acl_destroy_filter,
+	.free = ice_acl_filter_free,
+	.type = ICE_FLOW_ENGINE_ACL,
+};
+
+/* Parser binding the supported ACL patterns to the engine at the
+ * distributor stage.
+ */
+static struct
+ice_flow_parser ice_acl_parser = {
+	.engine = &ice_acl_engine,
+	.array = ice_acl_pattern,
+	.array_len = RTE_DIM(ice_acl_pattern),
+	.parse_pattern_action = ice_acl_parse,
+	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
+/* Constructor: register the ACL engine with the framework at load time. */
+RTE_INIT(ice_acl_engine_init)
+{
+	struct ice_flow_engine *engine = &ice_acl_engine;
+	ice_register_flow_engine(engine);
+}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 37b956e..38de4c6 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -49,6 +49,8 @@
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
#define ICE_MAX_PKG_FILENAME_SIZE 256
+#define MAX_ACL_ENTRIES 512
+
/**
* vlan_id is a 12 bit number.
* The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -398,6 +400,18 @@ struct ice_hash_gtpu_ctx {
struct ice_hash_cfg ipv6_tcp;
};
+/* ACL filter configuration: reuses the FDIR input representation plus
+ * the matched input-set bitmap (ICE_INSET_* flags).
+ */
+struct ice_acl_conf {
+	struct ice_fdir_fltr input;
+	uint64_t input_set;
+};
+
+/**
+ * A structure used to define fields of ACL related info.
+ */
+struct ice_acl_info {
+	struct ice_acl_conf conf;
+};
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -421,6 +435,7 @@ struct ice_pf {
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
uint16_t fdir_qp_offset;
struct ice_fdir_info fdir; /* flow director info */
+ struct ice_acl_info acl; /* ACL info */
struct ice_hash_gtpu_ctx gtpu_hash_ctx;
uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
@@ -440,6 +455,8 @@ struct ice_pf {
uint64_t old_rx_bytes;
uint64_t old_tx_bytes;
uint64_t supported_rxdid; /* bitmap for supported RXDID */
+ struct rte_bitmap *slots;
+ uint64_t hw_entry_id[MAX_ACL_ENTRIES];
};
#define ICE_MAX_QUEUE_NUM 2048
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 54b0316..1429cbc 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1896,6 +1896,8 @@ ice_register_parser(struct ice_flow_parser *parser,
TAILQ_INSERT_TAIL(list, parser_node, node);
else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
TAILQ_INSERT_HEAD(list, parser_node, node);
+ else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
+ TAILQ_INSERT_HEAD(list, parser_node, node);
else
return -EINVAL;
}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 99e1b77..254595a 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -10,7 +10,8 @@ sources = files(
'ice_switch_filter.c',
'ice_generic_flow.c',
'ice_fdir_filter.c',
- 'ice_hash.c'
+ 'ice_hash.c',
+ 'ice_acl_filter.c'
)
deps += ['hash', 'net', 'common_iavf']
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 4/4] net/ice: add devarg for ACL ipv4 rule number
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capabiltiy Simei Su
` (2 preceding siblings ...)
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 3/4] net/ice: support ACL filter in DCF Simei Su
@ 2020-09-29 1:56 ` Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capabiltiy Simei Su
4 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-09-29 1:56 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang; +Cc: dev, haiyue.wang, beilei.xing, Simei Su
This patch enables devargs for the ACL IPv4 rule number and refactors
the DCF capability selection API to support more than just the DCF capability.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_acl_filter.c | 42 +++++++++-------
drivers/net/ice/ice_dcf_ethdev.c | 100 ++++++++++++++++++++++++++++++---------
drivers/net/ice/ice_ethdev.h | 1 +
3 files changed, 103 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
index 758362a..6e12774 100644
--- a/drivers/net/ice/ice_acl_filter.c
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -102,29 +102,37 @@ ice_acl_setup(struct ice_pf *pf)
struct ice_hw *hw = ICE_PF_TO_HW(pf);
u32 pf_num = hw->dev_caps.num_funcs;
struct ice_acl_tbl_params params;
+ int acl_ipv4_rules = 0;
u16 scen_id;
int err = 0;
- memset(¶ms, 0, sizeof(params));
-
- /* create for IPV4 table */
- if (pf_num < 4)
- params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
+ if (pf->adapter->devargs.acl_ipv4_rules_num)
+ acl_ipv4_rules = pf->adapter->devargs.acl_ipv4_rules_num;
else
- params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
-
- params.depth = ICE_AQC_ACL_TCAM_DEPTH;
- params.entry_act_pairs = 1;
- params.concurr = false;
+ acl_ipv4_rules = ICE_AQC_ACL_TCAM_DEPTH;
- err = ice_acl_create_tbl(hw, ¶ms);
- if (err)
- return err;
+ memset(¶ms, 0, sizeof(params));
- err = ice_acl_create_scen(hw, params.width, params.depth,
- &scen_id);
- if (err)
- return err;
+ /* create for IPV4 table */
+ if (acl_ipv4_rules) {
+ if (pf_num < 4)
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
+ else
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
+
+ params.depth = acl_ipv4_rules;
+ params.entry_act_pairs = 1;
+ params.concurr = false;
+
+ err = ice_acl_create_tbl(hw, ¶ms);
+ if (err)
+ return err;
+
+ err = ice_acl_create_scen(hw, params.width, params.depth,
+ &scen_id);
+ if (err)
+ return err;
+ }
return 0;
}
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 2faed3c..98f9766 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -26,6 +26,16 @@
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"
+/* devargs */
+#define ICE_DCF_CAP "cap"
+#define ICE_DCF_ACL_IPV4_RULES_NUM "acl_ipv4_nums"
+
+static const char * const ice_dcf_valid_args[] = {
+ ICE_DCF_CAP,
+ ICE_DCF_ACL_IPV4_RULES_NUM,
+ NULL,
+};
+
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **bufs,
@@ -895,9 +905,51 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
};
static int
+parse_int(__rte_unused const char *key, const char *value, void *args)
+{
+ int *i = (int *)args;
+ char *end;
+ int num;
+
+ num = strtoul(value, &end, 10);
+ *i = num;
+
+ return 0;
+}
+
+static int ice_dcf_parse_devargs(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct ice_adapter *parent_adapter = &adapter->parent;
+
+ struct rte_devargs *devargs = dev->device->devargs;
+ struct rte_kvargs *kvlist;
+ int ret;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, ice_dcf_valid_args);
+ if (kvlist == NULL) {
+ PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
+ return -EINVAL;
+ }
+
+ ret = rte_kvargs_process(kvlist, ICE_DCF_ACL_IPV4_RULES_NUM,
+ &parse_int, &parent_adapter->devargs.acl_ipv4_rules_num);
+ if (ret)
+ goto bail;
+
+bail:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
+ int ret;
eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
@@ -908,6 +960,12 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+ ret = ice_dcf_parse_devargs(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to parse devargs");
+ return -EINVAL;
+ }
+
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
@@ -932,48 +990,44 @@ ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
}
static int
-ice_dcf_cap_check_handler(__rte_unused const char *key,
- const char *value, __rte_unused void *opaque)
+handle_dcf_arg(__rte_unused const char *key, const char *value,
+ __rte_unused void *arg)
{
- if (strcmp(value, "dcf"))
- return -1;
+ bool *dcf = arg;
+
+ if (arg == NULL || value == NULL)
+ return -EINVAL;
+
+ if (strcmp(value, "dcf") == 0)
+ *dcf = true;
+ else
+ *dcf = false;
return 0;
}
-static int
-ice_dcf_cap_selected(struct rte_devargs *devargs)
+static bool
+check_cap_dcf_enable(struct rte_devargs *devargs)
{
struct rte_kvargs *kvlist;
- const char *key = "cap";
- int ret = 0;
+ bool enable = false;
if (devargs == NULL)
- return 0;
+ return false;
kvlist = rte_kvargs_parse(devargs->args, NULL);
if (kvlist == NULL)
- return 0;
-
- if (!rte_kvargs_count(kvlist, key))
- goto exit;
-
- /* dcf capability selected when there's a key-value pair: cap=dcf */
- if (rte_kvargs_process(kvlist, key,
- ice_dcf_cap_check_handler, NULL) < 0)
- goto exit;
+ return false;
- ret = 1;
+ rte_kvargs_process(kvlist, ICE_DCF_CAP, handle_dcf_arg, &enable);
-exit:
- rte_kvargs_free(kvlist);
- return ret;
+ return enable;
}
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev)
{
- if (!ice_dcf_cap_selected(pci_dev->device.devargs))
+ if (!check_cap_dcf_enable(pci_dev->device.devargs))
return 1;
return rte_eth_dev_pci_generic_probe(pci_dev,
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 38de4c6..a804182 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -469,6 +469,7 @@ struct ice_devargs {
uint8_t proto_xtr_dflt;
int pipe_mode_support;
uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
+ int acl_ipv4_rules_num;
};
/**
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capabiltiy
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capabiltiy Simei Su
` (3 preceding siblings ...)
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 4/4] net/ice: add devarg for ACL ipv4 rule number Simei Su
@ 2020-10-14 8:54 ` Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 1/3] net/ice/base: change API from static to non-static Simei Su
` (3 more replies)
4 siblings, 4 replies; 25+ messages in thread
From: Simei Su @ 2020-10-14 8:54 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
[PATCH v3 1/3] change API from static to non-static.
[PATCH v3 2/3] get PF VSI map for DCF ACL rule.
[PATCH v3 3/3] support IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and DROP action for DCF ACL.
v3:
* Optimize code logic in ice_acl_prof_init and ice_acl_create_filter.
* Fix several bugs.
v2:
* Add release notes.
* Adjust patch sequence.
* Refactor ACL design and related structure.
* Add bitmap mechanism to allocate entry dynamically.
Simei Su (3):
net/ice/base: change API from static to non-static
net/ice: get PF VSI map
net/ice: support ACL filter in DCF
doc/guides/rel_notes/release_20_11.rst | 6 +
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +
drivers/net/ice/ice_acl_filter.c | 1034 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 +-
drivers/net/ice/ice_ethdev.h | 17 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
10 files changed, 1102 insertions(+), 4 deletions(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v3 1/3] net/ice/base: change API from static to non-static
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capabiltiy Simei Su
@ 2020-10-14 8:54 ` Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 2/3] net/ice: get PF VSI map Simei Su
` (2 subsequent siblings)
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-14 8:54 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
This patch changes static API "ice_flow_assoc_prof" to non-static
API in order to let it be used by other files.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index de5dfb2..80ac0b6 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -2125,7 +2125,7 @@ ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 0a52409..698a230 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -499,6 +499,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle);
+enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
enum ice_status
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v3 2/3] net/ice: get PF VSI map
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capabiltiy Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 1/3] net/ice/base: change API from static to non-static Simei Su
@ 2020-10-14 8:54 ` Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 3/3] net/ice: support ACL filter in DCF Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capabiltiy Simei Su
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-14 8:54 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
This patch gets the PF VSI number when issuing an ACL rule in DCF.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 +++++++++++++++++++++++++++++++++++--
3 files changed, 37 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 2d803c5..d20e2b3 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -318,6 +318,7 @@ ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
}
hw->num_vfs = vsi_map->num_vfs;
+ hw->pf_vsi_id = vsi_map->pf_vsi;
}
if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index a44a01e..ff02996 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -43,6 +43,7 @@ struct ice_dcf_hw {
uint16_t num_vfs;
uint16_t *vf_vsi_map;
+ uint16_t pf_vsi_id;
struct virtchnl_version_info virtchnl_version;
struct virtchnl_vf_resource *vf_res; /* VF resource */
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c5dfdd3..30ead4c 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -78,6 +78,35 @@ ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
}
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+ uint16_t pf_vsi_num)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+ PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+ if (!vsi_ctx)
+ vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+ if (!vsi_ctx) {
+ PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+ pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx->vsi_num = pf_vsi_num;
+ hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+ PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+ pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
static void*
ice_dcf_vsi_update_service_handler(void *param)
{
@@ -368,14 +397,18 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ parent_adapter->pf.main_vsi->idx = hw->num_vfs;
+ ice_dcf_update_pf_vsi_map(parent_hw,
+ parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
+
+ ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
err = ice_flow_init(parent_adapter);
if (err) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
goto uninit_hw;
}
- ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
-
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
if (rte_is_valid_assigned_ether_addr(mac))
rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v3 3/3] net/ice: support ACL filter in DCF
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 1/3] net/ice/base: change API from static to non-static Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 2/3] net/ice: get PF VSI map Simei Su
@ 2020-10-14 8:54 ` Simei Su
2020-10-15 5:10 ` Zhang, Qi Z
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capability Simei Su
3 siblings, 1 reply; 25+ messages in thread
From: Simei Su @ 2020-10-14 8:54 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter
to destroy a rule. If a flow is matched by the ACL filter, the filter
rule will be programmed to HW. Currently the IPV4/IPV4_UDP/IPV4_TCP/
IPV4_SCTP patterns and the drop action are supported.
Signed-off-by: Simei Su <simei.su@intel.com>
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
doc/guides/rel_notes/release_20_11.rst | 6 +
drivers/net/ice/ice_acl_filter.c | 1034 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_ethdev.h | 17 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
5 files changed, 1061 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index e8ae4d4..6cf1ef8 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -189,6 +189,12 @@ New Features
* Added support for flexible descriptor metadata extraction.
+* **Updated the Intel ice driver.**
+
+ Updated the Intel ice driver with new features and improvements, including:
+
+ * Added acl filter support for Intel DCF.
+
Removed Items
-------------
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
new file mode 100644
index 0000000..92fbd53
--- /dev/null
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include "base/ice_type.h"
+#include "base/ice_acl.h"
+#include "ice_logs.h"
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "base/ice_flow.h"
+
+#define MAX_ACL_SLOTS_ID 2048
+
+#define ICE_ACL_INSET_ETH_IPV4 ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+static struct ice_flow_parser ice_acl_parser;
+
+static struct
+ice_pattern_match_item ice_acl_pattern[] = {
+ {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
+};
+
+static int
+ice_acl_prof_alloc(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype, fltr_ptype;
+
+ if (!hw->acl_prof) {
+ hw->acl_prof = (struct ice_fd_hw_prof **)
+ ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+ sizeof(*hw->acl_prof));
+ if (!hw->acl_prof)
+ return -ENOMEM;
+ }
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+ if (!hw->acl_prof[ptype]) {
+ hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
+ ice_malloc(hw, sizeof(**hw->acl_prof));
+ if (!hw->acl_prof[ptype])
+ goto fail_mem;
+ }
+ }
+
+ return 0;
+
+fail_mem:
+ for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ fltr_ptype < ptype; fltr_ptype++) {
+ rte_free(hw->acl_prof[fltr_ptype]);
+ hw->acl_prof[fltr_ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_acl_setup - Reserve and initialize the ACL resources
+ * @pf: board private structure
+ */
+static int
+ice_acl_setup(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ uint32_t pf_num = hw->dev_caps.num_funcs;
+ struct ice_acl_tbl_params params;
+ uint16_t scen_id;
+ int err = 0;
+
+ memset(¶ms, 0, sizeof(params));
+
+ /* create for IPV4 table */
+ if (pf_num < 4)
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
+ else
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
+
+ params.depth = ICE_AQC_ACL_TCAM_DEPTH;
+ params.entry_act_pairs = 1;
+ params.concurr = false;
+
+ err = ice_acl_create_tbl(hw, ¶ms);
+ if (err)
+ return err;
+
+ err = ice_acl_create_scen(hw, params.width, params.depth,
+ &scen_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_deinit_acl - Unroll the initialization of the ACL block
+ * @pf: ptr to PF device
+ *
+ * returns 0 on success, negative on error
+ */
+static void ice_deinit_acl(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ ice_acl_destroy_tbl(hw);
+
+ rte_free(hw->acl_tbl);
+ hw->acl_tbl = NULL;
+
+ if (pf->slots) {
+ rte_free(pf->slots);
+ pf->slots = NULL;
+ }
+}
+
+/**
+ * ice_acl_erase_flow_from_hw - Remove a flow from the HW profile tables
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ */
+static void
+ice_acl_erase_flow_from_hw(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ int tun;
+
+ if (!hw->acl_prof || !hw->acl_prof[flow_type])
+ return;
+
+ struct ice_fd_hw_prof *prof = hw->acl_prof[flow_type];
+ for (tun = 0; tun < ICE_FD_HW_SEG_TUN; tun++) {
+ uint64_t prof_id;
+ int j;
+
+ prof_id = flow_type + tun * ICE_FLTR_PTYPE_MAX;
+ for (j = 0; j < prof->cnt; j++) {
+ uint16_t vsi_num;
+
+ if (!prof->entry_h[j][tun] && !prof->vsi_h[j])
+ continue;
+ vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
+ ice_rem_prof_id_flow(hw, ICE_BLK_ACL, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ prof->entry_h[j][tun]);
+ prof->entry_h[j][tun] = 0;
+ }
+ }
+}
+
+/**
+ * ice_acl_rem_flow - Release the ice_flow structures for a filter type
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ */
+static void ice_acl_rem_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ struct ice_fd_hw_prof *prof;
+ int tun, i;
+
+ if (!hw->acl_prof || !hw->acl_prof[flow_type])
+ return;
+
+ prof = hw->acl_prof[flow_type];
+
+ ice_acl_erase_flow_from_hw(hw, flow_type);
+ for (i = 0; i < prof->cnt; i++)
+ prof->vsi_h[i] = 0;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ if (!prof->fdir_seg[tun])
+ continue;
+ rte_free(prof->fdir_seg[tun]);
+ prof->fdir_seg[tun] = NULL;
+ }
+ prof->cnt = 0;
+}
+
+static void
+acl_prof_helper_function(struct ice_hw *hw, struct ice_flow_seg_info *seg,
+ bool is_l4, uint16_t src_port, uint16_t dst_port)
+{
+ uint16_t val_loc, mask_loc;
+
+ if (hw->dev_caps.num_funcs < 4) {
+ /* mac source address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.src_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.src_mac);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* mac destination address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.dst_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.dst_mac);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ /* IP source address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ if (is_l4) {
+ /* Layer 4 source port */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
+ ice_flow_set_fld(seg, src_port, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
+ ice_flow_set_fld(seg, dst_port, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+}
+
+/**
+ * ice_acl_prof_init - Initialize ACL profile
+ * @pf: ice PF structure
+ *
+ * Returns 0 on success.
+ */
+static int
+ice_acl_prof_init(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_prof *prof_ipv4 = NULL;
+ struct ice_flow_prof *prof_ipv4_udp = NULL;
+ struct ice_flow_prof *prof_ipv4_tcp = NULL;
+ struct ice_flow_prof *prof_ipv4_sctp = NULL;
+ struct ice_flow_seg_info *seg;
+ int i;
+ int ret;
+
+ seg = (struct ice_flow_seg_info *)
+ ice_malloc(hw, sizeof(*seg));
+
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg, false, 0, 0);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ seg, 1, NULL, 0, &prof_ipv4);
+ if (ret)
+ goto err_add_prof;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ seg, 1, NULL, 0, &prof_ipv4_udp);
+ if (ret)
+ goto err_add_prof;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ seg, 1, NULL, 0, &prof_ipv4_tcp);
+ if (ret)
+ goto err_add_prof;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_prof_helper_function(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ seg, 1, NULL, 0, &prof_ipv4_sctp);
+ if (ret)
+ goto err_add_prof;
+
+ for (i = 0; i < pf->main_vsi->idx; i++) {
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4, i);
+ if (ret)
+ goto err_add_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
+ if (ret)
+ goto err_add_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_tcp, i);
+ if (ret)
+ goto err_add_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_sctp, i);
+ if (ret)
+ goto err_add_prof;
+ }
+
+ return 0;
+
+err_add_prof:
+ ice_free(hw, seg);
+ return ret;
+}
+
+/**
+ * ice_acl_set_input_set - Helper function to set the input set for ACL
+ * @hw: pointer to HW instance
+ * @filter: pointer to ACL info
+ * @input: filter structure
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
+{
+ if (!input)
+ return ICE_ERR_BAD_PTR;
+
+ input->q_index = filter->input.q_index;
+ input->dest_vsi = filter->input.dest_vsi;
+ input->dest_ctl = filter->input.dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->flow_type = filter->input.flow_type;
+
+ switch (input->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
+ input->ip.v4.src_port = filter->input.ip.v4.src_port;
+ input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
+ input->ip.v4.src_ip = filter->input.ip.v4.src_ip;
+
+ input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
+ input->mask.v4.src_port = filter->input.mask.v4.src_port;
+ input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
+ input->mask.v4.src_ip = filter->input.mask.v4.src_ip;
+
+ rte_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ rte_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ rte_memcpy(&input->ip.v4, &filter->input.ip.v4,
+ sizeof(struct ice_fdir_v4));
+ rte_memcpy(&input->mask.v4, &filter->input.mask.v4,
+ sizeof(struct ice_fdir_v4));
+
+ rte_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ rte_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct acl_rule {
+ enum ice_fltr_ptype flow_type;
+ uint32_t entry_id[4];
+};
+
+static inline int
+ice_acl_alloc_slot_id(struct rte_bitmap *slots, uint32_t *slot_id)
+{
+ uint32_t pos = 0;
+ uint64_t slab = 0;
+ uint32_t i = 0;
+
+ __rte_bitmap_scan_init(slots);
+ if (!rte_bitmap_scan(slots, &pos, &slab))
+ return -rte_errno;
+
+ i = rte_bsf64(slab);
+ pos += i;
+ rte_bitmap_clear(slots, pos);
+
+ *slot_id = pos;
+ return 0;
+}
+
+static inline int
+ice_acl_hw_set_conf(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ struct ice_flow_action *acts, struct acl_rule *rule,
+ enum ice_fltr_ptype flow_type, int32_t entry_idx)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ enum ice_block blk = ICE_BLK_ACL;
+ uint64_t entry_id, hw_entry;
+ uint32_t slot_id = 0;
+ int act_cnt = 1;
+ int ret = 0;
+
+ /* Allocate slot_id from bitmap table. */
+ ret = ice_acl_alloc_slot_id(pf->slots, &slot_id);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to alloc slot id.");
+ return ret;
+ }
+
+ /* For IPV4_OTHER type, should add entry for all types.
+ * For IPV4_UDP/TCP/SCTP type, only add entry for each.
+ */
+ if (slot_id < MAX_ACL_ENTRIES) {
+ entry_id = ((uint64_t)flow_type << 32) | slot_id;
+ ret = ice_flow_add_entry(hw, blk,
+ flow_type,
+ entry_id, pf->main_vsi->idx,
+ ICE_FLOW_PRIO_NORMAL, input, acts,
+ act_cnt, &hw_entry);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to add entry.");
+ return ret;
+ }
+
+ rule->entry_id[entry_idx] = slot_id;
+ pf->hw_entry_id[slot_id] = hw_entry;
+ } else {
+ PMD_DRV_LOG(ERR, "Exceed the maximum entry number"
+ " HW supported");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+ice_acl_create_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error)
+{
+ struct ice_acl_conf *filter = meta;
+ enum ice_fltr_ptype flow_type = filter->input.flow_type;
+ struct ice_flow_action acts[1];
+ struct ice_pf *pf = &ad->pf;
+ struct ice_fdir_fltr *input;
+ struct acl_rule *rule;
+ int ret;
+
+ rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
+ if (!rule) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for acl rule");
+ return -rte_errno;
+ }
+
+ input = rte_zmalloc("acl_entry", sizeof(*input), 0);
+ if (!input) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for acl input");
+ ret = -rte_errno;
+ goto err_acl_input_alloc;
+ }
+
+ ret = ice_acl_set_input_set(filter, input);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to set input set.");
+ ret = -rte_errno;
+ goto err_acl_set_input;
+ }
+
+ if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+ acts[0].type = ICE_FLOW_ACT_DROP;
+ acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
+ acts[0].data.acl_act.prio = 0x3;
+ acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+ }
+
+ input->acl_fltr = true;
+
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule, flow_type, 0);
+ if (ret)
+ goto err_acl_set_input;
+
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
+ if (ret)
+ goto err_acl_set_input;
+
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, 2);
+ if (ret)
+ goto err_acl_set_input;
+
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 3);
+ if (ret)
+ goto err_acl_set_input;
+
+ }
+
+ rule->flow_type = flow_type;
+ flow->rule = rule;
+ return 0;
+
+err_acl_set_input:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to add entry.");
+ ret = -rte_errno;
+ rte_free(input);
+err_acl_input_alloc:
+ rte_free(rule);
+ return ret;
+}
+
+static int
+ice_acl_destroy_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct acl_rule *rule = (struct acl_rule *)flow->rule;
+ uint32_t slot_id, i;
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret = 0;
+
+ switch (rule->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ for (i = 0; i < 4; i++) {
+ slot_id = rule->entry_id[i];
+ rte_bitmap_set(pf->slots, slot_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ pf->hw_entry_id[slot_id]);
+ }
+ ice_acl_rem_flow(hw, rule->flow_type);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ slot_id = rule->entry_id[0];
+ rte_bitmap_set(pf->slots, slot_id);
+ ice_flow_rem_entry(&ad->hw, ICE_BLK_ACL,
+ pf->hw_entry_id[slot_id]);
+ ice_acl_rem_flow(hw, rule->flow_type);
+ break;
+ default:
+ break;
+ }
+
+ flow->rule = NULL;
+ rte_free(rule);
+ return ret;
+}
+
+static void
+ice_acl_filter_free(struct rte_flow *flow)
+{
+ rte_free(flow->rule);
+ flow->rule = NULL;
+}
+
+static int
+ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct ice_acl_conf *filter)
+{
+ uint32_t dest_num = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ if (dest_num == 0 || dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct ice_acl_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ uint64_t input_set = ICE_INSET_NONE;
+ uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (eth_spec && eth_mask) {
+ if (!rte_is_zero_ether_addr(ð_spec->src) &&
+ !rte_is_zero_ether_addr(ð_mask->src)) {
+ input_set |= ICE_INSET_SMAC;
+ rte_memcpy(&filter->input.ext_data.src_mac,
+ ð_spec->src,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&filter->input.ext_mask.src_mac,
+ ð_mask->src,
+ RTE_ETHER_ADDR_LEN);
+ }
+
+ if (!rte_is_zero_ether_addr(ð_spec->dst) &&
+ !rte_is_zero_ether_addr(ð_mask->dst)) {
+ input_set |= ICE_INSET_DMAC;
+ rte_memcpy(&filter->input.ext_data.dst_mac,
+ ð_spec->dst,
+ RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&filter->input.ext_mask.dst_mac,
+ ð_mask->dst,
+ RTE_ETHER_ADDR_LEN);
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr) {
+ filter->input.ip.v4.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.mask.v4.src_ip =
+ ipv4_mask->hdr.src_addr;
+
+ input_set |= ICE_INSET_IPV4_SRC;
+ }
+
+ if (ipv4_mask->hdr.dst_addr) {
+ filter->input.ip.v4.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.mask.v4.dst_ip =
+ ipv4_mask->hdr.dst_addr;
+
+ input_set |= ICE_INSET_IPV4_DST;
+ }
+ }
+
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ tcp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_TCP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ tcp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ tcp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_TCP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ tcp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ tcp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ udp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_UDP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ udp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ udp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_UDP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ udp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+
+ if (sctp_spec && sctp_mask) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ sctp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_SCTP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ sctp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ sctp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_SCTP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ sctp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
+ filter->input.flow_type = flow_type;
+ filter->input_set = input_set;
+
+ return 0;
+}
+
+static int
+ice_acl_parse(struct ice_adapter *ad,
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_acl_conf *filter = &pf->acl.conf;
+ struct ice_pattern_match_item *item = NULL;
+ uint64_t input_set;
+ int ret;
+
+ memset(filter, 0, sizeof(*filter));
+ item = ice_search_pattern_match_item(pattern, array, array_len, error);
+ if (!item)
+ return -rte_errno;
+
+ ret = ice_acl_parse_pattern(ad, pattern, error, filter);
+ if (ret)
+ goto error;
+ input_set = filter->input_set;
+ if (!input_set || input_set & ~item->input_set_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ pattern,
+ "Invalid input set");
+ ret = -rte_errno;
+ goto error;
+ }
+
+ ret = ice_acl_parse_action(ad, actions, error, filter);
+ if (ret)
+ goto error;
+
+ if (meta)
+ *meta = filter;
+
+error:
+ rte_free(item);
+ return ret;
+}
+
+static int
+ice_acl_bitmap_init(struct ice_pf *pf)
+{
+ uint32_t bmp_size;
+ void *mem = NULL;
+ struct rte_bitmap *slots;
+ int ret = 0;
+ bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
+ mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
+ return -rte_errno;
+ }
+
+ slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
+ if (slots == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to initialize acl bitmap.");
+ ret = -rte_errno;
+ goto err_acl_mem_alloc;
+ }
+ pf->slots = slots;
+ return 0;
+
+err_acl_mem_alloc:
+ rte_free(mem);
+
+ return ret;
+}
+
+static int
+ice_acl_init(struct ice_adapter *ad)
+{
+ int ret = 0;
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ret = ice_acl_prof_alloc(hw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Cannot allocate memory for "
+ "ACL profile.");
+ return -ENOMEM;
+ }
+
+ ret = ice_acl_setup(pf);
+ if (ret)
+ return ret;
+
+ ret = ice_acl_bitmap_init(pf);
+ if (ret)
+ return ret;
+
+ ret = ice_acl_prof_init(pf);
+ if (ret)
+ return ret;
+
+ return ice_register_parser(parser, ad);
+}
+
+static void
+ice_acl_prof_free(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype;
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+ rte_free(hw->acl_prof[ptype]);
+ hw->acl_prof[ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+}
+
+static void
+ice_acl_uninit(struct ice_adapter *ad)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ice_unregister_parser(parser, ad);
+
+ ice_deinit_acl(pf);
+ ice_acl_prof_free(hw);
+}
+
+static struct
+ice_flow_engine ice_acl_engine = {
+ .init = ice_acl_init,
+ .uninit = ice_acl_uninit,
+ .create = ice_acl_create_filter,
+ .destroy = ice_acl_destroy_filter,
+ .free = ice_acl_filter_free,
+ .type = ICE_FLOW_ENGINE_ACL,
+};
+
+static struct
+ice_flow_parser ice_acl_parser = {
+ .engine = &ice_acl_engine,
+ .array = ice_acl_pattern,
+ .array_len = RTE_DIM(ice_acl_pattern),
+ .parse_pattern_action = ice_acl_parse,
+ .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(ice_acl_engine_init)
+{
+ struct ice_flow_engine *engine = &ice_acl_engine;
+ ice_register_flow_engine(engine);
+}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 9789096..5f3de1a 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -49,6 +49,8 @@
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
#define ICE_MAX_PKG_FILENAME_SIZE 256
+#define MAX_ACL_ENTRIES 512
+
/**
* vlan_id is a 12 bit number.
* The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -398,6 +400,18 @@ struct ice_hash_ctx {
struct ice_hash_gtpu_ctx gtpu6;
};
+struct ice_acl_conf {
+ struct ice_fdir_fltr input;
+ uint64_t input_set;
+};
+
+/**
+ * A structure used to define fields of ACL related info.
+ */
+struct ice_acl_info {
+ struct ice_acl_conf conf;
+};
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -421,6 +435,7 @@ struct ice_pf {
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
uint16_t fdir_qp_offset;
struct ice_fdir_info fdir; /* flow director info */
+ struct ice_acl_info acl; /* ACL info */
struct ice_hash_ctx hash_ctx;
uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
@@ -440,6 +455,8 @@ struct ice_pf {
uint64_t old_rx_bytes;
uint64_t old_tx_bytes;
uint64_t supported_rxdid; /* bitmap for supported RXDID */
+ struct rte_bitmap *slots;
+ uint64_t hw_entry_id[MAX_ACL_ENTRIES];
};
#define ICE_MAX_QUEUE_NUM 2048
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 54b0316..1429cbc 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1896,6 +1896,8 @@ ice_register_parser(struct ice_flow_parser *parser,
TAILQ_INSERT_TAIL(list, parser_node, node);
else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
TAILQ_INSERT_HEAD(list, parser_node, node);
+ else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
+ TAILQ_INSERT_HEAD(list, parser_node, node);
else
return -EINVAL;
}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 99e1b77..254595a 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -10,7 +10,8 @@ sources = files(
'ice_switch_filter.c',
'ice_generic_flow.c',
'ice_fdir_filter.c',
- 'ice_hash.c'
+ 'ice_hash.c',
+ 'ice_acl_filter.c'
)
deps += ['hash', 'net', 'common_iavf']
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v3 3/3] net/ice: support ACL filter in DCF
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 3/3] net/ice: support ACL filter in DCF Simei Su
@ 2020-10-15 5:10 ` Zhang, Qi Z
2020-10-15 7:08 ` Su, Simei
0 siblings, 1 reply; 25+ messages in thread
From: Zhang, Qi Z @ 2020-10-15 5:10 UTC (permalink / raw)
To: Su, Simei, Yang, Qiming; +Cc: dev, Wang, Haiyue, Xing, Beilei, Ding, Xuan
some minor comments
> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, October 14, 2020 4:54 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Ding, Xuan <xuan.ding@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v3 3/3] net/ice: support ACL filter in DCF
>
> Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter to destroy a
> rule. If a flow is matched by ACL filter, filter rule will be set to HW. Currently
> IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern and drop action are supported.
>
> Signed-off-by: Simei Su <simei.su@intel.com>
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
> doc/guides/rel_notes/release_20_11.rst | 6 +
> drivers/net/ice/ice_acl_filter.c | 1034
> ++++++++++++++++++++++++++++++++
> drivers/net/ice/ice_ethdev.h | 17 +
> drivers/net/ice/ice_generic_flow.c | 2 +
> drivers/net/ice/meson.build | 3 +-
> 5 files changed, 1061 insertions(+), 1 deletion(-) create mode 100644
> drivers/net/ice/ice_acl_filter.c
>
> diff --git a/doc/guides/rel_notes/release_20_11.rst
> b/doc/guides/rel_notes/release_20_11.rst
> index e8ae4d4..6cf1ef8 100644
> --- a/doc/guides/rel_notes/release_20_11.rst
> +++ b/doc/guides/rel_notes/release_20_11.rst
...
> +#define MAX_ACL_SLOTS_ID 2048
> +
> +#define ICE_ACL_INSET_ETH_IPV4 ( \
> + ICE_INSET_SMAC | ICE_INSET_DMAC | \
> + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST) #define
> +ICE_ACL_INSET_ETH_IPV4_UDP ( \
> + ICE_INSET_SMAC | ICE_INSET_DMAC | \
> + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
> + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
Better to reuse ICE_ACL_INSET_ETH_IPV4 in ICE_ACL_INSET_ETH_IPV4_UDP
...
> +static void
> +acl_prof_helper_function(struct ice_hw *hw, struct ice_flow_seg_info *seg,
The function is always used before ice_add_prof, so better rename to acl_add_prof_prepare.
> + bool is_l4, uint16_t src_port, uint16_t dst_port) {
> + uint16_t val_loc, mask_loc;
> +
> + ret = ice_acl_parse_action(ad, actions, error, filter);
> + if (ret)
> + goto error;
> +
> + if (meta)
> + *meta = filter;
> +
> +error:
> + rte_free(item);
> + return ret;
> +}
> +
....
>
> +struct ice_acl_conf {
> + struct ice_fdir_fltr input;
> + uint64_t input_set;
> +};
> +
> +/**
> + * A structure used to define fields of ACL related info.
> + */
> +struct ice_acl_info {
> + struct ice_acl_conf conf;
> +};
> +
> struct ice_pf {
> struct ice_adapter *adapter; /* The adapter this PF associate to */
> struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -421,6
> +435,7 @@ struct ice_pf {
> uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
> uint16_t fdir_qp_offset;
> struct ice_fdir_info fdir; /* flow director info */
> + struct ice_acl_info acl; /* ACL info */
> struct ice_hash_ctx hash_ctx;
> uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
> uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
> @@ -440,6 +455,8 @@ struct ice_pf {
> uint64_t old_rx_bytes;
> uint64_t old_tx_bytes;
> uint64_t supported_rxdid; /* bitmap for supported RXDID */
> + struct rte_bitmap *slots;
> + uint64_t hw_entry_id[MAX_ACL_ENTRIES];
Can we move above 2 fields into ice_acl_info?
> };
>
>
^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v3 3/3] net/ice: support ACL filter in DCF
2020-10-15 5:10 ` Zhang, Qi Z
@ 2020-10-15 7:08 ` Su, Simei
0 siblings, 0 replies; 25+ messages in thread
From: Su, Simei @ 2020-10-15 7:08 UTC (permalink / raw)
To: Zhang, Qi Z, Yang, Qiming; +Cc: dev, Wang, Haiyue, Xing, Beilei, Ding, Xuan
Hi, Qi
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Thursday, October 15, 2020 1:11 PM
> To: Su, Simei <simei.su@intel.com>; Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Ding, Xuan <xuan.ding@intel.com>
> Subject: RE: [PATCH v3 3/3] net/ice: support ACL filter in DCF
>
> some minor comments
>
> > -----Original Message-----
> > From: Su, Simei <simei.su@intel.com>
> > Sent: Wednesday, October 14, 2020 4:54 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> > <qiming.yang@intel.com>
> > Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Ding, Xuan <xuan.ding@intel.com>; Su, Simei
> > <simei.su@intel.com>
> > Subject: [PATCH v3 3/3] net/ice: support ACL filter in DCF
> >
> > Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter
> > to destroy a rule. If a flow is matched by ACL filter, filter rule
> > will be set to HW. Currently IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
> and drop action are supported.
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> > ---
> > doc/guides/rel_notes/release_20_11.rst | 6 +
> > drivers/net/ice/ice_acl_filter.c | 1034
> > ++++++++++++++++++++++++++++++++
> > drivers/net/ice/ice_ethdev.h | 17 +
> > drivers/net/ice/ice_generic_flow.c | 2 +
> > drivers/net/ice/meson.build | 3 +-
> > 5 files changed, 1061 insertions(+), 1 deletion(-) create mode
> > 100644 drivers/net/ice/ice_acl_filter.c
> >
> > diff --git a/doc/guides/rel_notes/release_20_11.rst
> > b/doc/guides/rel_notes/release_20_11.rst
> > index e8ae4d4..6cf1ef8 100644
> > --- a/doc/guides/rel_notes/release_20_11.rst
> > +++ b/doc/guides/rel_notes/release_20_11.rst
> ...
>
> > +#define MAX_ACL_SLOTS_ID 2048
> > +
> > +#define ICE_ACL_INSET_ETH_IPV4 ( \
> > +ICE_INSET_SMAC | ICE_INSET_DMAC | \
> > +ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST) #define
> > +ICE_ACL_INSET_ETH_IPV4_UDP ( \ ICE_INSET_SMAC | ICE_INSET_DMAC | \
> > +ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ ICE_INSET_UDP_SRC_PORT
> |
> > +ICE_INSET_UDP_DST_PORT)
>
>
> Better to reuse ICE_ACL_INSET_ETH_IPV4 in ICE_ACL_INSET_ETH_IPV4_UDP
Ok, will simplify it in v4.
>
> ...
> > +static void
> > +acl_prof_helper_function(struct ice_hw *hw, struct ice_flow_seg_info *seg,
>
> The function is always used before ice_add_prof, so better rename to
> acl_add_prof_prepare.
Ok, will rename it in next version.
>
> > + bool is_l4, uint16_t src_port, uint16_t dst_port) {
> > +uint16_t val_loc, mask_loc;
> > +
> > +ret = ice_acl_parse_action(ad, actions, error, filter);
> > +if (ret)
> > +goto error;
> > +
> > +if (meta)
> > +*meta = filter;
> > +
> > +error:
> > +rte_free(item);
> > +return ret;
> > +}
> > +
> ....
>
> >
> > +struct ice_acl_conf {
> > +struct ice_fdir_fltr input;
> > +uint64_t input_set;
> > +};
> > +
> > +/**
> > + * A structure used to define fields of ACL related info.
> > + */
> > +struct ice_acl_info {
> > +struct ice_acl_conf conf;
> > +};
> > +
> > struct ice_pf {
> > struct ice_adapter *adapter; /* The adapter this PF associate to */
> > struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -421,6
> > +435,7 @@ struct ice_pf {
> > uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
> > uint16_t fdir_qp_offset;
> > struct ice_fdir_info fdir; /* flow director info */
> > +struct ice_acl_info acl; /* ACL info */
> > struct ice_hash_ctx hash_ctx;
> > uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
> > uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
> > @@ -440,6 +455,8 @@ struct ice_pf {
> > uint64_t old_rx_bytes;
> > uint64_t old_tx_bytes;
> > uint64_t supported_rxdid; /* bitmap for supported RXDID */
> > +struct rte_bitmap *slots;
> > +uint64_t hw_entry_id[MAX_ACL_ENTRIES];
>
> Can we move above 2 fields into ice_acl_info?
>
> > };
Yes, we can.
> >
> >
>
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capability
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capability Simei Su
` (2 preceding siblings ...)
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 3/3] net/ice: support ACL filter in DCF Simei Su
@ 2020-10-16 8:44 ` Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 1/3] net/ice/base: change API from static to non-static Simei Su
` (3 more replies)
3 siblings, 4 replies; 25+ messages in thread
From: Simei Su @ 2020-10-16 8:44 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
[PATCH v4 1/3] change API from static to non-static.
[PATCH v4 2/3] get PF VSI map for DCF ACL rule.
[PATCH v4 3/3] support IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and DROP action for DCF ACL.
v4:
* Add processing for error logic.
* Fix several bugs.
v3:
* Optimize code logic in ice_acl_prof_init and ice_acl_create_filter.
* Fix several bugs.
v2:
* Add release notes.
* Adjust patch sequence.
* Refactor ACL design and related structure.
* Add bitmap mechanism to allocate entry dynamically.
Simei Su (3):
net/ice/base: change API from static to non-static
net/ice: get PF VSI map
net/ice: support ACL filter in DCF
doc/guides/rel_notes/release_20_11.rst | 6 +
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +
drivers/net/ice/ice_acl_filter.c | 1011 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 +-
drivers/net/ice/ice_ethdev.h | 17 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
10 files changed, 1079 insertions(+), 4 deletions(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v4 1/3] net/ice/base: change API from static to non-static
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capability Simei Su
@ 2020-10-16 8:44 ` Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 2/3] net/ice: get PF VSI map Simei Su
` (2 subsequent siblings)
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-16 8:44 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
This patch changes static API "ice_flow_assoc_prof" to non-static
API in order to let it be used by other files.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index de5dfb2..80ac0b6 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -2125,7 +2125,7 @@ ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 0a52409..698a230 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -499,6 +499,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle);
+enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
enum ice_status
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v4 2/3] net/ice: get PF VSI map
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 1/3] net/ice/base: change API from static to non-static Simei Su
@ 2020-10-16 8:44 ` Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 3/3] net/ice: support ACL filter in DCF Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Simei Su
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-16 8:44 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
This patch gets PF vsi number when issuing ACL rule in DCF.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 +++++++++++++++++++++++++++++++++++--
3 files changed, 37 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 2d803c5..d20e2b3 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -318,6 +318,7 @@ ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
}
hw->num_vfs = vsi_map->num_vfs;
+ hw->pf_vsi_id = vsi_map->pf_vsi;
}
if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index a44a01e..ff02996 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -43,6 +43,7 @@ struct ice_dcf_hw {
uint16_t num_vfs;
uint16_t *vf_vsi_map;
+ uint16_t pf_vsi_id;
struct virtchnl_version_info virtchnl_version;
struct virtchnl_vf_resource *vf_res; /* VF resource */
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c5dfdd3..30ead4c 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -78,6 +78,35 @@ ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
}
+/**
+ * ice_dcf_update_pf_vsi_map - record the PF's VSI number in the VSI context
+ * @hw: pointer to the parent HW structure
+ * @pf_vsi_idx: software VSI handle reserved for the PF
+ * @pf_vsi_num: HW VSI number reported by the DCF for the PF
+ *
+ * Allocates the VSI context on first use; on allocation failure the map
+ * is simply left unset (logged, no error propagated to the caller).
+ */
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+ uint16_t pf_vsi_num)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+ PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+ if (!vsi_ctx)
+ vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+ if (!vsi_ctx) {
+ PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+ pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx->vsi_num = pf_vsi_num;
+ hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+ /* this entry describes the PF, not a VF - the old "VF%u" wording
+ * was misleading
+ */
+ PMD_DRV_LOG(DEBUG, "PF vsi handle %u is assigned with vsi number %u",
+ pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
static void*
ice_dcf_vsi_update_service_handler(void *param)
{
@@ -368,14 +397,18 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ parent_adapter->pf.main_vsi->idx = hw->num_vfs;
+ ice_dcf_update_pf_vsi_map(parent_hw,
+ parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
+
+ ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
err = ice_flow_init(parent_adapter);
if (err) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
goto uninit_hw;
}
- ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
-
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
if (rte_is_valid_assigned_ether_addr(mac))
rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v4 3/3] net/ice: support ACL filter in DCF
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 1/3] net/ice/base: change API from static to non-static Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 2/3] net/ice: get PF VSI map Simei Su
@ 2020-10-16 8:44 ` Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Simei Su
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-16 8:44 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter
to destroy a rule. If a flow is matched by ACL filter, filter rule
will be set to HW. Currently IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and drop action are supported.
Signed-off-by: Simei Su <simei.su@intel.com>
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
doc/guides/rel_notes/release_20_11.rst | 6 +
drivers/net/ice/ice_acl_filter.c | 1011 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_ethdev.h | 17 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
5 files changed, 1038 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index e8ae4d4..6cf1ef8 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -189,6 +189,12 @@ New Features
* Added support for flexible descriptor metadata extraction.
+* **Updated the Intel ice driver.**
+
+ Updated the Intel ice driver with new features and improvements, including:
+
+ * Added ACL filter support for Intel DCF.
+
Removed Items
-------------
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
new file mode 100644
index 0000000..3d9ccf9
--- /dev/null
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -0,0 +1,1011 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include "base/ice_type.h"
+#include "base/ice_acl.h"
+#include "ice_logs.h"
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "base/ice_flow.h"
+
+#define MAX_ACL_SLOTS_ID 2048
+
+#define ICE_ACL_INSET_ETH_IPV4 ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
+ ICE_ACL_INSET_ETH_IPV4 | \
+ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
+ ICE_ACL_INSET_ETH_IPV4 | \
+ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
+ ICE_ACL_INSET_ETH_IPV4 | \
+ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+static struct ice_flow_parser ice_acl_parser;
+
+static struct
+ice_pattern_match_item ice_acl_pattern[] = {
+ {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
+};
+
+/**
+ * ice_acl_prof_alloc - allocate the per-ptype ACL profile table
+ * @hw: pointer to the hardware structure
+ *
+ * Lazily allocates hw->acl_prof plus one ice_fd_hw_prof per flow ptype.
+ * Returns 0 on success; on failure every already-populated slot and the
+ * table itself are freed and -ENOMEM is returned.
+ */
+static int
+ice_acl_prof_alloc(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype, fltr_ptype;
+
+ if (!hw->acl_prof) {
+ hw->acl_prof = (struct ice_fd_hw_prof **)
+ ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+ sizeof(*hw->acl_prof));
+ if (!hw->acl_prof)
+ return -ENOMEM;
+ }
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+ if (!hw->acl_prof[ptype]) {
+ hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
+ ice_malloc(hw, sizeof(**hw->acl_prof));
+ if (!hw->acl_prof[ptype])
+ goto fail_mem;
+ }
+ }
+
+ return 0;
+
+fail_mem:
+ /* unwind every slot below the failing ptype, then the table */
+ for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ fltr_ptype < ptype; fltr_ptype++) {
+ rte_free(hw->acl_prof[fltr_ptype]);
+ hw->acl_prof[fltr_ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_acl_setup - Reserve and initialize the ACL resources
+ * @pf: board private structure
+ *
+ * Creates the ACL TCAM table for IPv4 rules and one scenario in it.
+ * A wider key (6 key-width units instead of 3) is used when fewer than
+ * 4 PFs exist, matching the MAC-field extraction in acl_add_prof_prepare().
+ * Returns 0 on success, negative error code otherwise.
+ * NOTE(review): if ice_acl_create_scen() fails the table created just
+ * above is not destroyed here - presumably the caller unwinds through
+ * ice_deinit_acl(); confirm.
+ */
+static int
+ice_acl_setup(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ uint32_t pf_num = hw->dev_caps.num_funcs;
+ struct ice_acl_tbl_params params;
+ uint16_t scen_id;
+ int err = 0;
+
+ /* was "memset(¶ms, ...)": repair mangled "&params" (HTML
+ * entity corruption in the original text)
+ */
+ memset(&params, 0, sizeof(params));
+
+ /* create for IPV4 table */
+ if (pf_num < 4)
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
+ else
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
+
+ params.depth = ICE_AQC_ACL_TCAM_DEPTH;
+ params.entry_act_pairs = 1;
+ params.concurr = false;
+
+ err = ice_acl_create_tbl(hw, &params);
+ if (err)
+ return err;
+
+ err = ice_acl_create_scen(hw, params.width, params.depth,
+ &scen_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_deinit_acl - Unroll the initialization of the ACL block
+ * @pf: ptr to PF device
+ *
+ * Destroys the HW ACL table, frees the table shadow and the slot
+ * allocator bitmap. (Void: no status is returned.)
+ */
+static void ice_deinit_acl(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ ice_acl_destroy_tbl(hw);
+
+ rte_free(hw->acl_tbl);
+ hw->acl_tbl = NULL;
+
+ /* rte_free(NULL) is a no-op, the guard only avoids the call */
+ if (pf->acl.slots) {
+ rte_free(pf->acl.slots);
+ pf->acl.slots = NULL;
+ }
+}
+
+/**
+ * acl_add_prof_prepare - fill a flow segment before ice_add_prof
+ * @hw: pointer to HW structure (only num_funcs is consulted)
+ * @seg: flow segment info to populate with extraction fields
+ * @is_l4: true to also extract the L4 source/destination port fields
+ * @src_port: ICE_FLOW_FIELD_IDX_* id of the L4 source-port field
+ * @dst_port: ICE_FLOW_FIELD_IDX_* id of the L4 destination-port field
+ *
+ * Note: @src_port/@dst_port are flow-field indices, not port numbers.
+ * MAC fields are only extracted when fewer than 4 PFs exist, which is
+ * when ice_acl_setup() configured the wider ACL key.
+ */
+static void
+acl_add_prof_prepare(struct ice_hw *hw, struct ice_flow_seg_info *seg,
+ bool is_l4, uint16_t src_port, uint16_t dst_port)
+{
+ uint16_t val_loc, mask_loc;
+
+ if (hw->dev_caps.num_funcs < 4) {
+ /* mac source address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.src_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.src_mac);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* mac destination address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.dst_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.dst_mac);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ /* IP source address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ if (is_l4) {
+ /* Layer 4 source port */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
+ ice_flow_set_fld(seg, src_port, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
+ ice_flow_set_fld(seg, dst_port, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+}
+
+/**
+ * ice_acl_prof_init - Initialize ACL profile
+ * @pf: ice PF structure
+ *
+ * Adds one ACL flow profile for each supported IPv4 ptype (OTHER, UDP,
+ * TCP, SCTP) and associates all of them with every VF VSI handle
+ * (main_vsi->idx was set to hw->num_vfs, so handles 0..num_vfs-1).
+ * On failure every profile created so far is removed.
+ * NOTE(review): @seg is not freed on the success path - it appears
+ * ice_flow_add_prof() copies the segment; confirm there is no leak.
+ *
+ * Returns 0 on success.
+ */
+static int
+ice_acl_prof_init(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_prof *prof_ipv4 = NULL;
+ struct ice_flow_prof *prof_ipv4_udp = NULL;
+ struct ice_flow_prof *prof_ipv4_tcp = NULL;
+ struct ice_flow_prof *prof_ipv4_sctp = NULL;
+ struct ice_flow_seg_info *seg;
+ int i;
+ int ret;
+
+ seg = (struct ice_flow_seg_info *)
+ ice_malloc(hw, sizeof(*seg));
+ if (!seg)
+ return -ENOMEM;
+
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, false, 0, 0);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ seg, 1, NULL, 0, &prof_ipv4);
+ if (ret)
+ goto err_add_prof;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ seg, 1, NULL, 0, &prof_ipv4_udp);
+ if (ret)
+ goto err_add_prof_ipv4_udp;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ seg, 1, NULL, 0, &prof_ipv4_tcp);
+ if (ret)
+ goto err_add_prof_ipv4_tcp;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ seg, 1, NULL, 0, &prof_ipv4_sctp);
+ if (ret)
+ goto err_add_prof_ipv4_sctp;
+
+ for (i = 0; i < pf->main_vsi->idx; i++) {
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4, i);
+ if (ret)
+ goto err_assoc_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
+ if (ret)
+ goto err_assoc_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_tcp, i);
+ if (ret)
+ goto err_assoc_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_sctp, i);
+ if (ret)
+ goto err_assoc_prof;
+ }
+ return 0;
+
+ /* Each label removes the profile created by the step BEFORE the one
+ * that failed, then falls through. The original chain removed the
+ * UDP profile (never created) on a UDP add failure and leaked the
+ * OTHER profile; the two lower rem_prof calls are now in the right
+ * order: UDP-fail -> remove OTHER; TCP-fail -> remove UDP + OTHER.
+ */
+err_assoc_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_SCTP);
+err_add_prof_ipv4_sctp:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
+err_add_prof_ipv4_tcp:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
+err_add_prof_ipv4_udp:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_OTHER);
+err_add_prof:
+ ice_free(hw, seg);
+ return ret;
+}
+
+/**
+ * ice_acl_set_input_set - copy the parsed ACL filter config into a HW input
+ * @filter: ACL filter configuration parsed from the rte_flow rule
+ * @input: destination filter structure to be programmed to HW
+ *
+ * Copies destination control, IPv4 addresses/ports (with masks) and the
+ * MAC addresses (with masks) for the supported IPv4 flow types.
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
+{
+ if (!input)
+ return ICE_ERR_BAD_PTR;
+
+ input->q_index = filter->input.q_index;
+ input->dest_vsi = filter->input.dest_vsi;
+ input->dest_ctl = filter->input.dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->flow_type = filter->input.flow_type;
+
+ switch (input->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ /* L4 types: copy address/port fields individually */
+ input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
+ input->ip.v4.src_port = filter->input.ip.v4.src_port;
+ input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
+ input->ip.v4.src_ip = filter->input.ip.v4.src_ip;
+
+ input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
+ input->mask.v4.src_port = filter->input.mask.v4.src_port;
+ input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
+ input->mask.v4.src_ip = filter->input.mask.v4.src_ip;
+
+ ice_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ /* IPV4_OTHER: copy the whole v4 structs in one go */
+ ice_memcpy(&input->ip.v4, &filter->input.ip.v4,
+ sizeof(struct ice_fdir_v4),
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->mask.v4, &filter->input.mask.v4,
+ sizeof(struct ice_fdir_v4),
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Per-rule bookkeeping kept in rte_flow::rule for an ACL filter. */
+struct acl_rule {
+ enum ice_fltr_ptype flow_type;
+ /* bitmap slot id per programmed HW entry; IPV4_OTHER rules program
+ * four entries (OTHER/UDP/TCP/SCTP), other types use entry_id[0] only
+ */
+ uint32_t entry_id[4];
+};
+
+/*
+ * Pop the lowest free slot from the bitmap allocator. A set bit means
+ * "free"; the bit found is cleared (marked in-use) and its position is
+ * returned via @slot_id. Returns 0 on success, non-zero when no free
+ * slot exists.
+ * NOTE(review): rte_bitmap_scan() itself does not set rte_errno, so the
+ * -rte_errno returned on exhaustion may be stale - confirm.
+ */
+static inline int
+ice_acl_alloc_slot_id(struct rte_bitmap *slots, uint32_t *slot_id)
+{
+ uint32_t pos = 0;
+ uint64_t slab = 0;
+ uint32_t i = 0;
+
+ /* restart the scan at offset 0 so the lowest free slot is found */
+ __rte_bitmap_scan_init(slots);
+ if (!rte_bitmap_scan(slots, &pos, &slab))
+ return -rte_errno;
+
+ /* pos is the slab base; add the index of the first set bit in it */
+ i = rte_bsf64(slab);
+ pos += i;
+ rte_bitmap_clear(slots, pos);
+
+ *slot_id = pos;
+ return 0;
+}
+
+/**
+ * ice_acl_hw_set_conf - program one ACL HW entry for a rule
+ * @pf: board private structure
+ * @input: filter input set to program
+ * @acts: action list for the entry
+ * @rule: driver rule bookkeeping to record the allocated slot in
+ * @flow_type: ptype whose profile the entry is added under
+ * @entry_idx: index into rule->entry_id for this entry
+ *
+ * Allocates a slot from the bitmap, programs the HW entry and records
+ * slot/HW-entry ids. On any failure the allocated slot is returned to
+ * the bitmap (the original code leaked it). Returns 0 on success.
+ */
+static inline int
+ice_acl_hw_set_conf(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ struct ice_flow_action *acts, struct acl_rule *rule,
+ enum ice_fltr_ptype flow_type, int32_t entry_idx)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ enum ice_block blk = ICE_BLK_ACL;
+ uint64_t entry_id, hw_entry;
+ uint32_t slot_id = 0;
+ int act_cnt = 1;
+ int ret = 0;
+
+ /* Allocate slot_id from bitmap table. */
+ ret = ice_acl_alloc_slot_id(pf->acl.slots, &slot_id);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to alloc slot id.");
+ return ret;
+ }
+
+ if (slot_id >= MAX_ACL_ENTRIES) {
+ PMD_DRV_LOG(ERR, "Exceed the maximum entry number(%d)"
+ " HW supported!", MAX_ACL_ENTRIES);
+ /* give the slot back, nothing was programmed with it */
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ return -1;
+ }
+
+ /* For IPV4_OTHER type, should add entry for all types.
+ * For IPV4_UDP/TCP/SCTP type, only add entry for each.
+ */
+ entry_id = ((uint64_t)flow_type << 32) | slot_id;
+ ret = ice_flow_add_entry(hw, blk, flow_type,
+ entry_id, pf->main_vsi->idx,
+ ICE_FLOW_PRIO_NORMAL, input,
+ acts, act_cnt, &hw_entry);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Fail to add entry.");
+ /* return the unused slot to the allocator */
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ return ret;
+ }
+
+ rule->entry_id[entry_idx] = slot_id;
+ pf->acl.hw_entry_id[slot_id] = hw_entry;
+
+ return 0;
+}
+
+/*
+ * Tear down the first @entry_idx HW entries of @rule: each slot is
+ * returned to the bitmap allocator (bit set again = free) and the
+ * corresponding HW flow entry is removed.
+ */
+static inline void
+ice_acl_hw_rem_conf(struct ice_pf *pf, struct acl_rule *rule, int32_t entry_idx)
+{
+ uint32_t slot_id;
+ int32_t i;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ for (i = 0; i < entry_idx; i++) {
+ slot_id = rule->entry_id[i];
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ pf->acl.hw_entry_id[slot_id]);
+ }
+}
+
+/**
+ * ice_acl_create_filter - program a parsed ACL rule to HW
+ * @ad: ice adapter
+ * @flow: rte_flow handle; on success flow->rule holds the acl_rule
+ * @meta: parser output (struct ice_acl_conf *)
+ * @error: rte_flow error reporting
+ *
+ * IPV4_OTHER rules are expanded into four HW entries (OTHER/UDP/TCP/SCTP),
+ * the L4 types into one. Two fixes versus the original:
+ * 1) a failure of the first ice_acl_hw_set_conf() returned directly,
+ * leaking both 'input' and 'rule' - it now unwinds via the labels;
+ * 2) the error labels fell through into each other, removing the same
+ * HW entries several times - each label now cleans exactly the
+ * entries that were created and jumps to the common tail.
+ */
+static int
+ice_acl_create_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error)
+{
+ struct ice_acl_conf *filter = meta;
+ enum ice_fltr_ptype flow_type = filter->input.flow_type;
+ struct ice_flow_action acts[1];
+ struct ice_pf *pf = &ad->pf;
+ struct ice_fdir_fltr *input;
+ struct acl_rule *rule;
+ int ret;
+
+ rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
+ if (!rule) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for acl rule");
+ return -rte_errno;
+ }
+
+ input = rte_zmalloc("acl_entry", sizeof(*input), 0);
+ if (!input) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for acl input");
+ ret = -rte_errno;
+ goto err_acl_input_alloc;
+ }
+
+ ret = ice_acl_set_input_set(filter, input);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to set input set.");
+ ret = -rte_errno;
+ goto err_acl_set_input;
+ }
+
+ /* only DROP is produced by ice_acl_parse_action today */
+ if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+ acts[0].type = ICE_FLOW_ACT_DROP;
+ acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
+ acts[0].data.acl_act.prio = 0x3;
+ acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+ }
+
+ input->acl_fltr = true;
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule, flow_type, 0);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to set hw configure.");
+ ret = -rte_errno;
+ goto err_acl_set_input;
+ }
+
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
+ if (ret)
+ goto err_acl_hw_set_conf_udp;
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, 2);
+ if (ret)
+ goto err_acl_hw_set_conf_tcp;
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 3);
+ if (ret)
+ goto err_acl_hw_set_conf_sctp;
+ }
+
+ rule->flow_type = flow_type;
+ flow->rule = rule;
+ /* NOTE(review): 'input' is not retained in 'rule'; if
+ * ice_flow_add_entry() copies the data it could be freed here -
+ * confirm before adding the free.
+ */
+ return 0;
+
+err_acl_hw_set_conf_sctp:
+ /* SCTP entry (idx 3) failed: remove entries 0..2 */
+ ice_acl_hw_rem_conf(pf, rule, 3);
+ goto err_acl_set_input;
+err_acl_hw_set_conf_tcp:
+ /* TCP entry (idx 2) failed: remove entries 0..1 */
+ ice_acl_hw_rem_conf(pf, rule, 2);
+ goto err_acl_set_input;
+err_acl_hw_set_conf_udp:
+ /* UDP entry (idx 1) failed: remove entry 0 */
+ ice_acl_hw_rem_conf(pf, rule, 1);
+err_acl_set_input:
+ rte_free(input);
+err_acl_input_alloc:
+ rte_free(rule);
+ return ret;
+}
+
+static int
+ice_acl_destroy_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct acl_rule *rule = (struct acl_rule *)flow->rule;
+ uint32_t slot_id, i;
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret = 0;
+
+ switch (rule->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ for (i = 0; i < 4; i++) {
+ slot_id = rule->entry_id[i];
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ pf->acl.hw_entry_id[slot_id]);
+ }
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ slot_id = rule->entry_id[0];
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ pf->acl.hw_entry_id[slot_id]);
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Unsupported flow type.");
+ break;
+ }
+
+ flow->rule = NULL;
+ rte_free(rule);
+ return ret;
+}
+
+static void
+ice_acl_filter_free(struct rte_flow *flow)
+{
+ rte_free(flow->rule);
+ flow->rule = NULL;
+}
+
+static int
+ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct ice_acl_conf *filter)
+{
+ uint32_t dest_num = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ if (dest_num == 0 || dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct ice_acl_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ uint64_t input_set = ICE_INSET_NONE;
+ uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (eth_spec && eth_mask) {
+ if (!rte_is_zero_ether_addr(ð_spec->src) &&
+ !rte_is_zero_ether_addr(ð_mask->src)) {
+ input_set |= ICE_INSET_SMAC;
+ ice_memcpy(&filter->input.ext_data.src_mac,
+ ð_spec->src,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&filter->input.ext_mask.src_mac,
+ ð_mask->src,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ }
+
+ if (!rte_is_zero_ether_addr(ð_spec->dst) &&
+ !rte_is_zero_ether_addr(ð_mask->dst)) {
+ input_set |= ICE_INSET_DMAC;
+ ice_memcpy(&filter->input.ext_data.dst_mac,
+ ð_spec->dst,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&filter->input.ext_mask.dst_mac,
+ ð_mask->dst,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr) {
+ filter->input.ip.v4.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.mask.v4.src_ip =
+ ipv4_mask->hdr.src_addr;
+
+ input_set |= ICE_INSET_IPV4_SRC;
+ }
+
+ if (ipv4_mask->hdr.dst_addr) {
+ filter->input.ip.v4.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.mask.v4.dst_ip =
+ ipv4_mask->hdr.dst_addr;
+
+ input_set |= ICE_INSET_IPV4_DST;
+ }
+ }
+
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ tcp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_TCP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ tcp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ tcp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_TCP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ tcp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ tcp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ udp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_UDP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ udp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ udp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_UDP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ udp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+
+ if (sctp_spec && sctp_mask) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ sctp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_SCTP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ sctp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ sctp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_SCTP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ sctp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
+ filter->input.flow_type = flow_type;
+ filter->input_set = input_set;
+
+ return 0;
+}
+
+static int
+ice_acl_parse(struct ice_adapter *ad,
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_acl_conf *filter = &pf->acl.conf;
+ struct ice_pattern_match_item *item = NULL;
+ uint64_t input_set;
+ int ret;
+
+ memset(filter, 0, sizeof(*filter));
+ item = ice_search_pattern_match_item(pattern, array, array_len, error);
+ if (!item)
+ return -rte_errno;
+
+ ret = ice_acl_parse_pattern(ad, pattern, error, filter);
+ if (ret)
+ goto error;
+ input_set = filter->input_set;
+ if (!input_set || input_set & ~item->input_set_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ pattern,
+ "Invalid input set");
+ ret = -rte_errno;
+ goto error;
+ }
+
+ ret = ice_acl_parse_action(ad, actions, error, filter);
+ if (ret)
+ goto error;
+
+ if (meta)
+ *meta = filter;
+
+error:
+ rte_free(item);
+ return ret;
+}
+
+static int
+ice_acl_bitmap_init(struct ice_pf *pf)
+{
+ uint32_t bmp_size;
+ void *mem = NULL;
+ struct rte_bitmap *slots;
+ int ret = 0;
+ bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
+ mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
+ return -rte_errno;
+ }
+
+ slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
+ if (slots == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to initialize acl bitmap.");
+ ret = -rte_errno;
+ goto err_acl_mem_alloc;
+ }
+ pf->acl.slots = slots;
+ return 0;
+
+err_acl_mem_alloc:
+ rte_free(mem);
+ return ret;
+}
+
+static int
+ice_acl_init(struct ice_adapter *ad)
+{
+ int ret = 0;
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ret = ice_acl_prof_alloc(hw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Cannot allocate memory for "
+ "ACL profile.");
+ return -ENOMEM;
+ }
+
+ ret = ice_acl_setup(pf);
+ if (ret)
+ return ret;
+
+ ret = ice_acl_bitmap_init(pf);
+ if (ret)
+ return ret;
+
+ ret = ice_acl_prof_init(pf);
+ if (ret)
+ return ret;
+
+ return ice_register_parser(parser, ad);
+}
+
+static void
+ice_acl_prof_free(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype;
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+ rte_free(hw->acl_prof[ptype]);
+ hw->acl_prof[ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+}
+
+static void
+ice_acl_uninit(struct ice_adapter *ad)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ice_unregister_parser(parser, ad);
+
+ ice_deinit_acl(pf);
+ ice_acl_prof_free(hw);
+}
+
+static struct
+ice_flow_engine ice_acl_engine = {
+ .init = ice_acl_init,
+ .uninit = ice_acl_uninit,
+ .create = ice_acl_create_filter,
+ .destroy = ice_acl_destroy_filter,
+ .free = ice_acl_filter_free,
+ .type = ICE_FLOW_ENGINE_ACL,
+};
+
+static struct
+ice_flow_parser ice_acl_parser = {
+ .engine = &ice_acl_engine,
+ .array = ice_acl_pattern,
+ .array_len = RTE_DIM(ice_acl_pattern),
+ .parse_pattern_action = ice_acl_parse,
+ .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(ice_acl_engine_init)
+{
+ struct ice_flow_engine *engine = &ice_acl_engine;
+ ice_register_flow_engine(engine);
+}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 9789096..05218af 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -49,6 +49,8 @@
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
#define ICE_MAX_PKG_FILENAME_SIZE 256
+#define MAX_ACL_ENTRIES 512
+
/**
* vlan_id is a 12 bit number.
* The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -398,6 +400,20 @@ struct ice_hash_ctx {
struct ice_hash_gtpu_ctx gtpu6;
};
+struct ice_acl_conf {
+ struct ice_fdir_fltr input;
+ uint64_t input_set;
+};
+
+/**
+ * A structure used to define fields of ACL related info.
+ */
+struct ice_acl_info {
+ struct ice_acl_conf conf;
+ struct rte_bitmap *slots;
+ uint64_t hw_entry_id[MAX_ACL_ENTRIES];
+};
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -421,6 +437,7 @@ struct ice_pf {
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
uint16_t fdir_qp_offset;
struct ice_fdir_info fdir; /* flow director info */
+ struct ice_acl_info acl; /* ACL info */
struct ice_hash_ctx hash_ctx;
uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 54b0316..1429cbc 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1896,6 +1896,8 @@ ice_register_parser(struct ice_flow_parser *parser,
TAILQ_INSERT_TAIL(list, parser_node, node);
else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
TAILQ_INSERT_HEAD(list, parser_node, node);
+ else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
+ TAILQ_INSERT_HEAD(list, parser_node, node);
else
return -EINVAL;
}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 99e1b77..254595a 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -10,7 +10,8 @@ sources = files(
'ice_switch_filter.c',
'ice_generic_flow.c',
'ice_fdir_filter.c',
- 'ice_hash.c'
+ 'ice_hash.c',
+ 'ice_acl_filter.c'
)
deps += ['hash', 'net', 'common_iavf']
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capability Simei Su
` (2 preceding siblings ...)
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 3/3] net/ice: support ACL filter in DCF Simei Su
@ 2020-10-20 11:32 ` Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 1/3] net/ice/base: change API from static to non-static Simei Su
` (3 more replies)
3 siblings, 4 replies; 25+ messages in thread
From: Simei Su @ 2020-10-20 11:32 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
[PATCH v5 1/3] change API from static to non-static.
[PATCH v5 2/3] get PF VSI map for DCF ACL rule.
[PATCH v5 3/3] support IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and DROP action for DCF ACL.
v5:
* Change acl_rule structure location.
* Correct error processing in ice_acl_prof_init.
v4:
* Add processing for error logic.
* Fix several bugs.
v3:
* Optimize code logic in ice_acl_prof_init and ice_acl_create_filter.
* Fix several bugs.
v2:
* Add release notes.
* Adjust patch sequence.
* Refactor ACL design and related structure.
* Add bitmap mechanism to allocate entry dynamically.
Simei Su (3):
net/ice/base: change API from static to non-static
net/ice: get PF VSI map
net/ice: support ACL filter in DCF
doc/guides/rel_notes/release_20_11.rst | 2 +-
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +
drivers/net/ice/ice_acl_filter.c | 1011 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 +-
drivers/net/ice/ice_ethdev.h | 17 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
10 files changed, 1074 insertions(+), 5 deletions(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v5 1/3] net/ice/base: change API from static to non-static
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Simei Su
@ 2020-10-20 11:32 ` Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 2/3] net/ice: get PF VSI map Simei Su
` (2 subsequent siblings)
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-20 11:32 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
This patch changes static API "ice_flow_assoc_prof" to non-static
API in order to let it be used by other files.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index de5dfb2..80ac0b6 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -2125,7 +2125,7 @@ ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 0a52409..698a230 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -499,6 +499,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle);
+enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
enum ice_status
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v5 2/3] net/ice: get PF VSI map
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 1/3] net/ice/base: change API from static to non-static Simei Su
@ 2020-10-20 11:32 ` Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 3/3] net/ice: support ACL filter in DCF Simei Su
2020-10-20 12:37 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Zhang, Qi Z
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-20 11:32 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
This patch gets PF vsi number when issuing ACL rule in DCF.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 +++++++++++++++++++++++++++++++++++--
3 files changed, 37 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 2d803c5..d20e2b3 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -318,6 +318,7 @@ ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
}
hw->num_vfs = vsi_map->num_vfs;
+ hw->pf_vsi_id = vsi_map->pf_vsi;
}
if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index a44a01e..ff02996 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -43,6 +43,7 @@ struct ice_dcf_hw {
uint16_t num_vfs;
uint16_t *vf_vsi_map;
+ uint16_t pf_vsi_id;
struct virtchnl_version_info virtchnl_version;
struct virtchnl_vf_resource *vf_res; /* VF resource */
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c5dfdd3..30ead4c 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -78,6 +78,35 @@ ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
}
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+ uint16_t pf_vsi_num)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+ PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+ if (!vsi_ctx)
+ vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+ if (!vsi_ctx) {
+ PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+ pf_vsi_idx);
+ return;
+ }
+
+ vsi_ctx->vsi_num = pf_vsi_num;
+ hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+ PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+ pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
static void*
ice_dcf_vsi_update_service_handler(void *param)
{
@@ -368,14 +397,18 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ parent_adapter->pf.main_vsi->idx = hw->num_vfs;
+ ice_dcf_update_pf_vsi_map(parent_hw,
+ parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
+
+ ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
err = ice_flow_init(parent_adapter);
if (err) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
goto uninit_hw;
}
- ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
-
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
if (rte_is_valid_assigned_ether_addr(mac))
rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v5 3/3] net/ice: support ACL filter in DCF
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 1/3] net/ice/base: change API from static to non-static Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 2/3] net/ice: get PF VSI map Simei Su
@ 2020-10-20 11:32 ` Simei Su
2020-10-20 12:37 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Zhang, Qi Z
3 siblings, 0 replies; 25+ messages in thread
From: Simei Su @ 2020-10-20 11:32 UTC (permalink / raw)
To: qi.z.zhang, qiming.yang
Cc: dev, haiyue.wang, beilei.xing, xuan.ding, Simei Su
Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter
to destroy a rule. If a flow is matched by ACL filter, filter rule
will be set to HW. Currently IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and drop action are supported.
Signed-off-by: Simei Su <simei.su@intel.com>
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
doc/guides/rel_notes/release_20_11.rst | 2 +-
drivers/net/ice/ice_acl_filter.c | 1011 ++++++++++++++++++++++++++++++++
drivers/net/ice/ice_ethdev.h | 17 +
drivers/net/ice/ice_generic_flow.c | 2 +
drivers/net/ice/meson.build | 3 +-
5 files changed, 1033 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/ice/ice_acl_filter.c
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 278d8dd..335a43f 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -183,7 +183,7 @@ New Features
* **Updated Intel ice driver.**
- Updated the Intel ice driver to use write combining stores.
+ * Added ACL filter support for Intel DCF.
* **Updated Intel qat driver.**
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
new file mode 100644
index 0000000..ca483f0
--- /dev/null
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -0,0 +1,1011 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include "base/ice_type.h"
+#include "base/ice_acl.h"
+#include "ice_logs.h"
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "base/ice_flow.h"
+
+#define MAX_ACL_SLOTS_ID 2048
+
+#define ICE_ACL_INSET_ETH_IPV4 ( \
+ ICE_INSET_SMAC | ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
+ ICE_ACL_INSET_ETH_IPV4 | \
+ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
+ ICE_ACL_INSET_ETH_IPV4 | \
+ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
+ ICE_ACL_INSET_ETH_IPV4 | \
+ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+static struct ice_flow_parser ice_acl_parser;
+
+struct acl_rule {
+ enum ice_fltr_ptype flow_type;
+ uint32_t entry_id[4];
+};
+
+static struct
+ice_pattern_match_item ice_acl_pattern[] = {
+ {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
+};
+
+static int
+ice_acl_prof_alloc(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype, fltr_ptype;
+
+ if (!hw->acl_prof) {
+ hw->acl_prof = (struct ice_fd_hw_prof **)
+ ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+ sizeof(*hw->acl_prof));
+ if (!hw->acl_prof)
+ return -ENOMEM;
+ }
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+ if (!hw->acl_prof[ptype]) {
+ hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
+ ice_malloc(hw, sizeof(**hw->acl_prof));
+ if (!hw->acl_prof[ptype])
+ goto fail_mem;
+ }
+ }
+
+ return 0;
+
+fail_mem:
+ for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ fltr_ptype < ptype; fltr_ptype++) {
+ rte_free(hw->acl_prof[fltr_ptype]);
+ hw->acl_prof[fltr_ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_acl_setup - Reserve and initialize the ACL resources
+ * @pf: board private structure
+ */
+static int
+ice_acl_setup(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ uint32_t pf_num = hw->dev_caps.num_funcs;
+ struct ice_acl_tbl_params params;
+ uint16_t scen_id;
+ int err = 0;
+
+ memset(¶ms, 0, sizeof(params));
+
+ /* create for IPV4 table */
+ if (pf_num < 4)
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
+ else
+ params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
+
+ params.depth = ICE_AQC_ACL_TCAM_DEPTH;
+ params.entry_act_pairs = 1;
+ params.concurr = false;
+
+ err = ice_acl_create_tbl(hw, ¶ms);
+ if (err)
+ return err;
+
+ err = ice_acl_create_scen(hw, params.width, params.depth,
+ &scen_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_deinit_acl - Unroll the initialization of the ACL block
+ * @pf: ptr to PF device
+ *
+ * Destroys the ACL table and frees the slot bitmap; returns nothing.
+ */
+static void ice_deinit_acl(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ ice_acl_destroy_tbl(hw);
+
+ rte_free(hw->acl_tbl);
+ hw->acl_tbl = NULL;
+
+ if (pf->acl.slots) {
+ rte_free(pf->acl.slots);
+ pf->acl.slots = NULL;
+ }
+}
+
+static void
+acl_add_prof_prepare(struct ice_hw *hw, struct ice_flow_seg_info *seg,
+ bool is_l4, uint16_t src_port, uint16_t dst_port)
+{
+ uint16_t val_loc, mask_loc;
+
+ if (hw->dev_caps.num_funcs < 4) {
+ /* mac source address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.src_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.src_mac);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* mac destination address */
+ val_loc = offsetof(struct ice_fdir_fltr,
+ ext_data.dst_mac);
+ mask_loc = offsetof(struct ice_fdir_fltr,
+ ext_mask.dst_mac);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ val_loc, mask_loc,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ /* IP source address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ if (is_l4) {
+ /* Layer 4 source port */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
+ ice_flow_set_fld(seg, src_port, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
+ mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
+ ice_flow_set_fld(seg, dst_port, val_loc,
+ mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+}
+
+/**
+ * ice_acl_prof_init - Initialize ACL profile
+ * @pf: ice PF structure
+ *
+ * Returns 0 on success.
+ */
+static int
+ice_acl_prof_init(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_prof *prof_ipv4 = NULL;
+ struct ice_flow_prof *prof_ipv4_udp = NULL;
+ struct ice_flow_prof *prof_ipv4_tcp = NULL;
+ struct ice_flow_prof *prof_ipv4_sctp = NULL;
+ struct ice_flow_seg_info *seg;
+ int i;
+ int ret;
+
+ seg = (struct ice_flow_seg_info *)
+ ice_malloc(hw, sizeof(*seg));
+ if (!seg)
+ return -ENOMEM;
+
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, false, 0, 0);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ seg, 1, NULL, 0, &prof_ipv4);
+ if (ret)
+ goto err_add_prof;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ seg, 1, NULL, 0, &prof_ipv4_udp);
+ if (ret)
+ goto err_add_prof_ipv4_udp;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ seg, 1, NULL, 0, &prof_ipv4_tcp);
+ if (ret)
+ goto err_add_prof_ipv4_tcp;
+
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+ acl_add_prof_prepare(hw, seg, true,
+ ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ seg, 1, NULL, 0, &prof_ipv4_sctp);
+ if (ret)
+ goto err_add_prof_ipv4_sctp;
+
+ for (i = 0; i < pf->main_vsi->idx; i++) {
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4, i);
+ if (ret)
+ goto err_assoc_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
+ if (ret)
+ goto err_assoc_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_tcp, i);
+ if (ret)
+ goto err_assoc_prof;
+
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_sctp, i);
+ if (ret)
+ goto err_assoc_prof;
+ }
+ return 0;
+
+err_assoc_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_SCTP);
+err_add_prof_ipv4_sctp:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
+err_add_prof_ipv4_tcp:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
+err_add_prof_ipv4_udp:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_OTHER);
+err_add_prof:
+ ice_free(hw, seg);
+ return ret;
+}
+
+/**
+ * ice_acl_set_input_set - Helper function to set the input set for ACL
+ * @filter: pointer to the ACL filter configuration (copy source)
+ * @input: pointer to the fdir filter input structure (copy destination)
+ * Copies the match fields and masks from @filter into @input.
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
+{
+ if (!input)
+ return ICE_ERR_BAD_PTR;
+
+ input->q_index = filter->input.q_index;
+ input->dest_vsi = filter->input.dest_vsi;
+ input->dest_ctl = filter->input.dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->flow_type = filter->input.flow_type;
+
+ switch (input->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
+ input->ip.v4.src_port = filter->input.ip.v4.src_port;
+ input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
+ input->ip.v4.src_ip = filter->input.ip.v4.src_ip;
+
+ input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
+ input->mask.v4.src_port = filter->input.mask.v4.src_port;
+ input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
+ input->mask.v4.src_ip = filter->input.mask.v4.src_ip;
+
+ ice_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ice_memcpy(&input->ip.v4, &filter->input.ip.v4,
+ sizeof(struct ice_fdir_v4),
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->mask.v4, &filter->input.mask.v4,
+ sizeof(struct ice_fdir_v4),
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_data.src_mac,
+ &filter->input.ext_data.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->ext_mask.src_mac,
+ &filter->input.ext_mask.src_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_memcpy(&input->ext_data.dst_mac,
+ &filter->input.ext_data.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&input->ext_mask.dst_mac,
+ &filter->input.ext_mask.dst_mac,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_acl_alloc_slot_id - allocate a free ACL entry slot from the bitmap
+ * @slots: bitmap of free slots (a set bit means the slot is free)
+ * @slot_id: output, index of the allocated slot
+ *
+ * Scans for the first free slot, marks it as in use (clears the bit) and
+ * returns its index. Returns 0 on success, negative value on failure.
+ */
+static inline int
+ice_acl_alloc_slot_id(struct rte_bitmap *slots, uint32_t *slot_id)
+{
+ uint32_t pos = 0;
+ uint64_t slab = 0;
+ uint32_t i = 0;
+
+ /* Restart the scan from position 0 so allocation is deterministic. */
+ __rte_bitmap_scan_init(slots);
+ if (!rte_bitmap_scan(slots, &pos, &slab))
+ /* NOTE(review): rte_bitmap_scan does not set rte_errno, so
+ * -rte_errno may be stale/zero here — confirm intended value.
+ */
+ return -rte_errno;
+
+ /* pos is the slab base; add the offset of the first set bit. */
+ i = rte_bsf64(slab);
+ pos += i;
+ rte_bitmap_clear(slots, pos);
+
+ *slot_id = pos;
+ return 0;
+}
+
+/**
+ * ice_acl_hw_set_conf - program one ACL entry into hardware
+ * @pf: board private structure
+ * @input: filter match fields to program
+ * @acts: action list for the entry (currently a single action)
+ * @rule: software rule tracking structure; entry_id[entry_idx] is filled in
+ * @flow_type: flow profile the entry is added under
+ * @entry_idx: index within @rule->entry_id for this entry
+ *
+ * Allocates a slot from the PF ACL bitmap, adds the flow entry to HW and
+ * records the HW entry handle. Returns 0 on success, negative on failure.
+ */
+static inline int
+ice_acl_hw_set_conf(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ struct ice_flow_action *acts, struct acl_rule *rule,
+ enum ice_fltr_ptype flow_type, int32_t entry_idx)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ enum ice_block blk = ICE_BLK_ACL;
+ uint64_t entry_id, hw_entry;
+ uint32_t slot_id = 0;
+ int act_cnt = 1;
+ int ret = 0;
+
+ /* Allocate slot_id from bitmap table. */
+ ret = ice_acl_alloc_slot_id(pf->acl.slots, &slot_id);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to alloc slot id.");
+ return ret;
+ }
+
+ /* For IPV4_OTHER type, should add entry for all types.
+ * For IPV4_UDP/TCP/SCTP type, only add entry for each.
+ */
+ if (slot_id < MAX_ACL_ENTRIES) {
+ /* Encode the flow type into the upper 32 bits of entry_id so
+ * IDs are unique across profiles.
+ */
+ entry_id = ((uint64_t)flow_type << 32) | slot_id;
+ ret = ice_flow_add_entry(hw, blk, flow_type,
+ entry_id, pf->main_vsi->idx,
+ ICE_FLOW_PRIO_NORMAL, input,
+ acts, act_cnt, &hw_entry);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Fail to add entry.");
+ /* NOTE(review): slot_id is not returned to the bitmap
+ * on this failure path — looks like a slot leak; confirm.
+ */
+ return ret;
+ }
+ rule->entry_id[entry_idx] = slot_id;
+ pf->acl.hw_entry_id[slot_id] = hw_entry;
+ } else {
+ PMD_DRV_LOG(ERR, "Exceed the maximum entry number(%d)"
+ " HW supported!", MAX_ACL_ENTRIES);
+ /* NOTE(review): same here — allocated slot is not freed. */
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_acl_hw_rem_conf - remove HW ACL entries 0..entry_idx-1 of a rule
+ * @pf: board private structure
+ * @rule: rule whose entries are being removed
+ * @entry_idx: number of leading entries of @rule to remove
+ *
+ * Returns each slot to the free bitmap and removes the HW flow entry.
+ */
+static inline void
+ice_acl_hw_rem_conf(struct ice_pf *pf, struct acl_rule *rule, int32_t entry_idx)
+{
+ uint32_t slot_id;
+ int32_t i;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ for (i = 0; i < entry_idx; i++) {
+ slot_id = rule->entry_id[i];
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ pf->acl.hw_entry_id[slot_id]);
+ }
+}
+
+/**
+ * ice_acl_create_filter - rte_flow create callback for the ACL engine
+ * @ad: ice adapter
+ * @flow: rte_flow handle; on success @flow->rule points at the new acl_rule
+ * @meta: parsed ice_acl_conf produced by ice_acl_parse
+ * @error: rte_flow error reporting structure
+ *
+ * Programs the parsed rule into HW. An IPV4_OTHER rule is expanded into
+ * four HW entries (OTHER/UDP/TCP/SCTP); L4-specific rules use one entry.
+ */
+static int
+ice_acl_create_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error)
+{
+ struct ice_acl_conf *filter = meta;
+ enum ice_fltr_ptype flow_type = filter->input.flow_type;
+ struct ice_flow_action acts[1];
+ struct ice_pf *pf = &ad->pf;
+ struct ice_fdir_fltr *input;
+ struct acl_rule *rule;
+ int ret;
+
+ rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
+ if (!rule) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for acl rule");
+ return -rte_errno;
+ }
+
+ input = rte_zmalloc("acl_entry", sizeof(*input), 0);
+ if (!input) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for acl input");
+ ret = -rte_errno;
+ goto err_acl_input_alloc;
+ }
+
+ ret = ice_acl_set_input_set(filter, input);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to set input set.");
+ ret = -rte_errno;
+ goto err_acl_set_input;
+ }
+
+ /* DROP is the only action ice_acl_parse_action accepts, so acts[0]
+ * is always initialized before use below.
+ */
+ if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+ acts[0].type = ICE_FLOW_ACT_DROP;
+ acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
+ acts[0].data.acl_act.prio = 0x3;
+ acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+ }
+
+ input->acl_fltr = true;
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule, flow_type, 0);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to set hw configure.");
+ ret = -rte_errno;
+ /* NOTE(review): this early return leaks both 'rule' and
+ * 'input'; presumably should be goto err_acl_set_input.
+ */
+ return ret;
+ }
+
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
+ if (ret)
+ goto err_acl_hw_set_conf_udp;
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, 2);
+ if (ret)
+ goto err_acl_hw_set_conf_tcp;
+ ret = ice_acl_hw_set_conf(pf, input, acts, rule,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 3);
+ if (ret)
+ goto err_acl_hw_set_conf_sctp;
+ }
+
+ rule->flow_type = flow_type;
+ flow->rule = rule;
+ /* NOTE(review): 'input' is not freed on the success path — confirm
+ * whether ice_flow_add_entry retains a reference or this is a leak.
+ */
+ return 0;
+
+/* NOTE(review): these labels fall through into each other, so e.g. an
+ * SCTP failure removes entries 0..2, then 0..1 again, then 0 again —
+ * the same HW entries are removed (and bitmap bits set) multiple times.
+ * Confirm whether each label should stop after one ice_acl_hw_rem_conf.
+ */
+err_acl_hw_set_conf_sctp:
+ ice_acl_hw_rem_conf(pf, rule, 3);
+err_acl_hw_set_conf_tcp:
+ ice_acl_hw_rem_conf(pf, rule, 2);
+err_acl_hw_set_conf_udp:
+ ice_acl_hw_rem_conf(pf, rule, 1);
+err_acl_set_input:
+ rte_free(input);
+err_acl_input_alloc:
+ rte_free(rule);
+ return ret;
+}
+
+/**
+ * ice_acl_destroy_filter - rte_flow destroy callback for the ACL engine
+ * @ad: ice adapter
+ * @flow: flow whose rule (struct acl_rule) is to be removed
+ * @error: rte_flow error reporting structure
+ *
+ * Removes all HW entries belonging to the rule (four for IPV4_OTHER, one
+ * for the L4-specific types), returns their slots to the free bitmap and
+ * frees the software rule.
+ */
+static int
+ice_acl_destroy_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct acl_rule *rule = (struct acl_rule *)flow->rule;
+ uint32_t slot_id, i;
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret = 0;
+
+ switch (rule->flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ /* IPV4_OTHER was expanded into 4 entries at create time. */
+ for (i = 0; i < 4; i++) {
+ slot_id = rule->entry_id[i];
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ pf->acl.hw_entry_id[slot_id]);
+ }
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ slot_id = rule->entry_id[0];
+ rte_bitmap_set(pf->acl.slots, slot_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL,
+ pf->acl.hw_entry_id[slot_id]);
+ break;
+ default:
+ /* Should be unreachable: only the above types are created. */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Unsupported flow type.");
+ break;
+ }
+
+ flow->rule = NULL;
+ rte_free(rule);
+ return ret;
+}
+
+/* Free the software rule attached to a flow (HW entries are handled by
+ * ice_acl_destroy_filter).
+ */
+static void
+ice_acl_filter_free(struct rte_flow *flow)
+{
+ rte_free(flow->rule);
+ flow->rule = NULL;
+}
+
+/**
+ * ice_acl_parse_action - parse rte_flow actions for an ACL rule
+ * @ad: adapter (unused)
+ * @actions: action array terminated by RTE_FLOW_ACTION_TYPE_END
+ * @error: rte_flow error reporting structure
+ * @filter: filter configuration; dest_ctl is set for a DROP action
+ *
+ * Only DROP (plus VOID) is supported, and exactly one DROP must be
+ * present. Returns 0 on success, -rte_errno on failure.
+ */
+static int
+ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct ice_acl_conf *filter)
+{
+ uint32_t dest_num = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ /* Exactly one destination action (DROP) is required. */
+ if (dest_num == 0 || dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_acl_parse_pattern - parse rte_flow pattern items into an ACL filter
+ * @ad: adapter (unused)
+ * @pattern: item array terminated by RTE_FLOW_ITEM_TYPE_END
+ * @error: rte_flow error reporting structure
+ * @filter: filter configuration filled in from the pattern
+ *
+ * Supports ETH / IPV4 / TCP / UDP / SCTP items. Collects the matched
+ * fields into @filter->input and records which fields were supplied in
+ * @filter->input_set. Returns 0 on success, -rte_errno on failure.
+ *
+ * Note: the archived patch text had the address-of operator mangled by
+ * HTML entity conversion ("&eth_spec" rendered as "ð_spec"); the correct
+ * "&eth_spec"/"&eth_mask" forms are restored below.
+ */
+static int
+ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct ice_acl_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ uint64_t input_set = ICE_INSET_NONE;
+ uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (eth_spec && eth_mask) {
+ if (!rte_is_zero_ether_addr(&eth_spec->src) &&
+ !rte_is_zero_ether_addr(&eth_mask->src)) {
+ input_set |= ICE_INSET_SMAC;
+ ice_memcpy(&filter->input.ext_data.src_mac,
+ &eth_spec->src,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&filter->input.ext_mask.src_mac,
+ &eth_mask->src,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ }
+
+ if (!rte_is_zero_ether_addr(&eth_spec->dst) &&
+ !rte_is_zero_ether_addr(&eth_mask->dst)) {
+ input_set |= ICE_INSET_DMAC;
+ ice_memcpy(&filter->input.ext_data.dst_mac,
+ &eth_spec->dst,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(&filter->input.ext_mask.dst_mac,
+ &eth_mask->dst,
+ RTE_ETHER_ADDR_LEN,
+ ICE_NONDMA_TO_NONDMA);
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr) {
+ filter->input.ip.v4.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.mask.v4.src_ip =
+ ipv4_mask->hdr.src_addr;
+
+ input_set |= ICE_INSET_IPV4_SRC;
+ }
+
+ if (ipv4_mask->hdr.dst_addr) {
+ filter->input.ip.v4.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.mask.v4.dst_ip =
+ ipv4_mask->hdr.dst_addr;
+
+ input_set |= ICE_INSET_IPV4_DST;
+ }
+ }
+
+ /* Defaults to OTHER; refined if a later L4 item follows. */
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ tcp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_TCP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ tcp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ tcp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_TCP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ tcp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ tcp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ udp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_UDP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ udp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ udp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_UDP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ udp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ udp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+
+ if (sctp_spec && sctp_mask) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ sctp_mask->hdr.src_port) {
+ input_set |= ICE_INSET_SCTP_SRC_PORT;
+ filter->input.ip.v4.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.mask.v4.src_port =
+ sctp_mask->hdr.src_port;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ sctp_mask->hdr.dst_port) {
+ input_set |= ICE_INSET_SCTP_DST_PORT;
+ filter->input.ip.v4.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.mask.v4.dst_port =
+ sctp_mask->hdr.dst_port;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
+ filter->input.flow_type = flow_type;
+ filter->input_set = input_set;
+
+ return 0;
+}
+
+/**
+ * ice_acl_parse - parse_pattern_action callback for the ACL parser
+ * @ad: ice adapter
+ * @array: supported pattern table (ice_acl_pattern)
+ * @array_len: number of entries in @array
+ * @pattern: rte_flow pattern to parse
+ * @actions: rte_flow actions to parse
+ * @meta: output; on success points at the PF-owned ice_acl_conf
+ * @error: rte_flow error reporting structure
+ *
+ * Matches the pattern against the supported table, parses items and
+ * actions into pf->acl.conf and validates the collected input set.
+ * Returns 0 on success, negative on failure.
+ */
+static int
+ice_acl_parse(struct ice_adapter *ad,
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_acl_conf *filter = &pf->acl.conf;
+ struct ice_pattern_match_item *item = NULL;
+ uint64_t input_set;
+ int ret;
+
+ memset(filter, 0, sizeof(*filter));
+ item = ice_search_pattern_match_item(pattern, array, array_len, error);
+ if (!item)
+ return -rte_errno;
+
+ ret = ice_acl_parse_pattern(ad, pattern, error, filter);
+ if (ret)
+ goto error;
+ input_set = filter->input_set;
+ /* Reject empty input sets and fields the matched pattern disallows. */
+ if (!input_set || input_set & ~item->input_set_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ pattern,
+ "Invalid input set");
+ ret = -rte_errno;
+ goto error;
+ }
+
+ ret = ice_acl_parse_action(ad, actions, error, filter);
+ if (ret)
+ goto error;
+
+ if (meta)
+ *meta = filter;
+
+/* The success path falls through here too: 'item' is freed either way
+ * and ret is 0 on success.
+ */
+error:
+ rte_free(item);
+ return ret;
+}
+
+/**
+ * ice_acl_bitmap_init - create the free-slot bitmap for ACL entries
+ * @pf: board private structure
+ *
+ * Allocates and initializes a bitmap of MAX_ACL_SLOTS_ID bits with all
+ * bits set (all slots free). Returns 0 on success, negative on failure.
+ */
+static int
+ice_acl_bitmap_init(struct ice_pf *pf)
+{
+ uint32_t bmp_size;
+ void *mem = NULL;
+ struct rte_bitmap *slots;
+ int ret = 0;
+ bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
+ mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
+ return -rte_errno;
+ }
+
+ /* All bits set: every slot starts out free. */
+ slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
+ if (slots == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to initialize acl bitmap.");
+ ret = -rte_errno;
+ goto err_acl_mem_alloc;
+ }
+ pf->acl.slots = slots;
+ return 0;
+
+err_acl_mem_alloc:
+ rte_free(mem);
+ return ret;
+}
+
+/**
+ * ice_acl_init - engine init callback: set up ACL HW and register parser
+ * @ad: ice adapter
+ *
+ * Allocates ACL profiles, configures the ACL table, creates the slot
+ * bitmap, initializes flow profiles and registers the ACL flow parser.
+ *
+ * NOTE(review): failures after ice_acl_prof_alloc/ice_acl_setup do not
+ * unwind the earlier steps — confirm whether uninit covers that case.
+ */
+static int
+ice_acl_init(struct ice_adapter *ad)
+{
+ int ret = 0;
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ret = ice_acl_prof_alloc(hw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Cannot allocate memory for "
+ "ACL profile.");
+ return -ENOMEM;
+ }
+
+ ret = ice_acl_setup(pf);
+ if (ret)
+ return ret;
+
+ ret = ice_acl_bitmap_init(pf);
+ if (ret)
+ return ret;
+
+ ret = ice_acl_prof_init(pf);
+ if (ret)
+ return ret;
+
+ return ice_register_parser(parser, ad);
+}
+
+/**
+ * ice_acl_prof_free - free the per-flow-type ACL profile array
+ * @hw: pointer to HW instance
+ *
+ * Frees each per-ptype profile (counterpart of ice_acl_prof_alloc) and
+ * then the array itself; rte_free(NULL) is a no-op so unused slots are
+ * safe.
+ */
+static void
+ice_acl_prof_free(struct ice_hw *hw)
+{
+ enum ice_fltr_ptype ptype;
+
+ for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+ ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+ rte_free(hw->acl_prof[ptype]);
+ hw->acl_prof[ptype] = NULL;
+ }
+
+ rte_free(hw->acl_prof);
+ hw->acl_prof = NULL;
+}
+
+/**
+ * ice_acl_uninit - engine uninit callback
+ * @ad: ice adapter
+ *
+ * Unregisters the ACL parser, deinitializes the ACL table in HW and
+ * frees the profile memory.
+ */
+static void
+ice_acl_uninit(struct ice_adapter *ad)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_flow_parser *parser = &ice_acl_parser;
+
+ ice_unregister_parser(parser, ad);
+
+ ice_deinit_acl(pf);
+ ice_acl_prof_free(hw);
+}
+
+/* ACL flow engine: callback table plugged into the generic flow layer. */
+static struct
+ice_flow_engine ice_acl_engine = {
+ .init = ice_acl_init,
+ .uninit = ice_acl_uninit,
+ .create = ice_acl_create_filter,
+ .destroy = ice_acl_destroy_filter,
+ .free = ice_acl_filter_free,
+ .type = ICE_FLOW_ENGINE_ACL,
+};
+
+/* Parser binding the supported ACL patterns to the engine; runs at the
+ * distributor stage.
+ */
+static struct
+ice_flow_parser ice_acl_parser = {
+ .engine = &ice_acl_engine,
+ .array = ice_acl_pattern,
+ .array_len = RTE_DIM(ice_acl_pattern),
+ .parse_pattern_action = ice_acl_parse,
+ .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
+/* Register the ACL engine with the generic flow framework at load time. */
+RTE_INIT(ice_acl_engine_init)
+{
+ struct ice_flow_engine *engine = &ice_acl_engine;
+ ice_register_flow_engine(engine);
+}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 9789096..05218af 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -49,6 +49,8 @@
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
#define ICE_MAX_PKG_FILENAME_SIZE 256
+#define MAX_ACL_ENTRIES 512
+
/**
* vlan_id is a 12 bit number.
* The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -398,6 +400,20 @@ struct ice_hash_ctx {
struct ice_hash_gtpu_ctx gtpu6;
};
+struct ice_acl_conf {
+ struct ice_fdir_fltr input;
+ uint64_t input_set;
+};
+
+/**
+ * A structure used to define fields of ACL related info.
+ */
+struct ice_acl_info {
+ struct ice_acl_conf conf;
+ struct rte_bitmap *slots;
+ uint64_t hw_entry_id[MAX_ACL_ENTRIES];
+};
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -421,6 +437,7 @@ struct ice_pf {
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
uint16_t fdir_qp_offset;
struct ice_fdir_info fdir; /* flow director info */
+ struct ice_acl_info acl; /* ACL info */
struct ice_hash_ctx hash_ctx;
uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 54b0316..1429cbc 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1896,6 +1896,8 @@ ice_register_parser(struct ice_flow_parser *parser,
TAILQ_INSERT_TAIL(list, parser_node, node);
else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
TAILQ_INSERT_HEAD(list, parser_node, node);
+ else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
+ TAILQ_INSERT_HEAD(list, parser_node, node);
else
return -EINVAL;
}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 99e1b77..254595a 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -10,7 +10,8 @@ sources = files(
'ice_switch_filter.c',
'ice_generic_flow.c',
'ice_fdir_filter.c',
- 'ice_hash.c'
+ 'ice_hash.c',
+ 'ice_acl_filter.c'
)
deps += ['hash', 'net', 'common_iavf']
--
2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Simei Su
` (2 preceding siblings ...)
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 3/3] net/ice: support ACL filter in DCF Simei Su
@ 2020-10-20 12:37 ` Zhang, Qi Z
3 siblings, 0 replies; 25+ messages in thread
From: Zhang, Qi Z @ 2020-10-20 12:37 UTC (permalink / raw)
To: Su, Simei, Yang, Qiming; +Cc: dev, Wang, Haiyue, Xing, Beilei, Ding, Xuan
> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Tuesday, October 20, 2020 7:33 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Ding, Xuan <xuan.ding@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v5 0/3] net/ice: support DCF ACL capability
>
> [PATCH v5 1/3] change API from static to non-static.
> [PATCH v5 2/3] get PF VSI map for DCF ACL rule.
> [PATCH v5 3/3] support IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
> and DROP action for DCF ACL.
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
,
Applied to dpdk-next-net-intel after squash patch [1/3] to patch [3/3],
as it is not much necessary to have separate a patch just for function expose.
Thanks
Qi
>
> v5:
> * Change acl_rule structure location.
> * Correct error processing in ice_acl_prof_init.
>
> v4:
> * Add processing for error logic.
> * Fix several bugs.
>
> v3:
> * Optimize code logic in ice_acl_prof_init and ice_acl_create_filter.
> * Fix several bugs.
>
> v2:
> * Add release notes.
> * Adjust patch sequence.
> * Refactor ACL design and related structure.
> * Add bitmap mechanism to allocate entry dynamically.
>
> Simei Su (3):
> net/ice/base: change API from static to non-static
> net/ice: get PF VSI map
> net/ice: support ACL filter in DCF
>
> doc/guides/rel_notes/release_20_11.rst | 2 +-
> drivers/net/ice/base/ice_flow.c | 2 +-
> drivers/net/ice/base/ice_flow.h | 3 +
> drivers/net/ice/ice_acl_filter.c | 1011
> ++++++++++++++++++++++++++++++++
> drivers/net/ice/ice_dcf.c | 1 +
> drivers/net/ice/ice_dcf.h | 1 +
> drivers/net/ice/ice_dcf_parent.c | 37 +-
> drivers/net/ice/ice_ethdev.h | 17 +
> drivers/net/ice/ice_generic_flow.c | 2 +
> drivers/net/ice/meson.build | 3 +-
> 10 files changed, 1074 insertions(+), 5 deletions(-) create mode 100644
> drivers/net/ice/ice_acl_filter.c
>
> --
> 2.9.5
^ permalink raw reply [flat|nested] 25+ messages in thread
end of thread, other threads:[~2020-10-20 12:37 UTC | newest]
Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-10 7:37 [dpdk-dev] [PATCH v1 0/3] net/ice: support DCF ACL capability Simei Su
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 1/3] net/ice: get PF VSI map Simei Su
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 2/3] net/ice: add devarg for ACL ipv4 rule number Simei Su
2020-09-10 7:53 ` Wang, Haiyue
2020-09-10 7:37 ` [dpdk-dev] [PATCH v1 3/3] net/ice: support ACL filter in DCF Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 0/4] net/ice: support DCF ACL capability Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 1/4] net/ice/base: change API from static to non-static Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 2/4] net/ice: get PF VSI map Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 3/4] net/ice: support ACL filter in DCF Simei Su
2020-09-29 1:56 ` [dpdk-dev] [PATCH v2 4/4] net/ice: add devarg for ACL ipv4 rule number Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 1/3] net/ice/base: change API from static to non-static Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 2/3] net/ice: get PF VSI map Simei Su
2020-10-14 8:54 ` [dpdk-dev] [PATCH v3 3/3] net/ice: support ACL filter in DCF Simei Su
2020-10-15 5:10 ` Zhang, Qi Z
2020-10-15 7:08 ` Su, Simei
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 1/3] net/ice/base: change API from static to non-static Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 2/3] net/ice: get PF VSI map Simei Su
2020-10-16 8:44 ` [dpdk-dev] [PATCH v4 3/3] net/ice: support ACL filter in DCF Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 1/3] net/ice/base: change API from static to non-static Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 2/3] net/ice: get PF VSI map Simei Su
2020-10-20 11:32 ` [dpdk-dev] [PATCH v5 3/3] net/ice: support ACL filter in DCF Simei Su
2020-10-20 12:37 ` [dpdk-dev] [PATCH v5 0/3] net/ice: support DCF ACL capability Zhang, Qi Z
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).