From: Qiming Yang <qiming.yang@intel.com>
To: dev@dpdk.org
Cc: wei zhao <wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH v4 1/3] net/ice: enable switch filter
Date: Fri, 21 Jun 2019 14:13:53 +0800
Message-ID: <20190621061355.192659-2-qiming.yang@intel.com>
In-Reply-To: <20190621061355.192659-1-qiming.yang@intel.com>
From: wei zhao <wei.zhao1@intel.com>
This patch enables the backend of rte_flow. It translates
rte_flow patterns and actions into the device-specific data
structures and configures the packet processing engine's binary
classifier (switch) accordingly.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
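For context, a minimal sketch of how an application could exercise this
backend through the generic rte_flow API once patch 2/3 wires it up.
The helper name, address and queue index below are illustrative only;
note that the parser only programs fields whose mask is all ones:

  #include <stdint.h>
  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Illustrative helper: steer IPv4 dst 192.168.0.1 to Rx queue 3. */
  static struct rte_flow *
  example_switch_rule(uint16_t port_id, struct rte_flow_error *err)
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item_ipv4 ip_spec = {
  		.hdr.dst_addr = rte_cpu_to_be_32(0xc0a80001), /* 192.168.0.1 */
  	};
  	/* Full mask: the switch parser only takes all-ones masked fields. */
  	struct rte_flow_item_ipv4 ip_mask = {
  		.hdr.dst_addr = UINT32_MAX,
  	};
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
  		  .spec = &ip_spec, .mask = &ip_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_queue queue = { .index = 3 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }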
drivers/net/ice/Makefile | 1 +
drivers/net/ice/ice_ethdev.c | 18 ++
drivers/net/ice/ice_ethdev.h | 7 +
drivers/net/ice/ice_switch_filter.c | 525 ++++++++++++++++++++++++++++++++++++
drivers/net/ice/ice_switch_filter.h | 24 ++
drivers/net/ice/meson.build | 3 +-
6 files changed, 577 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ice/ice_switch_filter.c
create mode 100644 drivers/net/ice/ice_switch_filter.h
diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
CC_AVX2_SUPPORT=1
else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
return err;
}
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+ uint32_t reg;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+ if (reg & PFLAN_RX_QALLOC_VALID_M) {
+ pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+ } else {
+ PMD_INIT_LOG(WARNING, "Failed to get Rx base queue index");
+ }
+}
+
static int
ice_dev_init(struct rte_eth_dev *dev)
{
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ /* get base queue pairs index in the device */
+ ice_base_queue_get(pf);
+
return 0;
err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
bool offset_loaded;
};
+/* Struct to store a created flow rule. */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ void *rule;
+};
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
uint16_t hash_lut_size; /* The size of hash lookup table */
uint16_t lan_nb_qp_max;
uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+ uint16_t base_queue; /* The base queue pairs index in the device */
struct ice_hw_port_stats stats_offset;
struct ice_hw_port_stats stats;
/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..c1b6c47
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
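+/* Translate an rte_flow pattern into a list of switch lookup elements
+ * (ice_adv_lkup_elem). Only fields whose mask is all ones are added
+ * to the lookup entries.
+ */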
+static int
+ice_parse_switch_filter(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct ice_adv_rule_info *rule_info,
+ struct ice_adv_lkup_elem **lkup_list,
+ uint16_t *lkups_num)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+ struct ice_adv_lkup_elem *list;
+ uint16_t j, t = 0;
+ uint16_t item_num = 0;
+ enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+ uint16_t tunnel_valid = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+ item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+ item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+ item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+ item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+ item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+ item_num++;
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+ tun_type = ICE_SW_TUN_VXLAN;
+ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+ tun_type = ICE_SW_TUN_NVGRE;
+ }
+
+ list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+ if (!list) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "No memory for PMD internal items");
+ goto out;
+ }
+ *lkup_list = list;
+
+ for (item = pattern; item->type !=
+ RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ if (eth_spec && eth_mask) {
+ list[t].type = (tun_type == ICE_NON_TUN) ?
+ ICE_MAC_OFOS : ICE_MAC_IL;
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] ==
+ UINT8_MAX) {
+ list[t].h_u.eth_hdr.
+ src_addr[j] =
+ eth_spec->src.addr_bytes[j];
+ list[t].m_u.eth_hdr.
+ src_addr[j] =
+ eth_mask->src.addr_bytes[j];
+ }
+ if (eth_mask->dst.addr_bytes[j] ==
+ UINT8_MAX) {
+ list[t].h_u.eth_hdr.
+ dst_addr[j] =
+ eth_spec->dst.addr_bytes[j];
+ list[t].m_u.eth_hdr.
+ dst_addr[j] =
+ eth_mask->dst.addr_bytes[j];
+ }
+ }
+ if (eth_mask->type == UINT16_MAX) {
+ list[t].h_u.eth_hdr.ethtype_id =
+ rte_be_to_cpu_16(eth_spec->type);
+ list[t].m_u.eth_hdr.ethtype_id =
+ UINT16_MAX;
+ }
+ t++;
+ } else if (!eth_spec && !eth_mask) {
+ list[t].type = (tun_type == ICE_NON_TUN) ?
+ ICE_MAC_OFOS : ICE_MAC_IL;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+ if (ipv4_spec && ipv4_mask) {
+ list[t].type = (tun_type == ICE_NON_TUN) ?
+ ICE_IPV4_OFOS : ICE_IPV4_IL;
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ list[t].h_u.ipv4_hdr.src_addr =
+ ipv4_spec->hdr.src_addr;
+ list[t].m_u.ipv4_hdr.src_addr =
+ UINT32_MAX;
+ }
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ list[t].h_u.ipv4_hdr.dst_addr =
+ ipv4_spec->hdr.dst_addr;
+ list[t].m_u.ipv4_hdr.dst_addr =
+ UINT32_MAX;
+ }
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ list[t].h_u.ipv4_hdr.time_to_live =
+ ipv4_spec->hdr.time_to_live;
+ list[t].m_u.ipv4_hdr.time_to_live =
+ UINT8_MAX;
+ }
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ list[t].h_u.ipv4_hdr.protocol =
+ ipv4_spec->hdr.next_proto_id;
+ list[t].m_u.ipv4_hdr.protocol =
+ UINT8_MAX;
+ }
+ if (ipv4_mask->hdr.type_of_service ==
+ UINT8_MAX) {
+ list[t].h_u.ipv4_hdr.tos =
+ ipv4_spec->hdr.type_of_service;
+ list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+ }
+ t++;
+ } else if (!ipv4_spec && !ipv4_mask) {
+ list[t].type = (tun_type == ICE_NON_TUN) ?
+ ICE_IPV4_OFOS : ICE_IPV4_IL;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+ if (ipv6_spec && ipv6_mask) {
+ list[t].type = (tun_type == ICE_NON_TUN) ?
+ ICE_IPV6_OFOS : ICE_IPV6_IL;
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.src_addr[j] ==
+ UINT8_MAX) {
+ list[t].h_u.ice_ipv6_ofos_hdr.
+ src_addr[j] =
+ ipv6_spec->hdr.src_addr[j];
+ list[t].m_u.ice_ipv6_ofos_hdr.
+ src_addr[j] =
+ ipv6_mask->hdr.src_addr[j];
+ }
+ if (ipv6_mask->hdr.dst_addr[j] ==
+ UINT8_MAX) {
+ list[t].h_u.ice_ipv6_ofos_hdr.
+ dst_addr[j] =
+ ipv6_spec->hdr.dst_addr[j];
+ list[t].m_u.ice_ipv6_ofos_hdr.
+ dst_addr[j] =
+ ipv6_mask->hdr.dst_addr[j];
+ }
+ }
+ if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ list[t].h_u.ice_ipv6_ofos_hdr.next_hdr =
+ ipv6_spec->hdr.proto;
+ list[t].m_u.ice_ipv6_ofos_hdr.next_hdr =
+ UINT8_MAX;
+ }
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ list[t].h_u.ice_ipv6_ofos_hdr.
+ hop_limit = ipv6_spec->hdr.hop_limits;
+ list[t].m_u.ice_ipv6_ofos_hdr.
+ hop_limit = UINT8_MAX;
+ }
+ t++;
+ } else if (!ipv6_spec && !ipv6_mask) {
+ list[t].type = (tun_type == ICE_NON_TUN) ?
+ ICE_IPV6_OFOS : ICE_IPV6_IL;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+ if (udp_spec && udp_mask) {
+ if (tun_type == ICE_SW_TUN_VXLAN &&
+ tunnel_valid == 0)
+ list[t].type = ICE_UDP_OF;
+ else
+ list[t].type = ICE_UDP_ILOS;
+ if (udp_mask->hdr.src_port == UINT16_MAX) {
+ list[t].h_u.l4_hdr.src_port =
+ udp_spec->hdr.src_port;
+ list[t].m_u.l4_hdr.src_port =
+ udp_mask->hdr.src_port;
+ }
+ if (udp_mask->hdr.dst_port == UINT16_MAX) {
+ list[t].h_u.l4_hdr.dst_port =
+ udp_spec->hdr.dst_port;
+ list[t].m_u.l4_hdr.dst_port =
+ udp_mask->hdr.dst_port;
+ }
+ t++;
+ } else if (!udp_spec && !udp_mask) {
+ list[t].type = ICE_UDP_ILOS;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+ if (tcp_spec && tcp_mask) {
+ list[t].type = ICE_TCP_IL;
+ if (tcp_mask->hdr.src_port == UINT16_MAX) {
+ list[t].h_u.l4_hdr.src_port =
+ tcp_spec->hdr.src_port;
+ list[t].m_u.l4_hdr.src_port =
+ tcp_mask->hdr.src_port;
+ }
+ if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+ list[t].h_u.l4_hdr.dst_port =
+ tcp_spec->hdr.dst_port;
+ list[t].m_u.l4_hdr.dst_port =
+ tcp_mask->hdr.dst_port;
+ }
+ t++;
+ } else if (!tcp_spec && !tcp_mask) {
+ list[t].type = ICE_TCP_IL;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+ if (sctp_spec && sctp_mask) {
+ list[t].type = ICE_SCTP_IL;
+ if (sctp_mask->hdr.src_port == UINT16_MAX) {
+ list[t].h_u.sctp_hdr.src_port =
+ sctp_spec->hdr.src_port;
+ list[t].m_u.sctp_hdr.src_port =
+ sctp_mask->hdr.src_port;
+ }
+ if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+ list[t].h_u.sctp_hdr.dst_port =
+ sctp_spec->hdr.dst_port;
+ list[t].m_u.sctp_hdr.dst_port =
+ sctp_mask->hdr.dst_port;
+ }
+ t++;
+ } else if (!sctp_spec && !sctp_mask) {
+ list[t].type = ICE_SCTP_IL;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ tunnel_valid = 1;
+ if (vxlan_spec && vxlan_mask) {
+ list[t].type = ICE_VXLAN;
+ if (vxlan_mask->vni[0] == UINT8_MAX &&
+ vxlan_mask->vni[1] == UINT8_MAX &&
+ vxlan_mask->vni[2] == UINT8_MAX) {
+ list[t].h_u.tnl_hdr.vni =
+ (vxlan_spec->vni[2] << 16) |
+ (vxlan_spec->vni[1] << 8) |
+ vxlan_spec->vni[0];
+ list[t].m_u.tnl_hdr.vni =
+ UINT32_MAX;
+ }
+ t++;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ list[t].type = ICE_VXLAN;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
+ tunnel_valid = 1;
+ if (nvgre_spec && nvgre_mask) {
+ list[t].type = ICE_NVGRE;
+ if (nvgre_mask->tni[0] == UINT8_MAX &&
+ nvgre_mask->tni[1] == UINT8_MAX &&
+ nvgre_mask->tni[2] == UINT8_MAX) {
+ list[t].h_u.nvgre_hdr.tni_flow =
+ (nvgre_spec->tni[2] << 16) |
+ (nvgre_spec->tni[1] << 8) |
+ nvgre_spec->tni[0];
+ list[t].m_u.nvgre_hdr.tni_flow =
+ UINT32_MAX;
+ }
+ t++;
+ } else if (!nvgre_spec && !nvgre_mask) {
+ list[t].type = ICE_NVGRE;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ case RTE_FLOW_ITEM_TYPE_END:
+ break;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid pattern item.");
+ goto out;
+ }
+ }
+
+ rule_info->tun_type = tun_type;
+ *lkups_num = t;
+
+ return 0;
+out:
+ return -rte_errno;
+}
+
+/* For now, the ice switch filter action code only supports
+ * QUEUE or DROP.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct ice_adv_rule_info *rule_info)
+{
+ struct ice_vsi *vsi = pf->main_vsi;
+ const struct rte_flow_action_queue *act_q;
+ uint16_t base_queue;
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+
+ base_queue = pf->base_queue;
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ act_q = action->conf;
+ rule_info->sw_act.fltr_act =
+ ICE_FWD_TO_Q;
+ rule_info->sw_act.fwd_id.q_id =
+ base_queue + act_q->index;
+ if (act_q->index >=
+ pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid queue ID"
+ " for switch filter.");
+ return -rte_errno;
+ }
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ rule_info->sw_act.fltr_act =
+ ICE_DROP_PACKET;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
+ rule_info->sw_act.vsi_handle = vsi->idx;
+ rule_info->rx = 1;
+ rule_info->sw_act.src = vsi->idx;
+
+ return 0;
+}
+
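+/* Program the parsed lookup list and rule info into the switch engine
+ * via ice_add_adv_rule() and store the returned rule information in
+ * the flow handle so the rule can be removed later.
+ */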
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+ struct ice_adv_lkup_elem *list,
+ uint16_t lkups_cnt,
+ struct ice_adv_rule_info *rule_info,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+ struct ice_rule_query_data rule_added = {0};
+ struct ice_rule_query_data *filter_ptr;
+
+ if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "item number too large for rule");
+ return -rte_errno;
+ }
+ if (!list) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "lookup list should not be NULL");
+ return -rte_errno;
+ }
+
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+ if (!ret) {
+ filter_ptr = rte_zmalloc("ice_switch_filter",
+ sizeof(struct ice_rule_query_data), 0);
+ if (!filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return -EINVAL;
+ }
+ flow->rule = filter_ptr;
+ rte_memcpy(filter_ptr,
+ &rule_added,
+ sizeof(struct ice_rule_query_data));
+ }
+
+ return ret;
+}
+
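+/* Create a switch filter rule: parse the pattern into a lookup list,
+ * parse the actions, then program the rule. The lookup list is freed
+ * on both the success and the failure path.
+ */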
+int
+ice_create_switch_filter(struct ice_pf *pf,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct ice_adv_rule_info rule_info = {0};
+ struct ice_adv_lkup_elem *list = NULL;
+ uint16_t lkups_num = 0;
+
+ ret = ice_parse_switch_filter(pattern, actions, error,
+ &rule_info, &list, &lkups_num);
+ if (ret)
+ goto error;
+
+ ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+ if (ret)
+ goto error;
+
+ ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+ if (ret)
+ goto error;
+
+ rte_free(list);
+ return 0;
+
+error:
+ rte_free(list);
+
+ return -rte_errno;
+}
+
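+/* Remove a rule created by ice_create_switch_filter() using the rule
+ * information stored in the flow handle, then free that information.
+ */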
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+ struct ice_rule_query_data *filter_ptr;
+
+ filter_ptr = (struct ice_rule_query_data *)
+ flow->rule;
+
+ if (!filter_ptr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "no such flow"
+ " create by switch filter");
+ return -rte_errno;
+ }
+
+ ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "fail to destroy switch filter rule");
+ return -rte_errno;
+ }
+
+ rte_free(filter_ptr);
+ return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+ struct ice_rule_query_data *filter_ptr;
+
+ filter_ptr = (struct ice_rule_query_data *)rule;
+
+ rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
sources = files(
'ice_ethdev.c',
- 'ice_rxtx.c'
+ 'ice_rxtx.c',
+ 'ice_switch_filter.c'
)
deps += ['hash']
--
2.9.5