From: <wanry@3snic.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, Renyong Wan <wanry@3snic.com>,
	Steven Song <steven.song@3snic.com>
Subject: [PATCH v5 31/32] net/sssnic: add generic flow ops
Date: Mon, 4 Sep 2023 12:56:57 +0800
Message-ID: <20230904045658.238185-32-wanry@3snic.com>
In-Reply-To: <20230904045658.238185-1-wanry@3snic.com>

From: Renyong Wan <wanry@3snic.com>
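
Add generic flow (rte_flow) API support to the sssnic PMD. Flow rules
are implemented on top of the flow director: ethertype rules are mapped
to hardware packet-type filters, while IPv4/IPv6/VXLAN flow rules are
mapped to TCAM entries allocated from firmware-managed blocks of 16
entries. Supported pattern items are any, eth, ipv4, ipv6, tcp, udp
and vxlan; the only supported action is queue.

As an illustration (testpmd syntax, not part of this patch), a rule
such as the following is expected to be offloaded as a TCAM-backed
flow director entry steering matching packets to Rx queue 1:

    flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / udp / end \
        actions queue index 1 / end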

Signed-off-by: Steven Song <steven.song@3snic.com>
Signed-off-by: Renyong Wan <wanry@3snic.com>
---
v2:
* Fixed issue where 'mask->hdr.src_addr' would always evaluate to 'true'.
* Removed error.h from the list of included files.
---
 doc/guides/nics/features/sssnic.ini     |   12 +
 drivers/net/sssnic/base/sssnic_api.c    |  264 ++++++
 drivers/net/sssnic/base/sssnic_api.h    |   22 +
 drivers/net/sssnic/base/sssnic_cmd.h    |   71 ++
 drivers/net/sssnic/base/sssnic_hw.h     |    3 +
 drivers/net/sssnic/base/sssnic_misc.h   |    7 +
 drivers/net/sssnic/meson.build          |    2 +
 drivers/net/sssnic/sssnic_ethdev.c      |   12 +
 drivers/net/sssnic/sssnic_ethdev.h      |    1 +
 drivers/net/sssnic/sssnic_ethdev_fdir.c | 1017 +++++++++++++++++++++++
 drivers/net/sssnic/sssnic_ethdev_fdir.h |  332 ++++++++
 drivers/net/sssnic/sssnic_ethdev_flow.c |  981 ++++++++++++++++++++++
 drivers/net/sssnic/sssnic_ethdev_flow.h |   11 +
 drivers/net/sssnic/sssnic_ethdev_rx.c   |   18 +
 14 files changed, 2753 insertions(+)
 create mode 100644 drivers/net/sssnic/sssnic_ethdev_fdir.c
 create mode 100644 drivers/net/sssnic/sssnic_ethdev_fdir.h
 create mode 100644 drivers/net/sssnic/sssnic_ethdev_flow.c
 create mode 100644 drivers/net/sssnic/sssnic_ethdev_flow.h

diff --git a/doc/guides/nics/features/sssnic.ini b/doc/guides/nics/features/sssnic.ini
index f5738ac934..57e7440d86 100644
--- a/doc/guides/nics/features/sssnic.ini
+++ b/doc/guides/nics/features/sssnic.ini
@@ -33,3 +33,15 @@ FW version           = Y
 Linux                = Y
 ARMv8                = Y
 x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+ipv4                 = Y
+ipv6                 = Y
+tcp                  = Y
+udp                  = Y
+vxlan                = Y
+
+[rte_flow actions]
+queue                = Y
diff --git a/drivers/net/sssnic/base/sssnic_api.c b/drivers/net/sssnic/base/sssnic_api.c
index 68c16c9c1e..0e965442fd 100644
--- a/drivers/net/sssnic/base/sssnic_api.c
+++ b/drivers/net/sssnic/base/sssnic_api.c
@@ -1635,3 +1635,267 @@ sssnic_vlan_filter_set(struct sssnic_hw *hw, uint16_t vid, bool add)
 
 	return 0;
 }
+
+int
+sssnic_tcam_enable_set(struct sssnic_hw *hw, bool enabled)
+{
+	struct sssnic_tcam_enable_set_cmd cmd;
+	struct sssnic_msg msg;
+	uint32_t cmd_len;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_len = sizeof(cmd);
+	cmd.function = SSSNIC_FUNC_IDX(hw);
+	cmd.enabled = enabled ? 1 : 0;
+
+	sssnic_msg_init(&msg, (uint8_t *)&cmd, cmd_len,
+		SSSNIC_SET_TCAM_ENABLE_CMD, SSSNIC_MPU_FUNC_IDX,
+		SSSNIC_LAN_MODULE, SSSNIC_MSG_TYPE_REQ);
+	ret = sssnic_mbox_send(hw, &msg, (uint8_t *)&cmd, &cmd_len, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to send mbox message, ret=%d", ret);
+		return ret;
+	}
+
+	if (cmd_len == 0 || cmd.common.status != 0) {
+		if (cmd.common.status == SSSNIC_TCAM_CMD_STATUS_UNSUPPORTED)
+			PMD_DRV_LOG(WARNING,
+				"SSSNIC_SET_TCAM_ENABLE_CMD is unsupported");
+		else
+			PMD_DRV_LOG(ERR,
+				"Bad response to SSSNIC_SET_TCAM_ENABLE_CMD, len=%u, status=%u",
+				cmd_len, cmd.common.status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int
+sssnic_tcam_flush(struct sssnic_hw *hw)
+{
+	struct sssnic_tcam_flush_cmd cmd;
+	struct sssnic_msg msg;
+	uint32_t cmd_len;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_len = sizeof(cmd);
+	cmd.function = SSSNIC_FUNC_IDX(hw);
+
+	sssnic_msg_init(&msg, (uint8_t *)&cmd, cmd_len, SSSNIC_FLUSH_TCAM_CMD,
+		SSSNIC_MPU_FUNC_IDX, SSSNIC_LAN_MODULE, SSSNIC_MSG_TYPE_REQ);
+	ret = sssnic_mbox_send(hw, &msg, (uint8_t *)&cmd, &cmd_len, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to send mbox message, ret=%d", ret);
+		return ret;
+	}
+
+	if (cmd_len == 0 || cmd.common.status != 0) {
+		if (cmd.common.status == SSSNIC_TCAM_CMD_STATUS_UNSUPPORTED)
+			PMD_DRV_LOG(WARNING,
+				"SSSNIC_FLUSH_TCAM_CMD is unsupported");
+		else
+			PMD_DRV_LOG(ERR,
+				"Bad response to SSSNIC_FLUSH_TCAM_CMD, len=%u, status=%u",
+				cmd_len, cmd.common.status);
+		return -EIO;
+	}
+	return 0;
+}
+
+int
+sssnic_tcam_disable_and_flush(struct sssnic_hw *hw)
+{
+	int ret;
+
+	ret = sssnic_tcam_enable_set(hw, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Could not disable TCAM");
+		return ret;
+	}
+
+	ret = sssnic_tcam_flush(hw);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Could not flush TCAM");
+		return ret;
+	}
+
+	return 0;
+}
+
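+/*
+ * Allocate or free a TCAM block through the MPU mailbox. With the ALLOC
+ * flag the firmware returns the new block index via *block_idx; with the
+ * FREE flag *block_idx selects the block to release.
+ */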
+static int
+sssnic_tcam_block_cfg(struct sssnic_hw *hw, uint8_t flag, uint16_t *block_idx)
+{
+	struct sssnic_tcam_block_cfg_cmd cmd;
+	struct sssnic_msg msg;
+	uint32_t cmd_len;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_len = sizeof(cmd);
+	cmd.function = SSSNIC_FUNC_IDX(hw);
+	cmd.flag = flag;
+	if (flag == SSSNIC_TCAM_BLOCK_CFG_CMD_FLAG_FREE)
+		cmd.idx = *block_idx;
+
+	sssnic_msg_init(&msg, (uint8_t *)&cmd, cmd_len,
+		SSSNIC_TCAM_CFG_BLOCK_CMD, SSSNIC_MPU_FUNC_IDX,
+		SSSNIC_LAN_MODULE, SSSNIC_MSG_TYPE_REQ);
+	ret = sssnic_mbox_send(hw, &msg, (uint8_t *)&cmd, &cmd_len, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to send mbox message, ret=%d", ret);
+		return ret;
+	}
+
+	if (cmd_len == 0 || cmd.common.status != 0) {
+		if (cmd.common.status == SSSNIC_TCAM_CMD_STATUS_UNSUPPORTED)
+			PMD_DRV_LOG(WARNING,
+				"SSSNIC_CFG_TCAM_BLOCK_CMD is unsupported");
+		else
+			PMD_DRV_LOG(ERR,
+				"Bad response to SSSNIC_CFG_TCAM_BLOCK_CMD, len=%u, status=%u",
+				cmd_len, cmd.common.status);
+		return -EIO;
+	}
+
+	if (flag == SSSNIC_TCAM_BLOCK_CFG_CMD_FLAG_ALLOC)
+		*block_idx = cmd.idx;
+
+	return 0;
+}
+
+int
+sssnic_tcam_block_alloc(struct sssnic_hw *hw, uint16_t *block_idx)
+{
+	if (block_idx == NULL)
+		return -EINVAL;
+
+	return sssnic_tcam_block_cfg(hw, SSSNIC_TCAM_BLOCK_CFG_CMD_FLAG_ALLOC,
+		block_idx);
+}
+
+int
+sssnic_tcam_block_free(struct sssnic_hw *hw, uint16_t block_idx)
+{
+	return sssnic_tcam_block_cfg(hw, SSSNIC_TCAM_BLOCK_CFG_CMD_FLAG_FREE,
+		&block_idx);
+}
+
+int
+sssnic_tcam_packet_type_filter_set(struct sssnic_hw *hw, uint8_t ptype,
+	uint16_t qid, bool enabled)
+{
+	struct sssnic_tcam_ptype_filter_set_cmd cmd;
+	struct sssnic_msg msg;
+	uint32_t cmd_len;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_len = sizeof(cmd);
+	cmd.function = SSSNIC_FUNC_IDX(hw);
+	cmd.ptype = ptype;
+	cmd.qid = qid;
+	cmd.enable = enabled ? 1 : 0;
+
+	sssnic_msg_init(&msg, (uint8_t *)&cmd, cmd_len,
+		SSSNIC_TCAM_SET_PTYPE_FILTER_CMD, SSSNIC_MPU_FUNC_IDX,
+		SSSNIC_LAN_MODULE, SSSNIC_MSG_TYPE_REQ);
+	ret = sssnic_mbox_send(hw, &msg, (uint8_t *)&cmd, &cmd_len, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to send mbox message, ret=%d", ret);
+		return ret;
+	}
+
+	if (cmd_len == 0 || cmd.common.status != 0) {
+		if (cmd.common.status == SSSNIC_TCAM_CMD_STATUS_UNSUPPORTED)
+			PMD_DRV_LOG(WARNING,
+				"SSSNIC_TCAM_SET_PTYPE_FILTER_CMD is unsupported");
+		else
+			PMD_DRV_LOG(ERR,
+				"Bad response to SSSNIC_TCAM_SET_PTYPE_FILTER_CMD, len=%u, status=%u",
+				cmd_len, cmd.common.status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int
+sssnic_tcam_entry_add(struct sssnic_hw *hw, struct sssnic_tcam_entry *entry)
+{
+	struct sssnic_tcam_entry_add_cmd cmd;
+	struct sssnic_msg msg;
+	uint32_t cmd_len;
+	int ret;
+
+	if (entry->index >= SSSNIC_TCAM_MAX_ENTRY_NUM) {
+		PMD_DRV_LOG(ERR, "Invalid TCAM entry index: %u", entry->index);
+		return -EINVAL;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_len = sizeof(cmd);
+	cmd.function = SSSNIC_FUNC_IDX(hw);
+	rte_memcpy(&cmd.data, entry, sizeof(cmd.data));
+
+	sssnic_msg_init(&msg, (uint8_t *)&cmd, cmd_len,
+		SSSNIC_ADD_TCAM_ENTRY_CMD, SSSNIC_MPU_FUNC_IDX,
+		SSSNIC_LAN_MODULE, SSSNIC_MSG_TYPE_REQ);
+	ret = sssnic_mbox_send(hw, &msg, (uint8_t *)&cmd, &cmd_len, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to send mbox message, ret=%d", ret);
+		return ret;
+	}
+
+	if (cmd_len == 0 || cmd.common.status != 0) {
+		if (cmd.common.status == SSSNIC_TCAM_CMD_STATUS_UNSUPPORTED)
+			PMD_DRV_LOG(WARNING,
+				"SSSNIC_ADD_TCAM_ENTRY_CMD is unsupported");
+		else
+			PMD_DRV_LOG(ERR,
+				"Bad response to SSSNIC_ADD_TCAM_ENTRY_CMD, len=%u, status=%u",
+				cmd_len, cmd.common.status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int
+sssnic_tcam_entry_del(struct sssnic_hw *hw, uint32_t entry_idx)
+{
+	struct sssnic_tcam_entry_del_cmd cmd;
+	struct sssnic_msg msg;
+	uint32_t cmd_len;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_len = sizeof(cmd);
+	cmd.function = SSSNIC_FUNC_IDX(hw);
+	cmd.start = entry_idx;
+	cmd.num = 1;
+
+	sssnic_msg_init(&msg, (uint8_t *)&cmd, cmd_len,
+		SSSNIC_DEL_TCAM_ENTRY_CMD, SSSNIC_MPU_FUNC_IDX,
+		SSSNIC_LAN_MODULE, SSSNIC_MSG_TYPE_REQ);
+	ret = sssnic_mbox_send(hw, &msg, (uint8_t *)&cmd, &cmd_len, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to send mbox message, ret=%d", ret);
+		return ret;
+	}
+
+	if (cmd_len == 0 || cmd.common.status != 0) {
+		if (cmd.common.status == SSSNIC_TCAM_CMD_STATUS_UNSUPPORTED)
+			PMD_DRV_LOG(WARNING,
+				"SSSNIC_DEL_TCAM_ENTRY_CMD is unsupported");
+		else
+			PMD_DRV_LOG(ERR,
+				"Bad response to SSSNIC_DEL_TCAM_ENTRY_CMD, len=%u, status=%u",
+				cmd_len, cmd.common.status);
+		return -EIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/sssnic/base/sssnic_api.h b/drivers/net/sssnic/base/sssnic_api.h
index 28b235dda2..7a02ec61ee 100644
--- a/drivers/net/sssnic/base/sssnic_api.h
+++ b/drivers/net/sssnic/base/sssnic_api.h
@@ -409,6 +409,18 @@ struct sssnic_fw_version {
 	char time[SSSNIC_FW_VERSION_LEN];
 };
 
+struct sssnic_tcam_entry {
+	uint32_t index;
+	struct {
+		uint32_t qid;
+		uint32_t resvd;
+	} result;
+	struct {
+		uint8_t data0[SSSNIC_TCAM_KEY_SIZE];
+		uint8_t data1[SSSNIC_TCAM_KEY_SIZE];
+	} key;
+};
+
 int sssnic_msix_attr_get(struct sssnic_hw *hw, uint16_t msix_idx,
 	struct sssnic_msix_attr *attr);
 int sssnic_msix_attr_set(struct sssnic_hw *hw, uint16_t msix_idx,
@@ -470,5 +482,15 @@ int sssnic_flow_ctrl_set(struct sssnic_hw *hw, bool autoneg, bool rx_en,
 int sssnic_flow_ctrl_get(struct sssnic_hw *hw, bool *autoneg, bool *rx_en,
 	bool *tx_en);
 int sssnic_vlan_filter_set(struct sssnic_hw *hw, uint16_t vid, bool add);
+int sssnic_tcam_enable_set(struct sssnic_hw *hw, bool enabled);
+int sssnic_tcam_flush(struct sssnic_hw *hw);
+int sssnic_tcam_disable_and_flush(struct sssnic_hw *hw);
+int sssnic_tcam_block_alloc(struct sssnic_hw *hw, uint16_t *block_idx);
+int sssnic_tcam_block_free(struct sssnic_hw *hw, uint16_t block_idx);
+int sssnic_tcam_packet_type_filter_set(struct sssnic_hw *hw, uint8_t ptype,
+	uint16_t qid, bool enabled);
+int sssnic_tcam_entry_add(struct sssnic_hw *hw,
+	struct sssnic_tcam_entry *entry);
+int sssnic_tcam_entry_del(struct sssnic_hw *hw, uint32_t entry_idx);
 
 #endif /* _SSSNIC_API_H_ */
diff --git a/drivers/net/sssnic/base/sssnic_cmd.h b/drivers/net/sssnic/base/sssnic_cmd.h
index 3e70d0e223..c75cb0dad3 100644
--- a/drivers/net/sssnic/base/sssnic_cmd.h
+++ b/drivers/net/sssnic/base/sssnic_cmd.h
@@ -75,6 +75,16 @@ enum sssnic_rss_cmd_id {
 	SSSNIC_SET_RSS_TYPE_CMD = 65,
 };
 
+#define SSSNIC_TCAM_CMD_STATUS_UNSUPPORTED 0xff
+enum sssnic_tcam_cmd_id {
+	SSSNIC_ADD_TCAM_ENTRY_CMD = 80,
+	SSSNIC_DEL_TCAM_ENTRY_CMD = 81,
+	SSSNIC_FLUSH_TCAM_CMD = 83,
+	SSSNIC_TCAM_CFG_BLOCK_CMD = 84,
+	SSSNIC_SET_TCAM_ENABLE_CMD = 85,
+	SSSNIC_TCAM_SET_PTYPE_FILTER_CMD = 91,
+};
+
 struct sssnic_cmd_common {
 	uint8_t status;
 	uint8_t version;
@@ -434,4 +444,65 @@ struct sssnic_vlan_filter_set_cmd {
 	uint16_t resvd1;
 };
 
+struct sssnic_tcam_enable_set_cmd {
+	struct sssnic_cmd_common common;
+	uint16_t function;
+	uint8_t enabled;
+	uint8_t resvd[5];
+};
+
+struct sssnic_tcam_flush_cmd {
+	struct sssnic_cmd_common common;
+	uint16_t function;
+	uint16_t resvd;
+};
+
+#define SSSNIC_TCAM_BLOCK_CFG_CMD_FLAG_ALLOC 1
+#define SSSNIC_TCAM_BLOCK_CFG_CMD_FLAG_FREE 0
+struct sssnic_tcam_block_cfg_cmd {
+	struct sssnic_cmd_common common;
+	uint16_t function;
+	uint8_t flag; /* SSSNIC_TCAM_BLOCK_CFG_CMD_FLAG_XX */
+	uint8_t type;
+	uint16_t idx;
+	uint16_t resvd;
+};
+
+struct sssnic_tcam_ptype_filter_set_cmd {
+	struct sssnic_cmd_common common;
+	uint16_t function;
+	uint16_t resvd0;
+	uint8_t enable;
+	uint8_t ptype;
+	uint8_t qid;
+	uint8_t resvd1;
+};
+
+struct sssnic_tcam_entry_add_cmd {
+	struct sssnic_cmd_common common;
+	uint16_t function;
+	uint8_t type;
+	uint8_t resv;
+	struct {
+		uint32_t index;
+		struct {
+			uint32_t qid;
+			uint32_t resvd;
+		} result;
+		struct {
+			uint8_t d0[SSSNIC_TCAM_KEY_SIZE];
+			uint8_t d1[SSSNIC_TCAM_KEY_SIZE];
+		} key;
+	} data;
+};
+
+struct sssnic_tcam_entry_del_cmd {
+	struct sssnic_cmd_common common;
+	uint16_t function;
+	uint8_t type;
+	uint8_t resv;
+	uint32_t start; /* start index of entry to be deleted */
+	uint32_t num; /* number of entries to be deleted */
+};
+
 #endif /* _SSSNIC_CMD_H_ */
diff --git a/drivers/net/sssnic/base/sssnic_hw.h b/drivers/net/sssnic/base/sssnic_hw.h
index 4820212543..6a2d980d5a 100644
--- a/drivers/net/sssnic/base/sssnic_hw.h
+++ b/drivers/net/sssnic/base/sssnic_hw.h
@@ -96,6 +96,9 @@ enum sssnic_module {
 	SSSNIC_NETIF_MODULE = 14,
 };
 
+#define SSSNIC_TCAM_KEY_SIZE 44
+#define SSSNIC_TCAM_MAX_ENTRY_NUM 4096
+
 int sssnic_hw_init(struct sssnic_hw *hw);
 void sssnic_hw_shutdown(struct sssnic_hw *hw);
 void sssnic_msix_state_set(struct sssnic_hw *hw, uint16_t msix_id, int state);
diff --git a/drivers/net/sssnic/base/sssnic_misc.h b/drivers/net/sssnic/base/sssnic_misc.h
index e30691caef..a1e268710e 100644
--- a/drivers/net/sssnic/base/sssnic_misc.h
+++ b/drivers/net/sssnic/base/sssnic_misc.h
@@ -42,4 +42,11 @@ sssnic_mem_be_to_cpu_32(void *in, void *out, int size)
 	}
 }
 
+static inline bool
+sssnic_is_zero_ipv6_addr(const void *ipv6_addr)
+{
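+	/*
+	 * The address is read as two 64-bit words; callers must pass a
+	 * 16-byte IPv6 address buffer.
+	 */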
+	const uint64_t *ddw = ipv6_addr;
+	return ddw[0] == 0 && ddw[1] == 0;
+}
+
 #endif /* _SSSNIC_MISC_H_ */
diff --git a/drivers/net/sssnic/meson.build b/drivers/net/sssnic/meson.build
index 3541b75c30..03d60f08ec 100644
--- a/drivers/net/sssnic/meson.build
+++ b/drivers/net/sssnic/meson.build
@@ -23,4 +23,6 @@ sources = files(
         'sssnic_ethdev_tx.c',
         'sssnic_ethdev_stats.c',
         'sssnic_ethdev_rss.c',
+        'sssnic_ethdev_fdir.c',
+        'sssnic_ethdev_flow.c',
 )
diff --git a/drivers/net/sssnic/sssnic_ethdev.c b/drivers/net/sssnic/sssnic_ethdev.c
index 8a1ccff70b..545833fb55 100644
--- a/drivers/net/sssnic/sssnic_ethdev.c
+++ b/drivers/net/sssnic/sssnic_ethdev.c
@@ -14,6 +14,8 @@
 #include "sssnic_ethdev_tx.h"
 #include "sssnic_ethdev_stats.h"
 #include "sssnic_ethdev_rss.h"
+#include "sssnic_ethdev_fdir.h"
+#include "sssnic_ethdev_flow.h"
 
 static int sssnic_ethdev_init(struct rte_eth_dev *ethdev);
 static void sssnic_ethdev_vlan_filter_clean(struct rte_eth_dev *ethdev);
@@ -345,6 +347,7 @@ sssnic_ethdev_release(struct rte_eth_dev *ethdev)
 	sssnic_ethdev_link_intr_disable(ethdev);
 	sssnic_ethdev_tx_queue_all_release(ethdev);
 	sssnic_ethdev_rx_queue_all_release(ethdev);
+	sssnic_ethdev_fdir_shutdown(ethdev);
 	sssnic_ethdev_mac_addrs_clean(ethdev);
 	sssnic_hw_shutdown(hw);
 	rte_free(hw);
@@ -951,6 +954,7 @@ static const struct eth_dev_ops sssnic_ethdev_ops = {
 	.flow_ctrl_get = sssnic_ethdev_flow_ctrl_get,
 	.vlan_offload_set = sssnic_ethdev_vlan_offload_set,
 	.vlan_filter_set = sssnic_ethdev_vlan_filter_set,
+	.flow_ops_get = sssnic_ethdev_flow_ops_get,
 };
 
 static int
@@ -991,6 +995,12 @@ sssnic_ethdev_init(struct rte_eth_dev *ethdev)
 		goto mac_addrs_init_fail;
 	}
 
+	ret = sssnic_ethdev_fdir_init(ethdev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to initialize fdir info");
+		goto fdir_init_fail;
+	}
+
 	netdev->max_num_rxq = SSSNIC_MAX_NUM_RXQ(hw);
 	netdev->max_num_txq = SSSNIC_MAX_NUM_TXQ(hw);
 
@@ -1001,6 +1011,8 @@ sssnic_ethdev_init(struct rte_eth_dev *ethdev)
 
 	return 0;
 
+fdir_init_fail:
+	sssnic_ethdev_mac_addrs_clean(ethdev);
 mac_addrs_init_fail:
 	sssnic_hw_shutdown(0);
 	return ret;
diff --git a/drivers/net/sssnic/sssnic_ethdev.h b/drivers/net/sssnic/sssnic_ethdev.h
index f19b2bd88f..0ca933b53b 100644
--- a/drivers/net/sssnic/sssnic_ethdev.h
+++ b/drivers/net/sssnic/sssnic_ethdev.h
@@ -82,6 +82,7 @@ struct sssnic_netdev {
 	void *hw;
 	struct rte_ether_addr *mcast_addrs;
 	struct rte_ether_addr default_addr;
+	struct sssnic_ethdev_fdir_info *fdir_info;
 	uint16_t max_num_txq;
 	uint16_t max_num_rxq;
 	uint16_t num_started_rxqs;
diff --git a/drivers/net/sssnic/sssnic_ethdev_fdir.c b/drivers/net/sssnic/sssnic_ethdev_fdir.c
new file mode 100644
index 0000000000..cec9fb219f
--- /dev/null
+++ b/drivers/net/sssnic/sssnic_ethdev_fdir.c
@@ -0,0 +1,1017 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_tailq.h>
+#include <ethdev_pci.h>
+
+#include "sssnic_log.h"
+#include "sssnic_ethdev.h"
+#include "sssnic_ethdev_fdir.h"
+#include "base/sssnic_hw.h"
+#include "base/sssnic_api.h"
+
+#define SSSNIC_NETDEV_FDIR_INFO(netdev) ((netdev)->fdir_info)
+#define SSSNIC_ETHDEV_FDIR_INFO(ethdev)                                        \
+	(SSSNIC_NETDEV_FDIR_INFO(SSSNIC_ETHDEV_PRIVATE(ethdev)))
+
+enum {
+	SSSNIC_ETHDEV_PTYPE_INVAL = 0,
+	SSSNIC_ETHDEV_PTYPE_ARP = 1,
+	SSSNIC_ETHDEV_PTYPE_ARP_REQ = 2,
+	SSSNIC_ETHDEV_PTYPE_ARP_REP = 3,
+	SSSNIC_ETHDEV_PTYPE_RARP = 4,
+	SSSNIC_ETHDEV_PTYPE_LACP = 5,
+	SSSNIC_ETHDEV_PTYPE_LLDP = 6,
+	SSSNIC_ETHDEV_PTYPE_OAM = 7,
+	SSSNIC_ETHDEV_PTYPE_CDCP = 8,
+	SSSNIC_ETHDEV_PTYPE_CNM = 9,
+	SSSNIC_ETHDEV_PTYPE_ECP = 10,
+};
+
+#define SSSNIC_ETHDEV_TCAM_ENTRY_INVAL_IDX 0xffff
+struct sssnic_ethdev_fdir_entry {
+	TAILQ_ENTRY(sssnic_ethdev_fdir_entry) node;
+	struct sssnic_ethdev_tcam_block *tcam_block;
+	uint32_t tcam_entry_idx;
+	int enabled;
+	struct sssnic_ethdev_fdir_rule *rule;
+};
+
+#define SSSNIC_ETHDEV_TCAM_BLOCK_SZ 16
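+/*
+ * The TCAM is managed in fixed-size blocks of SSSNIC_ETHDEV_TCAM_BLOCK_SZ
+ * entries. Blocks are allocated from firmware on demand and freed once all
+ * of their entries are released.
+ */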
+struct sssnic_ethdev_tcam_block {
+	TAILQ_ENTRY(sssnic_ethdev_tcam_block) node;
+	uint16_t id;
+	uint16_t used_entries;
+	uint8_t entries_status[SSSNIC_ETHDEV_TCAM_BLOCK_SZ]; /* 0: IDLE, 1: USED */
+};
+
+struct sssnic_ethdev_tcam {
+	TAILQ_HEAD(, sssnic_ethdev_tcam_block) block_list;
+	uint16_t num_blocks;
+	uint16_t used_entries; /* Count of used entries */
+	int enabled;
+};
+
+struct sssnic_ethdev_fdir_info {
+	struct rte_eth_dev *ethdev;
+	struct sssnic_ethdev_tcam tcam;
+	uint32_t num_entries;
+	TAILQ_HEAD(, sssnic_ethdev_fdir_entry) ethertype_entry_list;
+	TAILQ_HEAD(, sssnic_ethdev_fdir_entry) flow_entry_list;
+};
+
+static int
+sssnic_ethdev_tcam_init(struct rte_eth_dev *ethdev)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	struct sssnic_ethdev_fdir_info *fdir_info;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+	TAILQ_INIT(&fdir_info->tcam.block_list);
+
+	sssnic_tcam_disable_and_flush(hw);
+
+	return 0;
+}
+
+static void
+sssnic_ethdev_tcam_shutdown(struct rte_eth_dev *ethdev)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info;
+	struct sssnic_ethdev_tcam *tcam;
+	struct sssnic_ethdev_tcam_block *block, *tmp;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+	tcam = &fdir_info->tcam;
+
+	RTE_TAILQ_FOREACH_SAFE(block, &tcam->block_list, node, tmp)
+	{
+		TAILQ_REMOVE(&tcam->block_list, block, node);
+		rte_free(block);
+	}
+}
+
+static int
+sssnic_ethdev_tcam_enable(struct rte_eth_dev *ethdev)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info;
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	int ret;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+
+	if (!fdir_info->tcam.enabled) {
+		ret = sssnic_tcam_enable_set(hw, 1);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to enable TCAM");
+			return ret;
+		}
+
+		fdir_info->tcam.enabled = 1;
+	}
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_tcam_disable(struct rte_eth_dev *ethdev)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info;
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	int ret;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+
+	if (fdir_info->tcam.enabled) {
+		ret = sssnic_tcam_enable_set(hw, 0);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to disable TCAM");
+			return ret;
+		}
+
+		fdir_info->tcam.enabled = 0;
+	}
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_tcam_block_alloc(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_tcam_block **block)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	struct sssnic_ethdev_fdir_info *fdir_info =
+		SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+	struct sssnic_ethdev_tcam_block *new;
+	int ret;
+
+	new = rte_zmalloc("sssnic_tcam_block", sizeof(*new), 0);
+	if (new == NULL) {
+		PMD_DRV_LOG(ERR,
+			"Failed to allocate memory for tcam block struct!");
+		return -ENOMEM;
+	}
+
+	ret = sssnic_tcam_block_alloc(hw, &new->id);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to alloc tcam block!");
+		rte_free(new);
+		return ret;
+	}
+
+	TAILQ_INSERT_HEAD(&fdir_info->tcam.block_list, new, node);
+	fdir_info->tcam.num_blocks++;
+
+	if (block != NULL)
+		*block = new;
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_tcam_block_free(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_tcam_block *block)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	struct sssnic_ethdev_fdir_info *fdir_info =
+		SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+	int ret;
+
+	ret = sssnic_tcam_block_free(hw, block->id);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to free tcam block:%u!", block->id);
+		return ret;
+	}
+
+	TAILQ_REMOVE(&fdir_info->tcam.block_list, block, node);
+	fdir_info->tcam.num_blocks--;
+	rte_free(block);
+
+	return 0;
+}
+
+static struct sssnic_ethdev_tcam_block *
+sssnic_ethdev_available_tcam_block_lookup(struct sssnic_ethdev_tcam *tcam)
+{
+	struct sssnic_ethdev_tcam_block *block;
+
+	TAILQ_FOREACH(block, &tcam->block_list, node)
+	{
+		if (block->used_entries < SSSNIC_ETHDEV_TCAM_BLOCK_SZ)
+			return block;
+	}
+
+	return NULL;
+}
+
+static int
+sssnic_ethdev_tcam_block_entry_alloc(struct sssnic_ethdev_tcam_block *block,
+	uint32_t *entry_idx)
+{
+	uint32_t i;
+
+	for (i = 0; i < SSSNIC_ETHDEV_TCAM_BLOCK_SZ; i++) {
+		if (block->entries_status[i] == 0) {
+			*entry_idx = i;
+			block->entries_status[i] = 1;
+			block->used_entries++;
+			return 0;
+		}
+	}
+
+	return -ENOMEM;
+}
+
+static int
+sssnic_ethdev_tcam_block_entry_free(struct sssnic_ethdev_tcam_block *block,
+	uint32_t entry_idx)
+{
+	if (block != NULL && entry_idx < SSSNIC_ETHDEV_TCAM_BLOCK_SZ) {
+		if (block->entries_status[entry_idx] == 1) {
+			block->entries_status[entry_idx] = 0;
+			block->used_entries--;
+			return 0; /* found and freed */
+		}
+	}
+	return -1; /* not found */
+}
+
+static int
+sssnic_ethdev_tcam_entry_alloc(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_tcam_block **block, uint32_t *entry_idx)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info =
+		SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+	struct sssnic_ethdev_tcam *tcam;
+	struct sssnic_ethdev_tcam_block *tcam_block;
+	int new_block = 0;
+	uint32_t eid;
+	int ret;
+
+	tcam = &fdir_info->tcam;
+
+	if (tcam->num_blocks == 0 ||
+		tcam->used_entries >=
+			tcam->num_blocks * SSSNIC_ETHDEV_TCAM_BLOCK_SZ) {
+		ret = sssnic_ethdev_tcam_block_alloc(ethdev, &tcam_block);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR,
+				"No TCAM memory, used block count: %u, used entries count:%u",
+				tcam->num_blocks, tcam->used_entries);
+			return ret;
+		}
+		new_block = 1;
+	} else {
+		tcam_block = sssnic_ethdev_available_tcam_block_lookup(tcam);
+		if (tcam_block == NULL) {
+			PMD_DRV_LOG(CRIT,
+				"No available TCAM block, used block count:%u, used entries count:%u",
+				tcam->num_blocks, tcam->used_entries);
+			return -ENOMEM;
+		}
+	}
+
+	ret = sssnic_ethdev_tcam_block_entry_alloc(tcam_block, &eid);
+	if (ret != 0) {
+		PMD_DRV_LOG(CRIT,
+			"No available entry in TCAM block, block idx:%u, used entries:%u",
+			tcam_block->id, tcam_block->used_entries);
+		if (unlikely(new_block))
+			sssnic_ethdev_tcam_block_free(ethdev, tcam_block);
+
+		return -ENOMEM;
+	}
+
+	tcam->used_entries++;
+
+	*block = tcam_block;
+	*entry_idx = eid;
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_tcam_entry_free(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_tcam_block *tcam_block, uint32_t entry_idx)
+{
+	int ret;
+	struct sssnic_ethdev_fdir_info *fdir_info =
+		SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+	struct sssnic_ethdev_tcam *tcam;
+
+	tcam = &fdir_info->tcam;
+
+	ret = sssnic_ethdev_tcam_block_entry_free(tcam_block, entry_idx);
+	if (ret != 0)
+		return 0; /* not found is treated as success */
+
+	if (tcam_block->used_entries == 0) {
+		ret = sssnic_ethdev_tcam_block_free(ethdev, tcam_block);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to free TCAM block:%u",
+				tcam_block->id);
+	}
+
+	tcam->used_entries--;
+	return 0;
+}
+
+static void
+sssnic_ethdev_tcam_entry_init(struct sssnic_ethdev_fdir_flow_match *flow,
+	struct sssnic_tcam_entry *entry)
+{
+	uint8_t i;
+	uint8_t *flow_key;
+	uint8_t *flow_mask;
+
+	flow_key = (uint8_t *)&flow->key;
+	flow_mask = (uint8_t *)&flow->mask;
+
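+	/*
+	 * Build the TCAM x/y key pair: data1 holds the key masked by the
+	 * rule mask, and data0 is data1 XORed with the mask, so bits cleared
+	 * in the mask become don't-care in the TCAM lookup.
+	 */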
+	for (i = 0; i < sizeof(entry->key.data0); i++) {
+		entry->key.data1[i] = flow_key[i] & flow_mask[i];
+		entry->key.data0[i] =
+			entry->key.data1[i] ^ flow_mask[i];
+	}
+}
+
+static struct sssnic_ethdev_fdir_entry *
+sssnic_ethdev_fdir_entry_lookup(struct sssnic_ethdev_fdir_info *fdir_info,
+	struct sssnic_ethdev_fdir_rule *rule)
+{
+	struct sssnic_ethdev_fdir_entry *e;
+	struct sssnic_ethdev_fdir_match *m;
+	struct sssnic_ethdev_fdir_match *match = &rule->match;
+
+	/* fast lookup */
+	if (rule->cookie != NULL)
+		return (struct sssnic_ethdev_fdir_entry *)rule->cookie;
+
+	if (rule->match.type == SSSNIC_ETHDEV_FDIR_MATCH_FLOW) {
+		TAILQ_FOREACH(e, &fdir_info->flow_entry_list, node)
+		{
+			m = &e->rule->match;
+			if (memcmp(&match->flow, &m->flow, sizeof(m->flow)) ==
+				0)
+				return e;
+		}
+	} else if (rule->match.type == SSSNIC_ETHDEV_FDIR_MATCH_ETHERTYPE) {
+		TAILQ_FOREACH(e, &fdir_info->ethertype_entry_list, node)
+		{
+			m = &e->rule->match;
+			if (match->ethertype.key.ether_type ==
+				m->ethertype.key.ether_type)
+				return e;
+		}
+	}
+
+	return NULL;
+}
+
+static inline void
+sssnic_ethdev_fdir_entry_add(struct sssnic_ethdev_fdir_info *fdir_info,
+	struct sssnic_ethdev_fdir_entry *entry)
+{
+	if (entry->rule->match.type == SSSNIC_ETHDEV_FDIR_MATCH_ETHERTYPE)
+		TAILQ_INSERT_TAIL(&fdir_info->ethertype_entry_list, entry,
+			node);
+	else
+		TAILQ_INSERT_TAIL(&fdir_info->flow_entry_list, entry, node);
+
+	fdir_info->num_entries++;
+}
+
+static inline void
+sssnic_ethdev_fdir_entry_del(struct sssnic_ethdev_fdir_info *fdir_info,
+	struct sssnic_ethdev_fdir_entry *entry)
+{
+	if (entry->rule->match.type == SSSNIC_ETHDEV_FDIR_MATCH_ETHERTYPE)
+		TAILQ_REMOVE(&fdir_info->ethertype_entry_list, entry, node);
+	else
+		TAILQ_REMOVE(&fdir_info->flow_entry_list, entry, node);
+
+	fdir_info->num_entries--;
+}
+
+static int
+sssnic_ethdev_fdir_arp_pkt_filter_set(struct rte_eth_dev *ethdev, uint16_t qid,
+	int enabled)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	int ret;
+
+	ret = sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_ARP,
+		qid, enabled);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to %s ARP packet filter!",
+			enabled ? "enable" : "disable");
+		return ret;
+	}
+
+	ret = sssnic_tcam_packet_type_filter_set(hw,
+		SSSNIC_ETHDEV_PTYPE_ARP_REQ, qid, enabled);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to %s ARP request packet filter!",
+			enabled ? "enable" : "disable");
+		goto set_arp_req_fail;
+	}
+
+	ret = sssnic_tcam_packet_type_filter_set(hw,
+		SSSNIC_ETHDEV_PTYPE_ARP_REP, qid, enabled);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to %s ARP reply packet filter!",
+			enabled ? "enable" : "disable");
+		goto set_arp_rep_fail;
+	}
+
+	return 0;
+
+set_arp_rep_fail:
+	sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_ARP_REQ, qid,
+		!enabled);
+set_arp_req_fail:
+	sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_ARP, qid,
+		!enabled);
+
+	return ret;
+}
+
+static int
+sssnic_ethdev_fdir_slow_pkt_filter_set(struct rte_eth_dev *ethdev, uint16_t qid,
+	int enabled)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	int ret;
+
+	ret = sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_LACP,
+		qid, enabled);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to %s LACP packet filter!",
+			enabled ? "enable" : "disable");
+		return ret;
+	}
+
+	ret = sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_OAM,
+		qid, enabled);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to %s OAM packet filter!",
+			enabled ? "enable" : "disable");
+
+		sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_LACP,
+			qid, !enabled);
+	}
+
+	return ret;
+}
+
+static int
+sssnic_ethdev_fdir_lldp_pkt_filter_set(struct rte_eth_dev *ethdev, uint16_t qid,
+	int enabled)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	int ret;
+
+	ret = sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_LLDP,
+		qid, enabled);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to %s LLDP packet filter!",
+			enabled ? "enable" : "disable");
+		return ret;
+	}
+
+	ret = sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_CDCP,
+		qid, enabled);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to %s CDCP packet filter!",
+			enabled ? "enable" : "disable");
+
+		sssnic_tcam_packet_type_filter_set(hw, SSSNIC_ETHDEV_PTYPE_LLDP,
+			qid, !enabled);
+	}
+
+	return ret;
+}
+
+static int
+sssnic_ethdev_fdir_pkt_filter_set(struct rte_eth_dev *ethdev,
+	uint16_t ether_type, uint16_t qid, int enabled)
+{
+	int ret;
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+
+	switch (ether_type) {
+	case RTE_ETHER_TYPE_ARP:
+		ret = sssnic_ethdev_fdir_arp_pkt_filter_set(ethdev, qid,
+			enabled);
+		break;
+	case RTE_ETHER_TYPE_RARP:
+		ret = sssnic_tcam_packet_type_filter_set(hw,
+			SSSNIC_ETHDEV_PTYPE_RARP, qid, enabled);
+		break;
+	case RTE_ETHER_TYPE_SLOW:
+		ret = sssnic_ethdev_fdir_slow_pkt_filter_set(ethdev, qid,
+			enabled);
+		break;
+	case RTE_ETHER_TYPE_LLDP:
+		ret = sssnic_ethdev_fdir_lldp_pkt_filter_set(ethdev, qid,
+			enabled);
+		break;
+	case 0x22e7: /* CNM ether type */
+		ret = sssnic_tcam_packet_type_filter_set(hw,
+			SSSNIC_ETHDEV_PTYPE_CNM, qid, enabled);
+		break;
+	case 0x8940: /* ECP ether type */
+		ret = sssnic_tcam_packet_type_filter_set(hw,
+			SSSNIC_ETHDEV_PTYPE_ECP, qid, enabled);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Filtering is not supported for ethertype 0x%x!",
+			ether_type);
+		return -EINVAL;
+	}
+
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Failed to %s filter for ethertype 0x%x.",
+			enabled ? "enable" : "disable", ether_type);
+
+	return ret;
+}
+
+static inline struct sssnic_ethdev_fdir_entry *
+sssnic_ethdev_fdir_entry_alloc(void)
+{
+	struct sssnic_ethdev_fdir_entry *e;
+
+	e = rte_zmalloc("sssnic_fdir_entry", sizeof(*e), 0);
+	if (e != NULL)
+		e->tcam_entry_idx = SSSNIC_ETHDEV_TCAM_ENTRY_INVAL_IDX;
+	else
+		PMD_DRV_LOG(ERR,
+			"Failed to allocate memory for fdir entry struct!");
+
+	return e;
+}
+
+static inline void
+sssnic_ethdev_fdir_entry_free(struct sssnic_ethdev_fdir_entry *e)
+{
+	if (e != NULL)
+		rte_free(e);
+}
+
+/* Apply fdir rule to HW */
+static int
+sssnic_ethdev_fdir_entry_enable(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_fdir_entry *entry)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	struct sssnic_tcam_entry tcam_entry;
+	int ret;
+
+	if (unlikely(entry->rule == NULL)) {
+		PMD_DRV_LOG(ERR, "fdir rule is null!");
+		return -EINVAL;
+	}
+
+	if (entry->enabled)
+		return 0;
+
+	if (entry->tcam_entry_idx != SSSNIC_ETHDEV_TCAM_ENTRY_INVAL_IDX) {
+		memset(&tcam_entry, 0, sizeof(tcam_entry));
+		sssnic_ethdev_tcam_entry_init(&entry->rule->match.flow,
+			&tcam_entry);
+		tcam_entry.result.qid = entry->rule->action.qid;
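+		/*
+		 * The absolute TCAM index is the block base (block id times
+		 * SSSNIC_ETHDEV_TCAM_BLOCK_SZ) plus the entry offset within
+		 * the block.
+		 */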
+		tcam_entry.index =
+			entry->tcam_entry_idx +
+			(entry->tcam_block->id * SSSNIC_ETHDEV_TCAM_BLOCK_SZ);
+
+		ret = sssnic_tcam_entry_add(hw, &tcam_entry);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR,
+				"Failed to add TCAM entry, block:%u, entry:%u, tcam_entry:%u",
+				entry->tcam_block->id, entry->tcam_entry_idx,
+				tcam_entry.index);
+
+	} else {
+		ret = sssnic_ethdev_fdir_pkt_filter_set(ethdev,
+			entry->rule->match.ethertype.key.ether_type,
+			entry->rule->action.qid, 1);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to enable ethertype(%x) filter",
+				entry->rule->match.ethertype.key.ether_type);
+	}
+
+	entry->enabled = 1;
+
+	return ret;
+}
+
+/* remove fdir rule from HW */
+static int
+sssnic_ethdev_fdir_entry_disable(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_fdir_entry *entry)
+{
+	struct sssnic_hw *hw = SSSNIC_ETHDEV_TO_HW(ethdev);
+	uint32_t tcam_entry_idx;
+	int ret;
+
+	if (unlikely(entry->rule == NULL)) {
+		PMD_DRV_LOG(ERR, "fdir rule is null!");
+		return -EINVAL;
+	}
+
+	if (!entry->enabled)
+		return 0;
+
+	if (entry->tcam_entry_idx != SSSNIC_ETHDEV_TCAM_ENTRY_INVAL_IDX) {
+		tcam_entry_idx =
+			entry->tcam_entry_idx +
+			(entry->tcam_block->id * SSSNIC_ETHDEV_TCAM_BLOCK_SZ);
+
+		ret = sssnic_tcam_entry_del(hw, tcam_entry_idx);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR,
+				"Failed to del TCAM entry, block:%u, entry:%u",
+				entry->tcam_block->id, entry->tcam_entry_idx);
+			return ret;
+		}
+	} else {
+		ret = sssnic_ethdev_fdir_pkt_filter_set(ethdev,
+			entry->rule->match.ethertype.key.ether_type,
+			entry->rule->action.qid, 0);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR,
+				"Failed to disable ethertype(%x) filter",
+				entry->rule->match.ethertype.key.ether_type);
+			return ret;
+		}
+	}
+
+	entry->enabled = 0;
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_fdir_ethertype_rule_add(struct sssnic_ethdev_fdir_info *fdir_info,
+	struct sssnic_ethdev_fdir_rule *rule)
+{
+	struct sssnic_ethdev_fdir_entry *fdir_entry;
+	int ret;
+
+	fdir_entry = sssnic_ethdev_fdir_entry_alloc();
+	if (fdir_entry == NULL)
+		return -ENOMEM;
+
+	fdir_entry->rule = rule;
+
+	ret = sssnic_ethdev_fdir_entry_enable(fdir_info->ethdev, fdir_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable ethertype(%u) entry",
+			rule->match.ethertype.key.ether_type);
+
+		sssnic_ethdev_fdir_entry_free(fdir_entry);
+
+		return ret;
+	}
+
+	rule->cookie = fdir_entry;
+	sssnic_ethdev_fdir_entry_add(fdir_info, fdir_entry);
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_fdir_ethertype_rule_del(struct sssnic_ethdev_fdir_info *fdir_info,
+	struct sssnic_ethdev_fdir_rule *rule)
+{
+	struct sssnic_ethdev_fdir_entry *fdir_entry;
+	int ret;
+
+	fdir_entry = (struct sssnic_ethdev_fdir_entry *)rule->cookie;
+
+	ret = sssnic_ethdev_fdir_entry_disable(fdir_info->ethdev, fdir_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable ethertype(%u) entry",
+			rule->match.ethertype.key.ether_type);
+		return ret;
+	}
+
+	rule->cookie = NULL;
+	sssnic_ethdev_fdir_entry_del(fdir_info, fdir_entry);
+	sssnic_ethdev_fdir_entry_free(fdir_entry);
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_fdir_flow_rule_add(struct sssnic_ethdev_fdir_info *fdir_info,
+	struct sssnic_ethdev_fdir_rule *rule)
+{
+	struct sssnic_ethdev_fdir_entry *fdir_entry;
+	int ret;
+
+	fdir_entry = sssnic_ethdev_fdir_entry_alloc();
+	if (fdir_entry == NULL)
+		return -ENOMEM;
+
+	fdir_entry->rule = rule;
+
+	ret = sssnic_ethdev_tcam_entry_alloc(fdir_info->ethdev,
+		&fdir_entry->tcam_block, &fdir_entry->tcam_entry_idx);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to alloc TCAM entry");
+		goto tcam_entry_alloc_fail;
+	}
+
+	ret = sssnic_ethdev_fdir_entry_enable(fdir_info->ethdev, fdir_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable fdir flow entry");
+		goto fdir_entry_enable_fail;
+	}
+
+	rule->cookie = fdir_entry;
+	sssnic_ethdev_fdir_entry_add(fdir_info, fdir_entry);
+
+	return 0;
+
+fdir_entry_enable_fail:
+	sssnic_ethdev_tcam_entry_free(fdir_info->ethdev, fdir_entry->tcam_block,
+		fdir_entry->tcam_entry_idx);
+tcam_entry_alloc_fail:
+	sssnic_ethdev_fdir_entry_free(fdir_entry);
+
+	return ret;
+}
+
+static int
+sssnic_ethdev_fdir_flow_rule_del(struct sssnic_ethdev_fdir_info *fdir_info,
+	struct sssnic_ethdev_fdir_rule *rule)
+{
+	struct sssnic_ethdev_fdir_entry *fdir_entry;
+	int ret;
+
+	fdir_entry = (struct sssnic_ethdev_fdir_entry *)rule->cookie;
+
+	ret = sssnic_ethdev_fdir_entry_disable(fdir_info->ethdev, fdir_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable fdir flow entry");
+		return ret;
+	}
+
+	rule->cookie = NULL;
+	sssnic_ethdev_fdir_entry_del(fdir_info, fdir_entry);
+	sssnic_ethdev_fdir_entry_free(fdir_entry);
+
+	return 0;
+}
+
+int
+sssnic_ethdev_fdir_rule_add(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_fdir_rule *rule)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info;
+	int ret;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+
+	if (sssnic_ethdev_fdir_entry_lookup(fdir_info, rule) != NULL) {
+		PMD_DRV_LOG(ERR, "FDIR rule already exists!");
+		return -EEXIST;
+	}
+
+	if (rule->match.type == SSSNIC_ETHDEV_FDIR_MATCH_ETHERTYPE) {
+		ret = sssnic_ethdev_fdir_ethertype_rule_add(fdir_info, rule);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to add fdir ethertype rule");
+			return ret;
+		}
+		PMD_DRV_LOG(DEBUG,
+			"Added fdir ethertype rule, total number of rules: %u",
+			fdir_info->num_entries);
+	} else {
+		ret = sssnic_ethdev_fdir_flow_rule_add(fdir_info, rule);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to add fdir flow rule");
+			return ret;
+		}
+		PMD_DRV_LOG(DEBUG,
+			"Added fdir flow rule, total number of rules: %u",
+			fdir_info->num_entries);
+	}
+
+	ret = sssnic_ethdev_tcam_enable(ethdev);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable TCAM");
+		sssnic_ethdev_fdir_flow_rule_del(fdir_info, rule);
+	}
+
+	return ret;
+}
+
+int
+sssnic_ethdev_fdir_rule_del(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_fdir_rule *fdir_rule)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info;
+	struct sssnic_ethdev_fdir_entry *entry;
+	struct sssnic_ethdev_fdir_rule *rule;
+	int ret;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+
+	entry = sssnic_ethdev_fdir_entry_lookup(fdir_info, fdir_rule);
+	if (entry == NULL)
+		return 0;
+
+	rule = entry->rule;
+	if (rule != fdir_rule)
+		return 0;
+
+	if (rule->match.type == SSSNIC_ETHDEV_FDIR_MATCH_ETHERTYPE) {
+		ret = sssnic_ethdev_fdir_ethertype_rule_del(fdir_info, rule);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR,
+				"Failed to delete fdir ethertype rule!");
+			return ret;
+		}
+		PMD_DRV_LOG(DEBUG,
+			"Deleted fdir ethertype rule, total number of rules: %u",
+			fdir_info->num_entries);
+	} else {
+		ret = sssnic_ethdev_fdir_flow_rule_del(fdir_info, rule);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to delete fdir flow rule!");
+			return ret;
+		}
+		PMD_DRV_LOG(DEBUG,
+			"Deleted fdir flow rule, total number of rules: %u",
+			fdir_info->num_entries);
+	}
+
+	/* disable TCAM if no rules remain */
+	if (fdir_info->num_entries == 0) {
+		ret = sssnic_ethdev_tcam_disable(ethdev);
+		if (ret != 0) {
+			PMD_DRV_LOG(NOTICE,
+				"There are no added rules, but failed to disable TCAM");
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+int
+sssnic_ethdev_fdir_rules_disable_by_queue(struct rte_eth_dev *ethdev,
+	uint16_t qid)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info;
+	struct sssnic_ethdev_fdir_entry *entry;
+	int ret;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+
+	TAILQ_FOREACH(entry, &fdir_info->flow_entry_list, node)
+	{
+		if (entry->rule->action.qid == qid) {
+			ret = sssnic_ethdev_fdir_entry_disable(ethdev, entry);
+			if (ret != 0) {
+				PMD_DRV_LOG(ERR,
+					"Failed to disable flow rule of queue:%u",
+					qid);
+
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int
+sssnic_ethdev_fdir_rules_enable_by_queue(struct rte_eth_dev *ethdev,
+	uint16_t qid)
+{
+	struct sssnic_ethdev_fdir_info *fdir_info;
+	struct sssnic_ethdev_fdir_entry *entry;
+	int ret;
+
+	fdir_info = SSSNIC_ETHDEV_FDIR_INFO(ethdev);
+
+	TAILQ_FOREACH(entry, &fdir_info->flow_entry_list, node)
+	{
+		if (entry->rule->action.qid == qid) {
+			ret = sssnic_ethdev_fdir_entry_enable(ethdev, entry);
+			if (ret != 0) {
+				PMD_DRV_LOG(ERR,
+					"Failed to enable flow rule of queue:%u",
+					qid);
+
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int
+sssnic_ethdev_fdir_rules_flush(struct rte_eth_dev *ethdev)
+{
+	struct sssnic_netdev *netdev = SSSNIC_ETHDEV_PRIVATE(ethdev);
+	struct sssnic_ethdev_fdir_entry *entry, *tmp;
+	struct sssnic_ethdev_fdir_rule *rule;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(entry, &netdev->fdir_info->flow_entry_list, node,
+		tmp)
+	{
+		rule = entry->rule;
+		ret = sssnic_ethdev_fdir_entry_disable(ethdev, entry);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to disable fdir flow entry");
+			return ret;
+		}
+		TAILQ_REMOVE(&netdev->fdir_info->flow_entry_list, entry, node);
+		sssnic_ethdev_fdir_entry_free(entry);
+		sssnic_ethdev_fdir_rule_free(rule);
+	}
+
+	RTE_TAILQ_FOREACH_SAFE(entry, &netdev->fdir_info->ethertype_entry_list,
+		node, tmp)
+	{
+		rule = entry->rule;
+		ret = sssnic_ethdev_fdir_entry_disable(ethdev, entry);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to disable ethertype(%u) entry",
+				rule->match.ethertype.key.ether_type);
+			return ret;
+		}
+		TAILQ_REMOVE(&netdev->fdir_info->ethertype_entry_list, entry,
+			node);
+		sssnic_ethdev_fdir_entry_free(entry);
+		sssnic_ethdev_fdir_rule_free(rule);
+	}
+
+	return 0;
+}
+
+int
+sssnic_ethdev_fdir_init(struct rte_eth_dev *ethdev)
+{
+	struct sssnic_netdev *netdev = SSSNIC_ETHDEV_PRIVATE(ethdev);
+
+	PMD_INIT_FUNC_TRACE();
+
+	netdev->fdir_info = rte_zmalloc("sssnic_fdir_info",
+		sizeof(struct sssnic_ethdev_fdir_info), 0);
+
+	if (netdev->fdir_info == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc fdir info memory for port %u",
+			ethdev->data->port_id);
+		return -ENOMEM;
+	}
+
+	netdev->fdir_info->ethdev = ethdev;
+
+	TAILQ_INIT(&netdev->fdir_info->ethertype_entry_list);
+	TAILQ_INIT(&netdev->fdir_info->flow_entry_list);
+
+	sssnic_ethdev_tcam_init(ethdev);
+
+	return 0;
+}
+
+void
+sssnic_ethdev_fdir_shutdown(struct rte_eth_dev *ethdev)
+{
+	struct sssnic_netdev *netdev = SSSNIC_ETHDEV_PRIVATE(ethdev);
+	struct sssnic_ethdev_fdir_entry *entry, *tmp;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (netdev->fdir_info == NULL)
+		return;
+
+	RTE_TAILQ_FOREACH_SAFE(entry, &netdev->fdir_info->flow_entry_list, node,
+		tmp)
+	{
+		TAILQ_REMOVE(&netdev->fdir_info->flow_entry_list, entry, node);
+		sssnic_ethdev_fdir_entry_free(entry);
+	}
+
+	RTE_TAILQ_FOREACH_SAFE(entry, &netdev->fdir_info->ethertype_entry_list,
+		node, tmp)
+	{
+		TAILQ_REMOVE(&netdev->fdir_info->ethertype_entry_list, entry,
+			node);
+		sssnic_ethdev_fdir_entry_free(entry);
+	}
+
+	sssnic_ethdev_tcam_shutdown(ethdev);
+
+	rte_free(netdev->fdir_info);
+}
diff --git a/drivers/net/sssnic/sssnic_ethdev_fdir.h b/drivers/net/sssnic/sssnic_ethdev_fdir.h
new file mode 100644
index 0000000000..aaf426b8f2
--- /dev/null
+++ b/drivers/net/sssnic/sssnic_ethdev_fdir.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#ifndef _SSSNIC_ETHDEV_FDIR_H_
+#define _SSSNIC_ETHDEV_FDIR_H_
+
+#define SSSNIC_ETHDEV_FDIR_FLOW_KEY_SIZE 44
+#define SSSNIC_ETHDEV_FDIR_FLOW_KEY_NUM_DW                                     \
+	(SSSNIC_ETHDEV_FDIR_FLOW_KEY_SIZE / sizeof(uint32_t))
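+/*
+ * The 44-byte flow key must stay in sync with SSSNIC_TCAM_KEY_SIZE so that
+ * one flow key exactly fills one TCAM entry key.
+ */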
+
+enum sssnic_ethdev_fdir_match_type {
+	SSSNIC_ETHDEV_FDIR_MATCH_ETHERTYPE = RTE_ETH_FILTER_ETHERTYPE,
+	SSSNIC_ETHDEV_FDIR_MATCH_FLOW = RTE_ETH_FILTER_FDIR,
+};
+
+enum sssnic_ethdev_fdir_flow_ip_type {
+	SSSNIC_ETHDEV_FDIR_FLOW_IPV4 = 0,
+	SSSNIC_ETHDEV_FDIR_FLOW_IPV6 = 1,
+};
+
+enum sssnic_ethdev_fdir_flow_tunnel_type {
+	SSSNIC_ETHDEV_FDIR_FLOW_TUNNEL_NONE = 0,
+	SSSNIC_ETHDEV_FDIR_FLOW_TUNNEL_VXLAN = 1,
+};
+
+#define SSSNIC_ETHDEV_FDIR_FLOW_FUNC_ID_MASK 0x7fff
+#define SSSNIC_ETHDEV_FDIR_FLOW_IP_TYPE_MASK 0x1
+#define SSSNIC_ETHDEV_FDIR_FLOW_TUNNEL_TYPE_MASK 0xf
+
+struct sssnic_ethdev_fdir_ethertype_key {
+	uint16_t ether_type;
+};
+
+struct sssnic_ethdev_fdir_ipv4_flow_key {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN)
+	uint32_t resvd0 : 16;
+	uint32_t ip_proto : 8;
+	uint32_t tunnel_type : 4;
+	uint32_t resvd1 : 4;
+
+	uint32_t func_id : 15;
+	uint32_t ip_type : 1;
+	uint32_t sip_w1 : 16;
+
+	uint32_t sip_w0 : 16;
+	uint32_t dip_w1 : 16;
+
+	uint32_t dip_w0 : 16;
+	uint32_t resvd2 : 16;
+
+	uint32_t resvd3;
+
+	uint32_t resvd4 : 16;
+	uint32_t dport : 16;
+
+	uint32_t sport : 16;
+	uint32_t resvd5 : 16;
+
+	uint32_t resvd6 : 16;
+	uint32_t outer_sip_w1 : 16;
+
+	uint32_t outer_sip_w0 : 16;
+	uint32_t outer_dip_w1 : 16;
+
+	uint32_t outer_dip_w0 : 16;
+	uint32_t vni_w1 : 16;
+
+	uint32_t vni_w0 : 16;
+	uint32_t resvd7 : 16;
+#else
+	uint32_t resvd1 : 4;
+	uint32_t tunnel_type : 4;
+	uint32_t ip_proto : 8;
+	uint32_t resvd0 : 16;
+
+	uint32_t sip_w1 : 16;
+	uint32_t ip_type : 1;
+	uint32_t func_id : 15;
+
+	uint32_t dip_w1 : 16;
+	uint32_t sip_w0 : 16;
+
+	uint32_t resvd2 : 16;
+	uint32_t dip_w0 : 16;
+
+	uint32_t resvd3;
+
+	uint32_t dport : 16;
+	uint32_t resvd4 : 16;
+
+	uint32_t resvd5 : 16;
+	uint32_t sport : 16;
+
+	uint32_t outer_sip_w1 : 16;
+	uint32_t resvd6 : 16;
+
+	uint32_t outer_dip_w1 : 16;
+	uint32_t outer_sip_w0 : 16;
+
+	uint32_t vni_w1 : 16;
+	uint32_t outer_dip_w0 : 16;
+
+	uint32_t resvd7 : 16;
+	uint32_t vni_w0 : 16;
+#endif
+};
+
+struct sssnic_ethdev_fdir_ipv6_flow_key {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN)
+	uint32_t resvd0 : 16;
+	uint32_t ip_proto : 8;
+	uint32_t tunnel_type : 4;
+	uint32_t resvd1 : 4;
+
+	uint32_t func_id : 15;
+	uint32_t ip_type : 1;
+	uint32_t sip6_w0 : 16;
+
+	uint32_t sip6_w1 : 16;
+	uint32_t sip6_w2 : 16;
+
+	uint32_t sip6_w3 : 16;
+	uint32_t sip6_w4 : 16;
+
+	uint32_t sip6_w5 : 16;
+	uint32_t sip6_w6 : 16;
+
+	uint32_t sip6_w7 : 16;
+	uint32_t dport : 16;
+
+	uint32_t sport : 16;
+	uint32_t dip6_w0 : 16;
+
+	uint32_t dip6_w1 : 16;
+	uint32_t dip6_w2 : 16;
+
+	uint32_t dip6_w3 : 16;
+	uint32_t dip6_w4 : 16;
+
+	uint32_t dip6_w5 : 16;
+	uint32_t dip6_w6 : 16;
+
+	uint32_t dip6_w7 : 16;
+	uint32_t resvd2 : 16;
+#else
+	uint32_t resvd1 : 4;
+	uint32_t tunnel_type : 4;
+	uint32_t ip_proto : 8;
+	uint32_t resvd0 : 16;
+
+	uint32_t sip6_w0 : 16;
+	uint32_t ip_type : 1;
+	uint32_t func_id : 15;
+
+	uint32_t sip6_w2 : 16;
+	uint32_t sip6_w1 : 16;
+
+	uint32_t sip6_w4 : 16;
+	uint32_t sip6_w3 : 16;
+
+	uint32_t sip6_w6 : 16;
+	uint32_t sip6_w5 : 16;
+
+	uint32_t dport : 16;
+	uint32_t sip6_w7 : 16;
+
+	uint32_t dip6_w0 : 16;
+	uint32_t sport : 16;
+
+	uint32_t dip6_w2 : 16;
+	uint32_t dip6_w1 : 16;
+
+	uint32_t dip6_w4 : 16;
+	uint32_t dip6_w3 : 16;
+
+	uint32_t dip6_w6 : 16;
+	uint32_t dip6_w5 : 16;
+
+	uint32_t resvd2 : 16;
+	uint32_t dip6_w7 : 16;
+#endif
+};
+
+struct sssnic_ethdev_fdir_vxlan_ipv6_flow_key {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN)
+	uint32_t resvd0 : 16;
+	uint32_t ip_proto : 8;
+	uint32_t tunnel_type : 4;
+	uint32_t resvd1 : 4;
+
+	uint32_t func_id : 15;
+	uint32_t ip_type : 1;
+	uint32_t dip6_w0 : 16;
+
+	uint32_t dip6_w1 : 16;
+	uint32_t dip6_w2 : 16;
+
+	uint32_t dip6_w3 : 16;
+	uint32_t dip6_w4 : 16;
+
+	uint32_t dip6_w5 : 16;
+	uint32_t dip6_w6 : 16;
+
+	uint32_t dip6_w7 : 16;
+	uint32_t dport : 16;
+
+	uint32_t sport : 16;
+	uint32_t resvd2 : 16;
+
+	uint32_t resvd3 : 16;
+	uint32_t outer_sip_w1 : 16;
+
+	uint32_t outer_sip_w0 : 16;
+	uint32_t outer_dip_w1 : 16;
+
+	uint32_t outer_dip_w0 : 16;
+	uint32_t vni_w1 : 16;
+
+	uint32_t vni_w0 : 16;
+	uint32_t resvd4 : 16;
+#else
+	uint32_t resvd1 : 4;
+	uint32_t tunnel_type : 4;
+	uint32_t ip_proto : 8;
+	uint32_t resvd0 : 16;
+
+	uint32_t dip6_w0 : 16;
+	uint32_t ip_type : 1;
+	uint32_t func_id : 15;
+
+	uint32_t dip6_w2 : 16;
+	uint32_t dip6_w1 : 16;
+
+	uint32_t dip6_w4 : 16;
+	uint32_t dip6_w3 : 16;
+
+	uint32_t dip6_w6 : 16;
+	uint32_t dip6_w5 : 16;
+
+	uint32_t dport : 16;
+	uint32_t dip6_w7 : 16;
+
+	uint32_t resvd2 : 16;
+	uint32_t sport : 16;
+
+	uint32_t outer_sip_w1 : 16;
+	uint32_t resvd3 : 16;
+
+	uint32_t outer_dip_w1 : 16;
+	uint32_t outer_sip_w0 : 16;
+
+	uint32_t vni_w1 : 16;
+	uint32_t outer_dip_w0 : 16;
+
+	uint32_t resvd4 : 16;
+	uint32_t vni_w0 : 16;
+#endif
+};
+
+struct sssnic_ethdev_fdir_flow_key {
+	union {
+		uint32_t dword[SSSNIC_ETHDEV_FDIR_FLOW_KEY_NUM_DW];
+		union {
+			struct sssnic_ethdev_fdir_ipv4_flow_key ipv4;
+			struct sssnic_ethdev_fdir_ipv6_flow_key ipv6;
+			struct sssnic_ethdev_fdir_vxlan_ipv6_flow_key vxlan_ipv6;
+		};
+	};
+};
+
+struct sssnic_ethdev_fdir_flow_match {
+	struct sssnic_ethdev_fdir_flow_key key;
+	struct sssnic_ethdev_fdir_flow_key mask;
+};
+
+struct sssnic_ethdev_fdir_ethertype_match {
+	struct sssnic_ethdev_fdir_ethertype_key key;
+};
+
+struct sssnic_ethdev_fdir_match {
+	enum sssnic_ethdev_fdir_match_type type;
+	union {
+		struct sssnic_ethdev_fdir_flow_match flow;
+		struct sssnic_ethdev_fdir_ethertype_match ethertype;
+	};
+};
+
+struct sssnic_ethdev_fdir_action {
+	uint16_t qid;
+};
+
+/* struct sssnic_ethdev_fdir_rule must be dynamically allocated on the heap */
+struct sssnic_ethdev_fdir_rule {
+	struct sssnic_ethdev_fdir_match match;
+	struct sssnic_ethdev_fdir_action action;
+	void *cookie; /* low-level data; initial value must be NULL */
+};
+
+struct sssnic_ethdev_fdir_info;
+
+static inline struct sssnic_ethdev_fdir_rule *
+sssnic_ethdev_fdir_rule_alloc(void)
+{
+	struct sssnic_ethdev_fdir_rule *rule;
+
+	rule = rte_zmalloc("sssnic_fdir_rule",
+		sizeof(struct sssnic_ethdev_fdir_rule), 0);
+
+	return rule;
+}
+
+static inline void
+sssnic_ethdev_fdir_rule_free(struct sssnic_ethdev_fdir_rule *rule)
+{
+	if (rule != NULL)
+		rte_free(rule);
+}
+
+int sssnic_ethdev_fdir_rules_disable_by_queue(struct rte_eth_dev *ethdev,
+	uint16_t qid);
+int sssnic_ethdev_fdir_rules_enable_by_queue(struct rte_eth_dev *ethdev,
+	uint16_t qid);
+int sssnic_ethdev_fdir_rule_add(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_fdir_rule *rule);
+int sssnic_ethdev_fdir_rule_del(struct rte_eth_dev *ethdev,
+	struct sssnic_ethdev_fdir_rule *fdir_rule);
+int sssnic_ethdev_fdir_rules_flush(struct rte_eth_dev *ethdev);
+int sssnic_ethdev_fdir_init(struct rte_eth_dev *ethdev);
+void sssnic_ethdev_fdir_shutdown(struct rte_eth_dev *ethdev);
+
+#endif /* _SSSNIC_ETHDEV_FDIR_H_ */
diff --git a/drivers/net/sssnic/sssnic_ethdev_flow.c b/drivers/net/sssnic/sssnic_ethdev_flow.c
new file mode 100644
index 0000000000..372a5bed6b
--- /dev/null
+++ b/drivers/net/sssnic/sssnic_ethdev_flow.c
@@ -0,0 +1,981 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#include <rte_common.h>
+#include <ethdev_pci.h>
+#include <rte_flow_driver.h>
+
+#include "sssnic_log.h"
+#include "sssnic_ethdev.h"
+#include "sssnic_ethdev_fdir.h"
+#include "sssnic_ethdev_flow.h"
+#include "base/sssnic_hw.h"
+#include "base/sssnic_api.h"
+#include "base/sssnic_misc.h"
+
+struct rte_flow {
+	struct sssnic_ethdev_fdir_rule rule;
+};
+
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_any[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ANY,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_any[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ANY,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum sssnic_ethdev_flow_type {
+	SSSNIC_ETHDEV_FLOW_TYPE_UNKNOWN = -1,
+	SSSNIC_ETHDEV_FLOW_TYPE_ETHERTYPE,
+	SSSNIC_ETHDEV_FLOW_TYPE_FDIR,
+	SSSNIC_ETHDEV_FLOW_TYPE_COUNT,
+};
+
+struct sssnic_ethdev_flow_pattern {
+	enum rte_flow_item_type *flow_items;
+	enum sssnic_ethdev_flow_type type;
+	bool is_tunnel;
+};
+
+static struct sssnic_ethdev_flow_pattern supported_flow_patterns[] = {
+	{ pattern_ethertype, SSSNIC_ETHDEV_FLOW_TYPE_ETHERTYPE, false },
+	{ pattern_eth_ipv4, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, false },
+	{ pattern_eth_ipv4_udp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, false },
+	{ pattern_eth_ipv4_tcp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, false },
+	{ pattern_eth_ipv4_any, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, false },
+	{ pattern_eth_ipv4_udp_vxlan, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, true },
+	{ pattern_eth_ipv4_udp_vxlan_udp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, true },
+	{ pattern_eth_ipv4_udp_vxlan_tcp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, true },
+	{ pattern_eth_ipv4_udp_vxlan_any, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, true },
+	{ pattern_eth_ipv4_udp_vxlan_eth_ipv4, SSSNIC_ETHDEV_FLOW_TYPE_FDIR,
+		true },
+	{ pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR,
+		true },
+	{ pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR,
+		true },
+	{ pattern_eth_ipv4_udp_vxlan_eth_ipv6, SSSNIC_ETHDEV_FLOW_TYPE_FDIR,
+		true },
+	{ pattern_eth_ipv4_udp_vxlan_eth_ipv6_tcp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR,
+		true },
+	{ pattern_eth_ipv4_udp_vxlan_eth_ipv6_udp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR,
+		true },
+	{ pattern_eth_ipv6, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, false },
+	{ pattern_eth_ipv6_udp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, false },
+	{ pattern_eth_ipv6_tcp, SSSNIC_ETHDEV_FLOW_TYPE_FDIR, false },
+};
+
+static bool
+sssnic_ethdev_flow_pattern_match(enum rte_flow_item_type *item_array,
+	const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	/* skip VOID items at the head of the pattern */
+	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+		item++;
+
+	while ((*item_array == item->type) &&
+		(*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static struct sssnic_ethdev_flow_pattern *
+sssnic_ethdev_flow_pattern_lookup(const struct rte_flow_item *pattern)
+{
+	struct sssnic_ethdev_flow_pattern *flow_pattern;
+	enum rte_flow_item_type *flow_items;
+	size_t i;
+
+	for (i = 0; i < RTE_DIM(supported_flow_patterns); i++) {
+		flow_pattern = &supported_flow_patterns[i];
+		flow_items = flow_pattern->flow_items;
+		if (sssnic_ethdev_flow_pattern_match(flow_items, pattern))
+			return flow_pattern;
+	}
+
+	return NULL;
+}
+
+static int
+sssnic_ethdev_flow_action_parse(struct rte_eth_dev *ethdev,
+	const struct rte_flow_action *actions, struct rte_flow_error *error,
+	struct sssnic_ethdev_fdir_rule *fdir_rule)
+{
+	const struct rte_flow_action_queue *action_queue;
+	const struct rte_flow_action *action = actions;
+
+	if (action->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+			NULL,
+			"Unsupported action type, only queue action is supported");
+		return -EINVAL;
+	}
+
+	action_queue = (const struct rte_flow_action_queue *)action->conf;
+	if (action_queue == NULL ||
+		action_queue->index >= ethdev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+			NULL, "Invalid queue action conf or queue index");
+		return -EINVAL;
+	}
+
+	if (fdir_rule != NULL)
+		fdir_rule->action.qid = action_queue->index;
+
+	return 0;
+}
+
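+/*
+ * Parse an ethertype pattern: the MAC address masks must be zero and
+ * the ether type mask must be 0xffff, i.e. only an exact match on the
+ * Ethernet type field is supported.
+ */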
+static int
+sssnic_ethdev_flow_ethertype_pattern_parse(const struct rte_flow_item *pattern,
+	struct rte_flow_error *error, struct sssnic_ethdev_fdir_rule *fdir_rule)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *spec, *mask;
+	struct sssnic_ethdev_fdir_ethertype_match *fdir_match;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_ETH)
+		item++;
+
+	spec = (const struct rte_flow_item_eth *)item->spec;
+	mask = (const struct rte_flow_item_eth *)item->mask;
+
+	if (item->last != NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+			item, "Range match is not supported");
+		return -rte_errno;
+	}
+
+	if (spec == NULL || mask == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Ether spec or mask is NULL");
+		return -rte_errno;
+	}
+
+	if (!rte_is_zero_ether_addr(&mask->src) ||
+		!rte_is_zero_ether_addr(&mask->dst)) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid ether address mask");
+		return -rte_errno;
+	}
+
+	if (mask->type != 0xffff) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+			item, "Invalid ether type mask");
+		return -rte_errno;
+	}
+
+	if (fdir_rule != NULL) {
+		fdir_rule->match.type = SSSNIC_ETHDEV_FDIR_MATCH_ETHERTYPE;
+		fdir_match = &fdir_rule->match.ethertype;
+		fdir_match->key.ether_type = rte_be_to_cpu_16(spec->type);
+	}
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_flow_eth_parse(const struct rte_flow_item *item,
+	struct rte_flow_error *error)
+{
+	if (item->spec != NULL || item->mask != NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not support eth match in fdir flow");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_flow_ipv4_parse(const struct rte_flow_item *item,
+	struct rte_flow_error *error, bool outer,
+	struct sssnic_ethdev_fdir_flow_match *fdir_match)
+{
+	const struct rte_flow_item_ipv4 *spec, *mask;
+	uint32_t ip_addr;
+
+	spec = (const struct rte_flow_item_ipv4 *)item->spec;
+	mask = (const struct rte_flow_item_ipv4 *)item->mask;
+
+	if (outer) {
+		/* only tunnel flow has outer ipv4 */
+		if (spec == NULL && mask == NULL)
+			return 0;
+
+		if (spec == NULL || mask == NULL) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Invalid IPV4 spec or mask");
+			return -rte_errno;
+		}
+
+		if (mask->hdr.version_ihl || mask->hdr.type_of_service ||
+			mask->hdr.total_length || mask->hdr.packet_id ||
+			mask->hdr.fragment_offset || mask->hdr.time_to_live ||
+			mask->hdr.next_proto_id || mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Only support outer IPv4 src and dest address for tunnel flow");
+			return -rte_errno;
+		}
+
+		if (fdir_match != NULL) {
+			ip_addr = rte_be_to_cpu_32(spec->hdr.src_addr);
+			fdir_match->key.ipv4.outer_sip_w0 = (uint16_t)ip_addr;
+			fdir_match->key.ipv4.outer_sip_w1 =
+				(uint16_t)(ip_addr >> 16);
+
+			ip_addr = rte_be_to_cpu_32(mask->hdr.src_addr);
+			fdir_match->mask.ipv4.outer_sip_w0 = (uint16_t)ip_addr;
+			fdir_match->mask.ipv4.outer_sip_w1 =
+				(uint16_t)(ip_addr >> 16);
+		}
+	} else {
+		/* inner ip of tunnel flow or ip of non tunnel flow */
+		if (spec == NULL && mask == NULL)
+			return 0;
+
+		if (spec == NULL || mask == NULL) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Invalid IPV4 spec or mask");
+			return -rte_errno;
+		}
+
+		if (mask->hdr.version_ihl || mask->hdr.type_of_service ||
+			mask->hdr.total_length || mask->hdr.packet_id ||
+			mask->hdr.fragment_offset || mask->hdr.time_to_live ||
+			mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Only support IPv4 address and ipproto");
+			return -rte_errno;
+		}
+
+		if (fdir_match != NULL) {
+			ip_addr = rte_be_to_cpu_32(spec->hdr.src_addr);
+			fdir_match->key.ipv4.sip_w0 = (uint16_t)ip_addr;
+			fdir_match->key.ipv4.sip_w1 = (uint16_t)(ip_addr >> 16);
+
+			ip_addr = rte_be_to_cpu_32(mask->hdr.src_addr);
+			fdir_match->mask.ipv4.sip_w0 = (uint16_t)ip_addr;
+			fdir_match->mask.ipv4.sip_w1 =
+				(uint16_t)(ip_addr >> 16);
+
+			fdir_match->key.ipv4.ip_proto = spec->hdr.next_proto_id;
+			fdir_match->mask.ipv4.ip_proto =
+				mask->hdr.next_proto_id;
+
+			fdir_match->key.ipv4.ip_type =
+				SSSNIC_ETHDEV_FDIR_FLOW_IPV4;
+			fdir_match->mask.ipv4.ip_type = 0x1;
+		}
+	}
+
+	return 0;
+}
+
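+/*
+ * Parse an IPv6 item. In tunnel flows only the inner destination
+ * address and ip proto can be matched; in non-tunnel flows source
+ * address, destination address and ip proto can all be matched.
+ */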
+static int
+sssnic_ethdev_flow_ipv6_parse(const struct rte_flow_item *item,
+	struct rte_flow_error *error, bool is_tunnel,
+	struct sssnic_ethdev_fdir_flow_match *fdir_match)
+{
+	const struct rte_flow_item_ipv6 *spec, *mask;
+	uint32_t ipv6_addr[4];
+	int i;
+
+	mask = (const struct rte_flow_item_ipv6 *)item->mask;
+	spec = (const struct rte_flow_item_ipv6 *)item->spec;
+
+	if (fdir_match != NULL) {
+		/* ip_type is shared among all flow match layouts */
+		fdir_match->key.ipv6.ip_type = SSSNIC_ETHDEV_FDIR_FLOW_IPV6;
+		fdir_match->mask.ipv6.ip_type = 0x1;
+	}
+
+	if (is_tunnel) {
+		if (mask == NULL && spec == NULL)
+			return 0;
+
+		if (spec == NULL || mask == NULL) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Invalid IPV6 spec or mask");
+			return -rte_errno;
+		}
+
+		if (mask->hdr.vtc_flow || mask->hdr.payload_len ||
+			mask->hdr.hop_limits ||
+			!sssnic_is_zero_ipv6_addr(mask->hdr.src_addr)) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Only support IPv6 dest_addr and ipproto in tunnel flow");
+			return -rte_errno;
+		}
+
+		if (fdir_match != NULL) {
+			rte_memcpy(ipv6_addr, spec->hdr.dst_addr,
+				sizeof(ipv6_addr));
+			for (i = 0; i < 4; i++)
+				ipv6_addr[i] = rte_be_to_cpu_32(ipv6_addr[i]);
+
+			fdir_match->key.vxlan_ipv6.dip6_w0 =
+				(uint16_t)ipv6_addr[0];
+			fdir_match->key.vxlan_ipv6.dip6_w1 =
+				(uint16_t)(ipv6_addr[0] >> 16);
+			fdir_match->key.vxlan_ipv6.dip6_w2 =
+				(uint16_t)ipv6_addr[1];
+			fdir_match->key.vxlan_ipv6.dip6_w3 =
+				(uint16_t)(ipv6_addr[1] >> 16);
+			fdir_match->key.vxlan_ipv6.dip6_w4 =
+				(uint16_t)ipv6_addr[2];
+			fdir_match->key.vxlan_ipv6.dip6_w5 =
+				(uint16_t)(ipv6_addr[2] >> 16);
+			fdir_match->key.vxlan_ipv6.dip6_w6 =
+				(uint16_t)ipv6_addr[3];
+			fdir_match->key.vxlan_ipv6.dip6_w7 =
+				(uint16_t)(ipv6_addr[3] >> 16);
+
+			rte_memcpy(ipv6_addr, mask->hdr.dst_addr,
+				sizeof(ipv6_addr));
+			for (i = 0; i < 4; i++)
+				ipv6_addr[i] = rte_be_to_cpu_32(ipv6_addr[i]);
+
+			fdir_match->mask.vxlan_ipv6.dip6_w0 =
+				(uint16_t)ipv6_addr[0];
+			fdir_match->mask.vxlan_ipv6.dip6_w1 =
+				(uint16_t)(ipv6_addr[0] >> 16);
+			fdir_match->mask.vxlan_ipv6.dip6_w2 =
+				(uint16_t)ipv6_addr[1];
+			fdir_match->mask.vxlan_ipv6.dip6_w3 =
+				(uint16_t)(ipv6_addr[1] >> 16);
+			fdir_match->mask.vxlan_ipv6.dip6_w4 =
+				(uint16_t)ipv6_addr[2];
+			fdir_match->mask.vxlan_ipv6.dip6_w5 =
+				(uint16_t)(ipv6_addr[2] >> 16);
+			fdir_match->mask.vxlan_ipv6.dip6_w6 =
+				(uint16_t)ipv6_addr[3];
+			fdir_match->mask.vxlan_ipv6.dip6_w7 =
+				(uint16_t)(ipv6_addr[3] >> 16);
+
+			fdir_match->key.vxlan_ipv6.ip_proto = spec->hdr.proto;
+			fdir_match->mask.vxlan_ipv6.ip_proto = mask->hdr.proto;
+		}
+	} else { /* non tunnel */
+		if (spec == NULL || mask == NULL) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Invalid IPV6 spec or mask");
+			return -rte_errno;
+		}
+
+		if (mask->hdr.vtc_flow || mask->hdr.payload_len ||
+			mask->hdr.hop_limits) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Only support IPv6 addr and ipproto");
+			return -rte_errno;
+		}
+
+		if (fdir_match != NULL) {
+			rte_memcpy(ipv6_addr, spec->hdr.dst_addr,
+				sizeof(ipv6_addr));
+			for (i = 0; i < 4; i++)
+				ipv6_addr[i] = rte_be_to_cpu_32(ipv6_addr[i]);
+
+			fdir_match->key.ipv6.dip6_w0 = (uint16_t)ipv6_addr[0];
+			fdir_match->key.ipv6.dip6_w1 =
+				(uint16_t)(ipv6_addr[0] >> 16);
+			fdir_match->key.ipv6.dip6_w2 = (uint16_t)ipv6_addr[1];
+			fdir_match->key.ipv6.dip6_w3 =
+				(uint16_t)(ipv6_addr[1] >> 16);
+			fdir_match->key.ipv6.dip6_w4 = (uint16_t)ipv6_addr[2];
+			fdir_match->key.ipv6.dip6_w5 =
+				(uint16_t)(ipv6_addr[2] >> 16);
+			fdir_match->key.ipv6.dip6_w6 = (uint16_t)ipv6_addr[3];
+			fdir_match->key.ipv6.dip6_w7 =
+				(uint16_t)(ipv6_addr[3] >> 16);
+
+			rte_memcpy(ipv6_addr, spec->hdr.src_addr,
+				sizeof(ipv6_addr));
+			for (i = 0; i < 4; i++)
+				ipv6_addr[i] = rte_be_to_cpu_32(ipv6_addr[i]);
+
+			fdir_match->key.ipv6.sip6_w0 = (uint16_t)ipv6_addr[0];
+			fdir_match->key.ipv6.sip6_w1 =
+				(uint16_t)(ipv6_addr[0] >> 16);
+			fdir_match->key.ipv6.sip6_w2 = (uint16_t)ipv6_addr[1];
+			fdir_match->key.ipv6.sip6_w3 =
+				(uint16_t)(ipv6_addr[1] >> 16);
+			fdir_match->key.ipv6.sip6_w4 = (uint16_t)ipv6_addr[2];
+			fdir_match->key.ipv6.sip6_w5 =
+				(uint16_t)(ipv6_addr[2] >> 16);
+			fdir_match->key.ipv6.sip6_w6 = (uint16_t)ipv6_addr[3];
+			fdir_match->key.ipv6.sip6_w7 =
+				(uint16_t)(ipv6_addr[3] >> 16);
+
+			rte_memcpy(ipv6_addr, mask->hdr.dst_addr,
+				sizeof(ipv6_addr));
+			for (i = 0; i < 4; i++)
+				ipv6_addr[i] = rte_be_to_cpu_32(ipv6_addr[i]);
+
+			fdir_match->mask.ipv6.dip6_w0 = (uint16_t)ipv6_addr[0];
+			fdir_match->mask.ipv6.dip6_w1 =
+				(uint16_t)(ipv6_addr[0] >> 16);
+			fdir_match->mask.ipv6.dip6_w2 = (uint16_t)ipv6_addr[1];
+			fdir_match->mask.ipv6.dip6_w3 =
+				(uint16_t)(ipv6_addr[1] >> 16);
+			fdir_match->mask.ipv6.dip6_w4 = (uint16_t)ipv6_addr[2];
+			fdir_match->mask.ipv6.dip6_w5 =
+				(uint16_t)(ipv6_addr[2] >> 16);
+			fdir_match->mask.ipv6.dip6_w6 = (uint16_t)ipv6_addr[3];
+			fdir_match->mask.ipv6.dip6_w7 =
+				(uint16_t)(ipv6_addr[3] >> 16);
+
+			rte_memcpy(ipv6_addr, mask->hdr.src_addr,
+				sizeof(ipv6_addr));
+			for (i = 0; i < 4; i++)
+				ipv6_addr[i] = rte_be_to_cpu_32(ipv6_addr[i]);
+
+			fdir_match->mask.ipv6.sip6_w0 = (uint16_t)ipv6_addr[0];
+			fdir_match->mask.ipv6.sip6_w1 =
+				(uint16_t)(ipv6_addr[0] >> 16);
+			fdir_match->mask.ipv6.sip6_w2 = (uint16_t)ipv6_addr[1];
+			fdir_match->mask.ipv6.sip6_w3 =
+				(uint16_t)(ipv6_addr[1] >> 16);
+			fdir_match->mask.ipv6.sip6_w4 = (uint16_t)ipv6_addr[2];
+			fdir_match->mask.ipv6.sip6_w5 =
+				(uint16_t)(ipv6_addr[2] >> 16);
+			fdir_match->mask.ipv6.sip6_w6 = (uint16_t)ipv6_addr[3];
+			fdir_match->mask.ipv6.sip6_w7 =
+				(uint16_t)(ipv6_addr[3] >> 16);
+
+			fdir_match->key.ipv6.ip_proto = spec->hdr.proto;
+			fdir_match->mask.ipv6.ip_proto = mask->hdr.proto;
+		}
+	}
+
+	return 0;
+}
+
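+/*
+ * Parse a UDP item. An outer UDP item in a tunnel flow carries no
+ * match fields, so its spec and mask must be NULL; otherwise ip_proto
+ * is set to UDP and any given source and destination ports are
+ * programmed into the match key.
+ */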
+static int
+sssnic_ethdev_flow_udp_parse(const struct rte_flow_item *item,
+	struct rte_flow_error *error, bool outer,
+	struct sssnic_ethdev_fdir_flow_match *fdir_match)
+{
+	const struct rte_flow_item_udp *spec, *mask;
+
+	spec = (const struct rte_flow_item_udp *)item->spec;
+	mask = (const struct rte_flow_item_udp *)item->mask;
+
+	if (outer) {
+		if (spec != NULL || mask != NULL) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Both of outer UDP spec and mask must be NULL in tunnel flow");
+			return -rte_errno;
+		}
+
+		return 0;
+	}
+
+	if (fdir_match != NULL) {
+		/* ipv6 match can share ip_proto with ipv4 match */
+		fdir_match->key.ipv4.ip_proto = IPPROTO_UDP;
+		fdir_match->mask.ipv4.ip_proto = 0xff;
+	}
+
+	if (spec == NULL && mask == NULL)
+		return 0;
+
+	if (spec == NULL || mask == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid UDP spec or mask");
+		return -rte_errno;
+	}
+
+	if (fdir_match != NULL) {
+		/* Other types of fdir match can share sport and dport with ipv4 match */
+		fdir_match->key.ipv4.sport =
+			rte_be_to_cpu_16(spec->hdr.src_port);
+		fdir_match->mask.ipv4.sport =
+			rte_be_to_cpu_16(mask->hdr.src_port);
+		fdir_match->key.ipv4.dport =
+			rte_be_to_cpu_16(spec->hdr.dst_port);
+		fdir_match->mask.ipv4.dport =
+			rte_be_to_cpu_16(mask->hdr.dst_port);
+	}
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_flow_tcp_parse(const struct rte_flow_item *item,
+	struct rte_flow_error *error, bool outer,
+	struct sssnic_ethdev_fdir_flow_match *fdir_match)
+{
+	const struct rte_flow_item_tcp *spec, *mask;
+
+	spec = (const struct rte_flow_item_tcp *)item->spec;
+	mask = (const struct rte_flow_item_tcp *)item->mask;
+
+	if (outer) {
+		if (spec != NULL || mask != NULL) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Both of outer TCP spec and mask must be NULL in tunnel flow");
+			return -rte_errno;
+		}
+
+		return 0;
+	}
+
+	if (fdir_match != NULL) {
+		/* ipv6 match can share ip_proto with ipv4 match */
+		fdir_match->key.ipv4.ip_proto = IPPROTO_TCP;
+		fdir_match->mask.ipv4.ip_proto = 0xff;
+	}
+
+	if (spec == NULL && mask == NULL)
+		return 0;
+
+	if (spec == NULL || mask == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid TCP spec or mask.");
+		return -rte_errno;
+	}
+
+	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
+		mask->hdr.rx_win || mask->hdr.tcp_flags || mask->hdr.cksum ||
+		mask->hdr.tcp_urp) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"Invalid TCP item, support src_port and dst_port only");
+		return -rte_errno;
+	}
+
+	if (fdir_match != NULL) {
+		/* Other types of fdir match can share sport and dport with ipv4 match */
+		fdir_match->key.ipv4.sport =
+			rte_be_to_cpu_16(spec->hdr.src_port);
+		fdir_match->mask.ipv4.sport =
+			rte_be_to_cpu_16(mask->hdr.src_port);
+		fdir_match->key.ipv4.dport =
+			rte_be_to_cpu_16(spec->hdr.dst_port);
+		fdir_match->mask.ipv4.dport =
+			rte_be_to_cpu_16(mask->hdr.dst_port);
+	}
+
+	return 0;
+}
+
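+/*
+ * Parse a VXLAN item. The 24-bit VNI is packed into the three least
+ * significant bytes of a zero-initialized 32-bit big-endian word,
+ * converted to host order and split into two 16-bit words of the
+ * match key.
+ */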
+static int
+sssnic_ethdev_flow_vxlan_parse(const struct rte_flow_item *item,
+	struct rte_flow_error *error,
+	struct sssnic_ethdev_fdir_flow_match *fdir_match)
+{
+	const struct rte_flow_item_vxlan *spec, *mask;
+	uint32_t vni = 0;
+
+	spec = (const struct rte_flow_item_vxlan *)item->spec;
+	mask = (const struct rte_flow_item_vxlan *)item->mask;
+
+	if (spec == NULL && mask == NULL)
+		return 0;
+
+	if (spec == NULL || mask == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid VXLAN spec or mask");
+		return -rte_errno;
+	}
+
+	/* vxlan-ipv6 match can share vni with vxlan-ipv4 match */
+	if (fdir_match != NULL) {
+		rte_memcpy(((uint8_t *)&vni) + 1, spec->vni, 3);
+		vni = rte_be_to_cpu_32(vni);
+		fdir_match->key.ipv4.vni_w0 = (uint16_t)vni;
+		fdir_match->key.ipv4.vni_w1 = (uint16_t)(vni >> 16);
+		rte_memcpy(((uint8_t *)&vni) + 1, mask->vni, 3);
+		vni = rte_be_to_cpu_32(vni);
+		fdir_match->mask.ipv4.vni_w0 = (uint16_t)vni;
+		fdir_match->mask.ipv4.vni_w1 = (uint16_t)(vni >> 16);
+	}
+
+	return 0;
+}
+
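+/*
+ * Walk all flow items and fill the FDIR flow match. For tunnel
+ * patterns, items before VXLAN are parsed as outer headers; once the
+ * VXLAN item is seen, following IP/L4 items are parsed as inner ones.
+ */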
+static int
+sssnic_ethdev_flow_fdir_pattern_parse(const struct rte_flow_item *pattern,
+	struct rte_flow_error *error, bool is_tunnel,
+	struct sssnic_ethdev_fdir_rule *fdir_rule)
+{
+	struct sssnic_ethdev_fdir_flow_match *fdir_match = NULL;
+	const struct rte_flow_item *flow_item;
+	bool outer_ip;
+	int ret = 0;
+
+	if (fdir_rule != NULL) {
+		fdir_rule->match.type = SSSNIC_ETHDEV_FDIR_MATCH_FLOW;
+		fdir_match = &fdir_rule->match.flow;
+	}
+
+	outer_ip = is_tunnel;
+
+	flow_item = pattern;
+	while (flow_item->type != RTE_FLOW_ITEM_TYPE_END) {
+		switch (flow_item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			ret = sssnic_ethdev_flow_eth_parse(flow_item, error);
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ret = sssnic_ethdev_flow_ipv4_parse(flow_item, error,
+				outer_ip, fdir_match);
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ret = sssnic_ethdev_flow_ipv6_parse(flow_item, error,
+				is_tunnel, fdir_match);
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			ret = sssnic_ethdev_flow_udp_parse(flow_item, error,
+				outer_ip, fdir_match);
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			ret = sssnic_ethdev_flow_tcp_parse(flow_item, error,
+				outer_ip, fdir_match);
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			ret = sssnic_ethdev_flow_vxlan_parse(flow_item, error,
+				fdir_match);
+			outer_ip = false; /* items after VXLAN are inner headers */
+			break;
+		default:
+			break;
+		}
+
+		if (ret != 0)
+			return ret;
+
+		flow_item++;
+	}
+
+	if (is_tunnel && fdir_match != NULL) {
+		/* tunnel_type is shared among all flow match layouts */
+		fdir_match->key.ipv4.tunnel_type =
+			SSSNIC_ETHDEV_FDIR_FLOW_TUNNEL_VXLAN;
+		fdir_match->mask.ipv4.tunnel_type = 0x1;
+	}
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_flow_attr_parse(const struct rte_flow_attr *attr,
+	struct rte_flow_error *error)
+{
+	if (attr->egress != 0 || attr->priority != 0 || attr->group != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+			attr, "Invalid flow attr, support ingress only");
+		return -rte_errno;
+	}
+
+	if (attr->ingress == 0) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+			"Ingress of flow attr is not set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_flow_parse(struct rte_eth_dev *ethdev,
+	const struct rte_flow_attr *attr, const struct rte_flow_item *pattern,
+	const struct rte_flow_action *actions, struct rte_flow_error *error,
+	struct sssnic_ethdev_fdir_rule *fdir_rule)
+{
+	int ret;
+	struct sssnic_ethdev_flow_pattern *flow_pattern;
+
+	flow_pattern = sssnic_ethdev_flow_pattern_lookup(pattern);
+	if (flow_pattern == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL, "Unsupported pattern");
+		return -rte_errno;
+	}
+
+	if (flow_pattern->type == SSSNIC_ETHDEV_FLOW_TYPE_FDIR)
+		ret = sssnic_ethdev_flow_fdir_pattern_parse(pattern, error,
+			flow_pattern->is_tunnel, fdir_rule);
+	else
+		ret = sssnic_ethdev_flow_ethertype_pattern_parse(pattern, error,
+			fdir_rule);
+	if (ret != 0)
+		return ret;
+
+	ret = sssnic_ethdev_flow_action_parse(ethdev, actions, error,
+		fdir_rule);
+	if (ret != 0)
+		return ret;
+
+	ret = sssnic_ethdev_flow_attr_parse(attr, error);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+sssnic_ethdev_flow_create(struct rte_eth_dev *ethdev,
+	const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
+	const struct rte_flow_action actions[], struct rte_flow_error *error)
+{
+	struct sssnic_ethdev_fdir_rule *rule;
+	int ret;
+
+	rule = sssnic_ethdev_fdir_rule_alloc();
+	if (rule == NULL) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+			NULL, "Failed to allocate fdir rule memory");
+		return NULL;
+	}
+
+	ret = sssnic_ethdev_flow_parse(ethdev, attr, pattern, actions, error,
+		rule);
+	if (ret != 0) {
+		sssnic_ethdev_fdir_rule_free(rule);
+		return NULL;
+	}
+
+	ret = sssnic_ethdev_fdir_rule_add(ethdev, rule);
+	if (ret != 0) {
+		sssnic_ethdev_fdir_rule_free(rule);
+		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"Failed to add fdir rule");
+		return NULL;
+	}
+
+	return (struct rte_flow *)rule;
+}
+
+static int
+sssnic_ethdev_flow_destroy(struct rte_eth_dev *ethdev, struct rte_flow *flow,
+	struct rte_flow_error *error)
+{
+	int ret;
+
+	if (flow == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+			NULL, "Invalid parameter");
+		return -rte_errno;
+	}
+
+	ret = sssnic_ethdev_fdir_rule_del(ethdev,
+		(struct sssnic_ethdev_fdir_rule *)flow);
+
+	if (ret != 0) {
+		rte_flow_error_set(error, EIO, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"Failed to delete fdir rule");
+		return -rte_errno;
+	}
+
+	sssnic_ethdev_fdir_rule_free((struct sssnic_ethdev_fdir_rule *)flow);
+
+	return 0;
+}
+
+static int
+sssnic_ethdev_flow_validate(struct rte_eth_dev *ethdev,
+	const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
+	const struct rte_flow_action actions[], struct rte_flow_error *error)
+{
+	return sssnic_ethdev_flow_parse(ethdev, attr, pattern, actions, error,
+		NULL);
+}
+
+static int
+sssnic_ethdev_flow_flush(struct rte_eth_dev *ethdev,
+	struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = sssnic_ethdev_fdir_rules_flush(ethdev);
+	if (ret != 0) {
+		rte_flow_error_set(error, EIO, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"Failed to flush fdir rules");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static const struct rte_flow_ops sssnic_ethdev_flow_ops = {
+	.validate = sssnic_ethdev_flow_validate,
+	.create = sssnic_ethdev_flow_create,
+	.destroy = sssnic_ethdev_flow_destroy,
+	.flush = sssnic_ethdev_flow_flush,
+};
+
+int
+sssnic_ethdev_flow_ops_get(struct rte_eth_dev *ethdev,
+	const struct rte_flow_ops **ops)
+{
+	RTE_SET_USED(ethdev);
+
+	*ops = &sssnic_ethdev_flow_ops;
+
+	return 0;
+}
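
Reviewer note: below is a minimal, illustrative sketch of how an application
could exercise the flow ops added above through the generic rte_flow API.
The helper name, port id, queue index and IPv4 address are arbitrary examples
(not values taken from this patch); nothing beyond the standard rte_flow
interface is assumed. It creates an eth/ipv4/udp rule, one of the patterns
listed in supported_flow_patterns, steering matched packets to Rx queue 1:

	/* Illustrative only: steer UDP packets from 192.168.0.1 to queue 1 */
	#include <rte_errno.h>
	#include <rte_ethdev.h>
	#include <rte_flow.h>
	#include <rte_ip.h>

	static int
	example_create_fdir_flow(uint16_t port_id)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_ipv4 ip_spec = {
			.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
		};
		struct rte_flow_item_ipv4 ip_mask = {
			.hdr.src_addr = RTE_BE32(0xffffffff),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &ip_spec, .mask = &ip_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_UDP },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = 1 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error err;

		/* The PMD parses attr, pattern and actions as shown above */
		if (rte_flow_create(port_id, &attr, pattern, actions, &err) == NULL)
			return -rte_errno;

		return 0;
	}
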
diff --git a/drivers/net/sssnic/sssnic_ethdev_flow.h b/drivers/net/sssnic/sssnic_ethdev_flow.h
new file mode 100644
index 0000000000..2812b783e2
--- /dev/null
+++ b/drivers/net/sssnic/sssnic_ethdev_flow.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#ifndef _SSSNIC_ETHDEV_FLOW_H_
+#define _SSSNIC_ETHDEV_FLOW_H_
+
+int sssnic_ethdev_flow_ops_get(struct rte_eth_dev *ethdev,
+	const struct rte_flow_ops **ops);
+
+#endif /* _SSSNIC_ETHDEV_FLOW_H_ */
diff --git a/drivers/net/sssnic/sssnic_ethdev_rx.c b/drivers/net/sssnic/sssnic_ethdev_rx.c
index 6c5f209262..46a1d5fd23 100644
--- a/drivers/net/sssnic/sssnic_ethdev_rx.c
+++ b/drivers/net/sssnic/sssnic_ethdev_rx.c
@@ -11,6 +11,7 @@
 #include "sssnic_ethdev.h"
 #include "sssnic_ethdev_rx.h"
 #include "sssnic_ethdev_rss.h"
+#include "sssnic_ethdev_fdir.h"
 #include "base/sssnic_hw.h"
 #include "base/sssnic_workq.h"
 #include "base/sssnic_api.h"
@@ -593,9 +594,18 @@ static int
 sssnic_ethdev_rxq_enable(struct rte_eth_dev *ethdev, uint16_t queue_id)
 {
 	struct sssnic_ethdev_rxq *rxq = ethdev->data->rx_queues[queue_id];
+	int ret;
 
 	sssnic_ethdev_rxq_pktmbufs_fill(rxq);
 
+	pthread_mutex_lock(&ethdev->data->flow_ops_mutex);
+	ret = sssnic_ethdev_fdir_rules_enable_by_queue(ethdev, queue_id);
+	if (ret != 0)
+		PMD_DRV_LOG(WARNING,
+			"Failed to enable fdir rules of rxq:%u, port:%u",
+			queue_id, ethdev->data->port_id);
+	pthread_mutex_unlock(&ethdev->data->flow_ops_mutex);
+
 	return 0;
 }
 
@@ -605,6 +615,14 @@ sssnic_ethdev_rxq_disable(struct rte_eth_dev *ethdev, uint16_t queue_id)
 	struct sssnic_ethdev_rxq *rxq = ethdev->data->rx_queues[queue_id];
 	int ret;
 
+	pthread_mutex_lock(&ethdev->data->flow_ops_mutex);
+	ret = sssnic_ethdev_fdir_rules_disable_by_queue(ethdev, queue_id);
+	if (ret != 0)
+		PMD_DRV_LOG(WARNING,
+			"Failed to disable fdir rules of rxq:%u, port:%u",
+			queue_id, ethdev->data->port_id);
+	pthread_mutex_unlock(&ethdev->data->flow_ops_mutex);
+
 	ret = sssnic_ethdev_rxq_flush(rxq);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to flush rxq:%u, port:%u", queue_id,
-- 
2.27.0

