DPDK patches and discussions
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v3 28/33] net/txgbe: support to add traffic mirror rules
Date: Fri, 18 Dec 2020 17:36:57 +0800	[thread overview]
Message-ID: <20201218093702.3651867-29-jiawenwu@trustnetic.com> (raw)
In-Reply-To: <20201218093702.3651867-1-jiawenwu@trustnetic.com>

Add support for setting and resetting traffic mirroring rules
(pool, uplink port, downlink port and VLAN mirroring).

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
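A minimal usage sketch (not part of the patch) of how an application could
exercise these ops through the generic ethdev mirror API available in this
DPDK release; the pool mask, destination pool and rule id below are
illustrative assumptions:

	/* Illustrative only: mirror Rx traffic of pools 0 and 1 on the
	 * given port into pool 3 via rule 0, then clear the rule again.
	 */
	#include <string.h>
	#include <rte_ethdev.h>

	static int
	example_mirror_pool_traffic(uint16_t port_id)
	{
		struct rte_eth_mirror_conf conf;
		int ret;

		memset(&conf, 0, sizeof(conf));
		/* mirror traffic received by the selected pools */
		conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
		conf.pool_mask = 0x3;	/* source pools 0 and 1 */
		conf.dst_pool = 3;	/* destination pool */

		/* program rule 0; ends up in txgbe_mirror_rule_set() */
		ret = rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
		if (ret != 0)
			return ret;

		/* ... mirrored traffic flows here ... */

		/* clear rule 0; ends up in txgbe_mirror_rule_reset() */
		return rte_eth_mirror_rule_reset(port_id, 0);
	}

Note that VT (pool) mode must be enabled on the port beforehand, which
txgbe_vt_check() below enforces.
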
 doc/guides/nics/features/txgbe.ini |   1 +
 drivers/net/txgbe/txgbe_ethdev.c   | 186 +++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_ethdev.h   |   1 +
 3 files changed, 188 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index ffeecfd20..9db2ccde0 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -28,6 +28,7 @@ VLAN filter          = Y
 Flow control         = Y
 Flow API             = Y
 Rate limitation      = Y
+Traffic mirroring    = Y
 CRC offload          = P
 VLAN offload         = P
 QinQ offload         = P
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 6f38eebc3..fdeac4f7e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -3369,6 +3369,21 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	return 0;
 }
 
+int
+txgbe_vt_check(struct txgbe_hw *hw)
+{
+	uint32_t reg_val;
+
+	/* check whether Virtualization Technology (VT) is enabled */
+	reg_val = rd32(hw, TXGBE_PORTCTL);
+	if (!(reg_val & TXGBE_PORTCTL_NUMVT_MASK)) {
+		PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
+		return -1;
+	}
+
+	return 0;
+}
+
 static uint32_t
 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
 {
@@ -3506,6 +3521,175 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 	return new_val;
 }
 
+#define TXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
+static int
+txgbe_mirror_rule_set(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      uint8_t rule_id, uint8_t on)
+{
+	uint32_t mr_ctl, vlvf;
+	uint32_t mp_lsb = 0;
+	uint32_t mv_msb = 0;
+	uint32_t mv_lsb = 0;
+	uint32_t mp_msb = 0;
+	uint8_t i = 0;
+	int reg_index = 0;
+	uint64_t vlan_mask = 0;
+
+	const uint8_t pool_mask_offset = 32;
+	const uint8_t vlan_mask_offset = 32;
+	const uint8_t dst_pool_offset = 8;
+	const uint8_t rule_mr_offset = 4;
+	const uint8_t mirror_rule_mask = 0x0F;
+
+	struct txgbe_mirror_info *mr_info = TXGBE_DEV_MR_INFO(dev);
+	struct rte_eth_mirror_conf *mr_conf = &mr_info->mr_conf[rule_id];
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint8_t mirror_type = 0;
+
+	if (txgbe_vt_check(hw) < 0)
+		return -ENOTSUP;
+
+	if (rule_id >= TXGBE_MAX_MIRROR_RULES)
+		return -EINVAL;
+
+	if (TXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+		PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+			    mirror_conf->rule_type);
+		return -EINVAL;
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+		mirror_type |= TXGBE_MIRRCTL_VLAN;
+		/* Check if vlan id is valid and find corresponding VLAN ID
+		 * index in PSRVLAN
+		 */
+		for (i = 0; i < TXGBE_NUM_POOL; i++) {
+			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+				/* look up the pool VLAN filter (PSRVLAN)
+				 * index for this VLAN ID
+				 */
+				reg_index = txgbe_find_vlvf_slot(hw,
+						mirror_conf->vlan.vlan_id[i],
+						false);
+				if (reg_index < 0)
+					return -EINVAL;
+				wr32(hw, TXGBE_PSRVLANIDX, reg_index);
+				vlvf = rd32(hw, TXGBE_PSRVLAN);
+				if ((TXGBE_PSRVLAN_VID(vlvf) ==
+				      mirror_conf->vlan.vlan_id[i]))
+					vlan_mask |= (1ULL << reg_index);
+				else
+					return -EINVAL;
+			}
+		}
+
+		if (on) {
+			mv_lsb = vlan_mask & BIT_MASK32;
+			mv_msb = vlan_mask >> vlan_mask_offset;
+
+			mr_conf->vlan.vlan_mask = mirror_conf->vlan.vlan_mask;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+					mr_conf->vlan.vlan_id[i] =
+						mirror_conf->vlan.vlan_id[i];
+			}
+		} else {
+			mv_lsb = 0;
+			mv_msb = 0;
+			mr_conf->vlan.vlan_mask = 0;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+				mr_conf->vlan.vlan_id[i] = 0;
+		}
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+		mirror_type |= TXGBE_MIRRCTL_POOL;
+		if (on) {
+			mp_lsb = mirror_conf->pool_mask & BIT_MASK32;
+			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+			mr_conf->pool_mask = mirror_conf->pool_mask;
+		} else {
+			mp_lsb = 0;
+			mp_msb = 0;
+			mr_conf->pool_mask = 0;
+		}
+	}
+	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+		mirror_type |= TXGBE_MIRRCTL_UPLINK;
+	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+		mirror_type |= TXGBE_MIRRCTL_DNLINK;
+
+	/* read mirror control register and recalculate it */
+	mr_ctl = rd32(hw, TXGBE_MIRRCTL(rule_id));
+
+	if (on) {
+		mr_ctl |= mirror_type;
+		mr_ctl &= mirror_rule_mask;
+		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+	} else {
+		mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+	}
+
+	mr_conf->rule_type = mirror_conf->rule_type;
+	mr_conf->dst_pool = mirror_conf->dst_pool;
+
+	/* write mirror control register */
+	wr32(hw, TXGBE_MIRRCTL(rule_id), mr_ctl);
+
+	/* write pool mirror control register */
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+		wr32(hw, TXGBE_MIRRPOOLL(rule_id), mp_lsb);
+		wr32(hw, TXGBE_MIRRPOOLH(rule_id + rule_mr_offset),
+				mp_msb);
+	}
+	/* write VLAN mirror control register */
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+		wr32(hw, TXGBE_MIRRVLANL(rule_id), mv_lsb);
+		wr32(hw, TXGBE_MIRRVLANH(rule_id + rule_mr_offset),
+				mv_msb);
+	}
+
+	return 0;
+}
+
+static int
+txgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+	int mr_ctl = 0;
+	uint32_t lsb_val = 0;
+	uint32_t msb_val = 0;
+	const uint8_t rule_mr_offset = 4;
+
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_mirror_info *mr_info = TXGBE_DEV_MR_INFO(dev);
+
+	if (txgbe_vt_check(hw) < 0)
+		return -ENOTSUP;
+
+	if (rule_id >= TXGBE_MAX_MIRROR_RULES)
+		return -EINVAL;
+
+	memset(&mr_info->mr_conf[rule_id], 0,
+	       sizeof(struct rte_eth_mirror_conf));
+
+	/* clear MIRRCTL register */
+	wr32(hw, TXGBE_MIRRCTL(rule_id), mr_ctl);
+
+	/* clear pool mask register */
+	wr32(hw, TXGBE_MIRRPOOLL(rule_id), lsb_val);
+	wr32(hw, TXGBE_MIRRPOOLH(rule_id + rule_mr_offset), msb_val);
+
+	/* clear vlan mask register */
+	wr32(hw, TXGBE_MIRRVLANL(rule_id), lsb_val);
+	wr32(hw, TXGBE_MIRRVLANH(rule_id + rule_mr_offset), msb_val);
+
+	return 0;
+}
+
 static int
 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -5190,6 +5374,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.mac_addr_set               = txgbe_set_default_mac_addr,
 	.uc_hash_table_set          = txgbe_uc_hash_table_set,
 	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
+	.mirror_rule_set            = txgbe_mirror_rule_set,
+	.mirror_rule_reset          = txgbe_mirror_rule_reset,
 	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
 	.reta_update                = txgbe_dev_rss_reta_update,
 	.reta_query                 = txgbe_dev_rss_reta_query,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 52ddda577..ecec49112 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -566,6 +566,7 @@ void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
 void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
 int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
 
+int txgbe_vt_check(struct txgbe_hw *hw);
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 			    uint16_t tx_rate, uint64_t q_msk);
 int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
-- 
2.18.2

Thread overview: 36+ messages
2020-12-18  9:36 [dpdk-dev] [PATCH v3 00/33] net: add txgbe PMD part 2 Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 01/33] net/txgbe: add generic flow API Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 02/33] net/txgbe: add ntuple filter init and uninit Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 03/33] net/txgbe: support ntuple filter add and delete Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 04/33] net/txgbe: parse n-tuple filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 05/33] net/txgbe: support ethertype filter add and delete Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 06/33] net/txgbe: parse ethertype filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 07/33] net/txgbe: support syn filter add and delete Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 08/33] net/txgbe: parse syn filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 09/33] net/txgbe: add L2 tunnel filter init and uninit Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 10/33] net/txgbe: config L2 tunnel filter with e-tag Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 11/33] net/txgbe: support L2 tunnel filter add and delete Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 12/33] net/txgbe: parse L2 tunnel filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 13/33] net/txgbe: add flow director filter init and uninit Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 14/33] net/txgbe: configure flow director filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 15/33] net/txgbe: support flow director filter add and delete Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 16/33] net/txgbe: parse flow director filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 17/33] net/txgbe: restore RSS filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 18/33] net/txgbe: parse " Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 19/33] net/txgbe: support to create consistent filter Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 20/33] net/txgbe: support to destroy " Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 21/33] net/txgbe: flush all the filters Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 22/33] net/txgbe: support UDP tunnel port add and delete Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 23/33] net/txgbe: add TM configuration init and uninit Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 24/33] net/txgbe: add TM capabilities get operation Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 25/33] net/txgbe: support TM shaper profile add and delete Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 26/33] net/txgbe: support TM node " Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 27/33] net/txgbe: add TM hierarchy commit Jiawen Wu
2020-12-18  9:36 ` Jiawen Wu [this message]
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 29/33] net/txgbe: add IPsec context creation Jiawen Wu
2020-12-18  9:36 ` [dpdk-dev] [PATCH v3 30/33] net/txgbe: add security session create operation Jiawen Wu
2020-12-18  9:37 ` [dpdk-dev] [PATCH v3 31/33] net/txgbe: destroy security session Jiawen Wu
2020-12-18  9:37 ` [dpdk-dev] [PATCH v3 32/33] net/txgbe: add security offload in Rx and Tx process Jiawen Wu
2020-12-18  9:37 ` [dpdk-dev] [PATCH v3 33/33] net/txgbe: add security type in flow action Jiawen Wu
2021-01-13  6:15 ` [dpdk-dev] [PATCH v3 00/33] net: add txgbe PMD part 2 Jiawen Wu
2021-01-13 13:15   ` Ferruh Yigit
