DPDK patches and discussions
* [dpdk-dev] [PATCH 0/5] misc updates and fixes for hns3 PMD driver
From: Wei Hu (Xavier) @ 2020-03-07  8:57 UTC
  To: dev

This series contains updates and fixes for the hns3 PMD driver.

Chengchang Tang (3):
  net/hns3: remove unnecessary restriction on setting VF's MTU
  net/hns3: support promiscuous and allmulticast mode for VF
  net/hns3: fix promiscuous mode for PF

Hongbo Zheng (1):
  net/hns3: support TCP segment offload

Wei Hu (Xavier) (1):
  net/hns3: fix wrong Tx interrupt when enabling Rx interrupt

 doc/guides/nics/features/hns3.ini    |   1 +
 doc/guides/nics/features/hns3_vf.ini |   1 +
 doc/guides/nics/hns3.rst             |   1 +
 drivers/net/hns3/hns3_cmd.h          |  31 ++-
 drivers/net/hns3/hns3_ethdev.c       | 162 +++++++++++---
 drivers/net/hns3/hns3_ethdev.h       |   7 +-
 drivers/net/hns3/hns3_ethdev_vf.c    | 315 +++++++++++++++++++++++----
 drivers/net/hns3/hns3_mbx.c          |  23 ++
 drivers/net/hns3/hns3_mbx.h          |   2 +
 drivers/net/hns3/hns3_regs.h         |  13 +-
 drivers/net/hns3/hns3_rxtx.c         | 306 +++++++++++++++++++++++---
 drivers/net/hns3/hns3_rxtx.h         |   5 +-
 12 files changed, 752 insertions(+), 115 deletions(-)

-- 
2.23.0



* [dpdk-dev] [PATCH 1/5] net/hns3: support TCP segment offload
From: Wei Hu (Xavier) @ 2020-03-07  8:57 UTC
  To: dev

From: Hongbo Zheng <zhenghongbo3@huawei.com>

This patch adds TCP segmentation offload (TSO) support to the hns3 PMD
driver.
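
As an illustration (not part of this patch), an application requests TSO
per packet through the standard rte_mbuf fields; a minimal sketch,
assuming a plain IPv4/TCP packet with no header options:

	/* Hedged usage sketch: mark one mbuf for TSO before the Tx burst. */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
	m->l2_len = sizeof(struct rte_ether_hdr);	/* 14-byte Ethernet */
	m->l3_len = sizeof(struct rte_ipv4_hdr);	/* 20-byte IPv4 */
	m->l4_len = sizeof(struct rte_tcp_hdr);		/* 20-byte TCP */
	m->tso_segsz = 1460;				/* desired MSS per segment */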

Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 doc/guides/nics/features/hns3.ini    |   1 +
 doc/guides/nics/features/hns3_vf.ini |   1 +
 doc/guides/nics/hns3.rst             |   1 +
 drivers/net/hns3/hns3_ethdev.c       |   4 +
 drivers/net/hns3/hns3_ethdev.h       |   6 +-
 drivers/net/hns3/hns3_ethdev_vf.c    |   4 +
 drivers/net/hns3/hns3_rxtx.c         | 258 +++++++++++++++++++++++++--
 7 files changed, 259 insertions(+), 16 deletions(-)

diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index cd5c08a9d..c3a8544bc 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -8,6 +8,7 @@ Link status          = Y
 Rx interrupt         = Y
 MTU update           = Y
 Jumbo frame          = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
diff --git a/doc/guides/nics/features/hns3_vf.ini b/doc/guides/nics/features/hns3_vf.ini
index fd00ac3e2..e4e77380f 100644
--- a/doc/guides/nics/features/hns3_vf.ini
+++ b/doc/guides/nics/features/hns3_vf.ini
@@ -8,6 +8,7 @@ Link status          = Y
 Rx interrupt         = Y
 MTU update           = Y
 Jumbo frame          = Y
+TSO                  = Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
 RSS hash             = Y
diff --git a/doc/guides/nics/hns3.rst b/doc/guides/nics/hns3.rst
index 8d19f4851..05dbe4174 100644
--- a/doc/guides/nics/hns3.rst
+++ b/doc/guides/nics/hns3.rst
@@ -17,6 +17,7 @@ Features of the HNS3 PMD are:
 - Receive Side Scaling (RSS)
 - Packet type information
 - Checksum offload
+- TSO offload
 - Promiscuous mode
 - Multicast mode
 - Port hardware statistics
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 918fbe076..d4751d478 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2256,6 +2256,10 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_VLAN_INSERT |
 				 DEV_TX_OFFLOAD_QINQ_INSERT |
 				 DEV_TX_OFFLOAD_MULTI_SEGS |
+				 DEV_TX_OFFLOAD_TCP_TSO |
+				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
+				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
 				 info->tx_queue_offload_capa);
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index d4a03065f..c7625119a 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -31,10 +31,14 @@
 #define HNS3_MC_MACADDR_NUM		128
 
 #define HNS3_MAX_BD_SIZE		65535
-#define HNS3_MAX_TX_BD_PER_PKT		8
+#define HNS3_MAX_NON_TSO_BD_PER_PKT	8
+#define HNS3_MAX_TSO_BD_PER_PKT		63
 #define HNS3_MAX_FRAME_LEN		9728
 #define HNS3_VLAN_TAG_SIZE		4
 #define HNS3_DEFAULT_RX_BUF_LEN		2048
+#define HNS3_MAX_BD_PAYLEN		(1024 * 1024 - 1)
+#define HNS3_MAX_TSO_HDR_SIZE		512
+#define HNS3_MAX_TSO_HDR_BD_NUM		3
 
 #define HNS3_ETH_OVERHEAD \
 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + HNS3_VLAN_TAG_SIZE * 2)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 505525eba..7b776ad13 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -592,6 +592,10 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_VLAN_INSERT |
 				 DEV_TX_OFFLOAD_QINQ_INSERT |
 				 DEV_TX_OFFLOAD_MULTI_SEGS |
+				 DEV_TX_OFFLOAD_TCP_TSO |
+				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
+				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
 				 info->tx_queue_offload_capa);
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 03ebda658..aaccf7ef2 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1704,6 +1704,78 @@ hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
 	txq->tx_bd_ready   = tx_bd_ready;
 }
 
+static int
+hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
+		     struct rte_mbuf *rxm, uint8_t *l2_len)
+{
+	uint64_t tun_flags;
+	uint8_t ol4_len;
+	uint32_t otmp;
+
+	tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
+	if (tun_flags == 0)
+		return 0;
+
+	otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
+	switch (tun_flags) {
+	case PKT_TX_TUNNEL_GENEVE:
+	case PKT_TX_TUNNEL_VXLAN:
+		*l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
+		break;
+	case PKT_TX_TUNNEL_GRE:
+		/*
+		 * The OL4 header length field is in units of 4 bytes; it
+		 * covers the outer L4 (GRE) header and the tunneling header.
+		 */
+		ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
+					 HNS3_TXD_L4LEN_S);
+		*l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
+		break;
+	default:
+		/* For tunnel types other than UDP-based or GRE, drop the packet */
+		return -EINVAL;
+	}
+	hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+		       rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
+	desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
+
+	return 0;
+}
+
+static void
+hns3_set_tso(struct hns3_desc *desc,
+	     uint64_t ol_flags, struct rte_mbuf *rxm)
+{
+	uint32_t paylen, hdr_len;
+	uint32_t tmp;
+	uint8_t l2_len = rxm->l2_len;
+
+	if (!(ol_flags & PKT_TX_TCP_SEG))
+		return;
+
+	if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
+		return;
+
+	hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
+	hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
+		    rxm->outer_l2_len + rxm->outer_l3_len : 0;
+	paylen = rxm->pkt_len - hdr_len;
+	if (paylen <= rxm->tso_segsz)
+		return;
+
+	tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
+	hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
+	hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
+	hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
+	hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
+	hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+		       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
+	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+		       l2_len >> HNS3_L2_LEN_UNIT);
+	desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
+	desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
+}
+
 static void
 fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
 	  bool first, int offset)
@@ -1711,9 +1783,9 @@ fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
 	struct hns3_desc *tx_ring = txq->tx_ring;
 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
 	uint8_t frag_end = rxm->next == NULL ? 1 : 0;
+	uint64_t ol_flags = rxm->ol_flags;
 	uint16_t size = rxm->data_len;
 	uint16_t rrcfv = 0;
-	uint64_t ol_flags = rxm->ol_flags;
 	uint32_t hdr_len;
 	uint32_t paylen;
 	uint32_t tmp;
@@ -1728,6 +1800,7 @@ fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm,
 			   rxm->outer_l2_len + rxm->outer_l3_len : 0;
 		paylen = rxm->pkt_len - hdr_len;
 		desc->tx.paylen = rte_cpu_to_le_32(paylen);
+		hns3_set_tso(desc, ol_flags, rxm);
 	}
 
 	hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end);
@@ -2041,6 +2114,136 @@ hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
 	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
 }
 
+static bool
+hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
+{
+	struct rte_mbuf *m_first = tx_pkts;
+	struct rte_mbuf *m_last = tx_pkts;
+	uint32_t tot_len = 0;
+	uint32_t hdr_len;
+	uint32_t i;
+
+	/*
+	 * The hns3 network engine requires that the sum of the data lengths
+	 * of every 8 consecutive buffers be greater than the MSS. We simplify
+	 * this by ensuring the sum of the first 8 frags is greater than the
+	 * GSO header length plus the MSS, and the sum of every subsequent 7
+	 * consecutive frags is greater than the MSS, except the last 7 frags.
+	 */
+	if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
+		return false;
+
+	for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
+	     i++, m_last = m_last->next)
+		tot_len += m_last->data_len;
+
+	if (!m_last)
+		return true;
+
+	/* ensure the sum of the first 8 frags is greater than mss + header */
+	hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
+	hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
+		   tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
+	if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
+		return true;
+
+	/*
+	 * ensure that the sum of the data lengths of every 7 consecutive
+	 * buffers is greater than the mss, except for the last one.
+	 */
+	for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
+		tot_len -= m_first->data_len;
+		tot_len += m_last->data_len;
+
+		if (tot_len < tx_pkts->tso_segsz)
+			return true;
+
+		m_first = m_first->next;
+		m_last = m_last->next;
+	}
+
+	return false;
+}
+
+static void
+hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
+{
+	uint64_t ol_flags = m->ol_flags;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_udp_hdr *udp_hdr;
+	uint32_t paylen, hdr_len;
+
+	if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+		return;
+
+	if (ol_flags & PKT_TX_IPV4) {
+		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+						   m->outer_l2_len);
+
+		if (ol_flags & PKT_TX_IP_CKSUM)
+			ipv4_hdr->hdr_checksum = 0;
+	}
+
+	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
+	    ol_flags & PKT_TX_TCP_SEG) {
+		hdr_len = m->l2_len + m->l3_len + m->l4_len;
+		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
+				m->outer_l2_len + m->outer_l3_len : 0;
+		paylen = m->pkt_len - hdr_len;
+		if (paylen <= m->tso_segsz)
+			return;
+		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+						  m->outer_l2_len +
+						  m->outer_l3_len);
+		udp_hdr->dgram_cksum = 0;
+	}
+}
+
+static inline bool
+hns3_pkt_is_tso(struct rte_mbuf *m)
+{
+	return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
+}
+
+static int
+hns3_check_tso_pkt_valid(struct rte_mbuf *m)
+{
+	uint32_t tmp_data_len_sum = 0;
+	uint16_t nb_buf = m->nb_segs;
+	uint32_t paylen, hdr_len;
+	struct rte_mbuf *m_seg;
+	int i;
+
+	if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
+		return -EINVAL;
+
+	hdr_len = m->l2_len + m->l3_len + m->l4_len;
+	hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+			m->outer_l2_len + m->outer_l3_len : 0;
+	if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
+		return -EINVAL;
+
+	paylen = m->pkt_len - hdr_len;
+	if (paylen > HNS3_MAX_BD_PAYLEN)
+		return -EINVAL;
+
+	/*
+	 * The TSO header (including the outer and inner L2, L3 and L4
+	 * headers) must be carried in at most three descriptors in the
+	 * hns3 network engine.
+	 */
+	m_seg = m;
+	for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
+	     i++, m_seg = m_seg->next) {
+		tmp_data_len_sum += m_seg->data_len;
+	}
+
+	if (hdr_len > tmp_data_len_sum)
+		return -EINVAL;
+
+	return 0;
+}
+
 uint16_t
 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	       uint16_t nb_pkts)
@@ -2058,6 +2261,13 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			return i;
 		}
 
+		if (hns3_pkt_is_tso(m) &&
+		    (hns3_pkt_need_linearized(m, m->nb_segs) ||
+		     hns3_check_tso_pkt_valid(m))) {
+			rte_errno = EINVAL;
+			return i;
+		}
+
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
@@ -2070,6 +2280,8 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			rte_errno = -ret;
 			return i;
 		}
+
+		hns3_outer_header_cksum_prepare(m);
 	}
 
 	return i;
@@ -2093,13 +2305,39 @@ hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
 	return 0;
 }
 
+static int
+hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
+		      struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
+{
+	struct rte_mbuf *new_pkt;
+	int ret;
+
+	if (hns3_pkt_is_tso(*m_seg))
+		return 0;
+
+	/*
+	 * If the packet length is greater than HNS3_MAX_FRAME_LEN, the
+	 * maximum the driver supports, the packet is ignored.
+	 */
+	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
+		return -EINVAL;
+
+	if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+		ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
+		if (ret)
+			return ret;
+		*m_seg = new_pkt;
+	}
+
+	return 0;
+}
+
 uint16_t
 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct rte_net_hdr_lens hdr_lens = {0};
 	struct hns3_tx_queue *txq = tx_queue;
 	struct hns3_entry *tx_bak_pkt;
-	struct rte_mbuf *new_pkt;
 	struct rte_mbuf *tx_pkt;
 	struct rte_mbuf *m_seg;
 	uint32_t nb_hold = 0;
@@ -2131,13 +2369,6 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			goto end_of_tx;
 		}
 
-		/*
-		 * If packet length is greater than HNS3_MAX_FRAME_LEN
-		 * driver support, the packet will be ignored.
-		 */
-		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
-			break;
-
 		/*
 		 * If packet length is less than minimum packet size, driver
 		 * need to pad it.
@@ -2156,12 +2387,9 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		m_seg = tx_pkt;
-		if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
-			if (hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt))
-				goto end_of_tx;
-			m_seg = new_pkt;
-			nb_buf = m_seg->nb_segs;
-		}
+
+		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
+			goto end_of_tx;
 
 		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
 			goto end_of_tx;
-- 
2.23.0



* [dpdk-dev] [PATCH 2/5] net/hns3: fix wrong Tx interrupt when enabling Rx interrupt
From: Wei Hu (Xavier) @ 2020-03-07  8:57 UTC
  To: dev

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

Currently, when receiving and transmitting packets on the hns3 network
engine with Rx interrupts enabled, unexpected and redundant Tx interrupts
may occur.

The root cause is as follows: in the hns3 network engine, Tx and Rx queues
with the same number share an interrupt vector, and residual hardware
mappings between queues and interrupt vectors, configured by the hns3
kernel ethdev driver, may still be present.

We should clear all hardware queue-to-vector mappings during
initialization. Because of hardware constraints, the only way to clear a
mapping is to bind the queue to the last interrupt vector and reserve that
vector. This method reduces the maximum number of queues available when
upper-layer applications call the rte_eth_dev_configure API function to
enable Rx interrupts.
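
For context (not part of this patch), Rx interrupt mode is enabled through
the standard ethdev API; a minimal sketch of the rte_eth_dev_configure
path mentioned above, with error handling omitted:

	struct rte_eth_conf conf = {0};

	conf.intr_conf.rxq = 1;		/* request per-queue Rx interrupts */
	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	/* ... queue setup and rte_eth_dev_start() ... */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);	/* arm one Rx queue */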

Fixes: 02a7b55657b2 ("net/hns3: support Rx interrupt")

Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h       |  31 +++--
 drivers/net/hns3/hns3_ethdev.c    | 117 ++++++++++++++++---
 drivers/net/hns3/hns3_ethdev.h    |   1 +
 drivers/net/hns3/hns3_ethdev_vf.c | 180 ++++++++++++++++++++++++------
 drivers/net/hns3/hns3_regs.h      |  13 ++-
 drivers/net/hns3/hns3_rxtx.c      |  48 ++++++--
 drivers/net/hns3/hns3_rxtx.h      |   5 +-
 7 files changed, 326 insertions(+), 69 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 897dc1420..26d410396 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -79,6 +79,7 @@ enum hns3_opcode_type {
 	HNS3_OPC_GBL_RST_STATUS         = 0x0021,
 	HNS3_OPC_QUERY_FUNC_STATUS      = 0x0022,
 	HNS3_OPC_QUERY_PF_RSRC          = 0x0023,
+	HNS3_OPC_QUERY_VF_RSRC          = 0x0024,
 	HNS3_OPC_GET_CFG_PARAM          = 0x0025,
 	HNS3_OPC_PF_RST_DONE            = 0x0026,
 
@@ -337,8 +338,9 @@ struct hns3_func_status_cmd {
 	uint8_t rsv[2];
 };
 
-#define HNS3_PF_VEC_NUM_S		0
-#define HNS3_PF_VEC_NUM_M		GENMASK(7, 0)
+#define HNS3_VEC_NUM_S		0
+#define HNS3_VEC_NUM_M		GENMASK(7, 0)
+#define HNS3_MIN_VECTOR_NUM	2 /* one for msi-x, another for IO */
 struct hns3_pf_res_cmd {
 	uint16_t tqp_num;
 	uint16_t buf_size;
@@ -351,6 +353,15 @@ struct hns3_pf_res_cmd {
 	uint32_t rsv[2];
 };
 
+struct hns3_vf_res_cmd {
+	uint16_t tqp_num;
+	uint16_t reserved;
+	uint16_t msixcap_localid_ba_nic;
+	uint16_t msixcap_localid_ba_rocee;
+	uint16_t vf_intr_vector_number;
+	uint16_t rsv[7];
+};
+
 #define HNS3_UMV_SPC_ALC_B	0
 struct hns3_umv_spc_alc_cmd {
 	uint8_t allocate;
@@ -677,13 +688,19 @@ struct hns3_tqp_map_cmd {
 	uint8_t rsv[18];
 };
 
-#define HNS3_RING_TYPE_B	0
-#define HNS3_RING_TYPE_TX	0
-#define HNS3_RING_TYPE_RX	1
+enum hns3_ring_type {
+	HNS3_RING_TYPE_TX,
+	HNS3_RING_TYPE_RX
+};
+
+enum hns3_int_gl_idx {
+	HNS3_RING_GL_RX,
+	HNS3_RING_GL_TX,
+	HNS3_RING_GL_IMMEDIATE = 3
+};
+
 #define HNS3_RING_GL_IDX_S	0
 #define HNS3_RING_GL_IDX_M	GENMASK(1, 0)
-#define HNS3_RING_GL_RX		0
-#define HNS3_RING_GL_TX		1
 
 #define HNS3_VECTOR_ELEMENTS_PER_CMD	10
 
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index d4751d478..f6495623e 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2023,34 +2023,98 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
 }
 
 static int
-hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
-			   bool mmap, uint16_t queue_id)
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+			   enum hns3_ring_type queue_type, uint16_t queue_id)
 {
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_cmd_desc desc;
 	struct hns3_ctrl_vector_chain_cmd *req =
 		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
 	enum hns3_cmd_status status;
 	enum hns3_opcode_type op;
 	uint16_t tqp_type_and_id = 0;
+	const char *op_str;
+	uint16_t type;
+	uint16_t gl;
 
 	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
 	hns3_cmd_setup_basic_desc(&desc, op, false);
 	req->int_vector_id = vector_id;
 
+	if (queue_type == HNS3_RING_TYPE_RX)
+		gl = HNS3_RING_GL_RX;
+	else
+		gl = HNS3_RING_GL_TX;
+
+	type = queue_type;
+
 	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
-		       HNS3_RING_TYPE_RX);
+		       type);
 	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
 	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
-		       HNS3_RING_GL_RX);
+		       gl);
 	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
-
 	req->int_cause_num = 1;
+	op_str = mmap ? "Map" : "Unmap";
 	status = hns3_cmd_send(hw, &desc, 1);
 	if (status) {
-		hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
-			 queue_id, vector_id, status);
-		return -EIO;
+		hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+			 op_str, queue_id, req->int_vector_id, status);
+		return status;
+	}
+
+	return 0;
+}
+
+static int
+hns3_init_ring_with_vector(struct hns3_hw *hw)
+{
+	uint8_t vec;
+	int ret;
+	int i;
+
+	/*
+	 * In the hns3 network engine, vector 0 is always the misc interrupt
+	 * of this function, and vectors 1~N can be used, respectively, for
+	 * the queues of the function. Tx and Rx queues with the same number
+	 * share an interrupt vector. During initialization, all hardware
+	 * mapping relationships between queues and interrupt vectors must
+	 * be cleared, so that errors caused by residual configurations,
+	 * such as unexpected Tx interrupts, can be avoided. Because of the
+	 * hardware constraints in the hns3 network engine, we have to clear
+	 * the mapping relationships by binding all queues to the last
+	 * interrupt vector and reserving that vector. This method reduces
+	 * the maximum number of queues available when upper-layer
+	 * applications call the rte_eth_dev_configure API function to
+	 * enable Rx interrupts.
+	 */
+	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	for (i = 0; i < hw->intr_tqps_num; i++) {
+		/*
+		 * Set gap limiter and rate limiter configuration of queue's
+		 * interrupt.
+		 */
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_TX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_RX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -2227,8 +2291,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t queue_num = hw->tqps_num;
 
-	info->max_rx_queues = hw->tqps_num;
+	/*
+	 * In interrupt mode, 'max_rx_queues' is set based on the number of
+	 * MSI-X interrupt resources of the hardware.
+	 */
+	if (hw->data->dev_conf.intr_conf.rxq == 1)
+		queue_num = hw->intr_tqps_num;
+
+	info->max_rx_queues = queue_num;
 	info->max_tx_queues = hw->tqps_num;
 	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
 	info->min_rx_bufsize = hw->rx_buf_len;
@@ -2397,6 +2469,7 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	struct hns3_pf *pf = &hns->pf;
 	struct hns3_pf_res_cmd *req;
 	struct hns3_cmd_desc desc;
+	uint16_t num_msi;
 	int ret;
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
@@ -2428,9 +2501,9 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 
 	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
 
-	hw->num_msi =
-	    hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
-			   HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
+	num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
+				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
 
 	return 0;
 }
@@ -4025,6 +4098,16 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_fdir;
 	}
 
+	/*
+	 * During initialization, all hardware mapping relationships between
+	 * queues and interrupt vectors must be cleared, so that errors
+	 * caused by residual configurations, such as unexpected interrupts,
+	 * can be avoided.
+	 */
+	ret = hns3_init_ring_with_vector(hw);
+	if (ret)
+		goto err_fdir;
+
 	return 0;
 
 err_fdir:
@@ -4147,7 +4230,9 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+			ret = hns3_bind_ring_with_vector(hw, vec, true,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (ret)
 				goto bind_vector_error;
 			intr_handle->intr_vec[q_id] = vec;
@@ -4247,7 +4332,9 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+			(void)hns3_bind_ring_with_vector(hw, vec, false,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
 		}
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index c7625119a..28484188a 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -357,6 +357,7 @@ struct hns3_hw {
 	uint16_t num_msi;
 	uint16_t total_tqps_num;    /* total task queue pairs of this PF */
 	uint16_t tqps_num;          /* num task queue pairs of this function */
+	uint16_t intr_tqps_num;     /* num queue pairs mapping interrupt */
 	uint16_t rss_size_max;      /* HW defined max RSS task queue */
 	uint16_t rx_buf_len;
 	uint16_t num_tx_desc;       /* desc num of per tx queue */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 7b776ad13..f4e05e1c5 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -429,6 +429,96 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
 	return ret;
 }
 
+static int
+hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
+			     bool mmap, enum hns3_ring_type queue_type,
+			     uint16_t queue_id)
+{
+	struct hns3_vf_bind_vector_msg bind_msg;
+	const char *op_str;
+	uint16_t code;
+	int ret;
+
+	memset(&bind_msg, 0, sizeof(bind_msg));
+	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+		HNS3_MBX_UNMAP_RING_TO_VECTOR;
+	bind_msg.vector_id = vector_id;
+
+	if (queue_type == HNS3_RING_TYPE_RX)
+		bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+	else
+		bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
+
+	bind_msg.param[0].ring_type = queue_type;
+	bind_msg.ring_num = 1;
+	bind_msg.param[0].tqp_index = queue_id;
+	op_str = mmap ? "Map" : "Unmap";
+	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+				sizeof(bind_msg), false, NULL, 0);
+	if (ret) {
+		hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+			 op_str, queue_id, bind_msg.vector_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+hns3vf_init_ring_with_vector(struct hns3_hw *hw)
+{
+	uint8_t vec;
+	int ret;
+	int i;
+
+	/*
+	 * In the hns3 network engine, vector 0 is always the misc interrupt
+	 * of this function, and vectors 1~N can be used, respectively, for
+	 * the queues of the function. Tx and Rx queues with the same number
+	 * share an interrupt vector. During initialization, all hardware
+	 * mapping relationships between queues and interrupt vectors must
+	 * be cleared, so that errors caused by residual configurations,
+	 * such as unexpected Tx interrupts, can be avoided. Because of the
+	 * hardware constraints in the hns3 network engine, we have to clear
+	 * the mapping relationships by binding all queues to the last
+	 * interrupt vector and reserving that vector. This method reduces
+	 * the maximum number of queues available when upper-layer
+	 * applications call the rte_eth_dev_configure API function to
+	 * enable Rx interrupts.
+	 */
+	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	for (i = 0; i < hw->intr_tqps_num; i++) {
+		/*
+		 * Set gap limiter and rate limiter configuration of queue's
+		 * interrupt.
+		 */
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+						   HNS3_RING_TYPE_TX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+
+		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+						   HNS3_RING_TYPE_RX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int
 hns3vf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -563,8 +653,16 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t q_num = hw->tqps_num;
+
+	/*
+	 * In interrupt mode, 'max_rx_queues' is set based on the number of
+	 * MSI-X interrupt resources of the hardware.
+	 */
+	if (hw->data->dev_conf.intr_conf.rxq == 1)
+		q_num = hw->intr_tqps_num;
 
-	info->max_rx_queues = hw->tqps_num;
+	info->max_rx_queues = q_num;
 	info->max_tx_queues = hw->tqps_num;
 	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
 	info->min_rx_bufsize = hw->rx_buf_len;
@@ -1114,6 +1212,35 @@ hns3vf_service_handler(void *param)
 			  eth_dev);
 }
 
+static int
+hns3_query_vf_resource(struct hns3_hw *hw)
+{
+	struct hns3_vf_res_cmd *req;
+	struct hns3_cmd_desc desc;
+	uint16_t num_msi;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "query vf resource failed, ret = %d", ret);
+		return ret;
+	}
+
+	req = (struct hns3_vf_res_cmd *)desc.data;
+	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
+				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+	if (num_msi < HNS3_MIN_VECTOR_NUM) {
+		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
+			 num_msi, HNS3_MIN_VECTOR_NUM);
+		return -EINVAL;
+	}
+
+	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+
+	return 0;
+}
+
 static int
 hns3vf_init_hardware(struct hns3_adapter *hns)
 {
@@ -1190,6 +1317,11 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 		goto err_cmd_init;
 	}
 
+	/* Get VF resource */
+	ret = hns3_query_vf_resource(hw);
+	if (ret)
+		goto err_cmd_init;
+
 	rte_spinlock_init(&hw->mbx_resp.lock);
 
 	hns3vf_clear_event_cause(hw, 0);
@@ -1239,6 +1371,16 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 
 	hns3_set_default_rss_args(hw);
 
+	/*
+	 * During initialization, all hardware mapping relationships between
+	 * queues and interrupt vectors must be cleared, so that errors
+	 * caused by residual configurations, such as unexpected interrupts,
+	 * can be avoided.
+	 */
+	ret = hns3vf_init_ring_with_vector(hw);
+	if (ret)
+		goto err_get_config;
+
 	return 0;
 
 err_get_config:
@@ -1279,36 +1421,6 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
 	hw->io_base = NULL;
 }
 
-static int
-hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
-			     bool mmap, uint16_t queue_id)
-
-{
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct hns3_vf_bind_vector_msg bind_msg;
-	uint16_t code;
-	int ret;
-
-	memset(&bind_msg, 0, sizeof(bind_msg));
-	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
-		HNS3_MBX_UNMAP_RING_TO_VECTOR;
-	bind_msg.vector_id = vector_id;
-	bind_msg.ring_num = 1;
-	bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
-	bind_msg.param[0].tqp_index = queue_id;
-	bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
-
-	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
-				sizeof(bind_msg), false, NULL, 0);
-	if (ret) {
-		hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
-			 queue_id, vector_id, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
 static int
 hns3vf_do_stop(struct hns3_adapter *hns)
 {
@@ -1345,7 +1457,8 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
+			(void)hns3vf_bind_ring_with_vector(hw, vec, false,
+							   HNS3_RING_TYPE_RX,
 							   q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
@@ -1511,7 +1624,8 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3vf_bind_ring_with_vector(dev, vec, true,
+			ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+							   HNS3_RING_TYPE_RX,
 							   q_id);
 			if (ret)
 				goto vf_bind_vector_error;
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 42581df67..64bd6931b 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -83,9 +83,6 @@
 
 #define HNS3_RING_EN_B				0
 
-#define HNS3_VECTOR_REG_OFFSET			0x4
-#define HNS3_VECTOR_VF_OFFSET			0x100000
-
 #define HNS3_TQP_REG_OFFSET			0x80000
 #define HNS3_TQP_REG_SIZE			0x200
 
@@ -97,6 +94,16 @@
 #define HNS3_TQP_INTR_RL_REG			0x20900
 
 #define HNS3_TQP_INTR_REG_SIZE			4
+#define HNS3_TQP_INTR_GL_MAX			0x1FE0
+#define HNS3_TQP_INTR_GL_DEFAULT		20
+#define HNS3_TQP_INTR_RL_MAX			0xEC
+#define HNS3_TQP_INTR_RL_ENABLE_MASK		0x40
+#define HNS3_TQP_INTR_RL_DEFAULT		0
+
+/* gl_usec convert to hardware count, as writing each 1 represents 2us */
+#define HNS3_GL_USEC_TO_REG(gl_usec)		((gl_usec) >> 1)
+/* rl_usec convert to hardware count, as writing each 1 represents 4us */
+#define HNS3_RL_USEC_TO_REG(rl_usec)		((rl_usec) >> 2)
 
 int hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs);
 #endif /* _HNS3_REGS_H_ */
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index aaccf7ef2..ec6d19f58 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -499,11 +499,45 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
 }
 
 void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+		       uint8_t gl_idx, uint16_t gl_value)
 {
+	uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+			     HNS3_TQP_INTR_GL1_REG,
+			     HNS3_TQP_INTR_GL2_REG};
 	uint32_t addr, value;
 
-	addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+	if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+		return;
+
+	addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	value = HNS3_GL_USEC_TO_REG(gl_value);
+
+	hns3_write_dev(hw, addr, value);
+}
+
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+	uint32_t addr, value;
+
+	if (rl_value > HNS3_TQP_INTR_RL_MAX)
+		return;
+
+	addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	value = HNS3_RL_USEC_TO_REG(rl_value);
+	if (value > 0)
+		value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+	hns3_write_dev(hw, addr, value);
+}
+
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
+{
+	uint32_t addr, value;
+
+	addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
 	value = en ? 1 : 0;
 
 	hns3_write_dev(hw, addr, value);
@@ -519,8 +553,7 @@ hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	if (dev->data->dev_conf.intr_conf.rxq == 0)
 		return -ENOTSUP;
 
-	/* enable the vectors */
-	hns3_tqp_intr_enable(hw, queue_id, true);
+	hns3_queue_intr_enable(hw, queue_id, true);
 
 	return rte_intr_ack(intr_handle);
 }
@@ -533,8 +566,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 	if (dev->data->dev_conf.intr_conf.rxq == 0)
 		return -ENOTSUP;
 
-	/* disable the vectors */
-	hns3_tqp_intr_enable(hw, queue_id, false);
+	hns3_queue_intr_enable(hw, queue_id, false);
 
 	return 0;
 }
@@ -940,7 +972,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
 	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
 		/* first time configuration */
-
 		uint32_t size;
 		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
 		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
@@ -951,7 +982,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		}
 	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
 		/* re-configure */
-
 		rxq = hw->fkq_data.rx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
 			hns3_dev_rx_queue_release(rxq[i]);
@@ -989,7 +1019,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
 	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
 		/* first time configuration */
-
 		uint32_t size;
 		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
 		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
@@ -1000,7 +1029,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		}
 	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
 		/* re-configure */
-
 		txq = hw->fkq_data.tx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
 			hns3_dev_tx_queue_release(txq[i]);
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 1c2723ffb..5ba6ee915 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -320,7 +320,10 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
-void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
+void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+			    uint8_t gl_idx, uint16_t gl_value);
+void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
+			    uint16_t rl_value);
 int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
 				  uint16_t nb_tx_q);
 
-- 
2.23.0



* [dpdk-dev] [PATCH 3/5] net/hns3: remove unnecessary restriction on setting VF's MTU
From: Wei Hu (Xavier) @ 2020-03-07  8:57 UTC
  To: dev

From: Chengchang Tang <tangchengchang@huawei.com>

The hns3 PF/VF devices on the same port share the hardware MTU
configuration. Currently, the hns3 VF PMD driver sends a mailbox message
asking the hns3 PF kernel ethdev driver to complete the hardware MTU
configuration, so there is no need to stop the port for the hns3 VF
device; the MTU value issued by the hns3 VF PMD driver must simply be
less than or equal to the PF's MTU.
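
As a usage illustration (not part of this patch), with this change an
application can adjust the VF MTU without stopping the port; the port id
and MTU value below are example assumptions:

	/* Hedged sketch: update the MTU of a running hns3 VF port. */
	int ret = rte_eth_dev_set_mtu(port_id, 3000);	/* must be <= PF's MTU */
	if (ret != 0)
		printf("Failed to set MTU: %d\n", ret);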

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_ethdev_vf.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index f4e05e1c5..6b787133f 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -619,12 +619,14 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
 	int ret;
 
-	if (dev->data->dev_started) {
-		hns3_err(hw, "Failed to set mtu, port %u must be stopped "
-			 "before configuration", dev->data->port_id);
-		return -EBUSY;
-	}
-
+	/*
+	 * The hns3 PF/VF devices on the same port share the hardware MTU
+	 * configuration. Currently, the hns3 VF PMD driver sends a mailbox
+	 * message asking the hns3 PF kernel ethdev driver to complete the
+	 * hardware MTU configuration, so there is no need to stop the port
+	 * for the hns3 VF device; the MTU value issued by the hns3 VF PMD
+	 * driver must be less than or equal to the PF's MTU.
+	 */
 	if (rte_atomic16_read(&hw->reset.resetting)) {
 		hns3_err(hw, "Failed to set mtu during resetting");
 		return -EIO;
-- 
2.23.0



* [dpdk-dev] [PATCH 4/5] net/hns3: support promiscuous and allmulticast mode for VF
From: Wei Hu (Xavier) @ 2020-03-07  8:57 UTC
  To: dev

From: Chengchang Tang <tangchengchang@huawei.com>

Currently, a VF device is supported only when it is bound to vfio-pci or
igb_uio and driven by the DPDK driver while the PF is driven by the
kernel-mode hns3 ethdev driver; a VF is not supported when the PF is
driven by the hns3 DPDK driver.

This patch adds promiscuous and allmulticast mode support to the hns3 VF
PMD driver (see the sketch after this list).
1) The promiscuous/allmulticast mode can be configured successfully only
   on a trusted VF device; on a non-trusted VF device the configuration
   will fail. The hns3 VF device can be configured as a trusted device by
   the hns3 PF kernel ethdev driver on the host with the
   "ip link set <eth num> vf <vf id> trust on" command.
2) After promiscuous mode is configured successfully, the hns3 VF PMD
   driver receives both ingress and outgoing traffic; in other words, all
   ingress packets plus all packets sent from the PF and the other VFs on
   the same physical port.
3) Note: because of hardware constraints, the VLAN filter is enabled by
   default and cannot be turned off on a VF device, so the VLAN filter is
   still effective even in promiscuous mode. If upper-layer applications
   do not call the rte_eth_dev_vlan_filter API function to set a VLAN on
   the VF device, the hns3 VF PMD driver cannot receive VLAN-tagged
   packets in promiscuous mode.
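
As sketched here (not part of this patch), once the VF has been marked
trusted on the host, the new callbacks are reached through the standard
ethdev API; port_id is an example assumption:

	/* Hedged sketch: toggle promiscuous/allmulticast on the VF port. */
	if (rte_eth_promiscuous_enable(port_id) != 0)	/* uc + mc + bc */
		printf("promiscuous enable failed\n");
	if (rte_eth_allmulticast_enable(port_id) != 0)	/* mc + bc */
		printf("allmulticast enable failed\n");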

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_ethdev_vf.c | 117 ++++++++++++++++++++++++++++--
 drivers/net/hns3/hns3_mbx.c       |  23 ++++++
 drivers/net/hns3/hns3_mbx.h       |   2 +
 3 files changed, 137 insertions(+), 5 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 6b787133f..28755bf4f 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -410,7 +410,8 @@ hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
 }
 
 static int
-hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
+hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
+			bool en_uc_pmc, bool en_mc_pmc)
 {
 	struct hns3_mbx_vf_to_pf_cmd *req;
 	struct hns3_cmd_desc desc;
@@ -418,17 +419,115 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
 
 	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
 
+	/*
+	 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
+	 * which leads to the following behavior for promiscuous/allmulticast
+	 * mode in the hns3 VF PMD driver:
+	 * 1. The promiscuous/allmulticast mode can be configured successfully
+	 *    only on a trusted VF device; on a non-trusted VF device the
+	 *    configuration will fail. The hns3 VF device can be configured
+	 *    as a trusted device by the hns3 PF kernel ethdev driver on the
+	 *    host with the following command:
+	 *      "ip link set <eth num> vf <vf id> trust on"
+	 * 2. After promiscuous mode is configured successfully, the hns3 VF
+	 *    PMD driver receives both ingress and outgoing traffic; in other
+	 *    words, all ingress packets plus all packets sent from the PF
+	 *    and the other VFs on the same physical port.
+	 * 3. Note: because of hardware constraints, the vlan filter is
+	 *    enabled by default and cannot be turned off on a VF device, so
+	 *    the vlan filter is still effective even in promiscuous mode. If
+	 *    upper applications don't call the rte_eth_dev_vlan_filter API
+	 *    function to set a vlan on the VF device, the hns3 VF PMD driver
+	 *    cannot receive vlan-tagged packets in promiscuous mode.
+	 */
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
 	req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
 	req->msg[1] = en_bc_pmc ? 1 : 0;
+	req->msg[2] = en_uc_pmc ? 1 : 0;
+	req->msg[3] = en_mc_pmc ? 1 : 0;
 
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret)
-		hns3_err(hw, "Set promisc mode fail, status is %d", ret);
+		hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
+
+	return ret;
+}
+
+static int
+hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3vf_set_promisc_mode(hw, true, true, true);
+	if (ret)
+		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
+			ret);
+	return ret;
+}
+
+static int
+hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	bool allmulti = dev->data->all_multicast ? true : false;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
+	if (ret)
+		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
+			ret);
+	return ret;
+}
+
+static int
+hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	if (dev->data->promiscuous)
+		return 0;
+
+	ret = hns3vf_set_promisc_mode(hw, true, false, true);
+	if (ret)
+		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
+			ret);
+	return ret;
+}
+
+static int
+hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	if (dev->data->promiscuous)
+		return 0;
 
+	ret = hns3vf_set_promisc_mode(hw, true, false, false);
+	if (ret)
+		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
+			ret);
 	return ret;
 }
 
+static int
+hns3vf_restore_promisc(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	bool allmulti = hw->data->all_multicast ? true : false;
+
+	if (hw->data->promiscuous)
+		return hns3vf_set_promisc_mode(hw, true, true, true);
+
+	return hns3vf_set_promisc_mode(hw, true, false, allmulti);
+}
+
 static int
 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
 			     bool mmap, enum hns3_ring_type queue_type,
@@ -1250,7 +1349,7 @@ hns3vf_init_hardware(struct hns3_adapter *hns)
 	uint16_t mtu = hw->data->mtu;
 	int ret;
 
-	ret = hns3vf_set_promisc_mode(hw, true);
+	ret = hns3vf_set_promisc_mode(hw, true, false, false);
 	if (ret)
 		return ret;
 
@@ -1280,7 +1379,7 @@ hns3vf_init_hardware(struct hns3_adapter *hns)
 	return 0;
 
 err_init_hardware:
-	(void)hns3vf_set_promisc_mode(hw, false);
+	(void)hns3vf_set_promisc_mode(hw, false, false, false);
 	return ret;
 }
 
@@ -1413,7 +1512,7 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
 
 	hns3_rss_uninit(hns);
 	(void)hns3vf_set_alive(hw, false);
-	(void)hns3vf_set_promisc_mode(hw, false);
+	(void)hns3vf_set_promisc_mode(hw, false, false, false);
 	hns3vf_disable_irq0(hw);
 	rte_intr_disable(&pci_dev->intr_handle);
 	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
@@ -1896,6 +1995,10 @@ hns3vf_restore_conf(struct hns3_adapter *hns)
 	if (ret)
 		goto err_mc_mac;
 
+	ret = hns3vf_restore_promisc(hns);
+	if (ret)
+		goto err_vlan_table;
+
 	ret = hns3vf_restore_vlan_conf(hns);
 	if (ret)
 		goto err_vlan_table;
@@ -2048,6 +2151,10 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
 	.dev_stop           = hns3vf_dev_stop,
 	.dev_close          = hns3vf_dev_close,
 	.mtu_set            = hns3vf_dev_mtu_set,
+	.promiscuous_enable = hns3vf_dev_promiscuous_enable,
+	.promiscuous_disable = hns3vf_dev_promiscuous_disable,
+	.allmulticast_enable = hns3vf_dev_allmulticast_enable,
+	.allmulticast_disable = hns3vf_dev_allmulticast_disable,
 	.stats_get          = hns3_stats_get,
 	.stats_reset        = hns3_stats_reset,
 	.xstats_get         = hns3_dev_xstats_get,
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 0d03f5064..b03a3d6a1 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -324,6 +324,21 @@ hns3_handle_link_change_event(struct hns3_hw *hw,
 	hns3_update_link_status(hw);
 }
 
+static void
+hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
+{
+	if (!promisc_en) {
+		/*
+		/*
+		 * When promisc/allmulti mode is disabled by the hns3 PF kernel
+		 * ethdev driver for an untrusted VF, update the VF's status.
+		 */
+			      "untrusted.");
+		hw->data->promiscuous = 0;
+		hw->data->all_multicast = 0;
+	}
+}
+
 void
 hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 {
@@ -380,6 +395,14 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 		case HNS3_MBX_PUSH_LINK_STATUS:
 			hns3_handle_link_change_event(hw, req);
 			break;
+		case HNS3_MBX_PUSH_PROMISC_INFO:
+			/*
+			 * When the trust status of the VF device is changed
+			 * by the hns3 PF kernel driver, the VF driver will
+			 * receive this mailbox message from the PF driver.
+			 */
+			hns3_handle_promisc_info(hw, req->msg[1]);
+			break;
 		default:
 			hns3_err(hw,
 				 "VF received unsupported(%d) mbx msg from PF",
diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h
index b01eaacc3..d6d70f686 100644
--- a/drivers/net/hns3/hns3_mbx.h
+++ b/drivers/net/hns3/hns3_mbx.h
@@ -40,6 +40,8 @@ enum HNS3_MBX_OPCODE {
 	HNS3_MBX_SET_MTU,               /* (VF -> PF) set mtu */
 	HNS3_MBX_GET_QID_IN_PF,         /* (VF -> PF) get queue id in pf */
 
+	HNS3_MBX_PUSH_PROMISC_INFO = 36, /* (PF -> VF) push vf promisc info */
+
 	HNS3_MBX_HANDLE_VF_TBL = 38,    /* (VF -> PF) store/clear hw cfg tbl */
 	HNS3_MBX_GET_RING_VECTOR_MAP,   /* (VF -> PF) get ring-to-vector map */
 	HNS3_MBX_PUSH_LINK_STATUS = 201, /* (IMP -> PF) get port link status */
-- 
2.23.0



* [dpdk-dev] [PATCH 5/5] net/hns3: fix promiscuous mode for PF
From: Wei Hu (Xavier) @ 2020-03-07  8:57 UTC
  To: dev

From: Chengchang Tang <tangchengchang@huawei.com>

Currently, when promiscuous mode is enabled, the hns3 PF PMD driver only
accepts unicast and broadcast packets. It should also be able to receive
multicast packets.
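
For reference (summarized from the diff below, not an authoritative
specification), the resulting mode selection is as follows, where the
second and third arguments of hns3_set_promisc_mode() are the unicast and
multicast promiscuous flags, and both allmulticast callbacks are no-ops
while promiscuous mode is on:

	promiscuous enable   -> hns3_set_promisc_mode(hw, true, true)
	promiscuous disable  -> hns3_set_promisc_mode(hw, false, all_multicast)
	allmulticast enable  -> hns3_set_promisc_mode(hw, false, true)
	allmulticast disable -> hns3_set_promisc_mode(hw, false, false)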

Fixes: 19a3ca4c99cf ("net/hns3: add start/stop and configure operations")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c | 41 ++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index f6495623e..58aaed47a 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -3649,7 +3649,7 @@ hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
 
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret)
-		PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
+		PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
 
 	return ret;
 }
@@ -3702,14 +3702,14 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
 	int ret;
 
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
+	ret = hns3_set_promisc_mode(hw, true, true);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
+		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3717,17 +3717,18 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 static int
 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
+	bool allmulti = dev->data->all_multicast ? true : false;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
 	int ret;
 
 	/* If now in all_multicast mode, must remain in all_multicast mode. */
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
+	ret = hns3_set_promisc_mode(hw, false, allmulti);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
+		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3737,14 +3738,17 @@ hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
 	int ret;
 
+	if (dev->data->promiscuous)
+		return 0;
+
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
+	ret = hns3_set_promisc_mode(hw, false, true);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
+		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3754,18 +3758,18 @@ hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
 	int ret;
 
 	/* If now in promiscuous mode, must remain in all_multicast mode. */
-	if (dev->data->promiscuous == 1)
+	if (dev->data->promiscuous)
 		return 0;
 
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
+	ret = hns3_set_promisc_mode(hw, false, false);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
+		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3774,13 +3778,12 @@ static int
 hns3_dev_promisc_restore(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	bool en_mc_pmc;
-	bool en_uc_pmc;
+	bool allmulti = hw->data->all_multicast ? true : false;
 
-	en_uc_pmc = (hw->data->promiscuous == 1) ? true : false;
-	en_mc_pmc = (hw->data->all_multicast == 1) ? true : false;
+	if (hw->data->promiscuous)
+		return hns3_set_promisc_mode(hw, true, true);
 
-	return hns3_set_promisc_mode(hw, en_uc_pmc, en_mc_pmc);
+	return hns3_set_promisc_mode(hw, false, allmulti);
 }
 
 static int
-- 
2.23.0


