patches for DPDK stable branches
* [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver
From: Wei Hu (Xavier) @ 2020-05-29  3:57 UTC
  To: luca.boccassi; +Cc: stable, xavier.huwei

This series backports patches to DPDK 19.11.3 for the hns3 PMD driver.

Chengwen Feng (1):
  net/hns3: fix Rx interrupt after reset

Hao Chen (1):
  net/hns3: support Rx interrupt

Lijun Ou (1):
  net/hns3: fix RSS indirection table configuration

Wei Hu (Xavier) (3):
  net/hns3: support different numbers of Rx and Tx queues
  net/hns3: fix Tx interrupt when enabling Rx interrupt
  net/hns3: fix MSI-X interrupt during initialization

 doc/guides/nics/features/hns3.ini    |   1 +
 doc/guides/nics/features/hns3_vf.ini |   1 +
 doc/guides/nics/hns3.rst             |   1 +
 drivers/net/hns3/hns3_cmd.h          |  49 ++-
 drivers/net/hns3/hns3_dcb.c          | 103 +++--
 drivers/net/hns3/hns3_dcb.h          |   4 +-
 drivers/net/hns3/hns3_ethdev.c       | 363 +++++++++++++++--
 drivers/net/hns3/hns3_ethdev.h       |  17 +-
 drivers/net/hns3/hns3_ethdev_vf.c    | 398 ++++++++++++++++--
 drivers/net/hns3/hns3_flow.c         |  22 +-
 drivers/net/hns3/hns3_intr.c         |   2 +
 drivers/net/hns3/hns3_mbx.h          |  13 +
 drivers/net/hns3/hns3_regs.h         |  10 +
 drivers/net/hns3/hns3_rss.c          |  28 +-
 drivers/net/hns3/hns3_rss.h          |   2 +
 drivers/net/hns3/hns3_rxtx.c         | 769 ++++++++++++++++++++++++++++++-----
 drivers/net/hns3/hns3_rxtx.h         |  19 +
 17 files changed, 1578 insertions(+), 224 deletions(-)

-- 
2.7.4


* [dpdk-stable] [PATCH 19.11 1/6] net/hns3: support Rx interrupt
From: Wei Hu (Xavier) @ 2020-05-29  3:57 UTC
  To: luca.boccassi; +Cc: stable, xavier.huwei

From: Hao Chen <chenhao164@huawei.com>

[ upstream commit 02a7b55657b232c79443cb5a7be18d7847b49fd2 ]

This patch adds support for receiving packets in interrupt mode in the
hns3 PF/VF driver by implementing the following ops functions defined
in struct eth_dev_ops:
rx_queue_intr_enable
rx_queue_intr_disable
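
For context, a minimal sketch of how an application would consume this
feature (the function name, burst size and epoll wiring below are
illustrative, not part of this patch): enable intr_conf.rxq at configure
time, then arm the per-queue interrupt before sleeping and disarm it
before polling.

/* Sketch: application-side Rx interrupt usage (illustrative only). */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int poll_with_rx_interrupt(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb, i;

	/* Arm the queue interrupt; the PMD acks the vector internally. */
	if (rte_eth_dev_rx_intr_enable(port_id, queue_id) != 0)
		return -1;

	/* ... block on the queue's event fd, e.g. via rte_epoll_wait() ... */

	/* Disarm before draining the queue with busy polling. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
	do {
		nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		for (i = 0; i < nb; i++)
			rte_pktmbuf_free(pkts[i]); /* app would process here */
	} while (nb > 0);

	return 0;
}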

Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 doc/guides/nics/features/hns3.ini    |   1 +
 doc/guides/nics/features/hns3_vf.ini |   1 +
 doc/guides/nics/hns3.rst             |   1 +
 drivers/net/hns3/hns3_cmd.h          |  28 ++++++
 drivers/net/hns3/hns3_ethdev.c       | 162 +++++++++++++++++++++++++++++++---
 drivers/net/hns3/hns3_ethdev_vf.c    | 165 ++++++++++++++++++++++++++++++++---
 drivers/net/hns3/hns3_mbx.h          |  13 +++
 drivers/net/hns3/hns3_regs.h         |   3 +
 drivers/net/hns3/hns3_rxtx.c         |  41 +++++++++
 drivers/net/hns3/hns3_rxtx.h         |   3 +
 10 files changed, 393 insertions(+), 25 deletions(-)

diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index 6df789e..cd5c08a 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -5,6 +5,7 @@
 ;
 [Features]
 Link status          = Y
+Rx interrupt         = Y
 MTU update           = Y
 Jumbo frame          = Y
 Promiscuous mode     = Y
diff --git a/doc/guides/nics/features/hns3_vf.ini b/doc/guides/nics/features/hns3_vf.ini
index 41497c4..fd00ac3 100644
--- a/doc/guides/nics/features/hns3_vf.ini
+++ b/doc/guides/nics/features/hns3_vf.ini
@@ -5,6 +5,7 @@
 ;
 [Features]
 Link status          = Y
+Rx interrupt         = Y
 MTU update           = Y
 Jumbo frame          = Y
 Unicast MAC filter   = Y
diff --git a/doc/guides/nics/hns3.rst b/doc/guides/nics/hns3.rst
index 505488b..8d19f48 100644
--- a/doc/guides/nics/hns3.rst
+++ b/doc/guides/nics/hns3.rst
@@ -22,6 +22,7 @@ Features of the HNS3 PMD are:
 - Port hardware statistics
 - Jumbo frames
 - Link state information
+- Interrupt mode for RX
 - VLAN stripping
 - NUMA support
 
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index be0ecbe..897dc14 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -209,6 +209,10 @@ enum hns3_opcode_type {
 	/* SFP command */
 	HNS3_OPC_SFP_GET_SPEED          = 0x7104,
 
+	/* Interrupts commands */
+	HNS3_OPC_ADD_RING_TO_VECTOR	= 0x1503,
+	HNS3_OPC_DEL_RING_TO_VECTOR	= 0x1504,
+
 	/* Error INT commands */
 	HNS3_QUERY_MSIX_INT_STS_BD_NUM          = 0x1513,
 	HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT       = 0x1514,
@@ -673,6 +677,30 @@ struct hns3_tqp_map_cmd {
 	uint8_t rsv[18];
 };
 
+#define HNS3_RING_TYPE_B	0
+#define HNS3_RING_TYPE_TX	0
+#define HNS3_RING_TYPE_RX	1
+#define HNS3_RING_GL_IDX_S	0
+#define HNS3_RING_GL_IDX_M	GENMASK(1, 0)
+#define HNS3_RING_GL_RX		0
+#define HNS3_RING_GL_TX		1
+
+#define HNS3_VECTOR_ELEMENTS_PER_CMD	10
+
+#define HNS3_INT_TYPE_S		0
+#define HNS3_INT_TYPE_M		GENMASK(1, 0)
+#define HNS3_TQP_ID_S		2
+#define HNS3_TQP_ID_M		GENMASK(12, 2)
+#define HNS3_INT_GL_IDX_S	13
+#define HNS3_INT_GL_IDX_M	GENMASK(14, 13)
+struct hns3_ctrl_vector_chain_cmd {
+	uint8_t int_vector_id;
+	uint8_t int_cause_num;
+	uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD];
+	uint8_t vfid;
+	uint8_t rsv;
+};
+
 struct hns3_config_max_frm_size_cmd {
 	uint16_t max_frm_size;
 	uint8_t min_frm_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 6eb36d4..965a531 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2085,6 +2085,40 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
 }
 
 static int
+hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+			   bool mmap, uint16_t queue_id)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_cmd_desc desc;
+	struct hns3_ctrl_vector_chain_cmd *req =
+		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
+	enum hns3_cmd_status status;
+	enum hns3_opcode_type op;
+	uint16_t tqp_type_and_id = 0;
+
+	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+	hns3_cmd_setup_basic_desc(&desc, op, false);
+	req->int_vector_id = vector_id;
+
+	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
+		       HNS3_RING_TYPE_RX);
+	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
+	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
+		       HNS3_RING_GL_RX);
+	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
+
+	req->int_cause_num = 1;
+	status = hns3_cmd_send(hw, &desc, 1);
+	if (status) {
+		hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
+			 queue_id, vector_id, status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
 hns3_dev_configure(struct rte_eth_dev *dev)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4173,15 +4207,83 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
 }
 
 static int
-hns3_dev_start(struct rte_eth_dev *eth_dev)
+hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t intr_vector;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
 	int ret;
 
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	/* disable uio/vfio intr/eventfd mapping */
+	rte_intr_disable(intr_handle);
+
+	/* check and configure queue intr-vector mapping */
+	if (rte_intr_cap_multiple(intr_handle) ||
+	    !RTE_ETH_DEV_SRIOV(dev).active) {
+		intr_vector = dev->data->nb_rx_queues;
+		/* creates event fd for each intr vector when MSIX is used */
+		if (rte_intr_efd_enable(intr_handle, intr_vector))
+			return -EINVAL;
+	}
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (intr_handle->intr_vec == NULL) {
+			hns3_err(hw, "Failed to allocate %d rx_queues"
+				     " intr_vec", dev->data->nb_rx_queues);
+			ret = -ENOMEM;
+			goto alloc_intr_vec_error;
+		}
+	}
+
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+			if (ret)
+				goto bind_vector_error;
+			intr_handle->intr_vec[q_id] = vec;
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	rte_intr_enable(intr_handle);
+	return 0;
+
+bind_vector_error:
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+	return ret;
+alloc_intr_vec_error:
+	rte_intr_efd_disable(intr_handle);
+	return ret;
+}
+
+static int
+hns3_dev_start(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
 	PMD_INIT_FUNC_TRACE();
 	if (rte_atomic16_read(&hw->reset.resetting))
 		return -EBUSY;
+
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
 
@@ -4194,9 +4296,12 @@ hns3_dev_start(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
-	hns3_set_rxtx_function(eth_dev);
-	hns3_mp_req_start_rxtx(eth_dev);
-	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
+	ret = hns3_map_rx_interrupt(dev);
+	if (ret)
+		return ret;
+	hns3_set_rxtx_function(dev);
+	hns3_mp_req_start_rxtx(dev);
+	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
 
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
@@ -4224,18 +4329,50 @@ hns3_do_stop(struct hns3_adapter *hns)
 }
 
 static void
-hns3_dev_stop(struct rte_eth_dev *eth_dev)
+hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return;
+
+	/* unmap the ring with vector */
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+}
+
+static void
+hns3_dev_stop(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
 	PMD_INIT_FUNC_TRACE();
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(eth_dev);
+	hns3_set_rxtx_function(dev);
 	rte_wmb();
 	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(eth_dev);
+	hns3_mp_req_stop_rxtx(dev);
 	/* Prevent crashes when queues are still in use. */
 	rte_delay_ms(hw->tqps_num);
 
@@ -4245,8 +4382,9 @@ hns3_dev_stop(struct rte_eth_dev *eth_dev)
 		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
-	rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+	rte_eal_alarm_cancel(hns3_service_handler, dev);
 	rte_spinlock_unlock(&hw->lock);
+	hns3_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -4898,6 +5036,8 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.tx_queue_setup         = hns3_tx_queue_setup,
 	.rx_queue_release       = hns3_dev_rx_queue_release,
 	.tx_queue_release       = hns3_dev_tx_queue_release,
+	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
 	.dev_configure          = hns3_dev_configure,
 	.flow_ctrl_get          = hns3_flow_ctrl_get,
 	.flow_ctrl_set          = hns3_flow_ctrl_set,
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 08f5c78..2687ea7 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1216,6 +1216,36 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
 }
 
 static int
+hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+			     bool mmap, uint16_t queue_id)
+
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_vf_bind_vector_msg bind_msg;
+	uint16_t code;
+	int ret;
+
+	memset(&bind_msg, 0, sizeof(bind_msg));
+	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+		HNS3_MBX_UNMAP_RING_TO_VECTOR;
+	bind_msg.vector_id = vector_id;
+	bind_msg.ring_num = 1;
+	bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
+	bind_msg.param[0].tqp_index = queue_id;
+	bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+
+	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+				sizeof(bind_msg), false, NULL, 0);
+	if (ret) {
+		hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
+			 queue_id, vector_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
 hns3vf_do_stop(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
@@ -1232,18 +1262,51 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 }
 
 static void
-hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
+hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return;
+
+	/* unmap the ring with vector */
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
+							   q_id);
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+}
+
+static void
+hns3vf_dev_stop(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
 	PMD_INIT_FUNC_TRACE();
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(eth_dev);
+	hns3_set_rxtx_function(dev);
 	rte_wmb();
 	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(eth_dev);
+	hns3_mp_req_stop_rxtx(dev);
 	/* Prevent crashes when queues are still in use. */
 	rte_delay_ms(hw->tqps_num);
 
@@ -1253,8 +1316,10 @@ hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
 		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
-	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
 	rte_spinlock_unlock(&hw->lock);
+
+	hns3vf_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -1336,15 +1401,84 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
 }
 
 static int
-hns3vf_dev_start(struct rte_eth_dev *eth_dev)
+hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t intr_vector;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
 	int ret;
 
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	/* disable uio/vfio intr/eventfd mapping */
+	rte_intr_disable(intr_handle);
+
+	/* check and configure queue intr-vector mapping */
+	if (rte_intr_cap_multiple(intr_handle) ||
+		!RTE_ETH_DEV_SRIOV(dev).active) {
+		intr_vector = dev->data->nb_rx_queues;
+		/* It creates event fd for each intr vector when MSIX is used */
+		if (rte_intr_efd_enable(intr_handle, intr_vector))
+			return -EINVAL;
+	}
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (intr_handle->intr_vec == NULL) {
+			hns3_err(hw, "Failed to allocate %d rx_queues"
+				     " intr_vec", dev->data->nb_rx_queues);
+			ret = -ENOMEM;
+			goto vf_alloc_intr_vec_error;
+		}
+	}
+
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			ret = hns3vf_bind_ring_with_vector(dev, vec, true,
+							   q_id);
+			if (ret)
+				goto vf_bind_vector_error;
+			intr_handle->intr_vec[q_id] = vec;
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	rte_intr_enable(intr_handle);
+	return 0;
+
+vf_bind_vector_error:
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+	return ret;
+vf_alloc_intr_vec_error:
+	rte_intr_efd_disable(intr_handle);
+	return ret;
+}
+
+static int
+hns3vf_dev_start(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
 	PMD_INIT_FUNC_TRACE();
 	if (rte_atomic16_read(&hw->reset.resetting))
 		return -EBUSY;
+
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
 	ret = hns3vf_do_start(hns, true);
@@ -1355,12 +1489,13 @@ hns3vf_dev_start(struct rte_eth_dev *eth_dev)
 	}
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
-	hns3_set_rxtx_function(eth_dev);
-	hns3_mp_req_start_rxtx(eth_dev);
-	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
-			  eth_dev);
-
-	return 0;
+	ret = hns3vf_map_rx_interrupt(dev);
+	if (ret)
+		return ret;
+	hns3_set_rxtx_function(dev);
+	hns3_mp_req_start_rxtx(dev);
+	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+	return ret;
 }
 
 static bool
@@ -1689,6 +1824,8 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
 	.tx_queue_setup     = hns3_tx_queue_setup,
 	.rx_queue_release   = hns3_dev_rx_queue_release,
 	.tx_queue_release   = hns3_dev_tx_queue_release,
+	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
 	.dev_configure      = hns3vf_dev_configure,
 	.mac_addr_add       = hns3vf_add_mac_addr,
 	.mac_addr_remove    = hns3vf_remove_mac_addr,
diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h
index 45101c0..7bcfbbe 100644
--- a/drivers/net/hns3/hns3_mbx.h
+++ b/drivers/net/hns3/hns3_mbx.h
@@ -105,6 +105,19 @@ struct hns3_mbx_pf_to_vf_cmd {
 	uint16_t msg[8];
 };
 
+struct hns3_ring_chain_param {
+	uint8_t ring_type;
+	uint8_t tqp_index;
+	uint8_t int_gl_index;
+};
+
+#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM	4
+struct hns3_vf_bind_vector_msg {
+	uint8_t vector_id;
+	uint8_t ring_num;
+	struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM];
+};
+
 struct hns3_vf_rst_cmd {
 	uint8_t dest_vfid;
 	uint8_t vf_rst;
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 2f5faaf..42581df 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -83,6 +83,9 @@
 
 #define HNS3_RING_EN_B				0
 
+#define HNS3_VECTOR_REG_OFFSET			0x4
+#define HNS3_VECTOR_VF_OFFSET			0x100000
+
 #define HNS3_TQP_REG_OFFSET			0x80000
 #define HNS3_TQP_REG_SIZE			0x200
 
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index e0f64f0..53d48c4 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -395,6 +395,47 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
 	return 0;
 }
 
+void
+hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+{
+	uint32_t addr, value;
+
+	addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+	value = en ? 1 : 0;
+
+	hns3_write_dev(hw, addr, value);
+}
+
+int
+hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return -ENOTSUP;
+
+	/* enable the vectors */
+	hns3_tqp_intr_enable(hw, queue_id, true);
+
+	return rte_intr_ack(intr_handle);
+}
+
+int
+hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return -ENOTSUP;
+
+	/* disable the vectors */
+	hns3_tqp_intr_enable(hw, queue_id, false);
+
+	return 0;
+}
+
 static int
 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 {
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index daf51f4..cc21026 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -295,6 +295,8 @@ void hns3_dev_rx_queue_release(void *queue);
 void hns3_dev_tx_queue_release(void *queue);
 void hns3_free_all_queues(struct rte_eth_dev *dev);
 int hns3_reset_all_queues(struct hns3_adapter *hns);
+int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
 int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
 int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
 void hns3_dev_release_mbufs(struct hns3_adapter *hns);
@@ -311,4 +313,5 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
+void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
 #endif /* _HNS3_RXTX_H_ */
-- 
2.7.4


* [dpdk-stable] [PATCH 19.11 2/6] net/hns3: support different numbers of Rx and Tx queues
From: Wei Hu (Xavier) @ 2020-05-29  3:57 UTC
  To: luca.boccassi; +Cc: stable, xavier.huwei

[ upstream commit a951c1ed3ab5e6f7a4366d7f227ee574177f103c ]

Hardware in the hns3 network engine does not support individually
enabling/disabling/resetting a Tx or Rx queue; the driver must
enable/disable/reset Tx and Rx queues at the same time.

Currently, the hns3 PMD driver does not support the following scenarios:
1) Calling the function below with unequal nb_rx_q and nb_tx_q input
   parameters:
     rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q,
                           uint16_t nb_tx_q,
                           const struct rte_eth_conf *dev_conf);
2) Calling the functions below such that the number of Rx queues set up
   cumulatively differs from the number of Tx queues set up:
     rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, ...);
     rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, ...);
However, these are common usage scenarios in applications such as l3fwd,
ip_reassembly and OVS-DPDK.

This patch adds support for these usages by setting up fake Tx or Rx
queues to even out the numbers of Tx/Rx queues. The fake queues are
invisible to upper applications and cannot be used by them.
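
To make the newly supported case concrete, here is a sketch of an
asymmetric queue setup that this series enables (the port id, queue
counts, descriptor counts and mempool are hypothetical):

/* Sketch: 8 Rx / 4 Tx queues; the PMD creates fake Tx queues
 * internally to even out the counts. Parameters are hypothetical.
 */
#include <rte_ethdev.h>

static int setup_asymmetric_queues(uint16_t port_id,
				   struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };
	uint16_t q;
	int ret;

	/* 8 Rx queues vs. 4 Tx queues: rejected before this series. */
	ret = rte_eth_dev_configure(port_id, 8, 4, &conf);
	if (ret != 0)
		return ret;

	for (q = 0; q < 8; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, 1024,
					     rte_eth_dev_socket_id(port_id),
					     NULL, mb_pool);
		if (ret != 0)
			return ret;
	}
	for (q = 0; q < 4; q++) {
		ret = rte_eth_tx_queue_setup(port_id, q, 1024,
					     rte_eth_dev_socket_id(port_id),
					     NULL);
		if (ret != 0)
			return ret;
	}

	return rte_eth_dev_start(port_id);
}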

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_dcb.c       |  88 +++--
 drivers/net/hns3/hns3_dcb.h       |   4 +-
 drivers/net/hns3/hns3_ethdev.c    |  56 ++--
 drivers/net/hns3/hns3_ethdev.h    |  16 +-
 drivers/net/hns3/hns3_ethdev_vf.c |  68 ++--
 drivers/net/hns3/hns3_flow.c      |   9 +-
 drivers/net/hns3/hns3_rxtx.c      | 675 +++++++++++++++++++++++++++++++-------
 drivers/net/hns3/hns3_rxtx.h      |  11 +
 8 files changed, 734 insertions(+), 193 deletions(-)

diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 19235df..369a40e 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -578,17 +578,33 @@ hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
 }
 
 void
-hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)
+hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
+{
+	uint16_t rx_qnum_per_tc;
+
+	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
+	rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
+	if (hw->alloc_rss_size != rx_qnum_per_tc) {
+		hns3_info(hw, "rss size changes from %u to %u",
+			  hw->alloc_rss_size, rx_qnum_per_tc);
+		hw->alloc_rss_size = rx_qnum_per_tc;
+	}
+	hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
+}
+
+void
+hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)
 {
 	struct hns3_tc_queue_info *tc_queue;
 	uint8_t i;
 
+	hw->tx_qnum_per_tc = nb_queue / hw->num_tc;
 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
 		tc_queue = &hw->tc_queue[i];
 		if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
 			tc_queue->enable = true;
-			tc_queue->tqp_offset = i * hw->alloc_rss_size;
-			tc_queue->tqp_count = hw->alloc_rss_size;
+			tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
+			tc_queue->tqp_count = hw->tx_qnum_per_tc;
 			tc_queue->tc = i;
 		} else {
 			/* Set to default queue if TC is disable */
@@ -598,30 +614,22 @@ hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)
 			tc_queue->tc = 0;
 		}
 	}
+	hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;
 }
 
 static void
-hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t queue_num)
+hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
+				 uint16_t nb_tx_q)
 {
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_pf *pf = &hns->pf;
-	uint16_t tqpnum_per_tc;
-	uint16_t alloc_tqps;
-
-	alloc_tqps = RTE_MIN(hw->tqps_num, queue_num);
-	hw->num_tc = RTE_MIN(alloc_tqps, hw->dcb_info.num_tc);
-	tqpnum_per_tc = RTE_MIN(hw->rss_size_max, alloc_tqps / hw->num_tc);
 
-	if (hw->alloc_rss_size != tqpnum_per_tc) {
-		PMD_INIT_LOG(INFO, "rss size changes from %d to %d",
-			     hw->alloc_rss_size, tqpnum_per_tc);
-		hw->alloc_rss_size = tqpnum_per_tc;
-	}
-	hw->alloc_tqps = hw->num_tc * hw->alloc_rss_size;
+	hw->num_tc = hw->dcb_info.num_tc;
+	hns3_set_rss_size(hw, nb_rx_q);
+	hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
 
-	hns3_tc_queue_mapping_cfg(hw);
-
-	memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
+	if (!hns->is_vf)
+		memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
 }
 
 int
@@ -1309,20 +1317,35 @@ hns3_dcb_info_cfg(struct hns3_adapter *hns)
 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
 		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
 
-	hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues);
+	hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
+					 hw->data->nb_tx_queues);
 }
 
-static void
+static int
 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
 {
 	struct hns3_pf *pf = &hns->pf;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t nb_rx_q = hw->data->nb_rx_queues;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	uint8_t bit_map = 0;
 	uint8_t i;
 
 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
 	    hw->dcb_info.num_pg != 1)
-		return;
+		return -EINVAL;
+
+	if (nb_rx_q < num_tc) {
+		hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+			 nb_rx_q, num_tc);
+		return -EINVAL;
+	}
+
+	if (nb_tx_q < num_tc) {
+		hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+			 nb_tx_q, num_tc);
+		return -EINVAL;
+	}
 
 	/* Currently not support uncontinuous tc */
 	hw->dcb_info.num_tc = num_tc;
@@ -1333,10 +1356,10 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
 		bit_map = 1;
 		hw->dcb_info.num_tc = 1;
 	}
-
 	hw->hw_tc_map = bit_map;
-
 	hns3_dcb_info_cfg(hns);
+
+	return 0;
 }
 
 static int
@@ -1422,10 +1445,15 @@ hns3_dcb_configure(struct hns3_adapter *hns)
 
 	hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
 	if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
-		hns3_dcb_info_update(hns, num_tc);
+		ret = hns3_dcb_info_update(hns, num_tc);
+		if (ret) {
+			hns3_err(hw, "dcb info update failed: %d", ret);
+			return ret;
+		}
+
 		ret = hns3_dcb_hw_configure(hns);
 		if (ret) {
-			hns3_err(hw, "dcb sw configure fails: %d", ret);
+			hns3_err(hw, "dcb sw configure failed: %d", ret);
 			return ret;
 		}
 	}
@@ -1479,7 +1507,8 @@ hns3_dcb_init(struct hns3_hw *hw)
 			hns3_err(hw, "dcb info init failed: %d", ret);
 			return ret;
 		}
-		hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num);
+		hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,
+						 hw->tqps_num);
 	}
 
 	/*
@@ -1502,10 +1531,11 @@ static int
 hns3_update_queue_map_configure(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	uint16_t queue_num = hw->data->nb_rx_queues;
+	uint16_t nb_rx_q = hw->data->nb_rx_queues;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	hns3_dcb_update_tc_queue_mapping(hw, queue_num);
+	hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
 	ret = hns3_q_to_qs_map(hw);
 	if (ret) {
 		hns3_err(hw, "failed to map nq to qs! ret = %d", ret);
diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index 9ec4e70..9c2c5f2 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -159,7 +159,9 @@ hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
 int
 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf);
 
-void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw);
+void hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q);
+
+void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue);
 
 int hns3_dcb_cfg_update(struct hns3_adapter *hns);
 
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 965a531..84615c1 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2121,10 +2121,11 @@ hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
 static int
 hns3_dev_configure(struct rte_eth_dev *dev)
 {
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 	struct rte_eth_rss_conf rss_conf;
@@ -2132,23 +2133,28 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	int ret;
 
 	/*
-	 * Hardware does not support where the number of rx and tx queues is
-	 * not equal in hip08.
+	 * Hardware does not support individually enable/disable/reset the Tx or
+	 * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
+	 * and Rx queues at the same time. When the numbers of Tx queues
+	 * allocated by upper applications are not equal to the numbers of Rx
+	 * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
+	 * of Tx/Rx queues. otherwise, network engine can not work as usual. But
+	 * these fake queues are imperceptible, and can not be used by upper
+	 * applications.
 	 */
-	if (nb_rx_q != nb_tx_q) {
-		hns3_err(hw,
-			 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
-			 "Hardware does not support this configuration!",
-			 nb_rx_q, nb_tx_q);
-		return -EINVAL;
+	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+	if (ret) {
+		hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+		return ret;
 	}
 
+	hw->adapter_state = HNS3_NIC_CONFIGURING;
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto cfg_err;
 	}
 
-	hw->adapter_state = HNS3_NIC_CONFIGURING;
 	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_check_dcb_cfg(dev);
 		if (ret)
@@ -2194,7 +2200,9 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 
 cfg_err:
+	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
+
 	return ret;
 }
 
@@ -4227,7 +4235,7 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	/* check and configure queue intr-vector mapping */
 	if (rte_intr_cap_multiple(intr_handle) ||
 	    !RTE_ETH_DEV_SRIOV(dev).active) {
-		intr_vector = dev->data->nb_rx_queues;
+		intr_vector = hw->used_rx_queues;
 		/* creates event fd for each intr vector when MSIX is used */
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -EINVAL;
@@ -4235,10 +4243,10 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
 		intr_handle->intr_vec =
 			rte_zmalloc("intr_vec",
-				    dev->data->nb_rx_queues * sizeof(int), 0);
+				    hw->used_rx_queues * sizeof(int), 0);
 		if (intr_handle->intr_vec == NULL) {
 			hns3_err(hw, "Failed to allocate %d rx_queues"
-				     " intr_vec", dev->data->nb_rx_queues);
+				     " intr_vec", hw->used_rx_queues);
 			ret = -ENOMEM;
 			goto alloc_intr_vec_error;
 		}
@@ -4249,7 +4257,7 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
 			if (ret)
 				goto bind_vector_error;
@@ -4333,6 +4341,8 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
 	uint8_t base = 0;
 	uint8_t vec = 0;
 	uint16_t q_id;
@@ -4346,7 +4356,7 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
@@ -4589,15 +4599,13 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	for (i = 0; i < dcb_info->nb_tcs; i++)
 		dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
 
-	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
-		dcb_info->tc_queue.tc_rxq[0][i].base =
-					hw->tc_queue[i].tqp_offset;
+	for (i = 0; i < hw->num_tc; i++) {
+		dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
 		dcb_info->tc_queue.tc_txq[0][i].base =
-					hw->tc_queue[i].tqp_offset;
-		dcb_info->tc_queue.tc_rxq[0][i].nb_queue =
-					hw->tc_queue[i].tqp_count;
+						hw->tc_queue[i].tqp_offset;
+		dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
 		dcb_info->tc_queue.tc_txq[0][i].nb_queue =
-					hw->tc_queue[i].tqp_count;
+						hw->tc_queue[i].tqp_count;
 	}
 	rte_spinlock_unlock(&hw->lock);
 
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 03bbd24..72eabea 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -154,6 +154,12 @@ struct hns3_mac {
 	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
 };
 
+struct hns3_fake_queue_data {
+	void **rx_queues; /* Array of pointers to fake RX queues. */
+	void **tx_queues; /* Array of pointers to fake TX queues. */
+	uint16_t nb_fake_rx_queues; /* Number of fake RX queues. */
+	uint16_t nb_fake_tx_queues; /* Number of fake TX queues. */
+};
 
 /* Primary process maintains driver state in main thread.
  *
@@ -367,8 +373,14 @@ struct hns3_hw {
 	struct hns3_dcb_info dcb_info;
 	enum hns3_fc_status current_fc_status; /* current flow control status */
 	struct hns3_tc_queue_info tc_queue[HNS3_MAX_TC_NUM];
-	uint16_t alloc_tqps;
-	uint16_t alloc_rss_size;    /* Queue number per TC */
+	uint16_t used_rx_queues;
+	uint16_t used_tx_queues;
+
+	/* Config max queue numbers between rx and tx queues from user */
+	uint16_t cfg_max_queues;
+	struct hns3_fake_queue_data fkq_data;     /* fake queue data */
+	uint16_t alloc_rss_size;    /* RX queue number per TC */
+	uint16_t tx_qnum_per_tc;    /* TX queue number per TC */
 
 	uint32_t flag;
 	/*
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 2687ea7..bbe4ee7 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -428,24 +428,28 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	int ret;
 
 	/*
-	 * Hardware does not support where the number of rx and tx queues is
-	 * not equal in hip08.
+	 * Hardware does not support individually enable/disable/reset the Tx or
+	 * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
+	 * and Rx queues at the same time. When the numbers of Tx queues
+	 * allocated by upper applications are not equal to the numbers of Rx
+	 * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
+	 * of Tx/Rx queues. otherwise, network engine can not work as usual. But
+	 * these fake queues are imperceptible, and can not be used by upper
+	 * applications.
 	 */
-	if (nb_rx_q != nb_tx_q) {
-		hns3_err(hw,
-			 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
-			 "Hardware does not support this configuration!",
-			 nb_rx_q, nb_tx_q);
-		return -EINVAL;
+	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+	if (ret) {
+		hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+		return ret;
 	}
 
+	hw->adapter_state = HNS3_NIC_CONFIGURING;
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto cfg_err;
 	}
 
-	hw->adapter_state = HNS3_NIC_CONFIGURING;
-
 	/* When RSS is not configured, redirect the packet queue 0 */
 	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
 		rss_conf = conf->rx_adv_conf.rss_conf;
@@ -484,7 +488,9 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 
 cfg_err:
+	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
+
 	return ret;
 }
 
@@ -801,12 +807,12 @@ hns3vf_get_configuration(struct hns3_hw *hw)
 	return hns3vf_get_tc_info(hw);
 }
 
-static void
+static int
 hns3vf_set_tc_info(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
-	uint16_t new_tqps;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	uint8_t i;
 
 	hw->num_tc = 0;
@@ -814,11 +820,22 @@ hns3vf_set_tc_info(struct hns3_adapter *hns)
 		if (hw->hw_tc_map & BIT(i))
 			hw->num_tc++;
 
-	new_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);
-	hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);
-	hw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;
+	if (nb_rx_q < hw->num_tc) {
+		hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+			 nb_rx_q, hw->num_tc);
+		return -EINVAL;
+	}
+
+	if (nb_tx_q < hw->num_tc) {
+		hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+			 nb_tx_q, hw->num_tc);
+		return -EINVAL;
+	}
 
-	hns3_tc_queue_mapping_cfg(hw);
+	hns3_set_rss_size(hw, nb_rx_q);
+	hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
+
+	return 0;
 }
 
 static void
@@ -1264,6 +1281,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 static void
 hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint8_t base = 0;
@@ -1279,7 +1297,7 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
 							   q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
@@ -1389,7 +1407,9 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hns3vf_set_tc_info(hns);
+	ret = hns3vf_set_tc_info(hns);
+	if (ret)
+		return ret;
 
 	ret = hns3_start_queues(hns, reset_queue);
 	if (ret) {
@@ -1420,8 +1440,8 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 
 	/* check and configure queue intr-vector mapping */
 	if (rte_intr_cap_multiple(intr_handle) ||
-		!RTE_ETH_DEV_SRIOV(dev).active) {
-		intr_vector = dev->data->nb_rx_queues;
+	    !RTE_ETH_DEV_SRIOV(dev).active) {
+		intr_vector = hw->used_rx_queues;
 		/* It creates event fd for each intr vector when MSIX is used */
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -EINVAL;
@@ -1429,10 +1449,10 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
 		intr_handle->intr_vec =
 			rte_zmalloc("intr_vec",
-				    dev->data->nb_rx_queues * sizeof(int), 0);
+				    hw->used_rx_queues * sizeof(int), 0);
 		if (intr_handle->intr_vec == NULL) {
 			hns3_err(hw, "Failed to allocate %d rx_queues"
-				     " intr_vec", dev->data->nb_rx_queues);
+				     " intr_vec", hw->used_rx_queues);
 			ret = -ENOMEM;
 			goto vf_alloc_intr_vec_error;
 		}
@@ -1443,7 +1463,7 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			ret = hns3vf_bind_ring_with_vector(dev, vec, true,
 							   q_id);
 			if (ret)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index c7027a7..cd56c99 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -224,14 +224,19 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,
 			 struct rte_flow_error *error)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
 	const struct rte_flow_action_queue *queue;
+	struct hns3_hw *hw = &hns->hw;
 
 	queue = (const struct rte_flow_action_queue *)action->conf;
-	if (queue->index >= hw->data->nb_rx_queues)
+	if (queue->index >= hw->used_rx_queues) {
+		hns3_err(hw, "queue ID(%d) is greater than number of "
+			  "available queue (%d) in driver.",
+			  queue->index, hw->used_rx_queues);
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
 					  "Invalid queue ID in PF");
+	}
+
 	rule->queue_id = queue->index;
 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
 	return 0;
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 53d48c4..1e41b0d 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -37,6 +37,7 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
 {
 	uint16_t i;
 
+	/* Note: Fake rx queue will not enter here */
 	if (rxq->sw_ring) {
 		for (i = 0; i < rxq->nb_rx_desc; i++) {
 			if (rxq->sw_ring[i].mbuf) {
@@ -52,6 +53,7 @@ hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
 {
 	uint16_t i;
 
+	/* Note: Fake rx queue will not enter here */
 	if (txq->sw_ring) {
 		for (i = 0; i < txq->nb_tx_desc; i++) {
 			if (txq->sw_ring[i].mbuf) {
@@ -120,22 +122,115 @@ hns3_dev_tx_queue_release(void *queue)
 	rte_spinlock_unlock(&hns->hw.lock);
 }
 
-void
-hns3_free_all_queues(struct rte_eth_dev *dev)
+static void
+hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
+{
+	struct hns3_rx_queue *rxq = queue;
+	struct hns3_adapter *hns;
+	struct hns3_hw *hw;
+	uint16_t idx;
+
+	if (rxq == NULL)
+		return;
+
+	hns = rxq->hns;
+	hw = &hns->hw;
+	idx = rxq->queue_id;
+	if (hw->fkq_data.rx_queues[idx]) {
+		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+		hw->fkq_data.rx_queues[idx] = NULL;
+	}
+
+	/* free fake rx queue arrays */
+	if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
+		hw->fkq_data.nb_fake_rx_queues = 0;
+		rte_free(hw->fkq_data.rx_queues);
+		hw->fkq_data.rx_queues = NULL;
+	}
+}
+
+static void
+hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
 {
+	struct hns3_tx_queue *txq = queue;
+	struct hns3_adapter *hns;
+	struct hns3_hw *hw;
+	uint16_t idx;
+
+	if (txq == NULL)
+		return;
+
+	hns = txq->hns;
+	hw = &hns->hw;
+	idx = txq->queue_id;
+	if (hw->fkq_data.tx_queues[idx]) {
+		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+		hw->fkq_data.tx_queues[idx] = NULL;
+	}
+
+	/* free fake tx queue arrays */
+	if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
+		hw->fkq_data.nb_fake_tx_queues = 0;
+		rte_free(hw->fkq_data.tx_queues);
+		hw->fkq_data.tx_queues = NULL;
+	}
+}
+
+static void
+hns3_free_rx_queues(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_fake_queue_data *fkq_data;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t nb_rx_q;
 	uint16_t i;
 
-	if (dev->data->rx_queues)
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+	nb_rx_q = hw->data->nb_rx_queues;
+	for (i = 0; i < nb_rx_q; i++) {
+		if (dev->data->rx_queues[i]) {
 			hns3_rx_queue_release(dev->data->rx_queues[i]);
 			dev->data->rx_queues[i] = NULL;
 		}
+	}
+
+	/* Free fake Rx queues */
+	fkq_data = &hw->fkq_data;
+	for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
+		if (fkq_data->rx_queues[i])
+			hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
+	}
+}
 
-	if (dev->data->tx_queues)
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+static void
+hns3_free_tx_queues(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_fake_queue_data *fkq_data;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t nb_tx_q;
+	uint16_t i;
+
+	nb_tx_q = hw->data->nb_tx_queues;
+	for (i = 0; i < nb_tx_q; i++) {
+		if (dev->data->tx_queues[i]) {
 			hns3_tx_queue_release(dev->data->tx_queues[i]);
 			dev->data->tx_queues[i] = NULL;
 		}
+	}
+
+	/* Free fake Tx queues */
+	fkq_data = &hw->fkq_data;
+	for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
+		if (fkq_data->tx_queues[i])
+			hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
+	}
+}
+
+void
+hns3_free_all_queues(struct rte_eth_dev *dev)
+{
+	hns3_free_rx_queues(dev);
+	hns3_free_tx_queues(dev);
 }
 
 static int
@@ -223,17 +318,26 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
 static void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
+	uint16_t nb_rx_q = hw->data->nb_rx_queues;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	struct hns3_rx_queue *rxq;
 	struct hns3_tx_queue *txq;
 	uint32_t rcb_reg;
 	int i;
 
-	for (i = 0; i < hw->data->nb_rx_queues; i++) {
-		rxq = hw->data->rx_queues[i];
-		txq = hw->data->tx_queues[i];
+	for (i = 0; i < hw->cfg_max_queues; i++) {
+		if (i < nb_rx_q)
+			rxq = hw->data->rx_queues[i];
+		else
+			rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
+		if (i < nb_tx_q)
+			txq = hw->data->tx_queues[i];
+		else
+			txq = hw->fkq_data.tx_queues[i - nb_tx_q];
 		if (rxq == NULL || txq == NULL ||
 		    (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
 			continue;
+
 		rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
 		if (en)
 			rcb_reg |= BIT(HNS3_RING_EN_B);
@@ -382,10 +486,9 @@ int
 hns3_reset_all_queues(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	int ret;
-	uint16_t i;
+	int ret, i;
 
-	for (i = 0; i < hw->data->nb_rx_queues; i++) {
+	for (i = 0; i < hw->cfg_max_queues; i++) {
 		ret = hns3_reset_queue(hns, i);
 		if (ret) {
 			hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
@@ -445,12 +548,11 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 
 	PMD_INIT_FUNC_TRACE();
 
-	rxq = hw->data->rx_queues[idx];
-
+	rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
 	ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
 	if (ret) {
 		hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
-			    idx, ret);
+			 idx, ret);
 		return ret;
 	}
 
@@ -462,15 +564,24 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 }
 
 static void
-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 {
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_tx_queue *txq;
+	struct hns3_rx_queue *rxq;
+
+	rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
+	rxq->next_to_use = 0;
+	rxq->next_to_clean = 0;
+	hns3_init_rx_queue_hw(rxq);
+}
+
+static void
+hns3_init_tx_queue(struct hns3_tx_queue *queue)
+{
+	struct hns3_tx_queue *txq = queue;
 	struct hns3_desc *desc;
 	int i;
 
-	txq = hw->data->tx_queues[idx];
-
 	/* Clear tx bd */
 	desc = txq->tx_ring;
 	for (i = 0; i < txq->nb_tx_desc; i++) {
@@ -480,11 +591,31 @@ hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 
 	txq->next_to_use = 0;
 	txq->next_to_clean = 0;
-	txq->tx_bd_ready   = txq->nb_tx_desc;
+	txq->tx_bd_ready = txq->nb_tx_desc;
 	hns3_init_tx_queue_hw(txq);
 }
 
 static void
+hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_tx_queue *txq;
+
+	txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
+	hns3_init_tx_queue(txq);
+}
+
+static void
+hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_tx_queue *txq;
+
+	txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
+	hns3_init_tx_queue(txq);
+}
+
+static void
 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
@@ -500,7 +631,7 @@ hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 
 		for (j = 0; j < tc_queue->tqp_count; j++) {
 			num = tc_queue->tqp_offset + j;
-			txq = hw->data->tx_queues[num];
+			txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
 			if (txq == NULL)
 				continue;
 
@@ -509,16 +640,13 @@ hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 	}
 }
 
-int
-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+static int
+hns3_start_rx_queues(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	struct rte_eth_dev_data *dev_data = hw->data;
 	struct hns3_rx_queue *rxq;
-	struct hns3_tx_queue *txq;
+	int i, j;
 	int ret;
-	int i;
-	int j;
 
 	/* Initialize RSS for queues */
 	ret = hns3_config_rss(hns);
@@ -527,49 +655,85 @@ hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
 		return ret;
 	}
 
-	if (reset_queue) {
-		ret = hns3_reset_all_queues(hns);
-		if (ret) {
-			hns3_err(hw, "Failed to reset all queues %d", ret);
-			return ret;
-		}
-	}
-
-	/*
-	 * Hardware does not support where the number of rx and tx queues is
-	 * not equal in hip08. In .dev_configure callback function we will
-	 * check the two values, here we think that the number of rx and tx
-	 * queues is equal.
-	 */
 	for (i = 0; i < hw->data->nb_rx_queues; i++) {
-		rxq = dev_data->rx_queues[i];
-		txq = dev_data->tx_queues[i];
-		if (rxq == NULL || txq == NULL || rxq->rx_deferred_start ||
-		    txq->tx_deferred_start)
+		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
+		if (rxq == NULL || rxq->rx_deferred_start)
 			continue;
-
 		ret = hns3_dev_rx_queue_start(hns, i);
 		if (ret) {
 			hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
 				 ret);
 			goto out;
 		}
-		hns3_dev_tx_queue_start(hns, i);
 	}
-	hns3_init_tx_ring_tc(hns);
 
-	hns3_enable_all_queues(hw, true);
+	for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
+		rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
+		if (rxq == NULL || rxq->rx_deferred_start)
+			continue;
+		hns3_fake_rx_queue_start(hns, i);
+	}
 	return 0;
 
 out:
 	for (j = 0; j < i; j++) {
-		rxq = dev_data->rx_queues[j];
+		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
 		hns3_rx_queue_release_mbufs(rxq);
 	}
 
 	return ret;
 }
 
+static void
+hns3_start_tx_queues(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < hw->data->nb_tx_queues; i++) {
+		txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
+		if (txq == NULL || txq->tx_deferred_start)
+			continue;
+		hns3_dev_tx_queue_start(hns, i);
+	}
+
+	for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
+		txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
+		if (txq == NULL || txq->tx_deferred_start)
+			continue;
+		hns3_fake_tx_queue_start(hns, i);
+	}
+
+	hns3_init_tx_ring_tc(hns);
+}
+
+int
+hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	if (reset_queue) {
+		ret = hns3_reset_all_queues(hns);
+		if (ret) {
+			hns3_err(hw, "Failed to reset all queues %d", ret);
+			return ret;
+		}
+	}
+
+	ret = hns3_start_rx_queues(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to start rx queues: %d", ret);
+		return ret;
+	}
+
+	hns3_start_tx_queues(hns);
+	hns3_enable_all_queues(hw, true);
+
+	return 0;
+}
+
 int
 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
 {
@@ -587,6 +751,337 @@ hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
 	return 0;
 }
 
+static void*
+hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
+			    struct hns3_queue_info *q_info)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	const struct rte_memzone *rx_mz;
+	struct hns3_rx_queue *rxq;
+	unsigned int rx_desc;
+
+	rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
+				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
+	if (rxq == NULL) {
+		hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+			 q_info->idx);
+		return NULL;
+	}
+
+	/* Allocate rx ring hardware descriptors. */
+	rxq->queue_id = q_info->idx;
+	rxq->nb_rx_desc = q_info->nb_desc;
+	rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
+	rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+					 rx_desc, HNS3_RING_BASE_ALIGN,
+					 q_info->socket_id);
+	if (rx_mz == NULL) {
+		hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+			 q_info->idx);
+		hns3_rx_queue_release(rxq);
+		return NULL;
+	}
+	rxq->mz = rx_mz;
+	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
+	rxq->rx_ring_phys_addr = rx_mz->iova;
+
+	hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+		 rxq->rx_ring_phys_addr);
+
+	return rxq;
+}
+
+static int
+hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+			 uint16_t nb_desc, unsigned int socket_id)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
+	struct hns3_rx_queue *rxq;
+	uint16_t nb_rx_q;
+
+	if (hw->fkq_data.rx_queues[idx]) {
+		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+		hw->fkq_data.rx_queues[idx] = NULL;
+	}
+
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 fake RX queue";
+	q_info.ring_name = "rx_fake_ring";
+	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
+	if (rxq == NULL) {
+		hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+		return -ENOMEM;
+	}
+
+	/* Don't need alloc sw_ring, because upper applications don't use it */
+	rxq->sw_ring = NULL;
+
+	rxq->hns = hns;
+	rxq->rx_deferred_start = false;
+	rxq->port_id = dev->data->port_id;
+	rxq->configured = true;
+	nb_rx_q = dev->data->nb_rx_queues;
+	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+				(nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
+	rxq->rx_buf_len = hw->rx_buf_len;
+
+	rte_spinlock_lock(&hw->lock);
+	hw->fkq_data.rx_queues[idx] = rxq;
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+static void*
+hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
+			    struct hns3_queue_info *q_info)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	const struct rte_memzone *tx_mz;
+	struct hns3_tx_queue *txq;
+	struct hns3_desc *desc;
+	unsigned int tx_desc;
+	int i;
+
+	txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
+				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
+	if (txq == NULL) {
+		hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+			 q_info->idx);
+		return NULL;
+	}
+
+	/* Allocate tx ring hardware descriptors. */
+	txq->queue_id = q_info->idx;
+	txq->nb_tx_desc = q_info->nb_desc;
+	tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
+	tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+					 tx_desc, HNS3_RING_BASE_ALIGN,
+					 q_info->socket_id);
+	if (tx_mz == NULL) {
+		hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+			 q_info->idx);
+		hns3_tx_queue_release(txq);
+		return NULL;
+	}
+	txq->mz = tx_mz;
+	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
+	txq->tx_ring_phys_addr = tx_mz->iova;
+
+	hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+		 txq->tx_ring_phys_addr);
+
+	/* Clear tx bd */
+	desc = txq->tx_ring;
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		desc->tx.tp_fe_sc_vld_ra_ri = 0;
+		desc++;
+	}
+
+	return txq;
+}
+
+static int
+hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+			 uint16_t nb_desc, unsigned int socket_id)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
+	struct hns3_tx_queue *txq;
+	uint16_t nb_tx_q;
+
+	if (hw->fkq_data.tx_queues[idx] != NULL) {
+		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+		hw->fkq_data.tx_queues[idx] = NULL;
+	}
+
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 fake TX queue";
+	q_info.ring_name = "tx_fake_ring";
+	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
+	if (txq == NULL) {
+		hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+		return -ENOMEM;
+	}
+
+	/* No need to allocate sw_ring, because upper applications don't use it */
+	txq->sw_ring = NULL;
+
+	txq->hns = hns;
+	txq->tx_deferred_start = false;
+	txq->port_id = dev->data->port_id;
+	txq->configured = true;
+	nb_tx_q = dev->data->nb_tx_queues;
+	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+				(nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
+
+	rte_spinlock_lock(&hw->lock);
+	hw->fkq_data.tx_queues[idx] = txq;
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+static int
+hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
+	void **rxq;
+	uint8_t i;
+
+	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
+		/* first time configuration */
+
+		uint32_t size;
+		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
+		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
+						     RTE_CACHE_LINE_SIZE);
+		if (hw->fkq_data.rx_queues == NULL) {
+			hw->fkq_data.nb_fake_rx_queues = 0;
+			return -ENOMEM;
+		}
+	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
+		/* re-configure */
+
+		rxq = hw->fkq_data.rx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_rx_queue_release(rxq[i]);
+
+		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+				  RTE_CACHE_LINE_SIZE);
+		if (rxq == NULL)
+			return -ENOMEM;
+		if (nb_queues > old_nb_queues) {
+			uint16_t new_qs = nb_queues - old_nb_queues;
+			memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
+		}
+
+		hw->fkq_data.rx_queues = rxq;
+	} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
+		rxq = hw->fkq_data.rx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_rx_queue_release(rxq[i]);
+
+		rte_free(hw->fkq_data.rx_queues);
+		hw->fkq_data.rx_queues = NULL;
+	}
+
+	hw->fkq_data.nb_fake_rx_queues = nb_queues;
+
+	return 0;
+}
+
+static int
+hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
+	void **txq;
+	uint8_t i;
+
+	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
+		/* first time configuration */
+
+		uint32_t size;
+		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
+		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
+						     RTE_CACHE_LINE_SIZE);
+		if (hw->fkq_data.tx_queues == NULL) {
+			hw->fkq_data.nb_fake_tx_queues = 0;
+			return -ENOMEM;
+		}
+	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
+		/* re-configure */
+
+		txq = hw->fkq_data.tx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_tx_queue_release(txq[i]);
+		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+				  RTE_CACHE_LINE_SIZE);
+		if (txq == NULL)
+			return -ENOMEM;
+		if (nb_queues > old_nb_queues) {
+			uint16_t new_qs = nb_queues - old_nb_queues;
+			memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
+		}
+
+		hw->fkq_data.tx_queues = txq;
+	} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
+		txq = hw->fkq_data.tx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_tx_queue_release(txq[i]);
+
+		rte_free(hw->fkq_data.tx_queues);
+		hw->fkq_data.tx_queues = NULL;
+	}
+	hw->fkq_data.nb_fake_tx_queues = nb_queues;
+
+	return 0;
+}
+
+int
+hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+			      uint16_t nb_tx_q)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint16_t rx_need_add_nb_q;
+	uint16_t tx_need_add_nb_q;
+	uint16_t port_id;
+	uint16_t q;
+	int ret;
+
+	/* Setup new number of fake RX/TX queues and reconfigure device. */
+	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
+	rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
+	tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
+	ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
+	if (ret) {
+		hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+		goto cfg_fake_rx_q_fail;
+	}
+
+	ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
+	if (ret) {
+		hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+		goto cfg_fake_tx_q_fail;
+	}
+
+	/* Allocate and set up fake RX queue per Ethernet port. */
+	port_id = hw->data->port_id;
+	for (q = 0; q < rx_need_add_nb_q; q++) {
+		ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+					       rte_eth_dev_socket_id(port_id));
+		if (ret)
+			goto setup_fake_rx_q_fail;
+	}
+
+	/* Allocate and set up fake TX queue per Ethernet port. */
+	for (q = 0; q < tx_need_add_nb_q; q++) {
+		ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+					       rte_eth_dev_socket_id(port_id));
+		if (ret)
+			goto setup_fake_tx_q_fail;
+	}
+
+	return 0;
+
+setup_fake_tx_q_fail:
+setup_fake_rx_q_fail:
+	(void)hns3_fake_tx_queue_config(hw, 0);
+cfg_fake_tx_q_fail:
+	(void)hns3_fake_rx_queue_config(hw, 0);
+cfg_fake_rx_q_fail:
+	hw->cfg_max_queues = 0;
+
+	return ret;
+}
+
 void
 hns3_dev_release_mbufs(struct hns3_adapter *hns)
 {
@@ -618,11 +1113,9 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		    struct rte_mempool *mp)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	const struct rte_memzone *rx_mz;
 	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
 	struct hns3_rx_queue *rxq;
-	unsigned int desc_size = sizeof(struct hns3_desc);
-	unsigned int rx_desc;
 	int rx_entry_len;
 
 	if (dev->data->dev_started) {
@@ -642,17 +1135,20 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		dev->data->rx_queues[idx] = NULL;
 	}
 
-	rxq = rte_zmalloc_socket("hns3 RX queue", sizeof(struct hns3_rx_queue),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 RX queue";
+	q_info.ring_name = "rx_ring";
+	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
 	if (rxq == NULL) {
-		hns3_err(hw, "Failed to allocate memory for rx queue!");
+		hns3_err(hw,
+			 "Failed to alloc mem and reserve DMA mem for rx ring!");
 		return -ENOMEM;
 	}
 
 	rxq->hns = hns;
 	rxq->mb_pool = mp;
-	rxq->nb_rx_desc = nb_desc;
-	rxq->queue_id = idx;
 	if (conf->rx_free_thresh <= 0)
 		rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
 	else
@@ -668,23 +1164,6 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		return -ENOMEM;
 	}
 
-	/* Allocate rx ring hardware descriptors. */
-	rx_desc = rxq->nb_rx_desc * desc_size;
-	rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", idx, rx_desc,
-					 HNS3_RING_BASE_ALIGN, socket_id);
-	if (rx_mz == NULL) {
-		hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
-			 idx);
-		hns3_rx_queue_release(rxq);
-		return -ENOMEM;
-	}
-	rxq->mz = rx_mz;
-	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
-	rxq->rx_ring_phys_addr = rx_mz->iova;
-
-	hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, idx,
-		 rxq->rx_ring_phys_addr);
-
 	rxq->next_to_use = 0;
 	rxq->next_to_clean = 0;
 	rxq->nb_rx_hold = 0;
@@ -1063,14 +1542,10 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		    unsigned int socket_id, const struct rte_eth_txconf *conf)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	const struct rte_memzone *tx_mz;
 	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
 	struct hns3_tx_queue *txq;
-	struct hns3_desc *desc;
-	unsigned int desc_size = sizeof(struct hns3_desc);
-	unsigned int tx_desc;
 	int tx_entry_len;
-	int i;
 
 	if (dev->data->dev_started) {
 		hns3_err(hw, "tx_queue_setup after dev_start no supported");
@@ -1089,17 +1564,19 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		dev->data->tx_queues[idx] = NULL;
 	}
 
-	txq = rte_zmalloc_socket("hns3 TX queue", sizeof(struct hns3_tx_queue),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 TX queue";
+	q_info.ring_name = "tx_ring";
+	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
 	if (txq == NULL) {
-		hns3_err(hw, "Failed to allocate memory for tx queue!");
+		hns3_err(hw,
+			 "Failed to alloc mem and reserve DMA mem for tx ring!");
 		return -ENOMEM;
 	}
 
-	txq->nb_tx_desc = nb_desc;
-	txq->queue_id = idx;
 	txq->tx_deferred_start = conf->tx_deferred_start;
-
 	tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
 	txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
 					  RTE_CACHE_LINE_SIZE, socket_id);
@@ -1109,34 +1586,10 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		return -ENOMEM;
 	}
 
-	/* Allocate tx ring hardware descriptors. */
-	tx_desc = txq->nb_tx_desc * desc_size;
-	tx_mz = rte_eth_dma_zone_reserve(dev, "tx_ring", idx, tx_desc,
-					 HNS3_RING_BASE_ALIGN, socket_id);
-	if (tx_mz == NULL) {
-		hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
-			 idx);
-		hns3_tx_queue_release(txq);
-		return -ENOMEM;
-	}
-	txq->mz = tx_mz;
-	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
-	txq->tx_ring_phys_addr = tx_mz->iova;
-
-	hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, idx,
-		 txq->tx_ring_phys_addr);
-
-	/* Clear tx bd */
-	desc = txq->tx_ring;
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		desc->tx.tp_fe_sc_vld_ra_ri = 0;
-		desc++;
-	}
-
 	txq->hns = hns;
 	txq->next_to_use = 0;
 	txq->next_to_clean = 0;
-	txq->tx_bd_ready   = txq->nb_tx_desc;
+	txq->tx_bd_ready = txq->nb_tx_desc;
 	txq->port_id = dev->data->port_id;
 	txq->configured = true;
 	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index cc21026..a042c99 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -273,6 +273,14 @@ struct hns3_tx_queue {
 	bool configured;        /* indicate if tx queue has been configured */
 };
 
+struct hns3_queue_info {
+	const char *type;   /* points to the queue memory name */
+	const char *ring_name;  /* points to the hardware ring name */
+	uint16_t idx;
+	uint16_t nb_desc;
+	unsigned int socket_id;
+};
+
 #define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
 	PKT_TX_OUTER_IPV6 | \
 	PKT_TX_OUTER_IPV4 | \
@@ -314,4 +322,7 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
 void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
+int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+				  uint16_t nb_tx_q);
+
 #endif /* _HNS3_RXTX_H_ */
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-stable] [PATCH 19.11 3/6] net/hns3: fix RSS indirection table configuration
  2020-05-29  3:57 [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Wei Hu (Xavier)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 1/6] net/hns3: support Rx interrupt Wei Hu (Xavier)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 2/6] net/hns3: support different numbers of Rx and Tx queues Wei Hu (Xavier)
@ 2020-05-29  3:57 ` Wei Hu (Xavier)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 4/6] net/hns3: fix Tx interrupt when enabling Rx interrupt Wei Hu (Xavier)
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Wei Hu (Xavier) @ 2020-05-29  3:57 UTC (permalink / raw)
  To: luca.boccassi; +Cc: stable, xavier.huwei

From: Lijun Ou <oulijun@huawei.com>

[ upstream commit 3e791e07b7e1d66141b0c5a0e9a0924e77911b0b ]

In the current hns3 PMD driver, there are RSS-related bugs in the
following scenarios:
1. Start the application with a single Rx queue (--rxq=1), then change
   the number of Rx queues to a value greater than 1 during normal
   operation. As a result, the upper application cannot receive packets
   from multiple Rx queues.
2. Start the testpmd application with RSS disabled and more than one Rx
   queue (--disable-rss --rxq=N, N>1). As a result, the upper
   application can still receive packets from multiple Rx queues.

The root cause is as below:
There are erroneous configurations in the RSS indirection table of the
hns3 network engine.

This patch fixes them with the following modifications.
1. When the RSS size is changed, we need to update the RSS redirection
   table maintained by the driver and configure it to hardware (see the
   sketch after this list). Besides, during the entire reset process,
   we need to ensure that the RSS table information is not overwritten
   and is configured directly to the hardware in the
   RESET_STAGE_RESTORE stage of the reset process.
2. When starting the testpmd application with the disable-rss option,
   there is no need to configure the RSS redirection table to hardware.
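
As an illustration, here is a minimal sketch of the indirection-table
refill performed when the RSS size changes. The helper name and the
uint16_t table type are assumptions made for illustration only; the
real logic lives in hns3_set_rss_size in the diff below:

static void
refill_rss_indir_tbl(uint16_t *tbl, uint16_t tbl_size, uint16_t rss_size)
{
	uint16_t i;

	/* Spread the table entries evenly across the allocated RSS queues. */
	for (i = 0; i < tbl_size; i++)
		tbl[i] = i % rss_size;
}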

Fixes: c37ca66f2b27 ("net/hns3: support RSS")
Cc: stable@dpdk.org

Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_dcb.c       | 15 +++++++++++++++
 drivers/net/hns3/hns3_ethdev.c    |  8 ++++++++
 drivers/net/hns3/hns3_ethdev_vf.c |  9 +++++++++
 drivers/net/hns3/hns3_flow.c      | 13 +++++++++++++
 drivers/net/hns3/hns3_rss.c       | 28 ++++++++++++++++++----------
 drivers/net/hns3/hns3_rss.h       |  2 ++
 6 files changed, 65 insertions(+), 10 deletions(-)

diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 369a40e..8688de2 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -580,7 +580,9 @@ hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
 void
 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
 {
+	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
 	uint16_t rx_qnum_per_tc;
+	int i;
 
 	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
 	rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
@@ -590,6 +592,19 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
 		hw->alloc_rss_size = rx_qnum_per_tc;
 	}
 	hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
+
+	/*
+	 * When the rss size is changed, we need to update the rss redirection
+	 * table maintained by the driver. Besides, during the entire reset
+	 * process, we need to ensure that the rss table information is not
+	 * overwritten and is configured directly to the hardware in the
+	 * RESET_STAGE_RESTORE stage of the reset process.
+	 */
+	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+		for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
+			rss_cfg->rss_indirection_tbl[i] =
+							i % hw->alloc_rss_size;
+	}
 }
 
 void
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 84615c1..f70f43f 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4281,6 +4281,12 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+hns3_restore_filter(struct rte_eth_dev *dev)
+{
+	hns3_restore_rss_filter(dev);
+}
+
 static int
 hns3_dev_start(struct rte_eth_dev *dev)
 {
@@ -4311,6 +4317,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	hns3_mp_req_start_rxtx(dev);
 	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
 
+	hns3_restore_filter(dev);
+
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
 }
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index bbe4ee7..feb49df 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1488,6 +1488,12 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+hns3vf_restore_filter(struct rte_eth_dev *dev)
+{
+	hns3_restore_rss_filter(dev);
+}
+
 static int
 hns3vf_dev_start(struct rte_eth_dev *dev)
 {
@@ -1515,6 +1521,9 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	hns3_set_rxtx_function(dev);
 	hns3_mp_req_start_rxtx(dev);
 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+
+	hns3vf_restore_filter(dev);
+
 	return ret;
 }
 
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index cd56c99..1b9dc1d 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1547,6 +1547,19 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev)
 	return hns3_config_rss_filter(dev, &hw->rss_info, false);
 }
 
+/* Restore the rss filter */
+int
+hns3_restore_rss_filter(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+
+	if (hw->rss_info.conf.queue_num == 0)
+		return 0;
+
+	return hns3_config_rss_filter(dev, &hw->rss_info, true);
+}
+
 static int
 hns3_flow_parse_rss(struct rte_eth_dev *dev,
 		    const struct hns3_rss_conf *conf, bool add)
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c
index ff2da5a..f19b799 100644
--- a/drivers/net/hns3/hns3_rss.c
+++ b/drivers/net/hns3/hns3_rss.c
@@ -127,7 +127,7 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint8_t *indir, uint16_t size)
 		req->rss_set_bitmap = rte_cpu_to_le_16(HNS3_RSS_SET_BITMAP_MSK);
 		for (j = 0; j < HNS3_RSS_CFG_TBL_SIZE; j++) {
 			num = i * HNS3_RSS_CFG_TBL_SIZE + j;
-			req->rss_result[j] = indir[num] % hw->alloc_rss_size;
+			req->rss_result[j] = indir[num];
 		}
 		ret = hns3_cmd_send(hw, &desc, 1);
 		if (ret) {
@@ -422,7 +422,7 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev,
 		shift = i % RTE_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
 			reta_conf[idx].reta[shift] =
-			  rss_cfg->rss_indirection_tbl[i] % hw->alloc_rss_size;
+						rss_cfg->rss_indirection_tbl[i];
 	}
 	rte_spinlock_unlock(&hw->lock);
 	return 0;
@@ -529,7 +529,7 @@ hns3_config_rss(struct hns3_adapter *hns)
 
 	enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
 
-	/* When there is no open RSS, redirect the packet queue 0 */
+	/* When RSS is off, redirect the packet to queue 0 */
 	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
 		hns3_rss_uninit(hns);
 
@@ -543,10 +543,16 @@ hns3_config_rss(struct hns3_adapter *hns)
 	if (ret)
 		return ret;
 
-	ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
-				       HNS3_RSS_IND_TBL_SIZE);
-	if (ret)
-		goto rss_tuple_uninit;
+	/*
+	 * When RSS is off, there is no need to configure the rss redirection
+	 * table to hardware.
+	 */
+	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+		ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
+					       HNS3_RSS_IND_TBL_SIZE);
+		if (ret)
+			goto rss_tuple_uninit;
+	}
 
 	ret = hns3_set_rss_tc_mode(hw);
 	if (ret)
@@ -555,9 +561,11 @@ hns3_config_rss(struct hns3_adapter *hns)
 	return ret;
 
 rss_indir_table_uninit:
-	ret1 = hns3_rss_reset_indir_table(hw);
-	if (ret1 != 0)
-		return ret;
+	if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+		ret1 = hns3_rss_reset_indir_table(hw);
+		if (ret1 != 0)
+			return ret;
+	}
 
 rss_tuple_uninit:
 	hns3_rss_tuple_uninit(hw);
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h
index 7ffc151..8f065af 100644
--- a/drivers/net/hns3/hns3_rss.h
+++ b/drivers/net/hns3/hns3_rss.h
@@ -121,4 +121,6 @@ int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw,
 				 uint64_t rss_hf);
 int hns3_set_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo,
 			  const uint8_t *key);
+int hns3_restore_rss_filter(struct rte_eth_dev *dev);
+
 #endif /* _HNS3_RSS_H_ */
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-stable] [PATCH 19.11 4/6] net/hns3: fix Tx interrupt when enabling Rx interrupt
  2020-05-29  3:57 [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Wei Hu (Xavier)
                   ` (2 preceding siblings ...)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 3/6] net/hns3: fix RSS indirection table configuration Wei Hu (Xavier)
@ 2020-05-29  3:57 ` Wei Hu (Xavier)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 5/6] net/hns3: fix Rx interrupt after reset Wei Hu (Xavier)
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Wei Hu (Xavier) @ 2020-05-29  3:57 UTC (permalink / raw)
  To: luca.boccassi; +Cc: stable, xavier.huwei

[ upstream commit 26aff86b694ea4b143cd61a08c910986adcc7669 ]

Currently, when receiving and transmitting packets on the hns3 network
engine, unexpected and redundant Tx interrupts may occur if Rx
interrupt is enabled.

The root cause is as below:
Tx and Rx queues with the same index share an interrupt vector in the
hns3 network engine, and in this case residual hardware mapping
configurations between queues and interrupt vectors, configured by the
hns3 kernel ethdev driver, may remain.

We should clear all the hardware mapping configurations during
initialization. Because of hardware constraints, we have to clear the
mappings by binding all queues to the last interrupt vector and
reserving that vector; this method reduces the maximum number of
queues when upper applications call the rte_eth_dev_configure API
function to enable Rx interrupt.
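
A condensed sketch of this clearing step, based on the
hns3_init_ring_with_vector function added in the diff below (the
wrapper name is hypothetical and error logging is omitted for
brevity):

static int
clear_ring_vector_maps(struct hns3_hw *hw)
{
	uint8_t last_vec = hw->num_msi - 1; /* vector 0 is the misc interrupt */
	int ret;
	int i;

	hw->intr_tqps_num = last_vec - 1; /* the last vector is reserved */
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/* Park both the Tx and the Rx ring on the reserved vector. */
		ret = hns3_bind_ring_with_vector(hw, last_vec, false,
						 HNS3_RING_TYPE_TX, i);
		if (ret)
			return ret;
		ret = hns3_bind_ring_with_vector(hw, last_vec, false,
						 HNS3_RING_TYPE_RX, i);
		if (ret)
			return ret;
	}

	return 0;
}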

Fixes: 02a7b55657b2 ("net/hns3: support Rx interrupt")
Cc: stable@dpdk.org

Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h       |  31 +++++--
 drivers/net/hns3/hns3_ethdev.c    | 117 +++++++++++++++++++++----
 drivers/net/hns3/hns3_ethdev.h    |   1 +
 drivers/net/hns3/hns3_ethdev_vf.c | 180 +++++++++++++++++++++++++++++++-------
 drivers/net/hns3/hns3_regs.h      |  13 ++-
 drivers/net/hns3/hns3_rxtx.c      |  48 +++++++---
 drivers/net/hns3/hns3_rxtx.h      |   5 +-
 7 files changed, 326 insertions(+), 69 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 897dc14..26d4103 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -79,6 +79,7 @@ enum hns3_opcode_type {
 	HNS3_OPC_GBL_RST_STATUS         = 0x0021,
 	HNS3_OPC_QUERY_FUNC_STATUS      = 0x0022,
 	HNS3_OPC_QUERY_PF_RSRC          = 0x0023,
+	HNS3_OPC_QUERY_VF_RSRC          = 0x0024,
 	HNS3_OPC_GET_CFG_PARAM          = 0x0025,
 	HNS3_OPC_PF_RST_DONE            = 0x0026,
 
@@ -337,8 +338,9 @@ struct hns3_func_status_cmd {
 	uint8_t rsv[2];
 };
 
-#define HNS3_PF_VEC_NUM_S		0
-#define HNS3_PF_VEC_NUM_M		GENMASK(7, 0)
+#define HNS3_VEC_NUM_S		0
+#define HNS3_VEC_NUM_M		GENMASK(7, 0)
+#define HNS3_MIN_VECTOR_NUM	2 /* one for msi-x, another for IO */
 struct hns3_pf_res_cmd {
 	uint16_t tqp_num;
 	uint16_t buf_size;
@@ -351,6 +353,15 @@ struct hns3_pf_res_cmd {
 	uint32_t rsv[2];
 };
 
+struct hns3_vf_res_cmd {
+	uint16_t tqp_num;
+	uint16_t reserved;
+	uint16_t msixcap_localid_ba_nic;
+	uint16_t msixcap_localid_ba_rocee;
+	uint16_t vf_intr_vector_number;
+	uint16_t rsv[7];
+};
+
 #define HNS3_UMV_SPC_ALC_B	0
 struct hns3_umv_spc_alc_cmd {
 	uint8_t allocate;
@@ -677,13 +688,19 @@ struct hns3_tqp_map_cmd {
 	uint8_t rsv[18];
 };
 
-#define HNS3_RING_TYPE_B	0
-#define HNS3_RING_TYPE_TX	0
-#define HNS3_RING_TYPE_RX	1
+enum hns3_ring_type {
+	HNS3_RING_TYPE_TX,
+	HNS3_RING_TYPE_RX
+};
+
+enum hns3_int_gl_idx {
+	HNS3_RING_GL_RX,
+	HNS3_RING_GL_TX,
+	HNS3_RING_GL_IMMEDIATE = 3
+};
+
 #define HNS3_RING_GL_IDX_S	0
 #define HNS3_RING_GL_IDX_M	GENMASK(1, 0)
-#define HNS3_RING_GL_RX		0
-#define HNS3_RING_GL_TX		1
 
 #define HNS3_VECTOR_ELEMENTS_PER_CMD	10
 
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index f70f43f..35f317a 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2085,34 +2085,98 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
 }
 
 static int
-hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
-			   bool mmap, uint16_t queue_id)
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+			   enum hns3_ring_type queue_type, uint16_t queue_id)
 {
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_cmd_desc desc;
 	struct hns3_ctrl_vector_chain_cmd *req =
 		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
 	enum hns3_cmd_status status;
 	enum hns3_opcode_type op;
 	uint16_t tqp_type_and_id = 0;
+	const char *op_str;
+	uint16_t type;
+	uint16_t gl;
 
 	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
 	hns3_cmd_setup_basic_desc(&desc, op, false);
 	req->int_vector_id = vector_id;
 
+	if (queue_type == HNS3_RING_TYPE_RX)
+		gl = HNS3_RING_GL_RX;
+	else
+		gl = HNS3_RING_GL_TX;
+
+	type = queue_type;
+
 	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
-		       HNS3_RING_TYPE_RX);
+		       type);
 	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
 	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
-		       HNS3_RING_GL_RX);
+		       gl);
 	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
-
 	req->int_cause_num = 1;
+	op_str = mmap ? "Map" : "Unmap";
 	status = hns3_cmd_send(hw, &desc, 1);
 	if (status) {
-		hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
-			 queue_id, vector_id, status);
-		return -EIO;
+		hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+			 op_str, queue_id, req->int_vector_id, status);
+		return status;
+	}
+
+	return 0;
+}
+
+static int
+hns3_init_ring_with_vector(struct hns3_hw *hw)
+{
+	uint8_t vec;
+	int ret;
+	int i;
+
+	/*
+	 * In hns3 network engine, vector 0 is always the misc interrupt of this
+	 * function, vector 1~N can be used respectively for the queues of the
+	 * function. Tx and Rx queues with the same number share the interrupt
+	 * vector. In the initialization, clearing all the hardware mapping
+	 * relationship configurations between queues and interrupt vectors is
+	 * needed, so errors caused by the residual configurations, such as
+	 * the unexpected Tx interrupt, can be avoided. Because of the hardware
+	 * constraints in hns3 hardware engine, we have to implement clearing
+	 * the mapping relationship configurations by binding all queues to the
+	 * last interrupt vector and reserving the last interrupt vector. This
+	 * method results in a decrease of the maximum queues when upper
+	 * applications call the rte_eth_dev_configure API function to enable
+	 * Rx interrupt.
+	 */
+	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	for (i = 0; i < hw->intr_tqps_num; i++) {
+		/*
+		 * Set gap limiter and rate limiter configuration of queue's
+		 * interrupt.
+		 */
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_TX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_RX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -2289,8 +2353,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t queue_num = hw->tqps_num;
 
-	info->max_rx_queues = hw->tqps_num;
+	/*
+	 * In interrupt mode, 'max_rx_queues' is set based on the number of
+	 * MSI-X interrupt resources of the hardware.
+	 */
+	if (hw->data->dev_conf.intr_conf.rxq == 1)
+		queue_num = hw->intr_tqps_num;
+
+	info->max_rx_queues = queue_num;
 	info->max_tx_queues = hw->tqps_num;
 	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
 	info->min_rx_bufsize = hw->rx_buf_len;
@@ -2451,6 +2523,7 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	struct hns3_pf *pf = &hns->pf;
 	struct hns3_pf_res_cmd *req;
 	struct hns3_cmd_desc desc;
+	uint16_t num_msi;
 	int ret;
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
@@ -2482,9 +2555,9 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 
 	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
 
-	hw->num_msi =
-	    hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
-			   HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
+	num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
+				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
 
 	return 0;
 }
@@ -4139,6 +4212,16 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_fdir;
 	}
 
+	/*
+	 * In the initialization, clearing all the hardware mapping relationship
+	 * configurations between queues and interrupt vectors is needed, so
+	 * errors caused by the residual configurations, such as the
+	 * unexpected interrupt, can be avoided.
+	 */
+	ret = hns3_init_ring_with_vector(hw);
+	if (ret)
+		goto err_fdir;
+
 	return 0;
 
 err_fdir:
@@ -4258,7 +4341,9 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+			ret = hns3_bind_ring_with_vector(hw, vec, true,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (ret)
 				goto bind_vector_error;
 			intr_handle->intr_vec[q_id] = vec;
@@ -4365,7 +4450,9 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+			(void)hns3_bind_ring_with_vector(hw, vec, false,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
 		}
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 72eabea..cdbfc70 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -354,6 +354,7 @@ struct hns3_hw {
 	uint16_t num_msi;
 	uint16_t total_tqps_num;    /* total task queue pairs of this PF */
 	uint16_t tqps_num;          /* num task queue pairs of this function */
+	uint16_t intr_tqps_num;     /* num queue pairs mapping interrupt */
 	uint16_t rss_size_max;      /* HW defined max RSS task queue */
 	uint16_t rx_buf_len;
 	uint16_t num_tx_desc;       /* desc num of per tx queue */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index feb49df..428a6d2 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -415,6 +415,96 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
 }
 
 static int
+hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
+			     bool mmap, enum hns3_ring_type queue_type,
+			     uint16_t queue_id)
+{
+	struct hns3_vf_bind_vector_msg bind_msg;
+	const char *op_str;
+	uint16_t code;
+	int ret;
+
+	memset(&bind_msg, 0, sizeof(bind_msg));
+	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+		HNS3_MBX_UNMAP_RING_TO_VECTOR;
+	bind_msg.vector_id = vector_id;
+
+	if (queue_type == HNS3_RING_TYPE_RX)
+		bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+	else
+		bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
+
+	bind_msg.param[0].ring_type = queue_type;
+	bind_msg.ring_num = 1;
+	bind_msg.param[0].tqp_index = queue_id;
+	op_str = mmap ? "Map" : "Unmap";
+	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+				sizeof(bind_msg), false, NULL, 0);
+	if (ret) {
+		hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+			 op_str, queue_id, bind_msg.vector_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+hns3vf_init_ring_with_vector(struct hns3_hw *hw)
+{
+	uint8_t vec;
+	int ret;
+	int i;
+
+	/*
+	 * In hns3 network engine, vector 0 is always the misc interrupt of this
+	 * function, vector 1~N can be used respectively for the queues of the
+	 * function. Tx and Rx queues with the same number share the interrupt
+	 * vector. In the initialization, clearing all the hardware mapping
+	 * relationship configurations between queues and interrupt vectors is
+	 * needed, so errors caused by the residual configurations, such as
+	 * the unexpected Tx interrupt, can be avoided. Because of the hardware
+	 * constraints in hns3 hardware engine, we have to implement clearing
+	 * the mapping relationship configurations by binding all queues to the
+	 * last interrupt vector and reserving the last interrupt vector. This
+	 * method results in a decrease of the maximum queues when upper
+	 * applications call the rte_eth_dev_configure API function to enable
+	 * Rx interrupt.
+	 */
+	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	for (i = 0; i < hw->intr_tqps_num; i++) {
+		/*
+		 * Set gap limiter and rate limiter configuration of queue's
+		 * interrupt.
+		 */
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+						   HNS3_RING_TYPE_TX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+
+		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+						   HNS3_RING_TYPE_RX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
 hns3vf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -548,8 +638,16 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t q_num = hw->tqps_num;
+
+	/*
+	 * In interrupt mode, 'max_rx_queues' is set based on the number of
+	 * MSI-X interrupt resources of the hardware.
+	 */
+	if (hw->data->dev_conf.intr_conf.rxq == 1)
+		q_num = hw->intr_tqps_num;
 
-	info->max_rx_queues = hw->tqps_num;
+	info->max_rx_queues = q_num;
 	info->max_tx_queues = hw->tqps_num;
 	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
 	info->min_rx_bufsize = hw->rx_buf_len;
@@ -1082,6 +1180,35 @@ hns3vf_service_handler(void *param)
 }
 
 static int
+hns3_query_vf_resource(struct hns3_hw *hw)
+{
+	struct hns3_vf_res_cmd *req;
+	struct hns3_cmd_desc desc;
+	uint16_t num_msi;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "query vf resource failed, ret = %d", ret);
+		return ret;
+	}
+
+	req = (struct hns3_vf_res_cmd *)desc.data;
+	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
+				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+	if (num_msi < HNS3_MIN_VECTOR_NUM) {
+		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
+			 num_msi, HNS3_MIN_VECTOR_NUM);
+		return -EINVAL;
+	}
+
+	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+
+	return 0;
+}
+
+static int
 hns3vf_init_hardware(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
@@ -1157,6 +1284,11 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 		goto err_cmd_init;
 	}
 
+	/* Get VF resource */
+	ret = hns3_query_vf_resource(hw);
+	if (ret)
+		goto err_cmd_init;
+
 	rte_spinlock_init(&hw->mbx_resp.lock);
 
 	hns3vf_clear_event_cause(hw, 0);
@@ -1193,6 +1325,16 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 
 	hns3_set_default_rss_args(hw);
 
+	/*
+	 * In the initialization, clearing all the hardware mapping relationship
+	 * configurations between queues and interrupt vectors is needed, so
+	 * errors caused by the residual configurations, such as the
+	 * unexpected interrupt, can be avoided.
+	 */
+	ret = hns3vf_init_ring_with_vector(hw);
+	if (ret)
+		goto err_get_config;
+
 	(void)hns3_stats_reset(eth_dev);
 	return 0;
 
@@ -1233,36 +1375,6 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
 }
 
 static int
-hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
-			     bool mmap, uint16_t queue_id)
-
-{
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct hns3_vf_bind_vector_msg bind_msg;
-	uint16_t code;
-	int ret;
-
-	memset(&bind_msg, 0, sizeof(bind_msg));
-	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
-		HNS3_MBX_UNMAP_RING_TO_VECTOR;
-	bind_msg.vector_id = vector_id;
-	bind_msg.ring_num = 1;
-	bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
-	bind_msg.param[0].tqp_index = queue_id;
-	bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
-
-	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
-				sizeof(bind_msg), false, NULL, 0);
-	if (ret) {
-		hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
-			 queue_id, vector_id, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
 hns3vf_do_stop(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
@@ -1298,7 +1410,8 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
+			(void)hns3vf_bind_ring_with_vector(hw, vec, false,
+							   HNS3_RING_TYPE_RX,
 							   q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
@@ -1464,7 +1577,8 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3vf_bind_ring_with_vector(dev, vec, true,
+			ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+							   HNS3_RING_TYPE_RX,
 							   q_id);
 			if (ret)
 				goto vf_bind_vector_error;
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 42581df..64bd693 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -83,9 +83,6 @@
 
 #define HNS3_RING_EN_B				0
 
-#define HNS3_VECTOR_REG_OFFSET			0x4
-#define HNS3_VECTOR_VF_OFFSET			0x100000
-
 #define HNS3_TQP_REG_OFFSET			0x80000
 #define HNS3_TQP_REG_SIZE			0x200
 
@@ -97,6 +94,16 @@
 #define HNS3_TQP_INTR_RL_REG			0x20900
 
 #define HNS3_TQP_INTR_REG_SIZE			4
+#define HNS3_TQP_INTR_GL_MAX			0x1FE0
+#define HNS3_TQP_INTR_GL_DEFAULT		20
+#define HNS3_TQP_INTR_RL_MAX			0xEC
+#define HNS3_TQP_INTR_RL_ENABLE_MASK		0x40
+#define HNS3_TQP_INTR_RL_DEFAULT		0
+
+/* gl_usec convert to hardware count, as writing each 1 represents 2us */
+#define HNS3_GL_USEC_TO_REG(gl_usec)		((gl_usec) >> 1)
+/* rl_usec convert to hardware count, as writing each 1 represents 4us */
+#define HNS3_RL_USEC_TO_REG(rl_usec)		((rl_usec) >> 2)
 
 int hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs);
 #endif /* _HNS3_REGS_H_ */
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 1e41b0d..edf0038 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -499,11 +499,45 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
 }
 
 void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+		       uint8_t gl_idx, uint16_t gl_value)
 {
+	uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+			     HNS3_TQP_INTR_GL1_REG,
+			     HNS3_TQP_INTR_GL2_REG};
 	uint32_t addr, value;
 
-	addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+	if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+		return;
+
+	addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	value = HNS3_GL_USEC_TO_REG(gl_value);
+
+	hns3_write_dev(hw, addr, value);
+}
+
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+	uint32_t addr, value;
+
+	if (rl_value > HNS3_TQP_INTR_RL_MAX)
+		return;
+
+	addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	value = HNS3_RL_USEC_TO_REG(rl_value);
+	if (value > 0)
+		value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+	hns3_write_dev(hw, addr, value);
+}
+
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
+{
+	uint32_t addr, value;
+
+	addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
 	value = en ? 1 : 0;
 
 	hns3_write_dev(hw, addr, value);
@@ -519,8 +553,7 @@ hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	if (dev->data->dev_conf.intr_conf.rxq == 0)
 		return -ENOTSUP;
 
-	/* enable the vectors */
-	hns3_tqp_intr_enable(hw, queue_id, true);
+	hns3_queue_intr_enable(hw, queue_id, true);
 
 	return rte_intr_ack(intr_handle);
 }
@@ -533,8 +566,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 	if (dev->data->dev_conf.intr_conf.rxq == 0)
 		return -ENOTSUP;
 
-	/* disable the vectors */
-	hns3_tqp_intr_enable(hw, queue_id, false);
+	hns3_queue_intr_enable(hw, queue_id, false);
 
 	return 0;
 }
@@ -938,7 +970,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
 	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
 		/* first time configuration */
-
 		uint32_t size;
 		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
 		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
@@ -949,7 +980,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		}
 	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
 		/* re-configure */
-
 		rxq = hw->fkq_data.rx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
 			hns3_dev_rx_queue_release(rxq[i]);
@@ -987,7 +1017,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
 	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
 		/* first time configuration */
-
 		uint32_t size;
 		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
 		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
@@ -998,7 +1027,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		}
 	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
 		/* re-configure */
-
 		txq = hw->fkq_data.tx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
 			hns3_dev_tx_queue_release(txq[i]);
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index a042c99..ba89425 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -321,7 +321,10 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
-void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
+void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+			    uint8_t gl_idx, uint16_t gl_value);
+void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
+			    uint16_t rl_value);
 int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
 				  uint16_t nb_tx_q);
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-stable] [PATCH 19.11 5/6] net/hns3: fix Rx interrupt after reset
  2020-05-29  3:57 [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Wei Hu (Xavier)
                   ` (3 preceding siblings ...)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 4/6] net/hns3: fix Tx interrupt when enabling Rx interrupt Wei Hu (Xavier)
@ 2020-05-29  3:57 ` Wei Hu (Xavier)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 6/6] net/hns3: fix MSI-X interrupt during initialization Wei Hu (Xavier)
  2020-05-29  8:47 ` [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Luca Boccassi
  6 siblings, 0 replies; 8+ messages in thread
From: Wei Hu (Xavier) @ 2020-05-29  3:57 UTC (permalink / raw)
  To: luca.boccassi; +Cc: stable, xavier.huwei

From: Chengwen Feng <fengchengwen@huawei.com>

[ upstream commit af1857b05c63caac3cb0f84deca7a43e0bcf4beb ]

Currently, Rx interrupt cannot work normally after a reset (such as
FLR, global reset or IMP reset) when running the l3fwd-power
application on the hns3 network engine.

The root cause is that the hardware configuration for Rx interrupt is
not recovered after reset.

This patch fixes it with the following modifications.
1. The internal static function named hns3(vf)_init_ring_with_vector is
   moved from hns3_init_pf to hns3(vf)_init_hardware because
   hns3(vf)_init_hardware is called both during initialization and in
   the RESET_STAGE_DEV_INIT stage of the reset process.
2. The internal static function named hns3(vf)_restore_rx_interrupt is
   added in hns3(vf)_restore_conf; it recovers the hardware
   configuration of the interrupt vectors of the Rx queues in the
   RESET_STAGE_DEV_INIT stage of the reset process.
3. Calls to hns3_dev_all_rx_queue_intr_enable and
   hns3_enable_all_queues are added in hns3(vf)_dev_start (which is
   called during initialization), so after the rte_eth_dev_start API is
   called successfully, the driver is ready to work.
4. Calls to hns3_dev_all_rx_queue_intr_enable and
   hns3_enable_all_queues are also added in hns3(vf)_start_service
   (which is called in the RESET_STAGE_DEV_INIT stage of the reset
   process), so after start_service, the driver is ready to work.

Note:
1. Because FLR clears the queue's interrupt enable bit in the hardware
   configuration, hns3_dev_all_rx_queue_intr_enable is called to enable
   the interrupts before enabling the queues.
2. After the initialization is finished, the queues can be enabled by
   calling the internal function hns3_enable_all_queues (see the sketch
   after this list).
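
A condensed sketch of the resulting start/recovery ordering; the
wrapper function is hypothetical and compresses several call sites
(hns3_restore_conf, hns3(vf)_dev_start and hns3(vf)_start_service)
into one sequence, with the individual function names taken from the
diff below:

static int
start_rxtx_after_init_or_reset(struct hns3_hw *hw)
{
	int ret;

	/* 1. Re-program the queue-to-vector mappings lost across reset. */
	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		return ret;

	/* 2. FLR clears the per-queue interrupt enable bits, so set them
	 * again before any queue starts moving packets.
	 */
	hns3_dev_all_rx_queue_intr_enable(hw, true);

	/* 3. Only now enable Rx/Tx on all queues. */
	hns3_enable_all_queues(hw, true);

	return 0;
}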

Fixes: 02a7b55657b2 ("net/hns3: support Rx interrupt")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    | 84 ++++++++++++++++++++++++++++++++-------
 drivers/net/hns3/hns3_ethdev_vf.c | 83 ++++++++++++++++++++++++++++++--------
 drivers/net/hns3/hns3_intr.c      |  2 +
 drivers/net/hns3/hns3_rxtx.c      | 27 ++++++++++++-
 drivers/net/hns3/hns3_rxtx.h      |  2 +
 5 files changed, 165 insertions(+), 33 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 35f317a..c693913 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4134,6 +4134,19 @@ hns3_init_hardware(struct hns3_adapter *hns)
 		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
 		goto err_mac_init;
 	}
+
+	/*
+	 * In the initialization, clearing all the hardware mapping relationship
+	 * configurations between queues and interrupt vectors is needed, so
+	 * errors caused by the residual configurations, such as the
+	 * unexpected interrupt, can be avoided.
+	 */
+	ret = hns3_init_ring_with_vector(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
+		goto err_mac_init;
+	}
+
 	return 0;
 
 err_mac_init:
@@ -4212,16 +4225,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_fdir;
 	}
 
-	/*
-	 * In the initialization, clearing all the hardware mapping relationship
-	 * configurations between queues and interrupt vectors is needed, so
-	 * errors caused by the residual configurations, such as the
-	 * unexpected interrupt, can be avoided.
-	 */
-	ret = hns3_init_ring_with_vector(hw);
-	if (ret)
-		goto err_fdir;
-
 	return 0;
 
 err_fdir:
@@ -4366,6 +4369,31 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static int
+hns3_restore_rx_interrupt(struct hns3_hw *hw)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint16_t q_id;
+	int ret;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+			ret = hns3_bind_ring_with_vector(hw,
+					intr_handle->intr_vec[q_id], true,
+					HNS3_RING_TYPE_RX, q_id);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void
 hns3_restore_filter(struct rte_eth_dev *dev)
 {
@@ -4393,17 +4421,30 @@ hns3_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	hw->adapter_state = HNS3_NIC_STARTED;
-	rte_spinlock_unlock(&hw->lock);
 	ret = hns3_map_rx_interrupt(dev);
-	if (ret)
+	if (ret) {
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+		rte_spinlock_unlock(&hw->lock);
 		return ret;
+	}
+
+	hw->adapter_state = HNS3_NIC_STARTED;
+	rte_spinlock_unlock(&hw->lock);
+
 	hns3_set_rxtx_function(dev);
 	hns3_mp_req_start_rxtx(dev);
 	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
 
 	hns3_restore_filter(dev);
 
+	/* Enable interrupt of all rx queues before enabling queues */
+	hns3_dev_all_rx_queue_intr_enable(hw, true);
+	/*
+	 * When the initialization is finished, enable queues to receive/transmit
+	 * packets.
+	 */
+	hns3_enable_all_queues(hw, true);
+
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
 }
@@ -4484,12 +4525,12 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
 		hns3_do_stop(hns);
+		hns3_unmap_rx_interrupt(dev);
 		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
 	rte_eal_alarm_cancel(hns3_service_handler, dev);
 	rte_spinlock_unlock(&hw->lock);
-	hns3_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -5003,9 +5044,18 @@ hns3_start_service(struct hns3_adapter *hns)
 	eth_dev = &rte_eth_devices[hw->data->port_id];
 	hns3_set_rxtx_function(eth_dev);
 	hns3_mp_req_start_rxtx(eth_dev);
-	if (hw->adapter_state == HNS3_NIC_STARTED)
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		hns3_service_handler(eth_dev);
 
+		/* Enable interrupt of all rx queues before enabling queues */
+		hns3_dev_all_rx_queue_intr_enable(hw, true);
+		/*
+		 * When the initialization is finished, enable queues to receive
+		 * and transmit packets.
+		 */
+		hns3_enable_all_queues(hw, true);
+	}
+
 	return 0;
 }
 
@@ -5039,6 +5089,10 @@ hns3_restore_conf(struct hns3_adapter *hns)
 	if (ret)
 		goto err_promisc;
 
+	ret = hns3_restore_rx_interrupt(hw);
+	if (ret)
+		goto err_promisc;
+
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
 		ret = hns3_do_start(hns, false);
 		if (ret)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 428a6d2..11d7dea 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1235,6 +1235,18 @@ hns3vf_init_hardware(struct hns3_adapter *hns)
 		goto err_init_hardware;
 	}
 
+	/*
+	 * In the initialization, clearing all the hardware mapping relationship
+	 * configurations between queues and interrupt vectors is needed, so
+	 * errors caused by the residual configurations, such as the
+	 * unexpected interrupt, can be avoided.
+	 */
+	ret = hns3vf_init_ring_with_vector(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
+		goto err_init_hardware;
+	}
+
 	ret = hns3vf_set_alive(hw, true);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
@@ -1325,16 +1337,6 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 
 	hns3_set_default_rss_args(hw);
 
-	/*
-	 * In the initialization, clearing all the hardware mapping relationship
-	 * configurations between queues and interrupt vectors is needed, so
-	 * errors caused by the residual configurations, such as the
-	 * unexpected interrupt, can be avoided.
-	 */
-	ret = hns3vf_init_ring_with_vector(hw);
-	if (ret)
-		goto err_get_config;
-
 	(void)hns3_stats_reset(eth_dev);
 	return 0;
 
@@ -1444,13 +1446,12 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
 		hns3vf_do_stop(hns);
+		hns3vf_unmap_rx_interrupt(dev);
 		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
 	rte_spinlock_unlock(&hw->lock);
-
-	hns3vf_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -1602,6 +1603,31 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static int
+hns3vf_restore_rx_interrupt(struct hns3_hw *hw)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint16_t q_id;
+	int ret;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+			ret = hns3vf_bind_ring_with_vector(hw,
+					intr_handle->intr_vec[q_id], true,
+					HNS3_RING_TYPE_RX, q_id);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void
 hns3vf_restore_filter(struct rte_eth_dev *dev)
 {
@@ -1627,17 +1653,29 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 		rte_spinlock_unlock(&hw->lock);
 		return ret;
 	}
-	hw->adapter_state = HNS3_NIC_STARTED;
-	rte_spinlock_unlock(&hw->lock);
 	ret = hns3vf_map_rx_interrupt(dev);
-	if (ret)
+	if (ret) {
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+		rte_spinlock_unlock(&hw->lock);
 		return ret;
+	}
+	hw->adapter_state = HNS3_NIC_STARTED;
+	rte_spinlock_unlock(&hw->lock);
+
 	hns3_set_rxtx_function(dev);
 	hns3_mp_req_start_rxtx(dev);
 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
 
 	hns3vf_restore_filter(dev);
 
+	/* Enable interrupt of all rx queues before enabling queues */
+	hns3_dev_all_rx_queue_intr_enable(hw, true);
+	/*
+	 * When the initialization is finished, enable queues to receive/transmit
+	 * packets.
+	 */
+	hns3_enable_all_queues(hw, true);
+
 	return ret;
 }
 
@@ -1789,9 +1827,18 @@ hns3vf_start_service(struct hns3_adapter *hns)
 	eth_dev = &rte_eth_devices[hw->data->port_id];
 	hns3_set_rxtx_function(eth_dev);
 	hns3_mp_req_start_rxtx(eth_dev);
-	if (hw->adapter_state == HNS3_NIC_STARTED)
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		hns3vf_service_handler(eth_dev);
 
+		/* Enable interrupt of all rx queues before enabling queues */
+		hns3_dev_all_rx_queue_intr_enable(hw, true);
+		/*
+		 * When the initialization is finished, enable queues to receive
+		 * and transmit packets.
+		 */
+		hns3_enable_all_queues(hw, true);
+	}
+
 	return 0;
 }
 
@@ -1813,6 +1860,10 @@ hns3vf_restore_conf(struct hns3_adapter *hns)
 	if (ret)
 		goto err_vlan_table;
 
+	ret = hns3vf_restore_rx_interrupt(hw);
+	if (ret)
+		goto err_vlan_table;
+
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		ret = hns3vf_do_start(hns, false);
 		if (ret)
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 6c3ebd3..9953a1d 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -1001,7 +1001,9 @@ hns3_reset_post(struct hns3_adapter *hns)
 		hw->reset.attempts = 0;
 		hw->reset.stats.success_cnt++;
 		hw->reset.stage = RESET_STAGE_NONE;
+		rte_spinlock_lock(&hw->lock);
 		hw->reset.ops->start_service(hns);
+		rte_spinlock_unlock(&hw->lock);
 		gettimeofday(&tv, NULL);
 		timersub(&tv, &hw->reset.start_time, &tv_delta);
 		hns3_warn(hw, "%s reset done fail_cnt:%" PRIx64
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index edf0038..a0fcb4c 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -315,7 +315,7 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
 		       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
 }
 
-static void
+void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
@@ -543,6 +543,26 @@ hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
 	hns3_write_dev(hw, addr, value);
 }
 
+/*
+ * Enable the interrupts of all Rx queues when running in Rx interrupt mode.
+ * This function is called before the queues are enabled for Rx and Tx (in
+ * both the normal start path and reset recovery), and works around the
+ * hardware clearing the Rx queue interrupt enable state during an FLR.
+ */
+void
+hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+	uint16_t nb_rx_q = hw->data->nb_rx_queues;
+	int i;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return;
+
+	for (i = 0; i < nb_rx_q; i++)
+		hns3_queue_intr_enable(hw, i, en);
+}
+
 int
 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -740,6 +760,10 @@ hns3_start_tx_queues(struct hns3_adapter *hns)
 	hns3_init_tx_ring_tc(hns);
 }
 
+/*
+ * Start all queues.
+ * Note: this only initializes and sets up the queues; it does not enable Rx/Tx.
+ */
 int
 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
 {
@@ -761,7 +785,6 @@ hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
 	}
 
 	hns3_start_tx_queues(hns);
-	hns3_enable_all_queues(hw, true);
 
 	return 0;
 }
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index ba89425..b751472 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -303,8 +303,10 @@ void hns3_dev_rx_queue_release(void *queue);
 void hns3_dev_tx_queue_release(void *queue);
 void hns3_free_all_queues(struct rte_eth_dev *dev);
 int hns3_reset_all_queues(struct hns3_adapter *hns);
+void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
 int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
 int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
 int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
 int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
 void hns3_dev_release_mbufs(struct hns3_adapter *hns);
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread
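
As a reading aid, the corrected VF start/recovery ordering that the hunks
above enforce can be condensed as follows. All names are the driver
functions patched above; the hns3vf_do_start() call and the exact error
paths are simplified assumptions of this sketch, not the applied code:

	rte_spinlock_lock(&hw->lock);
	hns3vf_do_start(hns, true);
	hns3vf_map_rx_interrupt(dev);	/* on failure: back to CONFIGURED */
	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);
	hns3vf_restore_filter(dev);

	/*
	 * An FLR clears the per-queue interrupt enable bits; restore them
	 * before any queue is allowed to receive or transmit.
	 */
	hns3_dev_all_rx_queue_intr_enable(hw, true);
	hns3_enable_all_queues(hw, true);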

* [dpdk-stable] [PATCH 19.11 6/6] net/hns3: fix MSI-X interrupt during initialization
  2020-05-29  3:57 [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Wei Hu (Xavier)
                   ` (4 preceding siblings ...)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 5/6] net/hns3: fix Rx interrupt after reset Wei Hu (Xavier)
@ 2020-05-29  3:57 ` Wei Hu (Xavier)
  2020-05-29  8:47 ` [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Luca Boccassi
  6 siblings, 0 replies; 8+ messages in thread
From: Wei Hu (Xavier) @ 2020-05-29  3:57 UTC (permalink / raw)
  To: luca.boccassi; +Cc: stable, xavier.huwei

[ upstream commit 2de74dcfbd150fde59c8e0a20ae21e22ab27605e ]

Currently, an error may occur on hns3 VF devices during initialization.

The root cause is as follows:
When the following formula is executed during initialization, the
private variable named hw->tqps_num has not yet been obtained from the
PF driver through the mailbox, which further causes a failure when
mapping interrupts to queues.
  hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
We need to use hw->tqps_num only after it has been correctly assigned.
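
For illustration (the numbers are assumed): if the firmware reports
num_msi = 5 while hw->tqps_num still reads 0 because the mailbox reply
has not arrived yet, the formula clamps hw->num_msi to 0 + 1 = 1; since
vector 0 is reserved for the misc interrupt, no vector is left for any
Rx queue and the interrupt/queue mapping fails.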

On the other hand, the private variable named hw->num_msi, which
represents the number of MSI-X interrupts of the hns3 PF/VF device, is
used in the '.get_reg' ops implementation function to dump all
interrupt-related registers. It should therefore be obtained from the
firmware directly, and the driver should not modify it.
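
A condensed view of the fixed logic (illustrative only, combining the
PF/VF hunks below):

  hw->num_msi = num_msi;            /* firmware value, kept unmodified */
  vec = hw->num_msi - 1;            /* vector 0 serves the misc interrupt */
  hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;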

Fixes: ef2e785c36cf ("net/hns3: fix Tx interrupt when enabling Rx interrupt")
Fixes: 02a7b55657b2 ("net/hns3: support Rx interrupt")
Cc: stable@dpdk.org

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    | 10 +++++-----
 drivers/net/hns3/hns3_ethdev_vf.c |  5 +++--
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index c693913..3271b61 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2150,7 +2150,8 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
 	 * Rx interrupt.
 	 */
 	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
-	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	/* vec - 1: the last interrupt is reserved */
+	hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
 	for (i = 0; i < hw->intr_tqps_num; i++) {
 		/*
 		 * Set gap limiter and rate limiter configuration of queue's
@@ -2523,7 +2524,6 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	struct hns3_pf *pf = &hns->pf;
 	struct hns3_pf_res_cmd *req;
 	struct hns3_cmd_desc desc;
-	uint16_t num_msi;
 	int ret;
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
@@ -2555,9 +2555,9 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 
 	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
 
-	num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
-				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
-	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+	hw->num_msi =
+	    hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
+			   HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
 
 	return 0;
 }
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 11d7dea..251d0ef 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -472,7 +472,8 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)
 	 * Rx interrupt.
 	 */
 	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
-	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	/* vec - 1: the last interrupt is reserved */
+	hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
 	for (i = 0; i < hw->intr_tqps_num; i++) {
 		/*
 		 * Set gap limiter and rate limiter configuration of queue's
@@ -1203,7 +1204,7 @@ hns3_query_vf_resource(struct hns3_hw *hw)
 		return -EINVAL;
 	}
 
-	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+	hw->num_msi = num_msi;
 
 	return 0;
 }
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread
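
For completeness, here is a minimal, illustrative consumer of the Rx
interrupt mode this series completes for hns3. The calls are standard
DPDK ethdev/EAL API (see DPDK's l3fwd-power example for a full-featured
version); the loop shape, the burst size of 32, and the omitted setup
and error handling are assumptions of this sketch. The port is assumed
to have been configured with intr_conf.rxq = 1.

	#include <rte_ethdev.h>
	#include <rte_interrupts.h>
	#include <rte_mbuf.h>

	#define BURST_SIZE 32

	/* Poll a queue; when it goes idle, arm its interrupt and sleep. */
	static void
	rx_intr_loop(uint16_t port_id, uint16_t queue_id)
	{
		struct rte_epoll_event event;
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t nb_rx, i;

		/* Attach the queue interrupt to this thread's epoll fd. */
		rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
					  RTE_EPOLL_PER_THREAD,
					  RTE_INTR_EVENT_ADD, NULL);

		for (;;) {
			nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts,
						 BURST_SIZE);
			if (nb_rx > 0) {
				/* A real application processes here. */
				for (i = 0; i < nb_rx; i++)
					rte_pktmbuf_free(pkts[i]);
				continue;
			}

			/* Idle: enable the interrupt, block until it fires. */
			rte_eth_dev_rx_intr_enable(port_id, queue_id);
			rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
			rte_eth_dev_rx_intr_disable(port_id, queue_id);
		}
	}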

* Re: [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver
  2020-05-29  3:57 [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Wei Hu (Xavier)
                   ` (5 preceding siblings ...)
  2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 6/6] net/hns3: fix MSI-X interrupt during initialization Wei Hu (Xavier)
@ 2020-05-29  8:47 ` Luca Boccassi
  6 siblings, 0 replies; 8+ messages in thread
From: Luca Boccassi @ 2020-05-29  8:47 UTC (permalink / raw)
  To: Wei Hu (Xavier); +Cc: stable

On Fri, 2020-05-29 at 11:57 +0800, Wei Hu (Xavier) wrote:
> This series are backport patches to DPDK 19.11.3 for hns3 PMD driver.
> 
> Chengwen Feng (1):
>   net/hns3: fix Rx interrupt after reset
> 
> Hao Chen (1):
>   net/hns3: support Rx interrupt
> 
> Lijun Ou (1):
>   net/hns3: fix RSS indirection table configuration
> 
> Wei Hu (Xavier) (3):
>   net/hns3: support different numbers of Rx and Tx queues
>   net/hns3: fix Tx interrupt when enabling Rx interrupt
>   net/hns3: fix MSI-X interrupt during initialization
> 
>  doc/guides/nics/features/hns3.ini    |   1 +
>  doc/guides/nics/features/hns3_vf.ini |   1 +
>  doc/guides/nics/hns3.rst             |   1 +
>  drivers/net/hns3/hns3_cmd.h          |  49 ++-
>  drivers/net/hns3/hns3_dcb.c          | 103 +++--
>  drivers/net/hns3/hns3_dcb.h          |   4 +-
>  drivers/net/hns3/hns3_ethdev.c       | 363 +++++++++++++++--
>  drivers/net/hns3/hns3_ethdev.h       |  17 +-
>  drivers/net/hns3/hns3_ethdev_vf.c    | 398 ++++++++++++++++--
>  drivers/net/hns3/hns3_flow.c         |  22 +-
>  drivers/net/hns3/hns3_intr.c         |   2 +
>  drivers/net/hns3/hns3_mbx.h          |  13 +
>  drivers/net/hns3/hns3_regs.h         |  10 +
>  drivers/net/hns3/hns3_rss.c          |  28 +-
>  drivers/net/hns3/hns3_rss.h          |   2 +
>  drivers/net/hns3/hns3_rxtx.c         | 769 ++++++++++++++++++++++++++++++-----
>  drivers/net/hns3/hns3_rxtx.h         |  19 +
>  17 files changed, 1578 insertions(+), 224 deletions(-)

Series-acked-by: Luca Boccassi <bluca@debian.org>

Thanks, applied

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2020-05-29  8:47 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-29  3:57 [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Wei Hu (Xavier)
2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 1/6] net/hns3: support Rx interrupt Wei Hu (Xavier)
2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 2/6] net/hns3: support different numbers of Rx and Tx queues Wei Hu (Xavier)
2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 3/6] net/hns3: fix RSS indirection table configuration Wei Hu (Xavier)
2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 4/6] net/hns3: fix Tx interrupt when enabling Rx interrupt Wei Hu (Xavier)
2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 5/6] net/hns3: fix Rx interrupt after reset Wei Hu (Xavier)
2020-05-29  3:57 ` [dpdk-stable] [PATCH 19.11 6/6] net/hns3: fix MSI-X interrupt during initialization Wei Hu (Xavier)
2020-05-29  8:47 ` [dpdk-stable] [PATCH 19.11 0/6] backport for hns3 PMD driver Luca Boccassi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).