From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
To: <luca.boccassi@gmail.com>
Cc: <stable@dpdk.org>, <xavier.huwei@huawei.com>
Subject: [dpdk-stable] [PATCH 19.11 4/6] net/hns3: fix Tx interrupt when enabling Rx interrupt
Date: Fri, 29 May 2020 11:57:32 +0800
Message-ID: <1590724654-23075-5-git-send-email-xavier.huwei@huawei.com>
In-Reply-To: <1590724654-23075-1-git-send-email-xavier.huwei@huawei.com>

[ upstream commit 26aff86b694ea4b143cd61a08c910986adcc7669 ]

Currently, when receiving and transmitting packets on the hns3 network
engine, unexpected and redundant Tx interrupts may occur if Rx interrupt
is enabled.

The root cause is as follows:
In the hns3 network engine, Tx and Rx queues with the same number share
an interrupt vector, and residual hardware mapping configurations between
queues and interrupt vectors, set up by the hns3 kernel ethdev driver,
may remain.

All hardware queue-to-vector mapping configurations should therefore be
cleared during initialization. Because of hardware constraints, the
mappings are cleared by binding all queues to the last interrupt vector
and reserving that vector. This method reduces the maximum number of
queues available when upper-layer applications call the
rte_eth_dev_configure API function to enable Rx interrupt.
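
For illustration only (not part of this patch), a minimal sketch of how an
upper application exercises this path, assuming a valid port id, a single
queue pair and trivial error handling; the helper name is made up for the
example:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    enable_rx_interrupt_mode(uint16_t port_id)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_conf conf = { 0 };
            int ret;

            /* Request per-queue Rx interrupts when configuring the port. */
            conf.intr_conf.rxq = 1;

            ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
            if (ret != 0)
                    return ret;

            /*
             * Once Rx interrupt mode is configured, hns3 reports
             * max_rx_queues based on its MSI-X vectors (minus the
             * reserved last vector), so re-read the limit before
             * requesting more Rx queues.
             */
            ret = rte_eth_dev_info_get(port_id, &dev_info);
            if (ret != 0)
                    return ret;

            printf("usable Rx queues in interrupt mode: %u\n",
                   (unsigned int)dev_info.max_rx_queues);

            return 0;
    }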

Fixes: 02a7b55657b2 ("net/hns3: support Rx interrupt")
Cc: stable@dpdk.org

Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h       |  31 +++++--
 drivers/net/hns3/hns3_ethdev.c    | 117 +++++++++++++++++++++----
 drivers/net/hns3/hns3_ethdev.h    |   1 +
 drivers/net/hns3/hns3_ethdev_vf.c | 180 +++++++++++++++++++++++++++++++-------
 drivers/net/hns3/hns3_regs.h      |  13 ++-
 drivers/net/hns3/hns3_rxtx.c      |  48 +++++++---
 drivers/net/hns3/hns3_rxtx.h      |   5 +-
 7 files changed, 326 insertions(+), 69 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 897dc14..26d4103 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -79,6 +79,7 @@ enum hns3_opcode_type {
 	HNS3_OPC_GBL_RST_STATUS         = 0x0021,
 	HNS3_OPC_QUERY_FUNC_STATUS      = 0x0022,
 	HNS3_OPC_QUERY_PF_RSRC          = 0x0023,
+	HNS3_OPC_QUERY_VF_RSRC          = 0x0024,
 	HNS3_OPC_GET_CFG_PARAM          = 0x0025,
 	HNS3_OPC_PF_RST_DONE            = 0x0026,
 
@@ -337,8 +338,9 @@ struct hns3_func_status_cmd {
 	uint8_t rsv[2];
 };
 
-#define HNS3_PF_VEC_NUM_S		0
-#define HNS3_PF_VEC_NUM_M		GENMASK(7, 0)
+#define HNS3_VEC_NUM_S		0
+#define HNS3_VEC_NUM_M		GENMASK(7, 0)
+#define HNS3_MIN_VECTOR_NUM	2 /* one for msi-x, another for IO */
 struct hns3_pf_res_cmd {
 	uint16_t tqp_num;
 	uint16_t buf_size;
@@ -351,6 +353,15 @@ struct hns3_pf_res_cmd {
 	uint32_t rsv[2];
 };
 
+struct hns3_vf_res_cmd {
+	uint16_t tqp_num;
+	uint16_t reserved;
+	uint16_t msixcap_localid_ba_nic;
+	uint16_t msixcap_localid_ba_rocee;
+	uint16_t vf_intr_vector_number;
+	uint16_t rsv[7];
+};
+
 #define HNS3_UMV_SPC_ALC_B	0
 struct hns3_umv_spc_alc_cmd {
 	uint8_t allocate;
@@ -677,13 +688,19 @@ struct hns3_tqp_map_cmd {
 	uint8_t rsv[18];
 };
 
-#define HNS3_RING_TYPE_B	0
-#define HNS3_RING_TYPE_TX	0
-#define HNS3_RING_TYPE_RX	1
+enum hns3_ring_type {
+	HNS3_RING_TYPE_TX,
+	HNS3_RING_TYPE_RX
+};
+
+enum hns3_int_gl_idx {
+	HNS3_RING_GL_RX,
+	HNS3_RING_GL_TX,
+	HNS3_RING_GL_IMMEDIATE = 3
+};
+
 #define HNS3_RING_GL_IDX_S	0
 #define HNS3_RING_GL_IDX_M	GENMASK(1, 0)
-#define HNS3_RING_GL_RX		0
-#define HNS3_RING_GL_TX		1
 
 #define HNS3_VECTOR_ELEMENTS_PER_CMD	10
 
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index f70f43f..35f317a 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2085,34 +2085,98 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
 }
 
 static int
-hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
-			   bool mmap, uint16_t queue_id)
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+			   enum hns3_ring_type queue_type, uint16_t queue_id)
 {
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_cmd_desc desc;
 	struct hns3_ctrl_vector_chain_cmd *req =
 		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
 	enum hns3_cmd_status status;
 	enum hns3_opcode_type op;
 	uint16_t tqp_type_and_id = 0;
+	const char *op_str;
+	uint16_t type;
+	uint16_t gl;
 
 	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
 	hns3_cmd_setup_basic_desc(&desc, op, false);
 	req->int_vector_id = vector_id;
 
+	if (queue_type == HNS3_RING_TYPE_RX)
+		gl = HNS3_RING_GL_RX;
+	else
+		gl = HNS3_RING_GL_TX;
+
+	type = queue_type;
+
 	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
-		       HNS3_RING_TYPE_RX);
+		       type);
 	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
 	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
-		       HNS3_RING_GL_RX);
+		       gl);
 	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
-
 	req->int_cause_num = 1;
+	op_str = mmap ? "Map" : "Unmap";
 	status = hns3_cmd_send(hw, &desc, 1);
 	if (status) {
-		hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
-			 queue_id, vector_id, status);
-		return -EIO;
+		hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+			 op_str, queue_id, req->int_vector_id, status);
+		return status;
+	}
+
+	return 0;
+}
+
+static int
+hns3_init_ring_with_vector(struct hns3_hw *hw)
+{
+	uint8_t vec;
+	int ret;
+	int i;
+
+	/*
+	 * In hns3 network engine, vector 0 is always the misc interrupt of this
+	 * function, vector 1~N can be used respectively for the queues of the
+	 * function. Tx and Rx queues with the same number share the interrupt
+	 * vector. In the initialization clearing the all hardware mapping
+	 * relationship configurations between queues and interrupt vectors is
+	 * needed, so some error caused by the residual configurations, such as
+	 * the unexpected Tx interrupt, can be avoid. Because of the hardware
+	 * constraints in hns3 hardware engine, we have to implement clearing
+	 * the mapping relationship configurations by binding all queues to the
+	 * last interrupt vector and reserving the last interrupt vector. This
+	 * method results in a decrease of the maximum queues when upper
+	 * applications call the rte_eth_dev_configure API function to enable
+	 * Rx interrupt.
+	 */
+	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	for (i = 0; i < hw->intr_tqps_num; i++) {
+		/*
+		 * Set gap limiter and rate limiter configuration of queue's
+		 * interrupt.
+		 */
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_TX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_RX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -2289,8 +2353,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t queue_num = hw->tqps_num;
 
-	info->max_rx_queues = hw->tqps_num;
+	/*
+	 * In interrupt mode, 'max_rx_queues' is set based on the number of
+	 * MSI-X interrupt resources of the hardware.
+	 */
+	if (hw->data->dev_conf.intr_conf.rxq == 1)
+		queue_num = hw->intr_tqps_num;
+
+	info->max_rx_queues = queue_num;
 	info->max_tx_queues = hw->tqps_num;
 	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
 	info->min_rx_bufsize = hw->rx_buf_len;
@@ -2451,6 +2523,7 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	struct hns3_pf *pf = &hns->pf;
 	struct hns3_pf_res_cmd *req;
 	struct hns3_cmd_desc desc;
+	uint16_t num_msi;
 	int ret;
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
@@ -2482,9 +2555,9 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 
 	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
 
-	hw->num_msi =
-	    hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
-			   HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
+	num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
+				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
 
 	return 0;
 }
@@ -4139,6 +4212,16 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_fdir;
 	}
 
+	/*
+	 * In the initialization clearing the all hardware mapping relationship
+	 * configurations between queues and interrupt vectors is needed, so
+	 * some error caused by the residual configurations, such as the
+	 * unexpected interrupt, can be avoid.
+	 */
+	ret = hns3_init_ring_with_vector(hw);
+	if (ret)
+		goto err_fdir;
+
 	return 0;
 
 err_fdir:
@@ -4258,7 +4341,9 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+			ret = hns3_bind_ring_with_vector(hw, vec, true,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (ret)
 				goto bind_vector_error;
 			intr_handle->intr_vec[q_id] = vec;
@@ -4365,7 +4450,9 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+			(void)hns3_bind_ring_with_vector(hw, vec, false,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
 		}
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 72eabea..cdbfc70 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -354,6 +354,7 @@ struct hns3_hw {
 	uint16_t num_msi;
 	uint16_t total_tqps_num;    /* total task queue pairs of this PF */
 	uint16_t tqps_num;          /* num task queue pairs of this function */
+	uint16_t intr_tqps_num;     /* num queue pairs mapping interrupt */
 	uint16_t rss_size_max;      /* HW defined max RSS task queue */
 	uint16_t rx_buf_len;
 	uint16_t num_tx_desc;       /* desc num of per tx queue */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index feb49df..428a6d2 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -415,6 +415,96 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
 }
 
 static int
+hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
+			     bool mmap, enum hns3_ring_type queue_type,
+			     uint16_t queue_id)
+{
+	struct hns3_vf_bind_vector_msg bind_msg;
+	const char *op_str;
+	uint16_t code;
+	int ret;
+
+	memset(&bind_msg, 0, sizeof(bind_msg));
+	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+		HNS3_MBX_UNMAP_RING_TO_VECTOR;
+	bind_msg.vector_id = vector_id;
+
+	if (queue_type == HNS3_RING_TYPE_RX)
+		bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+	else
+		bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
+
+	bind_msg.param[0].ring_type = queue_type;
+	bind_msg.ring_num = 1;
+	bind_msg.param[0].tqp_index = queue_id;
+	op_str = mmap ? "Map" : "Unmap";
+	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+				sizeof(bind_msg), false, NULL, 0);
+	if (ret) {
+		hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+			 op_str, queue_id, bind_msg.vector_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+hns3vf_init_ring_with_vector(struct hns3_hw *hw)
+{
+	uint8_t vec;
+	int ret;
+	int i;
+
+	/*
+	 * In hns3 network engine, vector 0 is always the misc interrupt of this
+	 * function, vector 1~N can be used respectively for the queues of the
+	 * function. Tx and Rx queues with the same number share the interrupt
+	 * vector. In the initialization clearing the all hardware mapping
+	 * relationship configurations between queues and interrupt vectors is
+	 * needed, so some error caused by the residual configurations, such as
+	 * the unexpected Tx interrupt, can be avoid. Because of the hardware
+	 * constraints in hns3 hardware engine, we have to implement clearing
+	 * the mapping relationship configurations by binding all queues to the
+	 * last interrupt vector and reserving the last interrupt vector. This
+	 * method results in a decrease of the maximum queues when upper
+	 * applications call the rte_eth_dev_configure API function to enable
+	 * Rx interrupt.
+	 */
+	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	for (i = 0; i < hw->intr_tqps_num; i++) {
+		/*
+		 * Set gap limiter and rate limiter configuration of queue's
+		 * interrupt.
+		 */
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+						   HNS3_RING_TYPE_TX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+
+		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+						   HNS3_RING_TYPE_RX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
 hns3vf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -548,8 +638,16 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t q_num = hw->tqps_num;
+
+	/*
+	 * In interrupt mode, 'max_rx_queues' is set based on the number of
+	 * MSI-X interrupt resources of the hardware.
+	 */
+	if (hw->data->dev_conf.intr_conf.rxq == 1)
+		q_num = hw->intr_tqps_num;
 
-	info->max_rx_queues = hw->tqps_num;
+	info->max_rx_queues = q_num;
 	info->max_tx_queues = hw->tqps_num;
 	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
 	info->min_rx_bufsize = hw->rx_buf_len;
@@ -1082,6 +1180,35 @@ hns3vf_service_handler(void *param)
 }
 
 static int
+hns3_query_vf_resource(struct hns3_hw *hw)
+{
+	struct hns3_vf_res_cmd *req;
+	struct hns3_cmd_desc desc;
+	uint16_t num_msi;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "query vf resource failed, ret = %d", ret);
+		return ret;
+	}
+
+	req = (struct hns3_vf_res_cmd *)desc.data;
+	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
+				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+	if (num_msi < HNS3_MIN_VECTOR_NUM) {
+		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
+			 num_msi, HNS3_MIN_VECTOR_NUM);
+		return -EINVAL;
+	}
+
+	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+
+	return 0;
+}
+
+static int
 hns3vf_init_hardware(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
@@ -1157,6 +1284,11 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 		goto err_cmd_init;
 	}
 
+	/* Get VF resource */
+	ret = hns3_query_vf_resource(hw);
+	if (ret)
+		goto err_cmd_init;
+
 	rte_spinlock_init(&hw->mbx_resp.lock);
 
 	hns3vf_clear_event_cause(hw, 0);
@@ -1193,6 +1325,16 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 
 	hns3_set_default_rss_args(hw);
 
+	/*
+	 * In the initialization clearing the all hardware mapping relationship
+	 * configurations between queues and interrupt vectors is needed, so
+	 * some error caused by the residual configurations, such as the
+	 * unexpected interrupt, can be avoid.
+	 */
+	ret = hns3vf_init_ring_with_vector(hw);
+	if (ret)
+		goto err_get_config;
+
 	(void)hns3_stats_reset(eth_dev);
 	return 0;
 
@@ -1233,36 +1375,6 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
 }
 
 static int
-hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
-			     bool mmap, uint16_t queue_id)
-
-{
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct hns3_vf_bind_vector_msg bind_msg;
-	uint16_t code;
-	int ret;
-
-	memset(&bind_msg, 0, sizeof(bind_msg));
-	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
-		HNS3_MBX_UNMAP_RING_TO_VECTOR;
-	bind_msg.vector_id = vector_id;
-	bind_msg.ring_num = 1;
-	bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
-	bind_msg.param[0].tqp_index = queue_id;
-	bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
-
-	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
-				sizeof(bind_msg), false, NULL, 0);
-	if (ret) {
-		hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
-			 queue_id, vector_id, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
 hns3vf_do_stop(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
@@ -1298,7 +1410,8 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
+			(void)hns3vf_bind_ring_with_vector(hw, vec, false,
+							   HNS3_RING_TYPE_RX,
 							   q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
@@ -1464,7 +1577,8 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3vf_bind_ring_with_vector(dev, vec, true,
+			ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+							   HNS3_RING_TYPE_RX,
 							   q_id);
 			if (ret)
 				goto vf_bind_vector_error;
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 42581df..64bd693 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -83,9 +83,6 @@
 
 #define HNS3_RING_EN_B				0
 
-#define HNS3_VECTOR_REG_OFFSET			0x4
-#define HNS3_VECTOR_VF_OFFSET			0x100000
-
 #define HNS3_TQP_REG_OFFSET			0x80000
 #define HNS3_TQP_REG_SIZE			0x200
 
@@ -97,6 +94,16 @@
 #define HNS3_TQP_INTR_RL_REG			0x20900
 
 #define HNS3_TQP_INTR_REG_SIZE			4
+#define HNS3_TQP_INTR_GL_MAX			0x1FE0
+#define HNS3_TQP_INTR_GL_DEFAULT		20
+#define HNS3_TQP_INTR_RL_MAX			0xEC
+#define HNS3_TQP_INTR_RL_ENABLE_MASK		0x40
+#define HNS3_TQP_INTR_RL_DEFAULT		0
+
+/* gl_usec convert to hardware count, as writing each 1 represents 2us */
+#define HNS3_GL_USEC_TO_REG(gl_usec)		((gl_usec) >> 1)
+/* rl_usec convert to hardware count, as writing each 1 represents 4us */
+#define HNS3_RL_USEC_TO_REG(rl_usec)		((rl_usec) >> 2)
 
 int hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs);
 #endif /* _HNS3_REGS_H_ */
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 1e41b0d..edf0038 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -499,11 +499,45 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
 }
 
 void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+		       uint8_t gl_idx, uint16_t gl_value)
 {
+	uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+			     HNS3_TQP_INTR_GL1_REG,
+			     HNS3_TQP_INTR_GL2_REG};
 	uint32_t addr, value;
 
-	addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+	if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+		return;
+
+	addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	value = HNS3_GL_USEC_TO_REG(gl_value);
+
+	hns3_write_dev(hw, addr, value);
+}
+
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+	uint32_t addr, value;
+
+	if (rl_value > HNS3_TQP_INTR_RL_MAX)
+		return;
+
+	addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	value = HNS3_RL_USEC_TO_REG(rl_value);
+	if (value > 0)
+		value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+	hns3_write_dev(hw, addr, value);
+}
+
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
+{
+	uint32_t addr, value;
+
+	addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
 	value = en ? 1 : 0;
 
 	hns3_write_dev(hw, addr, value);
@@ -519,8 +553,7 @@ hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	if (dev->data->dev_conf.intr_conf.rxq == 0)
 		return -ENOTSUP;
 
-	/* enable the vectors */
-	hns3_tqp_intr_enable(hw, queue_id, true);
+	hns3_queue_intr_enable(hw, queue_id, true);
 
 	return rte_intr_ack(intr_handle);
 }
@@ -533,8 +566,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 	if (dev->data->dev_conf.intr_conf.rxq == 0)
 		return -ENOTSUP;
 
-	/* disable the vectors */
-	hns3_tqp_intr_enable(hw, queue_id, false);
+	hns3_queue_intr_enable(hw, queue_id, false);
 
 	return 0;
 }
@@ -938,7 +970,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
 	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
 		/* first time configuration */
-
 		uint32_t size;
 		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
 		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
@@ -949,7 +980,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		}
 	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
 		/* re-configure */
-
 		rxq = hw->fkq_data.rx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
 			hns3_dev_rx_queue_release(rxq[i]);
@@ -987,7 +1017,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
 	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
 		/* first time configuration */
-
 		uint32_t size;
 		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
 		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
@@ -998,7 +1027,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		}
 	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
 		/* re-configure */
-
 		txq = hw->fkq_data.tx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
 			hns3_dev_tx_queue_release(txq[i]);
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index a042c99..ba89425 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -321,7 +321,10 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
-void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
+void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+			    uint8_t gl_idx, uint16_t gl_value);
+void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
+			    uint16_t rl_value);
 int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
 				  uint16_t nb_tx_q);
 
-- 
2.7.4

