patches for DPDK stable branches
 help / color / mirror / Atom feed
* [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox
@ 2021-05-17 13:50 Min Hu (Connor)
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process Min Hu (Connor)
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Min Hu (Connor) @ 2021-05-17 13:50 UTC (permalink / raw)
  To: stable; +Cc: ferruh.yigit, xuemingl

From: Chengwen Feng <fengchengwen@huawei.com>

[ upstream commit c8dec72de00039f367c1a8cd1f40378d04cf3e8f ]

Currently, the mailbox synchronous communication between VF and PF use
the following fields to maintain communication:
1. Req_msg_data which was combined by message code and subcode, used to
   match request and response.
2. Head which means the number of requests successfully sent by VF.
3. Tail which means the number of responses successfully received by VF.
4. Lost which means the number of requests which are timeout.

There may be a possible mismatch in the following situation:
1. VF sends message A with code=1 subcode=1.
	Then head=1, tail=0, lost=0.
2. PF was blocked about 500ms when processing the message A.
3. VF will detect message A timeout because it can't get the response
within 500ms.
	Then head=1, tail=0, lost=1.
4. VF sends message B with code=1 subcode=1 which equal message A.
	Then head=2, tail=0, lost=1.
5. PF processes the first message A and send the response message to VF.
6. VF will update tail field to 1, but the lost field will remain
   unchanged because the code/subcode equal message B's, so driver will
   return success because now the head(2) equals tail(1) plus lost(1).
   This will lead to mismatch of request and response.

To fix the above bug, we use the following scheme:
1. The message sent from VF was labelled with match_id which was a
   unique 16-bit non-zero value.
2. The response sent from PF will label with match_id which got from the
   request.
3. The VF uses the match_id to match request and response message.

This scheme depends on the PF driver; if the PF driver doesn't support
it, then the VF will use the original scheme.

Fixes: 463e748964f5 ("net/hns3: support mailbox")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_mbx.c | 120 +++++++++++++++++++++++++++++++++++---------
 drivers/net/hns3/hns3_mbx.h |  20 +++++++-
 2 files changed, 114 insertions(+), 26 deletions(-)

diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 975a60b..79ac16a 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -40,14 +40,32 @@ hns3_resp_to_errno(uint16_t resp_code)
 	return -EIO;
 }
 
+static void
+hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
+{
+	if (hw->mbx_resp.matching_scheme ==
+	    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) {
+		hw->mbx_resp.lost++;
+		hns3_err(hw,
+			 "VF could not get mbx(%u,%u) head(%u) tail(%u) "
+			 "lost(%u) from PF",
+			 code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail,
+			 hw->mbx_resp.lost);
+		return;
+	}
+
+	hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode);
+}
+
 static int
-hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
+hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 		  uint8_t *resp_data, uint16_t resp_len)
 {
 #define HNS3_MAX_RETRY_MS	500
 #define HNS3_WAIT_RESP_US	100
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_mbx_resp_status *mbx_resp;
+	bool received;
 	uint64_t now;
 	uint64_t end;
 
@@ -59,8 +77,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
 
 	now = get_timeofday_ms();
 	end = now + HNS3_MAX_RETRY_MS;
-	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
-	       (now < end)) {
+	while (now < end) {
 		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
 			hns3_err(hw, "Don't wait for mbx respone because of "
 				 "disable_cmd");
@@ -77,16 +94,20 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
 		hns3_dev_handle_mbx_msg(hw);
 		rte_delay_us(HNS3_WAIT_RESP_US);
 
+		if (hw->mbx_resp.matching_scheme ==
+		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL)
+			received = (hw->mbx_resp.head ==
+				    hw->mbx_resp.tail + hw->mbx_resp.lost);
+		else
+			received = hw->mbx_resp.received_match_resp;
+		if (received)
+			break;
+
 		now = get_timeofday_ms();
 	}
 	hw->mbx_resp.req_msg_data = 0;
 	if (now >= end) {
-		hw->mbx_resp.lost++;
-		hns3_err(hw,
-			 "VF could not get mbx(%u,%u) head(%u) tail(%u) "
-			 "lost(%u) from PF",
-			 code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
-			 hw->mbx_resp.lost);
+		hns3_mbx_proc_timeout(hw, code, subcode);
 		return -ETIME;
 	}
 	rte_io_rmb();
@@ -101,6 +122,29 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
 	return 0;
 }
 
+static void
+hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
+{
+	/*
+	 * Init both matching scheme fields because we may not know the exact
+	 * scheme will be used when in the initial phase.
+	 *
+	 * Also, there are OK to init both matching scheme fields even though
+	 * we get the exact scheme which is used.
+	 */
+	hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
+	hw->mbx_resp.head++;
+
+	/* Update match_id and ensure the value of match_id is not zero */
+	hw->mbx_resp.match_id++;
+	if (hw->mbx_resp.match_id == 0)
+		hw->mbx_resp.match_id = 1;
+	hw->mbx_resp.received_match_resp = false;
+
+	hw->mbx_resp.resp_status = 0;
+	memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
+}
+
 int
 hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
@@ -138,8 +182,8 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 	if (need_resp) {
 		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
 		rte_spinlock_lock(&hw->mbx_resp.lock);
-		hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
-		hw->mbx_resp.head++;
+		hns3_mbx_prepare_resp(hw, code, subcode);
+		req->match_id = hw->mbx_resp.match_id;
 		ret = hns3_cmd_send(hw, &desc, 1);
 		if (ret) {
 			rte_spinlock_unlock(&hw->mbx_resp.lock);
@@ -251,6 +295,46 @@ hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
 }
 
 static void
+hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
+{
+	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
+	uint32_t msg_data;
+
+	if (req->match_id != 0) {
+		/*
+		 * If match_id is not zero, it means PF support copy request's
+		 * match_id to its response. So VF could use the match_id
+		 * to match the request.
+		 */
+		if (resp->matching_scheme !=
+		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
+			resp->matching_scheme =
+				HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
+			hns3_info(hw, "detect mailbox support match id!");
+		}
+		if (req->match_id == resp->match_id) {
+			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
+			memcpy(resp->additional_info, &req->msg[4],
+			       HNS3_MBX_MAX_RESP_DATA_SIZE);
+			rte_io_wmb();
+			resp->received_match_resp = true;
+		}
+		return;
+	}
+
+	/*
+	 * If the below instructions can be executed, it means PF does not
+	 * support copy request's match_id to its response. So VF follows the
+	 * original scheme to process.
+	 */
+	resp->resp_status = hns3_resp_to_errno(req->msg[3]);
+	memcpy(resp->additional_info, &req->msg[4],
+	       HNS3_MBX_MAX_RESP_DATA_SIZE);
+	msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
+	hns3_update_resp_position(hw, msg_data);
+}
+
+static void
 hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
 {
 	switch (link_fail_code) {
@@ -326,16 +410,12 @@ hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
 void
 hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 {
-	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
 	struct hns3_cmq_ring *crq = &hw->cmq.crq;
 	struct hns3_mbx_pf_to_vf_cmd *req;
 	struct hns3_cmd_desc *desc;
-	uint32_t msg_data;
 	uint16_t *msg_q;
 	uint8_t opcode;
 	uint16_t flag;
-	uint8_t *temp;
-	int i;
 	rte_spinlock_lock(&hw->cmq.crq.lock);
 
 	while (!hns3_cmd_crq_empty(hw)) {
@@ -361,15 +441,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 
 		switch (opcode) {
 		case HNS3_MBX_PF_VF_RESP:
-			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
-
-			temp = (uint8_t *)&req->msg[4];
-			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
-				resp->additional_info[i] = *temp;
-				temp++;
-			}
-			msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
-			hns3_update_resp_position(hw, msg_data);
+			hns3_handle_mbx_response(hw, req);
 			break;
 		case HNS3_MBX_LINK_STAT_CHANGE:
 		case HNS3_MBX_ASSERTING_RESET:
diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h
index 7f7ade1..8a9d7ba 100644
--- a/drivers/net/hns3/hns3_mbx.h
+++ b/drivers/net/hns3/hns3_mbx.h
@@ -83,12 +83,26 @@ enum hns3_mbx_link_fail_subcode {
 #define HNS3_MBX_RING_MAP_BASIC_MSG_NUM	3
 #define HNS3_MBX_RING_NODE_VARIABLE_NUM	3
 
+enum {
+	HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL = 0,
+	HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID
+};
+
 struct hns3_mbx_resp_status {
 	rte_spinlock_t lock; /* protects against contending sync cmd resp */
+
+	uint8_t matching_scheme;
+
+	/* The following fields used in the matching scheme for original */
 	uint32_t req_msg_data;
 	uint32_t head;
 	uint32_t tail;
 	uint32_t lost;
+
+	/* The following fields used in the matching scheme for match_id */
+	uint16_t match_id;
+	bool received_match_resp;
+
 	int resp_status;
 	uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE];
 };
@@ -106,7 +120,8 @@ struct hns3_mbx_vf_to_pf_cmd {
 	uint8_t mbx_need_resp;
 	uint8_t rsv1;
 	uint8_t msg_len;
-	uint8_t rsv2[3];
+	uint8_t rsv2;
+	uint16_t match_id;
 	uint8_t msg[HNS3_MBX_MAX_MSG_SIZE];
 };
 
@@ -114,7 +129,8 @@ struct hns3_mbx_pf_to_vf_cmd {
 	uint8_t dest_vfid;
 	uint8_t rsv[3];
 	uint8_t msg_len;
-	uint8_t rsv1[3];
+	uint8_t rsv1;
+	uint16_t match_id;
 	uint16_t msg[8];
 };
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-stable] [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process
  2021-05-17 13:50 [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Min Hu (Connor)
@ 2021-05-17 13:50 ` Min Hu (Connor)
  2021-05-18  4:15   ` Xueming(Steven) Li
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 3/4] net/hns3: fix timing in mailbox Min Hu (Connor)
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Min Hu (Connor) @ 2021-05-17 13:50 UTC (permalink / raw)
  To: stable; +Cc: ferruh.yigit, xuemingl

From: Chengwen Feng <fengchengwen@huawei.com>

[ upstream commit dbbbad23e380773b37872df2195c4529fd93ca6f ]

VF will build two queues (csq: command send queue, crq: command receive
queue) with firmware, the crq may contain the following messages:
1) mailbox response message which was the ack of mailbox sync request.
2) PF's link status change message, which may be sent by the PF at any time;

Currently, any thread in the primary and secondary processes could
send a mailbox sync request, so it will need to process the crq messages
in its own thread context.

If the crq hold two messages: a) PF's link status change message, b)
mailbox response message when secondary process deals with the crq
messages, it will lead to report lsc event in secondary process
because it uses the policy of processing all pending messages at once.

We use the following scheme to solve it:
1) threads in secondary process could only process specifics messages
   (eg. mailbox response message) in crq, if the message processed, its
   opcode will rewrite with zero, then the intr thread in primary
   process will not process again.
2) threads other than intr thread in the primary process use the same
   processing logic as the threads in secondary process.
3) intr thread in the primary process could process all messages.

Fixes: 76a3836b98c4 ("net/hns3: fix setting default MAC address in bonding of VF")
Fixes: 463e748964f5 ("net/hns3: support mailbox")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_mbx.c | 68 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)

diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 79ac16a..11e5235 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -407,6 +407,44 @@ hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
 	}
 }
 
+static void
+hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
+{
+	struct hns3_cmq_ring *crq = &hw->cmq.crq;
+	struct hns3_mbx_pf_to_vf_cmd *req;
+	struct hns3_cmd_desc *desc;
+	uint32_t tail, next_to_use;
+	uint8_t opcode;
+	uint16_t flag;
+
+	tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
+	next_to_use = crq->next_to_use;
+	while (next_to_use != tail) {
+		desc = &crq->desc[next_to_use];
+		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
+		opcode = req->msg[0] & 0xff;
+
+		flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
+		if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
+			goto scan_next;
+
+		if (crq->desc[next_to_use].opcode == 0)
+			goto scan_next;
+
+		if (opcode == HNS3_MBX_PF_VF_RESP) {
+			hns3_handle_mbx_response(hw, req);
+			/*
+			 * Clear opcode to inform intr thread don't process
+			 * again.
+			 */
+			crq->desc[crq->next_to_use].opcode = 0;
+		}
+
+scan_next:
+		next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
+	}
+}
+
 void
 hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 {
@@ -418,6 +456,29 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 	uint16_t flag;
 	rte_spinlock_lock(&hw->cmq.crq.lock);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+	    !rte_thread_is_intr()) {
+		/*
+		 * Currently, any threads in the primary and secondary processes
+		 * could send mailbox sync request, so it will need to process
+		 * the crq message (which is the HNS3_MBX_PF_VF_RESP) in there
+		 * own thread context. It may also process other messages
+		 * because it uses the policy of processing all pending messages
+		 * at once.
+		 * But some messages such as HNS3_MBX_PUSH_LINK_STATUS could
+		 * only process within the intr thread in primary process,
+		 * otherwise it may lead to report lsc event in secondary
+		 * process.
+		 * So the threads other than intr thread in primary process
+		 * could only process HNS3_MBX_PF_VF_RESP message, if the
+		 * message processed, its opcode will rewrite with zero, then
+		 * the intr thread in primary process will not process again.
+		 */
+		hns3_handle_mbx_msg_out_intr(hw);
+		rte_spinlock_unlock(&hw->cmq.crq.lock);
+		return;
+	}
+
 	while (!hns3_cmd_crq_empty(hw)) {
 		if (rte_atomic16_read(&hw->reset.disable_cmd))
 			rte_spinlock_unlock(&hw->cmq.crq.lock);
@@ -439,6 +500,13 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 			continue;
 		}
 
+		if (desc->opcode == 0) {
+			/* Message already processed by other thread */
+			crq->desc[crq->next_to_use].flag = 0;
+			hns3_mbx_ring_ptr_move_crq(crq);
+			continue;
+		}
+
 		switch (opcode) {
 		case HNS3_MBX_PF_VF_RESP:
 			hns3_handle_mbx_response(hw, req);
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-stable] [PATCH 20.11 3/4] net/hns3: fix timing in mailbox
  2021-05-17 13:50 [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Min Hu (Connor)
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process Min Hu (Connor)
@ 2021-05-17 13:50 ` Min Hu (Connor)
  2021-05-18  4:27   ` Xueming(Steven) Li
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 4/4] net/hns3: fix verification of NEON support Min Hu (Connor)
  2021-05-18  4:14 ` [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Xueming(Steven) Li
  3 siblings, 1 reply; 8+ messages in thread
From: Min Hu (Connor) @ 2021-05-17 13:50 UTC (permalink / raw)
  To: stable; +Cc: ferruh.yigit, xuemingl

From: Chengchang Tang <tangchengchang@huawei.com>

[ upstream commit d566bfcff0c7bfe167f6c520d4fd5b0104130af6 ]

Currently, when processing MBX messages, the system timestamp is obtained
to determine whether timeout occurs. However, the gettimeofday function
is not monotonically increasing. Therefore, this may lead to incorrect
judgment or difficulty exiting the loop. And actually, in this scenario,
it is not necessary to obtain the timestamp.

This patch deletes the call to the gettimeofday function during MBX
message processing.

Fixes: 463e748964f5 ("net/hns3: support mailbox")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_mbx.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 11e5235..2a479f2 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -61,13 +61,12 @@ static int
 hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 		  uint8_t *resp_data, uint16_t resp_len)
 {
-#define HNS3_MAX_RETRY_MS	500
+#define HNS3_MAX_RETRY_US	500000
 #define HNS3_WAIT_RESP_US	100
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_mbx_resp_status *mbx_resp;
+	uint32_t wait_time = 0;
 	bool received;
-	uint64_t now;
-	uint64_t end;
 
 	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
 		hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
@@ -75,9 +74,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 		return -EINVAL;
 	}
 
-	now = get_timeofday_ms();
-	end = now + HNS3_MAX_RETRY_MS;
-	while (now < end) {
+	while (wait_time < HNS3_MAX_RETRY_US) {
 		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
 			hns3_err(hw, "Don't wait for mbx respone because of "
 				 "disable_cmd");
@@ -103,10 +100,10 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 		if (received)
 			break;
 
-		now = get_timeofday_ms();
+		wait_time += HNS3_WAIT_RESP_US;
 	}
 	hw->mbx_resp.req_msg_data = 0;
-	if (now >= end) {
+	if (wait_time >= HNS3_MAX_RETRY_US) {
 		hns3_mbx_proc_timeout(hw, code, subcode);
 		return -ETIME;
 	}
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-stable] [PATCH 20.11 4/4] net/hns3: fix verification of NEON support
  2021-05-17 13:50 [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Min Hu (Connor)
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process Min Hu (Connor)
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 3/4] net/hns3: fix timing in mailbox Min Hu (Connor)
@ 2021-05-17 13:50 ` Min Hu (Connor)
  2021-05-18  4:27   ` Xueming(Steven) Li
  2021-05-18  4:14 ` [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Xueming(Steven) Li
  3 siblings, 1 reply; 8+ messages in thread
From: Min Hu (Connor) @ 2021-05-17 13:50 UTC (permalink / raw)
  To: stable; +Cc: ferruh.yigit, xuemingl

From: Chengwen Feng <fengchengwen@huawei.com>

[ upstream commit e40ad6fca467b8671c7dfa7435b602997f5358e1 ]

This patch adds verification of whether NEON is supported.

Fixes: a3d4f4d291d7 ("net/hns3: support NEON Rx")
Fixes: e31f123db06b ("net/hns3: support NEON Tx")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_rxtx.c | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 896567c..7a7d6ff 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -10,7 +10,7 @@
 #include <rte_io.h>
 #include <rte_net.h>
 #include <rte_malloc.h>
-#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
+#if defined(RTE_ARCH_ARM64)
 #include <rte_cpuflags.h>
 #endif
 
@@ -2483,6 +2483,16 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 }
 
 static bool
+hns3_get_default_vec_support(void)
+{
+#if defined(RTE_ARCH_ARM64)
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+		return true;
+#endif
+	return false;
+}
+
+static bool
 hns3_check_sve_support(void)
 {
 #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
@@ -2498,9 +2508,12 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	struct hns3_adapter *hns = dev->data->dev_private;
 	uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
-		return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
-		       hns3_recv_pkts_vec;
+	if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0) {
+		if (hns3_get_default_vec_support())
+			return hns3_recv_pkts_vec;
+		else if (hns3_check_sve_support())
+			return hns3_recv_pkts_vec_sve;
+	}
 
 	if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
 	    (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
@@ -3734,8 +3747,10 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
 
 	if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
 		*prep = NULL;
-		return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
-			hns3_xmit_pkts_vec;
+		if (hns3_get_default_vec_support())
+			return hns3_xmit_pkts_vec;
+		else if (hns3_check_sve_support())
+			return hns3_xmit_pkts_vec_sve;
 	}
 
 	if (hns->tx_simple_allowed &&
-- 
2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox
  2021-05-17 13:50 [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Min Hu (Connor)
                   ` (2 preceding siblings ...)
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 4/4] net/hns3: fix verification of NEON support Min Hu (Connor)
@ 2021-05-18  4:14 ` Xueming(Steven) Li
  3 siblings, 0 replies; 8+ messages in thread
From: Xueming(Steven) Li @ 2021-05-18  4:14 UTC (permalink / raw)
  To: Min Hu (Connor), stable; +Cc: ferruh.yigit

Thanks, merged.

> -----Original Message-----
> From: Min Hu (Connor) <humin29@huawei.com>
> Sent: Monday, May 17, 2021 9:51 PM
> To: stable@dpdk.org
> Cc: ferruh.yigit@intel.com; Xueming(Steven) Li <xuemingl@nvidia.com>
> Subject: [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox
> 
> From: Chengwen Feng <fengchengwen@huawei.com>
> 
> [ upstream commit c8dec72de00039f367c1a8cd1f40378d04cf3e8f ]
> 
> Currently, the mailbox synchronous communication between VF and PF use the following fields to maintain communication:
> 1. Req_msg_data which was combined by message code and subcode, used to
>    match request and response.
> 2. Head which means the number of requests successfully sent by VF.
> 3. Tail which means the number of responses successfully received by VF.
> 4. Lost which means the number of requests which are timeout.
> 
> There may possible mismatches of the following situation:
> 1. VF sends message A with code=1 subcode=1.
> 	Then head=1, tail=0, lost=0.
> 2. PF was blocked about 500ms when processing the message A.
> 3. VF will detect message A timeout because it can't get the response within 500ms.
> 	Then head=1, tail=0, lost=1.
> 4. VF sends message B with code=1 subcode=1 which equal message A.
> 	Then head=2, tail=0, lost=1.
> 5. PF processes the first message A and send the response message to VF.
> 6. VF will update tail field to 1, but the lost field will remain
>    unchanged because the code/subcode equal message B's, so driver will
>    return success because now the head(2) equals tail(1) plus lost(1).
>    This will lead to mismatch of request and response.
> 
> To fix the above bug, we use the following scheme:
> 1. The message sent from VF was labelled with match_id which was a
>    unique 16-bit non-zero value.
> 2. The response sent from PF will label with match_id which got from the
>    request.
> 3. The VF uses the match_id to match request and response message.
> 
> This scheme depends on the PF driver, if the PF driver don't support then VF will uses the original scheme.
> 
> Fixes: 463e748964f5 ("net/hns3: support mailbox")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
> ---
>  drivers/net/hns3/hns3_mbx.c | 120 +++++++++++++++++++++++++++++++++++---------
>  drivers/net/hns3/hns3_mbx.h |  20 +++++++-
>  2 files changed, 114 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index 975a60b..79ac16a 100644
> --- a/drivers/net/hns3/hns3_mbx.c
> +++ b/drivers/net/hns3/hns3_mbx.c
> @@ -40,14 +40,32 @@ hns3_resp_to_errno(uint16_t resp_code)
>  	return -EIO;
>  }
> 
> +static void
> +hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t
> +subcode) {
> +	if (hw->mbx_resp.matching_scheme ==
> +	    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) {
> +		hw->mbx_resp.lost++;
> +		hns3_err(hw,
> +			 "VF could not get mbx(%u,%u) head(%u) tail(%u) "
> +			 "lost(%u) from PF",
> +			 code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail,
> +			 hw->mbx_resp.lost);
> +		return;
> +	}
> +
> +	hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode); }
> +
>  static int
> -hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
> +hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
>  		  uint8_t *resp_data, uint16_t resp_len)  {
>  #define HNS3_MAX_RETRY_MS	500
>  #define HNS3_WAIT_RESP_US	100
>  	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
>  	struct hns3_mbx_resp_status *mbx_resp;
> +	bool received;
>  	uint64_t now;
>  	uint64_t end;
> 
> @@ -59,8 +77,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
> 
>  	now = get_timeofday_ms();
>  	end = now + HNS3_MAX_RETRY_MS;
> -	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
> -	       (now < end)) {
> +	while (now < end) {
>  		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
>  			hns3_err(hw, "Don't wait for mbx respone because of "
>  				 "disable_cmd");
> @@ -77,16 +94,20 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
>  		hns3_dev_handle_mbx_msg(hw);
>  		rte_delay_us(HNS3_WAIT_RESP_US);
> 
> +		if (hw->mbx_resp.matching_scheme ==
> +		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL)
> +			received = (hw->mbx_resp.head ==
> +				    hw->mbx_resp.tail + hw->mbx_resp.lost);
> +		else
> +			received = hw->mbx_resp.received_match_resp;
> +		if (received)
> +			break;
> +
>  		now = get_timeofday_ms();
>  	}
>  	hw->mbx_resp.req_msg_data = 0;
>  	if (now >= end) {
> -		hw->mbx_resp.lost++;
> -		hns3_err(hw,
> -			 "VF could not get mbx(%u,%u) head(%u) tail(%u) "
> -			 "lost(%u) from PF",
> -			 code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
> -			 hw->mbx_resp.lost);
> +		hns3_mbx_proc_timeout(hw, code, subcode);
>  		return -ETIME;
>  	}
>  	rte_io_rmb();
> @@ -101,6 +122,29 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
>  	return 0;
>  }
> 
> +static void
> +hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t
> +subcode) {
> +	/*
> +	 * Init both matching scheme fields because we may not know the exact
> +	 * scheme will be used when in the initial phase.
> +	 *
> +	 * Also, there are OK to init both matching scheme fields even though
> +	 * we get the exact scheme which is used.
> +	 */
> +	hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
> +	hw->mbx_resp.head++;
> +
> +	/* Update match_id and ensure the value of match_id is not zero */
> +	hw->mbx_resp.match_id++;
> +	if (hw->mbx_resp.match_id == 0)
> +		hw->mbx_resp.match_id = 1;
> +	hw->mbx_resp.received_match_resp = false;
> +
> +	hw->mbx_resp.resp_status = 0;
> +	memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
> +}
> +
>  int
>  hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
>  		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp, @@ -138,8 +182,8 @@ hns3_send_mbx_msg(struct
> hns3_hw *hw, uint16_t code, uint16_t subcode,
>  	if (need_resp) {
>  		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
>  		rte_spinlock_lock(&hw->mbx_resp.lock);
> -		hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
> -		hw->mbx_resp.head++;
> +		hns3_mbx_prepare_resp(hw, code, subcode);
> +		req->match_id = hw->mbx_resp.match_id;
>  		ret = hns3_cmd_send(hw, &desc, 1);
>  		if (ret) {
>  			rte_spinlock_unlock(&hw->mbx_resp.lock);
> @@ -251,6 +295,46 @@ hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)  }
> 
>  static void
> +hns3_handle_mbx_response(struct hns3_hw *hw, struct
> +hns3_mbx_pf_to_vf_cmd *req) {
> +	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
> +	uint32_t msg_data;
> +
> +	if (req->match_id != 0) {
> +		/*
> +		 * If match_id is not zero, it means PF support copy request's
> +		 * match_id to its response. So VF could use the match_id
> +		 * to match the request.
> +		 */
> +		if (resp->matching_scheme !=
> +		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
> +			resp->matching_scheme =
> +				HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
> +			hns3_info(hw, "detect mailbox support match id!");
> +		}
> +		if (req->match_id == resp->match_id) {
> +			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
> +			memcpy(resp->additional_info, &req->msg[4],
> +			       HNS3_MBX_MAX_RESP_DATA_SIZE);
> +			rte_io_wmb();
> +			resp->received_match_resp = true;
> +		}
> +		return;
> +	}
> +
> +	/*
> +	 * If the below instructions can be executed, it means PF does not
> +	 * support copy request's match_id to its response. So VF follows the
> +	 * original scheme to process.
> +	 */
> +	resp->resp_status = hns3_resp_to_errno(req->msg[3]);
> +	memcpy(resp->additional_info, &req->msg[4],
> +	       HNS3_MBX_MAX_RESP_DATA_SIZE);
> +	msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
> +	hns3_update_resp_position(hw, msg_data); }
> +
> +static void
>  hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)  {
>  	switch (link_fail_code) {
> @@ -326,16 +410,12 @@ hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)  void
> hns3_dev_handle_mbx_msg(struct hns3_hw *hw)  {
> -	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
>  	struct hns3_cmq_ring *crq = &hw->cmq.crq;
>  	struct hns3_mbx_pf_to_vf_cmd *req;
>  	struct hns3_cmd_desc *desc;
> -	uint32_t msg_data;
>  	uint16_t *msg_q;
>  	uint8_t opcode;
>  	uint16_t flag;
> -	uint8_t *temp;
> -	int i;
>  	rte_spinlock_lock(&hw->cmq.crq.lock);
> 
>  	while (!hns3_cmd_crq_empty(hw)) {
> @@ -361,15 +441,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
> 
>  		switch (opcode) {
>  		case HNS3_MBX_PF_VF_RESP:
> -			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
> -
> -			temp = (uint8_t *)&req->msg[4];
> -			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
> -				resp->additional_info[i] = *temp;
> -				temp++;
> -			}
> -			msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
> -			hns3_update_resp_position(hw, msg_data);
> +			hns3_handle_mbx_response(hw, req);
>  			break;
>  		case HNS3_MBX_LINK_STAT_CHANGE:
>  		case HNS3_MBX_ASSERTING_RESET:
> diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h index 7f7ade1..8a9d7ba 100644
> --- a/drivers/net/hns3/hns3_mbx.h
> +++ b/drivers/net/hns3/hns3_mbx.h
> @@ -83,12 +83,26 @@ enum hns3_mbx_link_fail_subcode {
>  #define HNS3_MBX_RING_MAP_BASIC_MSG_NUM	3
>  #define HNS3_MBX_RING_NODE_VARIABLE_NUM	3
> 
> +enum {
> +	HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL = 0,
> +	HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID
> +};
> +
>  struct hns3_mbx_resp_status {
>  	rte_spinlock_t lock; /* protects against contending sync cmd resp */
> +
> +	uint8_t matching_scheme;
> +
> +	/* The following fields used in the matching scheme for original */
>  	uint32_t req_msg_data;
>  	uint32_t head;
>  	uint32_t tail;
>  	uint32_t lost;
> +
> +	/* The following fields used in the matching scheme for match_id */
> +	uint16_t match_id;
> +	bool received_match_resp;
> +
>  	int resp_status;
>  	uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE];
>  };
> @@ -106,7 +120,8 @@ struct hns3_mbx_vf_to_pf_cmd {
>  	uint8_t mbx_need_resp;
>  	uint8_t rsv1;
>  	uint8_t msg_len;
> -	uint8_t rsv2[3];
> +	uint8_t rsv2;
> +	uint16_t match_id;
>  	uint8_t msg[HNS3_MBX_MAX_MSG_SIZE];
>  };
> 
> @@ -114,7 +129,8 @@ struct hns3_mbx_pf_to_vf_cmd {
>  	uint8_t dest_vfid;
>  	uint8_t rsv[3];
>  	uint8_t msg_len;
> -	uint8_t rsv1[3];
> +	uint8_t rsv1;
> +	uint16_t match_id;
>  	uint16_t msg[8];
>  };
> 
> --
> 2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-stable] [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process Min Hu (Connor)
@ 2021-05-18  4:15   ` Xueming(Steven) Li
  0 siblings, 0 replies; 8+ messages in thread
From: Xueming(Steven) Li @ 2021-05-18  4:15 UTC (permalink / raw)
  To: Min Hu (Connor), stable; +Cc: ferruh.yigit

Thanks, merged

> -----Original Message-----
> From: Min Hu (Connor) <humin29@huawei.com>
> Sent: Monday, May 17, 2021 9:51 PM
> To: stable@dpdk.org
> Cc: ferruh.yigit@intel.com; Xueming(Steven) Li <xuemingl@nvidia.com>
> Subject: [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process
> 
> From: Chengwen Feng <fengchengwen@huawei.com>
> 
> [ upstream commit dbbbad23e380773b37872df2195c4529fd93ca6f ]
> 
> VF will build two queues (csq: command send queue, crq: command receive
> queue) with firmware, the crq may contain the following messages:
> 1) mailbox response message which was the ack of mailbox sync request.
> 2) PF's link status change message which may send by PF at anytime;
> 
> Currently, any threads in the primary and secondary processes could send mailbox sync requests, so they will need to process the crq
> messages in their own thread context.
> 
> If the crq holds two messages: a) PF's link status change message, b) mailbox response message when the secondary process deals with
> the crq messages, it will lead to reporting an LSC event in the secondary process because it uses the policy of processing all pending messages at
> once.
> 
> We use the following scheme to solve it:
> 1) threads in the secondary process could only process specific messages
>    (e.g. mailbox response message) in crq; once a message is processed, its
>    opcode will be rewritten with zero, so the intr thread in the primary
>    process will not process it again.
> 2) threads other than intr thread in the primary process use the same
>    processing logic as the threads in secondary process.
> 3) intr thread in the primary process could process all messages.
> 
> Fixes: 76a3836b98c4 ("net/hns3: fix setting default MAC address in bonding of VF")
> Fixes: 463e748964f5 ("net/hns3: support mailbox")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
> ---
>  drivers/net/hns3/hns3_mbx.c | 68 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 68 insertions(+)
> 
> diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index 79ac16a..11e5235 100644
> --- a/drivers/net/hns3/hns3_mbx.c
> +++ b/drivers/net/hns3/hns3_mbx.c
> @@ -407,6 +407,44 @@ hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
>  	}
>  }
> 
> +static void
> +hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) {
> +	struct hns3_cmq_ring *crq = &hw->cmq.crq;
> +	struct hns3_mbx_pf_to_vf_cmd *req;
> +	struct hns3_cmd_desc *desc;
> +	uint32_t tail, next_to_use;
> +	uint8_t opcode;
> +	uint16_t flag;
> +
> +	tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
> +	next_to_use = crq->next_to_use;
> +	while (next_to_use != tail) {
> +		desc = &crq->desc[next_to_use];
> +		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
> +		opcode = req->msg[0] & 0xff;
> +
> +		flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
> +		if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
> +			goto scan_next;
> +
> +		if (crq->desc[next_to_use].opcode == 0)
> +			goto scan_next;
> +
> +		if (opcode == HNS3_MBX_PF_VF_RESP) {
> +			hns3_handle_mbx_response(hw, req);
> +			/*
> +			 * Clear opcode to inform intr thread don't process
> +			 * again.
> +			 */
> +			crq->desc[crq->next_to_use].opcode = 0;
> +		}
> +
> +scan_next:
> +		next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
> +	}
> +}
> +
>  void
>  hns3_dev_handle_mbx_msg(struct hns3_hw *hw)  { @@ -418,6 +456,29 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
>  	uint16_t flag;
>  	rte_spinlock_lock(&hw->cmq.crq.lock);
> 
> +	if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
> +	    !rte_thread_is_intr()) {
> +		/*
> +		 * Currently, any threads in the primary and secondary processes
> +		 * could send mailbox sync request, so it will need to process
> +		 * the crq message (which is the HNS3_MBX_PF_VF_RESP) in there
> +		 * own thread context. It may also process other messages
> +		 * because it uses the policy of processing all pending messages
> +		 * at once.
> +		 * But some messages such as HNS3_MBX_PUSH_LINK_STATUS could
> +		 * only process within the intr thread in primary process,
> +		 * otherwise it may lead to report lsc event in secondary
> +		 * process.
> +		 * So the threads other than intr thread in primary process
> +		 * could only process HNS3_MBX_PF_VF_RESP message, if the
> +		 * message processed, its opcode will rewrite with zero, then
> +		 * the intr thread in primary process will not process again.
> +		 */
> +		hns3_handle_mbx_msg_out_intr(hw);
> +		rte_spinlock_unlock(&hw->cmq.crq.lock);
> +		return;
> +	}
> +
>  	while (!hns3_cmd_crq_empty(hw)) {
>  		if (rte_atomic16_read(&hw->reset.disable_cmd))
>  			rte_spinlock_unlock(&hw->cmq.crq.lock);
> @@ -439,6 +500,13 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
>  			continue;
>  		}
> 
> +		if (desc->opcode == 0) {
> +			/* Message already processed by other thread */
> +			crq->desc[crq->next_to_use].flag = 0;
> +			hns3_mbx_ring_ptr_move_crq(crq);
> +			continue;
> +		}
> +
>  		switch (opcode) {
>  		case HNS3_MBX_PF_VF_RESP:
>  			hns3_handle_mbx_response(hw, req);
> --
> 2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-stable] [PATCH 20.11 3/4] net/hns3: fix timing in mailbox
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 3/4] net/hns3: fix timing in mailbox Min Hu (Connor)
@ 2021-05-18  4:27   ` Xueming(Steven) Li
  0 siblings, 0 replies; 8+ messages in thread
From: Xueming(Steven) Li @ 2021-05-18  4:27 UTC (permalink / raw)
  To: Min Hu (Connor), stable; +Cc: ferruh.yigit

Thanks, merged

> -----Original Message-----
> From: Min Hu (Connor) <humin29@huawei.com>
> Sent: Monday, May 17, 2021 9:51 PM
> To: stable@dpdk.org
> Cc: ferruh.yigit@intel.com; Xueming(Steven) Li <xuemingl@nvidia.com>
> Subject: [PATCH 20.11 3/4] net/hns3: fix timing in mailbox
> 
> From: Chengchang Tang <tangchengchang@huawei.com>
> 
> [ upstream commit d566bfcff0c7bfe167f6c520d4fd5b0104130af6 ]
> 
> Currently, when processing MBX messages, the system timestamp is obtained to determine whether timeout occurs. However, the
> gettimeofday function is not monotonically increasing. Therefore, this may lead to incorrect judgment or difficulty exiting the loop.
> And actually, in this scenario, it is not necessary to obtain the timestamp.
> 
> This patch deletes the call to the gettimeofday function during MBX message processing.
> 
> Fixes: 463e748964f5 ("net/hns3: support mailbox")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
> ---
>  drivers/net/hns3/hns3_mbx.c | 13 +++++--------
>  1 file changed, 5 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index 11e5235..2a479f2 100644
> --- a/drivers/net/hns3/hns3_mbx.c
> +++ b/drivers/net/hns3/hns3_mbx.c
> @@ -61,13 +61,12 @@ static int
>  hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
>  		  uint8_t *resp_data, uint16_t resp_len)  {
> -#define HNS3_MAX_RETRY_MS	500
> +#define HNS3_MAX_RETRY_US	500000
>  #define HNS3_WAIT_RESP_US	100
>  	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
>  	struct hns3_mbx_resp_status *mbx_resp;
> +	uint32_t wait_time = 0;
>  	bool received;
> -	uint64_t now;
> -	uint64_t end;
> 
>  	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
>  		hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)", @@ -75,9 +74,7 @@ hns3_get_mbx_resp(struct
> hns3_hw *hw, uint16_t code, uint16_t subcode,
>  		return -EINVAL;
>  	}
> 
> -	now = get_timeofday_ms();
> -	end = now + HNS3_MAX_RETRY_MS;
> -	while (now < end) {
> +	while (wait_time < HNS3_MAX_RETRY_US) {
>  		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
>  			hns3_err(hw, "Don't wait for mbx respone because of "
>  				 "disable_cmd");
> @@ -103,10 +100,10 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
>  		if (received)
>  			break;
> 
> -		now = get_timeofday_ms();
> +		wait_time += HNS3_WAIT_RESP_US;
>  	}
>  	hw->mbx_resp.req_msg_data = 0;
> -	if (now >= end) {
> +	if (wait_time >= HNS3_MAX_RETRY_US) {
>  		hns3_mbx_proc_timeout(hw, code, subcode);
>  		return -ETIME;
>  	}
> --
> 2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-stable] [PATCH 20.11 4/4] net/hns3: fix verification of NEON support
  2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 4/4] net/hns3: fix verification of NEON support Min Hu (Connor)
@ 2021-05-18  4:27   ` Xueming(Steven) Li
  0 siblings, 0 replies; 8+ messages in thread
From: Xueming(Steven) Li @ 2021-05-18  4:27 UTC (permalink / raw)
  To: Min Hu (Connor), stable; +Cc: ferruh.yigit

Thanks, merged!

> -----Original Message-----
> From: Min Hu (Connor) <humin29@huawei.com>
> Sent: Monday, May 17, 2021 9:51 PM
> To: stable@dpdk.org
> Cc: ferruh.yigit@intel.com; Xueming(Steven) Li <xuemingl@nvidia.com>
> Subject: [PATCH 20.11 4/4] net/hns3: fix verification of NEON support
> 
> From: Chengwen Feng <fengchengwen@huawei.com>
> 
> [ upstream commit e40ad6fca467b8671c7dfa7435b602997f5358e1 ]
> 
> This patch adds verification of whether NEON is supported.
> 
> Fixes: a3d4f4d291d7 ("net/hns3: support NEON Rx")
> Fixes: e31f123db06b ("net/hns3: support NEON Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
> ---
>  drivers/net/hns3/hns3_rxtx.c | 27 +++++++++++++++++++++------
>  1 file changed, 21 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 896567c..7a7d6ff 100644
> --- a/drivers/net/hns3/hns3_rxtx.c
> +++ b/drivers/net/hns3/hns3_rxtx.c
> @@ -10,7 +10,7 @@
>  #include <rte_io.h>
>  #include <rte_net.h>
>  #include <rte_malloc.h>
> -#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
> +#if defined(RTE_ARCH_ARM64)
>  #include <rte_cpuflags.h>
>  #endif
> 
> @@ -2483,6 +2483,16 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,  }
> 
>  static bool
> +hns3_get_default_vec_support(void)
> +{
> +#if defined(RTE_ARCH_ARM64)
> +	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
> +		return true;
> +#endif
> +	return false;
> +}
> +
> +static bool
>  hns3_check_sve_support(void)
>  {
>  #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE) @@ -2498,9 +2508,12 @@ hns3_get_rx_function(struct
> rte_eth_dev *dev)
>  	struct hns3_adapter *hns = dev->data->dev_private;
>  	uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
> 
> -	if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
> -		return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
> -		       hns3_recv_pkts_vec;
> +	if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0) {
> +		if (hns3_get_default_vec_support())
> +			return hns3_recv_pkts_vec;
> +		else if (hns3_check_sve_support())
> +			return hns3_recv_pkts_vec_sve;
> +	}
> 
>  	if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
>  	    (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0) @@ -3734,8 +3747,10 @@ hns3_get_tx_function(struct rte_eth_dev *dev,
> eth_tx_prep_t *prep)
> 
>  	if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
>  		*prep = NULL;
> -		return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
> -			hns3_xmit_pkts_vec;
> +		if (hns3_get_default_vec_support())
> +			return hns3_xmit_pkts_vec;
> +		else if (hns3_check_sve_support())
> +			return hns3_xmit_pkts_vec_sve;
>  	}
> 
>  	if (hns->tx_simple_allowed &&
> --
> 2.7.4


^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2021-05-18  4:27 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-05-17 13:50 [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Min Hu (Connor)
2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 2/4] net/hns3: fix VF handling LSC event in secondary process Min Hu (Connor)
2021-05-18  4:15   ` Xueming(Steven) Li
2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 3/4] net/hns3: fix timing in mailbox Min Hu (Connor)
2021-05-18  4:27   ` Xueming(Steven) Li
2021-05-17 13:50 ` [dpdk-stable] [PATCH 20.11 4/4] net/hns3: fix verification of NEON support Min Hu (Connor)
2021-05-18  4:27   ` Xueming(Steven) Li
2021-05-18  4:14 ` [dpdk-stable] [PATCH 20.11 1/4] net/hns3: fix possible mismatched response of mailbox Xueming(Steven) Li

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).