DPDK patches and discussions
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH] common/idpf: refine get packet type
Date: Fri, 22 Dec 2023 16:51:28 +0000
Message-ID: <20231222165128.2899806-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Since the response to the virtchnl2_get_ptype_info virtual channel
message has changed on the IMC side, update the driver to request
packet type information in chunks rather than in a single query.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_device.c   | 64 ++++++++++++++--------
 drivers/common/idpf/idpf_common_device.h   |  9 +++
 drivers/common/idpf/idpf_common_virtchnl.c | 28 ++++------
 drivers/common/idpf/idpf_common_virtchnl.h |  4 +-
 4 files changed, 63 insertions(+), 42 deletions(-)
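
Note on the flow: instead of sending one request for all IDPF_MAX_PKT_TYPE
ptypes and then draining multiple unsolicited responses, the driver now
pages through the ptype table, one polled request/response pair per chunk.
A condensed sketch of the loop added below (names as in this patch; error
handling and endianness read-backs trimmed):

	uint16_t start_ptype_id = 0;

	while (start_ptype_id < IDPF_MAX_PKT_TYPE) {
		uint16_t n = IDPF_RX_MAX_PTYPES_PER_BUF;

		/* the last chunk may be shorter than a full buffer */
		if (start_ptype_id + n > IDPF_MAX_PKT_TYPE)
			n = IDPF_MAX_PKT_TYPE - start_ptype_id;

		req_ptype_info->start_ptype_id = start_ptype_id;
		req_ptype_info->num_ptypes = rte_cpu_to_le_16(n);

		/* one request, one polled response per iteration */
		ret = idpf_vc_ptype_info_query(adapter, req_ptype_info,
					       recv_ptype_info);

		start_ptype_id += n;
	}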

diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index cc4207a46e..1380cc462c 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -157,49 +157,65 @@ idpf_init_mbx(struct idpf_hw *hw)
 static int
 idpf_get_pkt_type(struct idpf_adapter *adapter)
 {
-	struct virtchnl2_get_ptype_info *ptype_info;
+	struct virtchnl2_get_ptype_info *req_ptype_info;
+	struct virtchnl2_get_ptype_info *recv_ptype_info;
+	uint16_t recv_num_ptypes = 0;
 	uint16_t ptype_offset, i, j;
-	uint16_t ptype_recvd = 0;
+	uint16_t start_ptype_id = 0;
 	int ret;
 
-	ret = idpf_vc_ptype_info_query(adapter);
-	if (ret != 0) {
-		DRV_LOG(ERR, "Fail to query packet type information");
-		return ret;
+	req_ptype_info = rte_zmalloc("req_ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (req_ptype_info == NULL)
+		return -ENOMEM;
+
+	recv_ptype_info = rte_zmalloc("recv_ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (recv_ptype_info == NULL) {
+		ret = -ENOMEM;
+		goto free_req_ptype_info;
 	}
 
-	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
-		if (ptype_info == NULL)
-			return -ENOMEM;
+	while (start_ptype_id < IDPF_MAX_PKT_TYPE) {
+		memset(req_ptype_info, 0, sizeof(*req_ptype_info));
+		memset(recv_ptype_info, 0, sizeof(*recv_ptype_info));
+
+		if ((start_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > IDPF_MAX_PKT_TYPE)
+			req_ptype_info->num_ptypes =
+				rte_cpu_to_le_16(IDPF_MAX_PKT_TYPE - start_ptype_id);
+		else
+			req_ptype_info->num_ptypes = rte_cpu_to_le_16(IDPF_RX_MAX_PTYPES_PER_BUF);
+		req_ptype_info->start_ptype_id = start_ptype_id;
 
-	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
-		ret = idpf_vc_one_msg_read(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
-					   IDPF_DFLT_MBX_BUF_SIZE, (uint8_t *)ptype_info);
+		ret = idpf_vc_ptype_info_query(adapter, req_ptype_info, recv_ptype_info);
 		if (ret != 0) {
-			DRV_LOG(ERR, "Fail to get packet type information");
-			goto free_ptype_info;
+			DRV_LOG(ERR, "Fail to query packet type information");
+			goto free_recv_ptype_info;
 		}
 
-		ptype_recvd += ptype_info->num_ptypes;
+		recv_num_ptypes += rte_cpu_to_le_16(recv_ptype_info->num_ptypes);
+		if (recv_num_ptypes > IDPF_MAX_PKT_TYPE) {
+			ret = -EINVAL;
+			goto free_recv_ptype_info;
+		}
+
+		start_ptype_id = rte_cpu_to_le_16(req_ptype_info->start_ptype_id) +
+			rte_cpu_to_le_16(req_ptype_info->num_ptypes);
+
 		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
 						sizeof(struct virtchnl2_ptype);
 
-		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
+		for (i = 0; i < rte_cpu_to_le_16(recv_ptype_info->num_ptypes); i++) {
 			bool is_inner = false, is_ip = false;
 			struct virtchnl2_ptype *ptype;
 			uint32_t proto_hdr = 0;
 
 			ptype = (struct virtchnl2_ptype *)
-					((uint8_t *)ptype_info + ptype_offset);
+					((uint8_t *)recv_ptype_info + ptype_offset);
 			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
 			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
 				ret = -EINVAL;
-				goto free_ptype_info;
+				goto free_recv_ptype_info;
 			}
 
-			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
-				goto free_ptype_info;
-
 			for (j = 0; j < ptype->proto_id_count; j++) {
 				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
 				case VIRTCHNL2_PROTO_HDR_GRE:
@@ -358,8 +374,10 @@ idpf_get_pkt_type(struct idpf_adapter *adapter)
 		}
 	}
 
-free_ptype_info:
-	rte_free(ptype_info);
+free_recv_ptype_info:
+	rte_free(recv_ptype_info);
+free_req_ptype_info:
+	rte_free(req_ptype_info);
 	clear_cmd(adapter);
 	return ret;
 }
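
Worth noting when reading the hunk above: the old flow stopped when it saw
the 0xFFFF sentinel in ptype_id_10, while the new flow trusts the count in
each response and bounds the running total instead. The guard from the
hunk, restated with a comment:

	recv_num_ptypes += rte_cpu_to_le_16(recv_ptype_info->num_ptypes);
	if (recv_num_ptypes > IDPF_MAX_PKT_TYPE) {
		/* device reported more ptypes than the table can hold */
		ret = -EINVAL;
		goto free_recv_ptype_info;
	}
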
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index f767ea7cec..2b94f0331a 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -31,6 +31,15 @@
 
 #define IDPF_DFLT_INTERVAL	16
 
+#define IDPF_RX_MAX_PTYPE_PROTO_IDS	32
+#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) +	\
+				 (sizeof(uint16_t) *			\
+				  (IDPF_RX_MAX_PTYPE_PROTO_IDS - 1)))
+#define IDPF_RX_PTYPE_HDR_SZ	(sizeof(struct virtchnl2_get_ptype_info) - \
+				 sizeof(struct virtchnl2_ptype))
+#define IDPF_RX_MAX_PTYPES_PER_BUF				\
+	((IDPF_DFLT_MBX_BUF_SIZE - IDPF_RX_PTYPE_HDR_SZ)/	\
+	 IDPF_RX_MAX_PTYPE_SZ)
 #define IDPF_GET_PTYPE_SIZE(p)						\
 	(sizeof(struct virtchnl2_ptype) +				\
 	 (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
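
For a sense of scale of the new macros: the figures below are assumptions
based on the common virtchnl2 layouts (sizeof(struct virtchnl2_ptype) == 8
with one proto_id slot, sizeof(struct virtchnl2_get_ptype_info) == 16, and
IDPF_DFLT_MBX_BUF_SIZE == 4096), not values stated by this patch:

	/* hypothetical worked example under the sizes assumed above */
	IDPF_RX_MAX_PTYPE_SZ       = 8 + 2 * (32 - 1)  = 70
	IDPF_RX_PTYPE_HDR_SZ       = 16 - 8            = 8
	IDPF_RX_MAX_PTYPES_PER_BUF = (4096 - 8) / 70   = 58

Each chunk is thus sized for the worst case of IDPF_RX_MAX_PTYPE_PROTO_IDS
protocol IDs per ptype, so a full chunk always fits in one mailbox buffer.
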
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 6455f640da..c46ed50eb5 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -202,15 +202,11 @@ idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 	switch (args->ops) {
 	case VIRTCHNL_OP_VERSION:
 	case VIRTCHNL2_OP_GET_CAPS:
+	case VIRTCHNL2_OP_GET_PTYPE_INFO:
 		/* for init virtchnl ops, need to poll the response */
 		err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
 		clear_cmd(adapter);
 		break;
-	case VIRTCHNL2_OP_GET_PTYPE_INFO:
-		/* for multuple response message,
-		 * do not handle the response here.
-		 */
-		break;
 	default:
 		/* For other virtchnl ops in running time,
 		 * wait for the cmd done flag.
@@ -909,28 +905,24 @@ idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable)
 }
 
 int
-idpf_vc_ptype_info_query(struct idpf_adapter *adapter)
+idpf_vc_ptype_info_query(struct idpf_adapter *adapter,
+			 struct virtchnl2_get_ptype_info *req_ptype_info,
+			 struct virtchnl2_get_ptype_info *recv_ptype_info)
 {
-	struct virtchnl2_get_ptype_info *ptype_info;
 	struct idpf_cmd_info args;
-	int len, err;
-
-	len = sizeof(struct virtchnl2_get_ptype_info);
-	ptype_info = rte_zmalloc("ptype_info", len, 0);
-	if (ptype_info == NULL)
-		return -ENOMEM;
+	int err;
 
-	ptype_info->start_ptype_id = 0;
-	ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
 	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
-	args.in_args = (uint8_t *)ptype_info;
-	args.in_args_size = len;
+	args.in_args = (uint8_t *)req_ptype_info;
+	args.in_args_size = sizeof(struct virtchnl2_get_ptype_info);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
 	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
 
-	rte_free(ptype_info);
+	rte_memcpy(recv_ptype_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
 	return err;
 }
 
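With VIRTCHNL2_OP_GET_PTYPE_INFO moved into the polled branch of
idpf_vc_cmd_execute(), a query now behaves like the other init-time ops:
send, poll a single response via idpf_vc_one_msg_read(), copy it out. A
minimal caller-side sketch of the new contract (both buffers allocated by
the caller at IDPF_DFLT_MBX_BUF_SIZE, as idpf_get_pkt_type() does above;
NULL checks and error paths trimmed):

	struct virtchnl2_get_ptype_info *req, *recv;
	int ret;

	req  = rte_zmalloc("req",  IDPF_DFLT_MBX_BUF_SIZE, 0);
	recv = rte_zmalloc("recv", IDPF_DFLT_MBX_BUF_SIZE, 0);

	req->start_ptype_id = 0;
	req->num_ptypes = rte_cpu_to_le_16(IDPF_RX_MAX_PTYPES_PER_BUF);

	/* response lands in adapter->mbx_resp and is copied into recv */
	ret = idpf_vc_ptype_info_query(adapter, req, recv);

	rte_free(recv);
	rte_free(req);
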
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 9ff5c38c26..73446ded86 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -41,7 +41,9 @@ int idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors);
 __rte_internal
 int idpf_vc_vectors_dealloc(struct idpf_vport *vport);
 __rte_internal
-int idpf_vc_ptype_info_query(struct idpf_adapter *adapter);
+int idpf_vc_ptype_info_query(struct idpf_adapter *adapter,
+			     struct virtchnl2_get_ptype_info *req_ptype_info,
+			     struct virtchnl2_get_ptype_info *recv_ptype_info);
 __rte_internal
 int idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops,
 			 uint16_t buf_len, uint8_t *buf);
-- 
2.34.1


Thread overview: 3+ messages
2023-12-22 16:51 beilei.xing [this message]
2024-02-27 19:35 ` Medvedkin, Vladimir
2024-02-29 17:09   ` Bruce Richardson
