DPDK patches and discussions
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v5] common/idpf: refine capability get
Date: Mon, 24 Apr 2023 08:08:22 +0000	[thread overview]
Message-ID: <20230424080822.13541-1-beilei.xing@intel.com> (raw)
In-Reply-To: <20230406074245.82991-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Initialize the required capabilities in each PMD and refine the
idpf_vc_caps_get function accordingly, so that different PMDs can
request different capabilities.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
v5 changes:
 - No need to move check_pf_reset_done and mbx_init from the common module.
v4 changes:
 - No need to require the PTP capability for CPFL.
v3 changes:
 - Refine capability get.
 
 drivers/common/idpf/idpf_common_virtchnl.c | 45 ++--------------------
 drivers/net/cpfl/cpfl_ethdev.c             | 40 +++++++++++++++++++
 drivers/net/idpf/idpf_ethdev.c             | 40 +++++++++++++++++++
 3 files changed, 83 insertions(+), 42 deletions(-)
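
For reference, a minimal sketch of the resulting flow (illustrative only,
not part of the patch; the example_* names are hypothetical): each PMD
pre-fills adapter->caps with the capabilities it requires before calling
idpf_adapter_init(), and idpf_vc_caps_get() then sends that structure as
the request and copies the negotiated response back into it.

    /* Illustrative sketch, assuming the idpf common headers from
     * drivers/common/idpf are available.  Not part of the patch.
     */
    #include <rte_memcpy.h>
    #include <idpf_common_device.h>
    #include <idpf_common_virtchnl.h>

    /* Capabilities a hypothetical PMD wants to negotiate with the CP. */
    static struct virtchnl2_get_capabilities example_req_caps = {
            .csum_caps  = VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
                          VIRTCHNL2_CAP_RX_CSUM_L3_IPV4,
            .rss_caps   = VIRTCHNL2_CAP_RSS_IPV4_TCP,
            .other_caps = VIRTCHNL2_CAP_WB_ON_ITR,
    };

    static int
    example_adapter_ext_init(struct idpf_adapter *base)
    {
            /* Pre-fill the request; idpf_vc_caps_get(), called from
             * idpf_adapter_init(), sends base->caps to the control plane
             * and overwrites it with the negotiated capabilities.
             */
            rte_memcpy(&base->caps, &example_req_caps,
                       sizeof(struct virtchnl2_get_capabilities));

            return idpf_adapter_init(base);
    }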

diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 9ee7259539..a4e129062e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -278,51 +278,12 @@ idpf_vc_api_version_check(struct idpf_adapter *adapter)
 int
 idpf_vc_caps_get(struct idpf_adapter *adapter)
 {
-	struct virtchnl2_get_capabilities caps_msg;
 	struct idpf_cmd_info args;
 	int err;
 
-	memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));
-
-	caps_msg.csum_caps =
-		VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
-		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP      |
-		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP      |
-		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP     |
-		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP      |
-		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP      |
-		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP     |
-		VIRTCHNL2_CAP_TX_CSUM_GENERIC          |
-		VIRTCHNL2_CAP_RX_CSUM_L3_IPV4          |
-		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP      |
-		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP      |
-		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP     |
-		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP      |
-		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP      |
-		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP     |
-		VIRTCHNL2_CAP_RX_CSUM_GENERIC;
-
-	caps_msg.rss_caps =
-		VIRTCHNL2_CAP_RSS_IPV4_TCP             |
-		VIRTCHNL2_CAP_RSS_IPV4_UDP             |
-		VIRTCHNL2_CAP_RSS_IPV4_SCTP            |
-		VIRTCHNL2_CAP_RSS_IPV4_OTHER           |
-		VIRTCHNL2_CAP_RSS_IPV6_TCP             |
-		VIRTCHNL2_CAP_RSS_IPV6_UDP             |
-		VIRTCHNL2_CAP_RSS_IPV6_SCTP            |
-		VIRTCHNL2_CAP_RSS_IPV6_OTHER           |
-		VIRTCHNL2_CAP_RSS_IPV4_AH              |
-		VIRTCHNL2_CAP_RSS_IPV4_ESP             |
-		VIRTCHNL2_CAP_RSS_IPV4_AH_ESP          |
-		VIRTCHNL2_CAP_RSS_IPV6_AH              |
-		VIRTCHNL2_CAP_RSS_IPV6_ESP             |
-		VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
-
-	caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR;
-
 	args.ops = VIRTCHNL2_OP_GET_CAPS;
-	args.in_args = (uint8_t *)&caps_msg;
-	args.in_args_size = sizeof(caps_msg);
+	args.in_args = (uint8_t *)&adapter->caps;
+	args.in_args_size = sizeof(struct virtchnl2_get_capabilities);
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
@@ -333,7 +294,7 @@ idpf_vc_caps_get(struct idpf_adapter *adapter)
 		return err;
 	}
 
-	rte_memcpy(&adapter->caps, args.out_buffer, sizeof(caps_msg));
+	rte_memcpy(&adapter->caps, args.out_buffer, sizeof(struct virtchnl2_get_capabilities));
 
 	return 0;
 }
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ede730fd50..517ae15f4c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1165,6 +1165,44 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static struct virtchnl2_get_capabilities req_caps = {
+	.csum_caps =
+	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP     |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP     |
+	VIRTCHNL2_CAP_TX_CSUM_GENERIC          |
+	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4          |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP     |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP     |
+	VIRTCHNL2_CAP_RX_CSUM_GENERIC,
+
+	.rss_caps =
+	VIRTCHNL2_CAP_RSS_IPV4_TCP             |
+	VIRTCHNL2_CAP_RSS_IPV4_UDP             |
+	VIRTCHNL2_CAP_RSS_IPV4_SCTP            |
+	VIRTCHNL2_CAP_RSS_IPV4_OTHER           |
+	VIRTCHNL2_CAP_RSS_IPV6_TCP             |
+	VIRTCHNL2_CAP_RSS_IPV6_UDP             |
+	VIRTCHNL2_CAP_RSS_IPV6_SCTP            |
+	VIRTCHNL2_CAP_RSS_IPV6_OTHER           |
+	VIRTCHNL2_CAP_RSS_IPV4_AH              |
+	VIRTCHNL2_CAP_RSS_IPV4_ESP             |
+	VIRTCHNL2_CAP_RSS_IPV4_AH_ESP          |
+	VIRTCHNL2_CAP_RSS_IPV6_AH              |
+	VIRTCHNL2_CAP_RSS_IPV6_ESP             |
+	VIRTCHNL2_CAP_RSS_IPV6_AH_ESP,
+
+	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
+};
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1181,6 +1219,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	strncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);
 
+	rte_memcpy(&base->caps, &req_caps, sizeof(struct virtchnl2_get_capabilities));
+
 	ret = idpf_adapter_init(base);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter");
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e02ec2ec5a..899fdf8b29 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -1128,6 +1128,44 @@ idpf_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler, adapter);
 }
 
+static struct virtchnl2_get_capabilities req_caps = {
+	.csum_caps =
+	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP     |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP      |
+	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP     |
+	VIRTCHNL2_CAP_TX_CSUM_GENERIC          |
+	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4          |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP     |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP      |
+	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP     |
+	VIRTCHNL2_CAP_RX_CSUM_GENERIC,
+
+	.rss_caps =
+	VIRTCHNL2_CAP_RSS_IPV4_TCP             |
+	VIRTCHNL2_CAP_RSS_IPV4_UDP             |
+	VIRTCHNL2_CAP_RSS_IPV4_SCTP            |
+	VIRTCHNL2_CAP_RSS_IPV4_OTHER           |
+	VIRTCHNL2_CAP_RSS_IPV6_TCP             |
+	VIRTCHNL2_CAP_RSS_IPV6_UDP             |
+	VIRTCHNL2_CAP_RSS_IPV6_SCTP            |
+	VIRTCHNL2_CAP_RSS_IPV6_OTHER           |
+	VIRTCHNL2_CAP_RSS_IPV4_AH              |
+	VIRTCHNL2_CAP_RSS_IPV4_ESP             |
+	VIRTCHNL2_CAP_RSS_IPV4_AH_ESP          |
+	VIRTCHNL2_CAP_RSS_IPV6_AH              |
+	VIRTCHNL2_CAP_RSS_IPV6_ESP             |
+	VIRTCHNL2_CAP_RSS_IPV6_AH_ESP,
+
+	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
+};
+
 static int
 idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adapter)
 {
@@ -1144,6 +1182,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
 
 	strncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);
 
+	rte_memcpy(&base->caps, &req_caps, sizeof(struct virtchnl2_get_capabilities));
+
 	ret = idpf_adapter_init(base);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter");
-- 
2.26.2


Thread overview: 12+ messages
2023-03-21  6:53 [PATCH] common/idpf: move PF specific functions from common init beilei.xing
2023-03-21  7:07 ` [PATCH v2] " beilei.xing
2023-04-04 12:41   ` [PATCH v3 0/2] refine common module beilei.xing
2023-04-04 12:41     ` [PATCH v3 1/2] common/idpf: move PF specific functions from common init beilei.xing
2023-04-04 12:41     ` [PATCH v3 2/2] common/idpf: refine capability get beilei.xing
2023-04-06  7:42     ` [PATCH v4 0/2] refine common module beilei.xing
2023-04-06  7:42       ` [PATCH v4 1/2] common/idpf: move PF specific functions from common init beilei.xing
2023-04-23  8:25         ` Wu, Jingjing
2023-04-06  7:42       ` [PATCH v4 2/2] common/idpf: refine capability get beilei.xing
2023-04-24  8:08       ` beilei.xing [this message]
2023-04-26  5:17         ` [PATCH v5] " Wu, Jingjing
2023-04-26  8:11           ` Zhang, Qi Z
