DPDK patches and discussions
From: Wenjun Wu <wenjun1.wu@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com,
	beilei.xing@intel.com
Subject: [PATCH v4 3/4] net/iavf: support quanta size configuration
Date: Fri,  8 Apr 2022 16:45:36 +0800	[thread overview]
Message-ID: <20220408084537.920685-4-wenjun1.wu@intel.com> (raw)
In-Reply-To: <20220408084537.920685-1-wenjun1.wu@intel.com>

This patch adds quanta size configuration support.
The quanta size must be between 256 and 4096, and must be a multiple of 64.
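
For example, with testpmd the option can be passed as a device
argument (the PCI address below is hypothetical):

  dpdk-testpmd -a 0000:18:01.0,quanta_size=2048 -- -i

If the devarg is omitted, the driver falls back to a default quanta
size of 1024.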

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  3 +++
 drivers/net/iavf/iavf_ethdev.c | 38 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c  | 31 +++++++++++++++++++++++++++
 3 files changed, 72 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 96515a3ee9..c0a4a47b04 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -292,6 +292,7 @@ enum iavf_proto_xtr_type {
 struct iavf_devargs {
 	uint8_t proto_xtr_dflt;
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+	uint16_t quanta_size;
 };
 
 struct iavf_security_ctx;
@@ -467,6 +468,8 @@ int iavf_set_q_bw(struct rte_eth_dev *dev,
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			struct virtchnl_queue_tc_mapping *q_tc_mapping,
 			uint16_t size);
+int iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id,
+			    u16 num_queues);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
 int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..7d093bdc24 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -34,9 +34,11 @@
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
+#define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
+	IAVF_QUANTA_SIZE_ARG,
 	NULL
 };
 
@@ -950,6 +952,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
+		PMD_DRV_LOG(WARNING, "Failed to configure quanta size");
+
 	/* If needed, send configure queues msg multiple times to make the
 	 * adminq buffer length smaller than the 4K limitation.
 	 */
@@ -2092,6 +2097,25 @@ iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+parse_u16(__rte_unused const char *key, const char *value, void *args)
+{
+	u16 *num = (u16 *)args;
+	unsigned long tmp;
+
+	errno = 0;
+	tmp = strtoul(value, NULL, 10);
+	if (errno || tmp == 0 || tmp > UINT16_MAX) {
+		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
+			    key, value);
+		return -1;
+	}
+
+	*num = (u16)tmp;
+
+	return 0;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2118,6 +2142,20 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
+				 &parse_u16, &ad->devargs.quanta_size);
+	if (ret)
+		goto bail;
+
+	if (ad->devargs.quanta_size == 0)
+		ad->devargs.quanta_size = 1024;
+
+	if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
+	    ad->devargs.quanta_size % 64) {
+		PMD_INIT_LOG(ERR, "invalid quanta size");
+		ret = -EINVAL;
+	}
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
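
Note on the validity check in iavf_parse_devargs() above: a value is a
multiple of 64 exactly when its six low bits are all zero, so
"quanta_size % 64" (equivalently "(quanta_size & 0x3F) == 0") is the
right test. A minimal standalone sketch of the check, illustrative
only and not part of the patch:

  #include <stdbool.h>
  #include <stdint.h>

  /* A quanta size is valid when it lies in [256, 4096] and is a
   * multiple of 64, i.e. its six low bits are clear.
   */
  static bool
  quanta_size_valid(uint16_t qs)
  {
  	return qs >= 256 && qs <= 4096 && (qs & 0x3F) == 0;
  }

For instance, 1024 and 320 are accepted, while 288 (not a multiple of
64) is rejected.
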
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 537369f736..f9452d14ae 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1828,3 +1828,34 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 num_queues)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	struct virtchnl_quanta_cfg q_quanta;
+	int err;
+
+	if (adapter->devargs.quanta_size == 0)
+		return 0;
+
+	q_quanta.quanta_size = adapter->devargs.quanta_size;
+	q_quanta.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+	q_quanta.queue_select.start_queue_id = start_queue_id;
+	q_quanta.queue_select.num_queues = num_queues;
+
+	args.ops = VIRTCHNL_OP_CONFIG_QUANTA;
+	args.in_args = (uint8_t *)&q_quanta;
+	args.in_args_size = sizeof(q_quanta);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL_OP_CONFIG_QUANTA");
+		return err;
+	}
+
+	return 0;
+}
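
For reference, the message built above follows the virtchnl_quanta_cfg
layout introduced by the common/iavf patch (1/4) of this series.
Reconstructed from the fields used here, and therefore only a sketch
(see the common/iavf patch for the authoritative definition):

  struct virtchnl_quanta_cfg {
  	u16 quanta_size;    /* requested quanta size */
  	/* queue type, first queue id and number of queues
  	 * the setting applies to
  	 */
  	struct virtchnl_queue_chunk queue_select;
  };

The PF is expected to validate the request and apply the quanta size
to the selected TX queues.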
-- 
2.25.1


Thread overview: 29+ messages
2022-03-29  2:07 [PATCH v1 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-03-29  2:07 ` [PATCH v1 1/3] common/iavf: support queue rate limit and quanta size configuration Wenjun Wu
2022-03-29  2:07 ` [PATCH v1 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
2022-03-29  2:07 ` [PATCH v1 3/3] net/iavf: support quanta size configuration Wenjun Wu
2022-04-08  1:30 ` [PATCH v2 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-04-08  1:30   ` [PATCH v2 1/3] common/iavf: support queue rate limit and quanta size configuration Wenjun Wu
2022-04-08  1:30   ` [PATCH v2 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-08  1:30   ` [PATCH v2 3/3] net/iavf: support quanta size configuration Wenjun Wu
2022-04-08  5:30 ` [PATCH v3 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 1/4] common/iavf: support queue rate limit and quanta size configuration Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 3/4] net/iavf: support quanta size configuration Wenjun Wu
2022-04-08  5:30   ` [PATCH v3 4/4] doc: add release notes for 22.07 Wenjun Wu
2022-04-08  8:45 ` [PATCH v4 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-04-08  8:45   ` [PATCH v4 1/4] common/iavf: support queue rate limit and quanta size configuration Wenjun Wu
2022-04-08  8:45   ` [PATCH v4 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-08  8:45   ` [PATCH v4 3/4] net/iavf: support quanta size configuration Wenjun Wu [this message]
2022-04-08  8:45   ` [PATCH v4 4/4] doc: add release notes for 22.07 Wenjun Wu
2022-04-19  2:05 ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 1/4] common/iavf: support queue rate limit and quanta size configuration Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 2/4] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 3/4] net/iavf: support quanta size configuration Wenjun Wu
2022-04-19  2:05   ` [PATCH v5 4/4] doc: update IAVF driver guide and 22.07 release notes Wenjun Wu
2022-04-19  2:39   ` [PATCH v5 0/4] Enable queue rate limit and quanta size configuration Zhang, Qi Z
2022-04-22  1:42 ` [PATCH v6 0/3] Enable queue rate limit and quanta size configuration Wenjun Wu
2022-04-22  1:42   ` [PATCH v6 1/3] common/iavf: support queue rate limit and quanta size configuration Wenjun Wu
2022-04-22  1:42   ` [PATCH v6 2/3] net/iavf: support queue rate limit configuration Wenjun Wu
2022-04-22  1:43   ` [PATCH v6 3/3] net/iavf: support quanta size configuration Wenjun Wu
2022-04-22 12:09   ` [PATCH v6 0/3] Enable queue rate limit and quanta size configuration Zhang, Qi Z
