From: Gordon Noonan <gordon.noonan@intel.com>
To: dev@dpdk.org
Cc: gordon.noonan@intel.com, Qi Zhang <qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH RFC 2/8] net/iavf: support 64 queues
Date: Fri, 3 Jul 2020 11:28:23 +0100 [thread overview]
Message-ID: <20200703102829.52581-3-gordon.noonan@intel.com> (raw)
In-Reply-To: <20200703102829.52581-1-gordon.noonan@intel.com>
From: Qi Zhang <qi.z.zhang@intel.com>
Enlarge max queue number from 16 to 64 by using "large" vc ops.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/iavf/iavf.h | 4 ++--
drivers/net/iavf/iavf_vchnl.c | 35 ++++++++++++++++++-----------------
2 files changed, 20 insertions(+), 19 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 9be8a2381..039517af9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -19,7 +19,7 @@
#define IAVF_FRAME_SIZE_MAX 9728
#define IAVF_QUEUE_BASE_ADDR_UNIT 128
-#define IAVF_MAX_NUM_QUEUES 16
+#define IAVF_MAX_NUM_QUEUES 64
#define IAVF_NUM_MACADDR_MAX 64
@@ -138,7 +138,7 @@ struct iavf_info {
uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
uint16_t msix_base; /* msix vector base from */
/* queue bitmask for each vector */
- uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+ uint64_t rxq_map[IAVF_MAX_MSIX_VECTORS];
struct iavf_flow_list flow_list;
rte_spinlock_t flow_ops_lock;
struct iavf_parser_list rss_parser_list;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 33acea54a..2b28d0577 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -383,7 +383,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
- VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_LARGE_VF;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
@@ -450,7 +451,7 @@ int
iavf_enable_queues(struct iavf_adapter *adapter)
{
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
- struct virtchnl_queue_select queue_select;
+ struct virtchnl_large_queue_select queue_select;
struct iavf_cmd_info args;
int err;
@@ -460,7 +461,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
- args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_ENABLE_LARGE_QUEUES;
args.in_args = (u8 *)&queue_select;
args.in_args_size = sizeof(queue_select);
args.out_buffer = vf->aq_resp;
@@ -468,7 +469,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
err = iavf_execute_vf_cmd(adapter, &args);
if (err) {
PMD_DRV_LOG(ERR,
- "Failed to execute command of OP_ENABLE_QUEUES");
+ "Failed to execute command of OP_ENABLE_LARGE_QUEUES");
return err;
}
return 0;
@@ -478,7 +479,7 @@ int
iavf_disable_queues(struct iavf_adapter *adapter)
{
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
- struct virtchnl_queue_select queue_select;
+ struct virtchnl_large_queue_select queue_select;
struct iavf_cmd_info args;
int err;
@@ -488,7 +489,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
- args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_DISABLE_LARGE_QUEUES;
args.in_args = (u8 *)&queue_select;
args.in_args_size = sizeof(queue_select);
args.out_buffer = vf->aq_resp;
@@ -496,7 +497,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
err = iavf_execute_vf_cmd(adapter, &args);
if (err) {
PMD_DRV_LOG(ERR,
- "Failed to execute command of OP_DISABLE_QUEUES");
+ "Failed to execute command of OP_DISABLE_LARGE_QUEUES");
return err;
}
return 0;
@@ -507,7 +508,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
bool rx, bool on)
{
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
- struct virtchnl_queue_select queue_select;
+ struct virtchnl_large_queue_select queue_select;
struct iavf_cmd_info args;
int err;
@@ -519,9 +520,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
queue_select.tx_queues |= 1 << qid;
if (on)
- args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_ENABLE_LARGE_QUEUES;
else
- args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_DISABLE_LARGE_QUEUES;
args.in_args = (u8 *)&queue_select;
args.in_args_size = sizeof(queue_select);
args.out_buffer = vf->aq_resp;
@@ -529,7 +530,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
err = iavf_execute_vf_cmd(adapter, &args);
if (err)
PMD_DRV_LOG(ERR, "Failed to execute command of %s",
- on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+ on ? "OP_ENABLE_LARGE_QUEUES" : "OP_DISABLE_LARGE_QUEUES");
return err;
}
@@ -686,13 +687,13 @@ int
iavf_config_irq_map(struct iavf_adapter *adapter)
{
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
- struct virtchnl_irq_map_info *map_info;
- struct virtchnl_vector_map *vecmap;
+ struct virtchnl_large_irq_map_info *map_info;
+ struct virtchnl_large_vector_map *vecmap;
struct iavf_cmd_info args;
int len, i, err;
- len = sizeof(struct virtchnl_irq_map_info) +
- sizeof(struct virtchnl_vector_map) * vf->nb_msix;
+ len = sizeof(struct virtchnl_large_irq_map_info) +
+ sizeof(struct virtchnl_large_vector_map) * vf->nb_msix;
map_info = rte_zmalloc("map_info", len, 0);
if (!map_info)
@@ -708,14 +709,14 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
}
- args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.ops = VIRTCHNL_OP_CONFIG_LARGE_IRQ_MAP;
args.in_args = (u8 *)map_info;
args.in_args_size = len;
args.out_buffer = vf->aq_resp;
args.out_size = IAVF_AQ_BUF_SZ;
err = iavf_execute_vf_cmd(adapter, &args);
if (err)
- PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
+ PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_LARGE_IRQ_MAP");
rte_free(map_info);
return err;
--
2.17.1
next prev parent reply other threads:[~2020-07-03 14:37 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-03 10:28 [dpdk-dev] [PATCH RFC 0/8] net/iavf: Enable 256 queues Gordon Noonan
2020-07-03 10:28 ` [dpdk-dev] [PATCH RFC 1/8] common/iavf: add large queue VC ops Gordon Noonan
2020-07-03 10:28 ` Gordon Noonan [this message]
2020-07-03 10:28 ` [dpdk-dev] [PATCH RFC 3/8] common/iavf: add large vsi queue config Gordon Noonan
2020-07-03 10:28 ` [dpdk-dev] [PATCH RFC 4/8] net/iavf: support > 256 lut table size Gordon Noonan
2020-07-03 10:28 ` [dpdk-dev] [PATCH RFC 5/8] Support dst ip only for RSS Gordon Noonan
2020-07-03 10:28 ` [dpdk-dev] [PATCH RFC 6/8] ICE: Enable advanced RSS for PPPoE Gordon Noonan
2020-07-03 10:28 ` [dpdk-dev] [PATCH RFC 7/8] Update/Add support for RSS on 5 Tuple Flows for GTPU encapsulated packets (AVF) Gordon Noonan
2020-07-03 10:28 ` [dpdk-dev] [PATCH RFC 8/8] net/iavf: fix gtpu ip udp issue Gordon Noonan
2020-07-03 11:16 [dpdk-dev] [PATCH RFC 0/8] net/iavf: Enable 256 queues Gordon Noonan
2020-07-03 11:16 ` [dpdk-dev] [PATCH RFC 2/8] net/iavf: support 64 queues Gordon Noonan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200703102829.52581-3-gordon.noonan@intel.com \
--to=gordon@dpdk.org \
--cc=Noonan@dpdk.org \
--cc=dev@dpdk.org \
--cc=gordon.noonan@intel.com \
--cc=qi.z.zhang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).