From: Hanumanth Pothula <hpothula@marvell.com>
To: Aman Singh <aman.deep.singh@intel.com>,
Yuying Zhang <yuying.zhang@intel.com>
Cc: <dev@dpdk.org>, <andrew.rybchenko@oktetlabs.ru>,
<thomas@monjalon.net>, <yux.jiang@intel.com>,
<jerinj@marvell.com>, <ndabilpuram@marvell.com>,
<hpothula@marvell.com>
Subject: [PATCH v5 1/1] app/testpmd: add valid check to verify multi mempool feature
Date: Mon, 21 Nov 2022 18:15:46 +0530
Message-ID: <20221121124546.3920722-1-hpothula@marvell.com>
In-Reply-To: <20221118141334.3825072-1-hpothula@marvell.com>
Validate the ethdev parameter 'max_rx_mempools' to determine whether
the device supports the multi-mempool feature.

Also add a new testpmd command-line argument, --multi-mempool, to
control the multi-mempool feature. It is disabled by default.
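For example, the feature could be exercised with an invocation along
these lines (the PCI address and queue counts are illustrative, not
taken from this patch):

  dpdk-testpmd -a 0002:02:00.0 -- --multi-mempool \
      --mbuf-size=2048,4096 --rxq=2 --txq=2

Without --multi-mempool, multiple --mbuf-size values fall back to the
single pool/segment setup path.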
Bugzilla ID: 1128
Fixes: 4f04edcda769 ("app/testpmd: support multiple mbuf pools per Rx queue")
Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
v5:
- Added a testpmd argument to enable the multi-mempool feature.
- Simplified the logic that distinguishes between multi-mempool,
multi-segment, and single pool/segment configurations.
v4:
- Updated the if condition.
v3:
- Simplified the conditional check.
- Corrected the spelling of 'whether'.
v2:
- Rebased on tip of next-net/main.
---
app/test-pmd/parameters.c | 3 ++
app/test-pmd/testpmd.c | 58 +++++++++++++++++++++++++--------------
app/test-pmd/testpmd.h | 1 +
3 files changed, 41 insertions(+), 21 deletions(-)
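The core of the change is to gate the multi-pool Rx path on both the
new user flag and the device capability reported by
rte_eth_dev_info_get(). A minimal, self-contained sketch of that
capability gate (the function name and simplified error handling are
mine; testpmd's actual logic is in the rx_queue_setup() hunk below):

  #include <stdint.h>
  #include <rte_ethdev.h>

  /* Return 1 when the multi-mempool Rx path may be used: the user
   * enabled it (--multi-mempool), the PMD advertises support via
   * dev_info.max_rx_mempools, and more than one mbuf pool was
   * configured (--mbuf-size=A,B,...). Return 0 otherwise.
   */
  static int
  can_use_multi_mempool(uint16_t port_id, uint8_t multi_mempool,
                        unsigned int mbuf_data_size_n)
  {
          struct rte_eth_dev_info dev_info;

          if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                  return 0;

          return multi_mempool == 1 && dev_info.max_rx_mempools != 0 &&
                 mbuf_data_size_n > 1;
  }
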
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index aed4cdcb84..26d6450db4 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -700,6 +700,7 @@ launch_args_parse(int argc, char** argv)
{ "rx-mq-mode", 1, 0, 0 },
{ "record-core-cycles", 0, 0, 0 },
{ "record-burst-stats", 0, 0, 0 },
+ { "multi-mempool", 0, 0, 0 },
{ PARAM_NUM_PROCS, 1, 0, 0 },
{ PARAM_PROC_ID, 1, 0, 0 },
{ 0, 0, 0, 0 },
@@ -1449,6 +1450,8 @@ launch_args_parse(int argc, char** argv)
record_core_cycles = 1;
if (!strcmp(lgopts[opt_idx].name, "record-burst-stats"))
record_burst_stats = 1;
+ if (!strcmp(lgopts[opt_idx].name, "multi-mempool"))
+ multi_mempool = 1;
if (!strcmp(lgopts[opt_idx].name, PARAM_NUM_PROCS))
num_procs = atoi(optarg);
if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID))
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 4e25f77c6a..9dfc4c9d0e 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -497,6 +497,11 @@ uint8_t record_burst_stats;
*/
uint32_t rxq_share;
+/*
+ * Multi-mempool support, disabled by default.
+ */
+uint8_t multi_mempool;
+
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
@@ -2655,28 +2660,23 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
struct rte_mempool *mpx;
+ struct rte_eth_dev_info dev_info;
unsigned int i, mp_n;
uint32_t prev_hdrs = 0;
int ret;
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
/* Verify Rx queue configuration is single pool and segment or
* multiple pool/segment.
+ * @see rte_eth_dev_info::max_rx_mempools
* @see rte_eth_rxconf::rx_mempools
* @see rte_eth_rxconf::rx_seg
*/
- if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
- ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
- /* Single pool/segment configuration */
- rx_conf->rx_seg = NULL;
- rx_conf->rx_nseg = 0;
- ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
- nb_rx_desc, socket_id,
- rx_conf, mp);
- goto exit;
- }
-
- if (rx_pkt_nb_segs > 1 ||
- rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+ if ((rx_pkt_nb_segs > 1) &&
+ (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
/* multi-segment configuration */
for (i = 0; i < rx_pkt_nb_segs; i++) {
struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
@@ -2701,7 +2701,14 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
}
rx_conf->rx_nseg = rx_pkt_nb_segs;
rx_conf->rx_seg = rx_useg;
- } else {
+ rx_conf->rx_mempools = NULL;
+ rx_conf->rx_nmempool = 0;
+ ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+ socket_id, rx_conf, NULL);
+ rx_conf->rx_seg = NULL;
+ rx_conf->rx_nseg = 0;
+ } else if ((multi_mempool == 1) && (dev_info.max_rx_mempools != 0) &&
+ (mbuf_data_size_n > 1)) {
/* multi-pool configuration */
for (i = 0; i < mbuf_data_size_n; i++) {
mpx = mbuf_pool_find(socket_id, i);
@@ -2709,14 +2716,23 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
}
rx_conf->rx_mempools = rx_mempool;
rx_conf->rx_nmempool = mbuf_data_size_n;
- }
- ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+ rx_conf->rx_seg = NULL;
+ rx_conf->rx_nseg = 0;
+ ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
socket_id, rx_conf, NULL);
- rx_conf->rx_seg = NULL;
- rx_conf->rx_nseg = 0;
- rx_conf->rx_mempools = NULL;
- rx_conf->rx_nmempool = 0;
-exit:
+ rx_conf->rx_mempools = NULL;
+ rx_conf->rx_nmempool = 0;
+ } else {
+ /* Single pool/segment configuration */
+ rx_conf->rx_seg = NULL;
+ rx_conf->rx_nseg = 0;
+ rx_conf->rx_mempools = NULL;
+ rx_conf->rx_nmempool = 0;
+ ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+ socket_id, rx_conf, mp);
+ }
+
+
ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
RTE_ETH_QUEUE_STATE_STOPPED :
RTE_ETH_QUEUE_STATE_STARTED;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index aaf69c349a..9472a2cb19 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -464,6 +464,7 @@ enum dcb_mode_enable
extern uint8_t xstats_hide_zero; /**< Hide zero values for xstats display */
/* globals used for configuration */
+extern uint8_t multi_mempool; /**< Enables multi-mempool feature. */
extern uint8_t record_core_cycles; /**< Enables measurement of CPU cycles */
extern uint8_t record_burst_stats; /**< Enables display of RX and TX bursts */
extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
--
2.25.1
Thread overview: 27+ messages
2022-11-17 11:30 [PATCH v1 " Hanumanth Pothula
2022-11-17 12:55 ` [PATCH v2 " Hanumanth Pothula
2022-11-17 15:00 ` Singh, Aman Deep
2022-11-17 15:58 ` [EXT] " Hanumanth Reddy Pothula
2022-11-17 16:03 ` [PATCH v3 " Hanumanth Pothula
2022-11-17 23:36 ` Ferruh Yigit
2022-11-18 6:51 ` Han, YingyaX
2022-11-18 11:37 ` Hanumanth Reddy Pothula
2022-11-18 11:13 ` Hanumanth Pothula
2022-11-18 14:13 ` [PATCH v4 " Hanumanth Pothula
2022-11-18 20:56 ` Ferruh Yigit
2022-11-19 0:00 ` [EXT] " Hanumanth Reddy Pothula
2022-11-21 10:08 ` Ferruh Yigit
2022-11-21 10:44 ` Hanumanth Reddy Pothula
2022-11-21 12:45 ` Hanumanth Pothula [this message]
2022-11-21 13:22 ` [PATCH v5 " Ferruh Yigit
2022-11-21 13:36 ` [EXT] " Hanumanth Reddy Pothula
2022-11-21 14:10 ` Ferruh Yigit
2022-11-21 14:33 ` [PATCH v6 " Hanumanth Pothula
2022-11-21 17:31 ` Ferruh Yigit
2022-11-21 17:45 ` [EXT] " Hanumanth Reddy Pothula
2022-11-21 18:05 ` Ferruh Yigit
2022-11-21 18:07 ` [PATCH v7 " Hanumanth Pothula
2022-11-21 18:40 ` Ferruh Yigit
2022-11-22 6:42 ` Han, YingyaX
2022-11-22 6:52 ` Tang, Yaqi
2022-11-22 8:33 ` Ferruh Yigit