From: Wenwu Ma <wenwux.ma@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, dev@dpdk.org
Cc: jiayu.hu@intel.com, yinan.wang@intel.com, xingguang.he@intel.com,
Wenwu Ma <wenwux.ma@intel.com>,
stable@dpdk.org
Subject: [PATCH v2] examples/vhost: fix launch failure with physical port
Date: Fri, 4 Mar 2022 16:24:24 +0000
Message-ID: <20220304162424.822916-1-wenwux.ma@intel.com>
In-Reply-To: <20220217151705.441734-1-wenwux.ma@intel.com>
dpdk-vhost fails to launch with a 40G i40e port because
there are not enough mbufs in the default pool. This patch
adds a new option, --total-num-mbufs, through which the user
can set a larger mbuf pool to avoid this problem.
Fixes: 4796ad63ba1f ("examples/vhost: import userspace vhost application")
Cc: stable@dpdk.org
Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
---
examples/vhost/main.c | 83 +++++++++++++++----------------------------
1 file changed, 29 insertions(+), 54 deletions(-)
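For reference, an illustrative invocation using the new option follows. The EAL core/memory-channel arguments, port mask, and socket path below are assumptions for the example, not taken from this patch:

    ./dpdk-vhost -l 1-3 -n 4 -- -p 0x1 --socket-file /tmp/sock0 --total-num-mbufs 262144

Note that a value below the built-in default of 147456 is effectively ignored, since the option can only grow the pool (see the parse handling in the hunk below).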
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 68afd398bb..d94fabb060 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -33,6 +33,8 @@
#define MAX_QUEUES 128
#endif
+#define NUM_MBUFS_DEFAULT 0x24000
+
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
@@ -61,6 +63,9 @@
#define DMA_RING_SIZE 4096
+/* Total number of mbufs in the pool; can be overridden on the command line. */
+static int total_num_mbufs = NUM_MBUFS_DEFAULT;
+
struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
static int dma_count;
@@ -608,7 +613,8 @@ us_vhost_usage(const char *prgname)
" --tx-csum [0|1] disable/enable TX checksum offload.\n"
" --tso [0|1] disable/enable TCP segment offload.\n"
" --client register a vhost-user socket as client mode.\n"
- " --dmas register dma channel for specific vhost device.\n",
+ " --dmas register dma channel for specific vhost device.\n"
+ " --total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
prgname);
}
@@ -637,6 +643,8 @@ enum {
OPT_BUILTIN_NET_DRIVER_NUM,
#define OPT_DMAS "dmas"
OPT_DMAS_NUM,
+#define OPT_NUM_MBUFS "total-num-mbufs"
+ OPT_NUM_MBUFS_NUM,
};
/*
@@ -674,6 +682,8 @@ us_vhost_parse_args(int argc, char **argv)
NULL, OPT_BUILTIN_NET_DRIVER_NUM},
{OPT_DMAS, required_argument,
NULL, OPT_DMAS_NUM},
+ {OPT_NUM_MBUFS, required_argument,
+ NULL, OPT_NUM_MBUFS_NUM},
{NULL, 0, 0, 0},
};
@@ -801,6 +811,19 @@ us_vhost_parse_args(int argc, char **argv)
}
break;
+ case OPT_NUM_MBUFS_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for total-num-mbufs [0..N]\n");
+ us_vhost_usage(prgname);
+ return -1;
+ }
+
+ if (total_num_mbufs < ret)
+ total_num_mbufs = ret;
+ break;
+
case OPT_CLIENT_NUM:
client_mode = 1;
break;
@@ -1730,57 +1753,6 @@ sigint_handler(__rte_unused int signum)
exit(0);
}
-/*
- * While creating an mbuf pool, one key thing is to figure out how
- * many mbuf entries is enough for our use. FYI, here are some
- * guidelines:
- *
- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
- *
- * - For each switch core (A CPU core does the packet switch), we need
- * also make some reservation for receiving the packets from virtio
- * Tx queue. How many is enough depends on the usage. It's normally
- * a simple calculation like following:
- *
- * MAX_PKT_BURST * max packet size / mbuf size
- *
- * So, we definitely need allocate more mbufs when TSO is enabled.
- *
- * - Similarly, for each switching core, we should serve @nr_rx_desc
- * mbufs for receiving the packets from physical NIC device.
- *
- * - We also need make sure, for each switch core, we have allocated
- * enough mbufs to fill up the mbuf cache.
- */
-static void
-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
- uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
-{
- uint32_t nr_mbufs;
- uint32_t nr_mbufs_per_core;
- uint32_t mtu = 1500;
-
- if (mergeable)
- mtu = 9000;
- if (enable_tso)
- mtu = 64 * 1024;
-
- nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
- (mbuf_size - RTE_PKTMBUF_HEADROOM);
- nr_mbufs_per_core += nr_rx_desc;
- nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
-
- nr_mbufs = nr_queues * nr_rx_desc;
- nr_mbufs += nr_mbufs_per_core * nr_switch_core;
- nr_mbufs *= nr_port;
-
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
- nr_mbuf_cache, 0, mbuf_size,
- rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-}
-
static void
reset_dma(void)
{
@@ -1860,8 +1832,11 @@ main(int argc, char *argv[])
* many queues here. We probably should only do allocation for
* those queues we are going to use.
*/
- create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
- MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
+ MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
--
2.25.1
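As a sizing aid: the removed create_mbuf_pool() comment and code in the diff above still describe how to estimate a sufficient pool size by hand. The stand-alone sketch below mirrors that calculation so a user can pick a reasonable --total-num-mbufs value; the constants are assumed to match the example's defaults (data size, burst size, descriptor count) and the helper name is ours, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define MAX_PKT_BURST      32    /* assumed burst size used by the example  */
#define MBUF_DATA_SIZE     2176  /* assumed RTE_MBUF_DEFAULT_BUF_SIZE       */
#define PKTMBUF_HEADROOM   128   /* RTE_PKTMBUF_HEADROOM                    */
#define MBUF_CACHE_SIZE    128
#define MAX_QUEUES         128
#define RX_DESC_DEFAULT    1024  /* assumed RTE_TEST_RX_DESC_DEFAULT        */

/* Rough estimate following the removed create_mbuf_pool() guidelines. */
static uint32_t
estimate_total_mbufs(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mtu)
{
	/* mbufs a switch core needs to receive one burst from a virtio Tx queue */
	uint32_t per_core = (mtu + MBUF_DATA_SIZE) * MAX_PKT_BURST /
			    (MBUF_DATA_SIZE - PKTMBUF_HEADROOM);
	/* plus the descriptors it serves for the physical NIC ...             */
	per_core += RX_DESC_DEFAULT;
	/* ... and at least enough to fill the per-core mempool cache          */
	if (per_core < MBUF_CACHE_SIZE)
		per_core = MBUF_CACHE_SIZE;

	/* every Rx queue reserves RX_DESC_DEFAULT mbufs at queue setup time   */
	uint32_t total = MAX_QUEUES * RX_DESC_DEFAULT;
	total += per_core * nr_switch_core;
	return total * nr_port;
}

int
main(void)
{
	/* e.g. one port, three switch cores, 9000-byte MTU (mergeable buffers) */
	printf("suggested --total-num-mbufs >= %u\n",
	       estimate_total_mbufs(1, 3, 9000));
	return 0;
}

The result is only a starting point; if the application still fails to allocate Rx mbufs, raise the value further.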
Thread overview: 3+ messages
[not found] <20220217151705.441734-1-wenwux.ma@intel.com>
2022-03-04 16:24 ` Wenwu Ma [this message]
2022-03-04 9:51 ` Xia, Chenbo
2022-03-04 14:33 ` Ferruh Yigit