* [PATCH] examples/vhost: add option to control mbuf pool size
@ 2022-02-17 15:17 Wenwu Ma
2022-03-03 11:42 ` Xia, Chenbo
2022-03-04 16:24 ` [PATCH v2] examples/vhost: fix launch failure with physical port Wenwu Ma
0 siblings, 2 replies; 5+ messages in thread
From: Wenwu Ma @ 2022-02-17 15:17 UTC (permalink / raw)
To: maxime.coquelin, chenbo.xia
Cc: dev, jiayu.hu, yinan.wang, xingguang.he, Wenwu Ma
dpdk-vhost will fail to launch with a 40G i40e port because
there are not enough mbufs. This patch adds a new option
--total-num-mbufs, through which the user can set a larger
mbuf pool to avoid this problem.
Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
---
examples/vhost/main.c | 83 +++++++++++++++----------------------------
1 file changed, 29 insertions(+), 54 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3e784f5c6f..360f9f7f4d 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -33,6 +33,8 @@
#define MAX_QUEUES 128
#endif
+#define NUM_MBUFS_DEFAULT 0x24000
+
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
@@ -61,6 +63,9 @@
#define DMA_RING_SIZE 4096
+/* number of mbufs in all pools - if specified on command-line. */
+static int total_num_mbufs = NUM_MBUFS_DEFAULT;
+
struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
static int dma_count;
@@ -609,7 +614,8 @@ us_vhost_usage(const char *prgname)
" --tso [0|1] disable/enable TCP segment offload.\n"
" --client register a vhost-user socket as client mode.\n"
" --dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
- " --dmas register dma channel for specific vhost device.\n",
+ " --dmas register dma channel for specific vhost device.\n"
+ " --total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools.\n",
prgname);
}
@@ -638,6 +644,8 @@ enum {
OPT_BUILTIN_NET_DRIVER_NUM,
#define OPT_DMAS "dmas"
OPT_DMAS_NUM,
+#define OPT_NUM_MBUFS "total-num-mbufs"
+ OPT_NUM_MBUFS_NUM,
};
/*
@@ -675,6 +683,8 @@ us_vhost_parse_args(int argc, char **argv)
NULL, OPT_BUILTIN_NET_DRIVER_NUM},
{OPT_DMAS, required_argument,
NULL, OPT_DMAS_NUM},
+ {OPT_NUM_MBUFS, required_argument,
+ NULL, OPT_NUM_MBUFS_NUM},
{NULL, 0, 0, 0},
};
@@ -802,6 +812,19 @@ us_vhost_parse_args(int argc, char **argv)
}
break;
+ case OPT_NUM_MBUFS_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for total-num-mbufs [0..N]\n");
+ us_vhost_usage(prgname);
+ return -1;
+ }
+
+ if (total_num_mbufs < ret)
+ total_num_mbufs = ret;
+ break;
+
case OPT_CLIENT_NUM:
client_mode = 1;
break;
@@ -1731,57 +1754,6 @@ sigint_handler(__rte_unused int signum)
exit(0);
}
-/*
- * While creating an mbuf pool, one key thing is to figure out how
- * many mbuf entries is enough for our use. FYI, here are some
- * guidelines:
- *
- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
- *
- * - For each switch core (A CPU core does the packet switch), we need
- * also make some reservation for receiving the packets from virtio
- * Tx queue. How many is enough depends on the usage. It's normally
- * a simple calculation like following:
- *
- * MAX_PKT_BURST * max packet size / mbuf size
- *
- * So, we definitely need allocate more mbufs when TSO is enabled.
- *
- * - Similarly, for each switching core, we should serve @nr_rx_desc
- * mbufs for receiving the packets from physical NIC device.
- *
- * - We also need make sure, for each switch core, we have allocated
- * enough mbufs to fill up the mbuf cache.
- */
-static void
-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
- uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
-{
- uint32_t nr_mbufs;
- uint32_t nr_mbufs_per_core;
- uint32_t mtu = 1500;
-
- if (mergeable)
- mtu = 9000;
- if (enable_tso)
- mtu = 64 * 1024;
-
- nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
- (mbuf_size - RTE_PKTMBUF_HEADROOM);
- nr_mbufs_per_core += nr_rx_desc;
- nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
-
- nr_mbufs = nr_queues * nr_rx_desc;
- nr_mbufs += nr_mbufs_per_core * nr_switch_core;
- nr_mbufs *= nr_port;
-
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
- nr_mbuf_cache, 0, mbuf_size,
- rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-}
-
static void
reset_dma(void)
{
@@ -1861,8 +1833,11 @@ main(int argc, char *argv[])
* many queues here. We probably should only do allocation for
* those queues we are going to use.
*/
- create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
- MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
+ MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
--
2.25.1
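
A note on choosing a value for --total-num-mbufs: the heuristic that the
removed create_mbuf_pool() applied can still serve as a starting point.
Below is a minimal standalone sketch of that calculation; every input except
MAX_QUEUES (128, from the patch) is an illustrative placeholder rather than
the example's actual default, so plug in the real port, descriptor, core and
mbuf-size figures before trusting the result.

/*
 * Sizing sketch mirroring the removed create_mbuf_pool() heuristic.
 * All inputs below are placeholders; adjust them to the actual setup
 * and pass the printed value via --total-num-mbufs.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nr_port = 1;          /* physical ports in use */
	uint32_t nr_switch_core = 3;   /* lcores doing packet switching */
	uint32_t nr_queues = 128;      /* MAX_QUEUES in the example */
	uint32_t nr_rx_desc = 1024;    /* Rx descriptors per queue (placeholder) */
	uint32_t nr_mbuf_cache = 128;  /* per-lcore mempool cache (placeholder) */
	uint32_t mbuf_size = 2176;     /* mbuf data room in bytes (placeholder) */
	uint32_t headroom = 128;       /* RTE_PKTMBUF_HEADROOM (placeholder) */
	uint32_t burst = 32;           /* MAX_PKT_BURST (placeholder) */
	uint32_t mtu = 1500;           /* 9000 if mergeable, 64 * 1024 if TSO */

	/* burst drained from a virtio Tx queue, in mbuf units */
	uint32_t per_core = (mtu + mbuf_size) * burst / (mbuf_size - headroom);
	/* plus mbufs for receiving from the physical NIC */
	per_core += nr_rx_desc;
	/* and never less than the mempool cache size */
	if (per_core < nr_mbuf_cache)
		per_core = nr_mbuf_cache;

	/* mbufs reserved at Rx queue setup, plus the per-core reservations */
	uint32_t total = nr_queues * nr_rx_desc + per_core * nr_switch_core;
	total *= nr_port;

	printf("suggested --total-num-mbufs: %" PRIu32 "\n", total);
	return 0;
}

With the placeholder inputs above the estimate comes out around 134k mbufs,
below the patch's built-in default of 0x24000 (147456); larger descriptor
rings, more queues or more ports are what push it past the default.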
* RE: [PATCH] examples/vhost: add option to control mbuf pool size
2022-02-17 15:17 [PATCH] examples/vhost: add option to control mbuf pool size Wenwu Ma
@ 2022-03-03 11:42 ` Xia, Chenbo
2022-03-04 16:24 ` [PATCH v2] examples/vhost: fix launch failure with physical port Wenwu Ma
1 sibling, 0 replies; 5+ messages in thread
From: Xia, Chenbo @ 2022-03-03 11:42 UTC (permalink / raw)
To: Ma, WenwuX, maxime.coquelin; +Cc: dev, Hu, Jiayu, Wang, Yinan, He, Xingguang
Hi,
> -----Original Message-----
> From: Ma, WenwuX <wenwux.ma@intel.com>
> Sent: Thursday, February 17, 2022 11:17 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Wang, Yinan
> <yinan.wang@intel.com>; He, Xingguang <xingguang.he@intel.com>; Ma, WenwuX
> <wenwux.ma@intel.com>
> Subject: [PATCH] examples/vhost: add option to control mbuf pool size
>
> dpdk-vhost will fail to launch with a 40G i40e port because
> there are not enough mbufs. This patch adds a new option
> --total-num-mbufs, through which the user can set a larger
> mbuf pool to avoid this problem.
>
> Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
> ---
> examples/vhost/main.c | 83 +++++++++++++++----------------------------
> 1 file changed, 29 insertions(+), 54 deletions(-)
>
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index 3e784f5c6f..360f9f7f4d 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -33,6 +33,8 @@
> #define MAX_QUEUES 128
> #endif
>
> +#define NUM_MBUFS_DEFAULT 0x24000
> +
> /* the maximum number of external ports supported */
> #define MAX_SUP_PORTS 1
>
> @@ -61,6 +63,9 @@
>
> #define DMA_RING_SIZE 4096
>
> +/* number of mbufs in all pools - if specified on command-line. */
> +static int total_num_mbufs = NUM_MBUFS_DEFAULT;
> +
> struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
> int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
> static int dma_count;
> @@ -609,7 +614,8 @@ us_vhost_usage(const char *prgname)
> " --tso [0|1] disable/enable TCP segment offload.\n"
> " --client register a vhost-user socket as client mode.\n"
> " --dma-type register dma type for your vhost async driver.
> For example \"ioat\" for now.\n"
> - " --dmas register dma channel for specific vhost device.\n",
> + " --dmas register dma channel for specific vhost device.\n"
> + " --total-num-mbufs [0-N] set the number of mbufs to be
> allocated in mbuf pools.\n",
Let's tell the user about the default value here. Also, this patch no longer
applies cleanly on the current next-virtio tree.
> prgname);
> }
>
> @@ -638,6 +644,8 @@ enum {
> OPT_BUILTIN_NET_DRIVER_NUM,
> #define OPT_DMAS "dmas"
> OPT_DMAS_NUM,
> +#define OPT_NUM_MBUFS "total-num-mbufs"
Align it with "dmas".
And as I said on the previous version, this fix is not good. But since we will
move all vhost tests to the vhost driver and give up this example soon, I will
accept this fix to help with testing.
Thanks,
Chenbo
> + OPT_NUM_MBUFS_NUM,
> };
>
> /*
> @@ -675,6 +683,8 @@ us_vhost_parse_args(int argc, char **argv)
> NULL, OPT_BUILTIN_NET_DRIVER_NUM},
> {OPT_DMAS, required_argument,
> NULL, OPT_DMAS_NUM},
> + {OPT_NUM_MBUFS, required_argument,
> + NULL, OPT_NUM_MBUFS_NUM},
> {NULL, 0, 0, 0},
> };
>
> @@ -802,6 +812,19 @@ us_vhost_parse_args(int argc, char **argv)
> }
> break;
>
> + case OPT_NUM_MBUFS_NUM:
> + ret = parse_num_opt(optarg, INT32_MAX);
> + if (ret == -1) {
> + RTE_LOG(INFO, VHOST_CONFIG,
> + "Invalid argument for total-num-mbufs [0..N]\n");
> + us_vhost_usage(prgname);
> + return -1;
> + }
> +
> + if (total_num_mbufs < ret)
> + total_num_mbufs = ret;
> + break;
> +
> case OPT_CLIENT_NUM:
> client_mode = 1;
> break;
> @@ -1731,57 +1754,6 @@ sigint_handler(__rte_unused int signum)
> exit(0);
> }
>
> -/*
> - * While creating an mbuf pool, one key thing is to figure out how
> - * many mbuf entries is enough for our use. FYI, here are some
> - * guidelines:
> - *
> - * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
> - *
> - * - For each switch core (A CPU core does the packet switch), we need
> - * also make some reservation for receiving the packets from virtio
> - * Tx queue. How many is enough depends on the usage. It's normally
> - * a simple calculation like following:
> - *
> - * MAX_PKT_BURST * max packet size / mbuf size
> - *
> - * So, we definitely need allocate more mbufs when TSO is enabled.
> - *
> - * - Similarly, for each switching core, we should serve @nr_rx_desc
> - * mbufs for receiving the packets from physical NIC device.
> - *
> - * - We also need make sure, for each switch core, we have allocated
> - * enough mbufs to fill up the mbuf cache.
> - */
> -static void
> -create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t
> mbuf_size,
> - uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
> -{
> - uint32_t nr_mbufs;
> - uint32_t nr_mbufs_per_core;
> - uint32_t mtu = 1500;
> -
> - if (mergeable)
> - mtu = 9000;
> - if (enable_tso)
> - mtu = 64 * 1024;
> -
> - nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
> - (mbuf_size - RTE_PKTMBUF_HEADROOM);
> - nr_mbufs_per_core += nr_rx_desc;
> - nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
> -
> - nr_mbufs = nr_queues * nr_rx_desc;
> - nr_mbufs += nr_mbufs_per_core * nr_switch_core;
> - nr_mbufs *= nr_port;
> -
> - mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
> - nr_mbuf_cache, 0, mbuf_size,
> - rte_socket_id());
> - if (mbuf_pool == NULL)
> - rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
> -}
> -
> static void
> reset_dma(void)
> {
> @@ -1861,8 +1833,11 @@ main(int argc, char *argv[])
> * many queues here. We probably should only do allocation for
> * those queues we are going to use.
> */
> - create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
> - MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
> + mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
> + MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
> + rte_socket_id());
> + if (mbuf_pool == NULL)
> + rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
>
> if (vm2vm_mode == VM2VM_HARDWARE) {
> /* Enable VT loop back to let L2 switch to do it. */
> --
> 2.25.1
* RE: [PATCH v2] examples/vhost: fix launch failure with physical port
2022-03-04 16:24 ` [PATCH v2] examples/vhost: fix launch failure with physical port Wenwu Ma
@ 2022-03-04 9:51 ` Xia, Chenbo
2022-03-04 14:33 ` Ferruh Yigit
0 siblings, 1 reply; 5+ messages in thread
From: Xia, Chenbo @ 2022-03-04 9:51 UTC (permalink / raw)
To: Ma, WenwuX, maxime.coquelin, dev
Cc: Hu, Jiayu, Wang, Yinan, He, Xingguang, stable
> -----Original Message-----
> From: Ma, WenwuX <wenwux.ma@intel.com>
> Sent: Saturday, March 5, 2022 12:24 AM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
> dev@dpdk.org
> Cc: Hu, Jiayu <jiayu.hu@intel.com>; Wang, Yinan <yinan.wang@intel.com>; He,
> Xingguang <xingguang.he@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>;
> stable@dpdk.org
> Subject: [PATCH v2] examples/vhost: fix launch failure with physical port
>
> dpdk-vhost will fail to launch with a 40G i40e port because
> there are not enough mbufs. This patch adds a new option
> --total-num-mbufs, through which the user can set a larger
> mbuf pool to avoid this problem.
>
> Fixes: 4796ad63ba1f ("examples/vhost: import userspace vhost application")
> Cc: stable@dpdk.org
>
> Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
> ---
> examples/vhost/main.c | 83 +++++++++++++++----------------------------
> 1 file changed, 29 insertions(+), 54 deletions(-)
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
* Re: [PATCH v2] examples/vhost: fix launch failure with physical port
2022-03-04 9:51 ` Xia, Chenbo
@ 2022-03-04 14:33 ` Ferruh Yigit
0 siblings, 0 replies; 5+ messages in thread
From: Ferruh Yigit @ 2022-03-04 14:33 UTC (permalink / raw)
To: Xia, Chenbo, Ma, WenwuX, maxime.coquelin, dev
Cc: Hu, Jiayu, Wang, Yinan, He, Xingguang, stable
On 3/4/2022 9:51 AM, Xia, Chenbo wrote:
>> -----Original Message-----
>> From: Ma, WenwuX <wenwux.ma@intel.com>
>> Sent: Saturday, March 5, 2022 12:24 AM
>> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
>> dev@dpdk.org
>> Cc: Hu, Jiayu <jiayu.hu@intel.com>; Wang, Yinan <yinan.wang@intel.com>; He,
>> Xingguang <xingguang.he@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>;
>> stable@dpdk.org
>> Subject: [PATCH v2] examples/vhost: fix launch failure with physical port
>>
>> dpdk-vhost will fail to launch with a 40G i40e port because
>> there are not enough mbufs. This patch adds a new option
>> --total-num-mbufs, through which the user can set a larger
>> mbuf pool to avoid this problem.
>>
>> Fixes: 4796ad63ba1f ("examples/vhost: import userspace vhost application")
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
>> ---
>> examples/vhost/main.c | 83 +++++++++++++++----------------------------
>> 1 file changed, 29 insertions(+), 54 deletions(-)
>
> Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Applied to dpdk-next-net/main, thanks.
* [PATCH v2] examples/vhost: fix launch failure with physical port
2022-02-17 15:17 [PATCH] examples/vhost: add option to control mbuf pool size Wenwu Ma
2022-03-03 11:42 ` Xia, Chenbo
@ 2022-03-04 16:24 ` Wenwu Ma
2022-03-04 9:51 ` Xia, Chenbo
1 sibling, 1 reply; 5+ messages in thread
From: Wenwu Ma @ 2022-03-04 16:24 UTC (permalink / raw)
To: maxime.coquelin, chenbo.xia, dev
Cc: jiayu.hu, yinan.wang, xingguang.he, Wenwu Ma, stable
dpdk-vhost will fail to launch with a 40G i40e port because
there are not enough mbufs. This patch adds a new option
--total-num-mbufs, through which the user can set a larger
mbuf pool to avoid this problem.
Fixes: 4796ad63ba1f ("examples/vhost: import userspace vhost application")
Cc: stable@dpdk.org
Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
---
examples/vhost/main.c | 83 +++++++++++++++----------------------------
1 file changed, 29 insertions(+), 54 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 68afd398bb..d94fabb060 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -33,6 +33,8 @@
#define MAX_QUEUES 128
#endif
+#define NUM_MBUFS_DEFAULT 0x24000
+
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
@@ -61,6 +63,9 @@
#define DMA_RING_SIZE 4096
+/* number of mbufs in all pools - if specified on command-line. */
+static int total_num_mbufs = NUM_MBUFS_DEFAULT;
+
struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
static int dma_count;
@@ -608,7 +613,8 @@ us_vhost_usage(const char *prgname)
" --tx-csum [0|1] disable/enable TX checksum offload.\n"
" --tso [0|1] disable/enable TCP segment offload.\n"
" --client register a vhost-user socket as client mode.\n"
- " --dmas register dma channel for specific vhost device.\n",
+ " --dmas register dma channel for specific vhost device.\n"
+ " --total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
prgname);
}
@@ -637,6 +643,8 @@ enum {
OPT_BUILTIN_NET_DRIVER_NUM,
#define OPT_DMAS "dmas"
OPT_DMAS_NUM,
+#define OPT_NUM_MBUFS "total-num-mbufs"
+ OPT_NUM_MBUFS_NUM,
};
/*
@@ -674,6 +682,8 @@ us_vhost_parse_args(int argc, char **argv)
NULL, OPT_BUILTIN_NET_DRIVER_NUM},
{OPT_DMAS, required_argument,
NULL, OPT_DMAS_NUM},
+ {OPT_NUM_MBUFS, required_argument,
+ NULL, OPT_NUM_MBUFS_NUM},
{NULL, 0, 0, 0},
};
@@ -801,6 +811,19 @@ us_vhost_parse_args(int argc, char **argv)
}
break;
+ case OPT_NUM_MBUFS_NUM:
+ ret = parse_num_opt(optarg, INT32_MAX);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for total-num-mbufs [0..N]\n");
+ us_vhost_usage(prgname);
+ return -1;
+ }
+
+ if (total_num_mbufs < ret)
+ total_num_mbufs = ret;
+ break;
+
case OPT_CLIENT_NUM:
client_mode = 1;
break;
@@ -1730,57 +1753,6 @@ sigint_handler(__rte_unused int signum)
exit(0);
}
-/*
- * While creating an mbuf pool, one key thing is to figure out how
- * many mbuf entries is enough for our use. FYI, here are some
- * guidelines:
- *
- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
- *
- * - For each switch core (A CPU core does the packet switch), we need
- * also make some reservation for receiving the packets from virtio
- * Tx queue. How many is enough depends on the usage. It's normally
- * a simple calculation like following:
- *
- * MAX_PKT_BURST * max packet size / mbuf size
- *
- * So, we definitely need allocate more mbufs when TSO is enabled.
- *
- * - Similarly, for each switching core, we should serve @nr_rx_desc
- * mbufs for receiving the packets from physical NIC device.
- *
- * - We also need make sure, for each switch core, we have allocated
- * enough mbufs to fill up the mbuf cache.
- */
-static void
-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
- uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
-{
- uint32_t nr_mbufs;
- uint32_t nr_mbufs_per_core;
- uint32_t mtu = 1500;
-
- if (mergeable)
- mtu = 9000;
- if (enable_tso)
- mtu = 64 * 1024;
-
- nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
- (mbuf_size - RTE_PKTMBUF_HEADROOM);
- nr_mbufs_per_core += nr_rx_desc;
- nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
-
- nr_mbufs = nr_queues * nr_rx_desc;
- nr_mbufs += nr_mbufs_per_core * nr_switch_core;
- nr_mbufs *= nr_port;
-
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
- nr_mbuf_cache, 0, mbuf_size,
- rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-}
-
static void
reset_dma(void)
{
@@ -1860,8 +1832,11 @@ main(int argc, char *argv[])
* many queues here. We probably should only do allocation for
* those queues we are going to use.
*/
- create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
- MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
+ MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
--
2.25.1
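
One behavioural note on the parsing hunk in both v1 and v2: because of the
"if (total_num_mbufs < ret)" check, values smaller than NUM_MBUFS_DEFAULT are
accepted but silently ignored, so --total-num-mbufs can only grow the pool.
The sketch below is a self-contained illustration of that wiring, using plain
getopt_long() and strtol() as stand-ins for the example's option table and
parse_num_opt(); it is not the example's actual code.

/*
 * Sketch of the --total-num-mbufs wiring: a getopt_long() long option
 * whose value can only raise the pool size above the built-in default.
 */
#include <getopt.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_MBUFS_DEFAULT 0x24000	/* 147456, as in the patch */

static long total_num_mbufs = NUM_MBUFS_DEFAULT;

enum { OPT_NUM_MBUFS_NUM = 256 };	/* outside the short-option range */

int main(int argc, char **argv)
{
	static const struct option long_opts[] = {
		{"total-num-mbufs", required_argument, NULL, OPT_NUM_MBUFS_NUM},
		{NULL, 0, 0, 0},
	};
	int opt;

	while ((opt = getopt_long(argc, argv, "", long_opts, NULL)) != -1) {
		if (opt != OPT_NUM_MBUFS_NUM)
			continue;

		/* stand-in for parse_num_opt(optarg, INT32_MAX) */
		char *end = NULL;
		long val = strtol(optarg, &end, 0);
		if (end == optarg || *end != '\0' || val < 0 || val > INT32_MAX) {
			fprintf(stderr,
				"Invalid argument for total-num-mbufs [0..N]\n");
			return 1;
		}

		/* values below the default are silently ignored */
		if (total_num_mbufs < val)
			total_num_mbufs = val;
	}

	printf("mbuf pool size: %ld\n", total_num_mbufs);
	return 0;
}

On the command line this would look like, for example, --total-num-mbufs
262144 appended after the EAL arguments and the usual "--" separator (the
exact EAL options depend on the setup).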