patches for DPDK stable branches
 help / color / Atom feed
* [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
@ 2020-07-29 11:35 wangyunjian
  2020-08-05 13:47 ` Thomas Monjalon
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: wangyunjian @ 2020-07-29 11:35 UTC (permalink / raw)
  To: dev; +Cc: keith.wiles, ophirmu, jerry.lilijun, xudingke, Yunjian Wang, stable

From: Yunjian Wang <wangyunjian@huawei.com>

When setting up tx queues, we will create a mempool for the 'gso_ctx'.
The mempool is not freed when closing the tap device. If we free the
tap device and create it with a different name, it will create a new
mempool. This may cause an OOM.

Fixes: 050316a88313 ("net/tap: support TSO (TCP Segment Offload)")
Cc: stable@dpdk.org

Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
---
 drivers/net/tap/rte_eth_tap.c | 43 +++++++++++++++++++++--------------
 drivers/net/tap/rte_eth_tap.h |  1 +
 2 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 339f24bf8..119985d90 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1070,6 +1070,9 @@ tap_dev_close(struct rte_eth_dev *dev)
 				&internals->remote_initial_flags);
 	}
 
+	rte_mempool_free(internals->gso_ctx_mp);
+	internals->gso_ctx_mp = NULL;
+
 	if (internals->ka_fd != -1) {
 		close(internals->ka_fd);
 		internals->ka_fd = -1;
@@ -1317,26 +1320,31 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 {
 	uint32_t gso_types;
 	char pool_name[64];
-
-	/*
-	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
-	 * size per mbuf use this pool for both direct and indirect mbufs
-	 */
-
-	struct rte_mempool *mp;      /* Mempool for GSO packets */
+	struct pmd_internals *pmd = dev->data->dev_private;
+	int ret;
 
 	/* initialize GSO context */
 	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
-	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
-	mp = rte_mempool_lookup((const char *)pool_name);
-	if (!mp) {
-		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
-			TAP_GSO_MBUF_CACHE_SIZE, 0,
+	if (!pmd->gso_ctx_mp) {
+		/*
+		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
+		 * bytes size per mbuf use this pool for both direct and
+		 * indirect mbufs
+		 */
+		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
+				dev->device->name);
+		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
+			TAP_LOG(ERR,
+				"%s: failed to create mbuf pool "
+				"name for device %s\n",
+				pmd->name, dev->device->name);
+			return -ENAMETOOLONG;
+		}
+		pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
+			TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
 			RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
 			SOCKET_ID_ANY);
-		if (!mp) {
-			struct pmd_internals *pmd = dev->data->dev_private;
-
+		if (!pmd->gso_ctx_mp) {
 			TAP_LOG(ERR,
 				"%s: failed to create mbuf pool for device %s\n",
 				pmd->name, dev->device->name);
@@ -1344,8 +1352,8 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 		}
 	}
 
-	gso_ctx->direct_pool = mp;
-	gso_ctx->indirect_pool = mp;
+	gso_ctx->direct_pool = pmd->gso_ctx_mp;
+	gso_ctx->indirect_pool = pmd->gso_ctx_mp;
 	gso_ctx->gso_types = gso_types;
 	gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
 	gso_ctx->flag = 0;
@@ -1842,6 +1850,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
 	pmd->type = type;
 	pmd->ka_fd = -1;
 	pmd->nlsk_fd = -1;
+	pmd->gso_ctx_mp = NULL;
 
 	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
 	if (pmd->ioctl_sock == -1) {
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 8d6d53dc0..ba45de840 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -91,6 +91,7 @@ struct pmd_internals {
 	struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
 	struct rte_intr_handle intr_handle;          /* LSC interrupt handle. */
 	int ka_fd;                        /* keep-alive file descriptor */
+	struct rte_mempool *gso_ctx_mp;     /* Mempool for GSO packets */
 };
 
 struct pmd_process_private {
-- 
2.23.0



^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
  2020-07-29 11:35 [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing wangyunjian
@ 2020-08-05 13:47 ` Thomas Monjalon
  2020-08-06 12:47   ` wangyunjian
  2020-08-05 16:35 ` Ferruh Yigit
  2020-08-08  9:58 ` [dpdk-stable] [dpdk-dev] [PATCH v2] " wangyunjian
  2 siblings, 1 reply; 9+ messages in thread
From: Thomas Monjalon @ 2020-08-05 13:47 UTC (permalink / raw)
  To: wangyunjian
  Cc: dev, stable, keith.wiles, ophirmu, jerry.lilijun, xudingke,
	Yunjian Wang, stable

29/07/2020 13:35, wangyunjian:
> From: Yunjian Wang <wangyunjian@huawei.com>
> 
> When setup tx queues, we will create a mempool for the 'gso_ctx'.
> The mempool is not freed when closing tap device. If free the tap
> device and create it with different name, it will create a new
> mempool. This maybe cause an OOM.

While at it, please look at implementing RTE_ETH_DEV_CLOSE_REMOVE
behaviour in tap. Thanks



^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
  2020-07-29 11:35 [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing wangyunjian
  2020-08-05 13:47 ` Thomas Monjalon
@ 2020-08-05 16:35 ` Ferruh Yigit
  2020-08-06 12:45   ` wangyunjian
  2020-08-08  9:58 ` [dpdk-stable] [dpdk-dev] [PATCH v2] " wangyunjian
  2 siblings, 1 reply; 9+ messages in thread
From: Ferruh Yigit @ 2020-08-05 16:35 UTC (permalink / raw)
  To: wangyunjian, dev; +Cc: keith.wiles, ophirmu, jerry.lilijun, xudingke, stable

On 7/29/2020 12:35 PM, wangyunjian wrote:
> From: Yunjian Wang <wangyunjian@huawei.com>
> 
> When setup tx queues, we will create a mempool for the 'gso_ctx'.
> The mempool is not freed when closing tap device. If free the tap
> device and create it with different name, it will create a new
> mempool. This maybe cause an OOM.
> 
> Fixes: 050316a88313 ("net/tap: support TSO (TCP Segment Offload)")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>

<...>

> @@ -1317,26 +1320,31 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
>  {
>  	uint32_t gso_types;
>  	char pool_name[64];
> -
> -	/*
> -	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
> -	 * size per mbuf use this pool for both direct and indirect mbufs
> -	 */
> -
> -	struct rte_mempool *mp;      /* Mempool for GSO packets */
> +	struct pmd_internals *pmd = dev->data->dev_private;
> +	int ret;
>  
>  	/* initialize GSO context */
>  	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
> -	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
> -	mp = rte_mempool_lookup((const char *)pool_name);
> -	if (!mp) {
> -		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
> -			TAP_GSO_MBUF_CACHE_SIZE, 0,
> +	if (!pmd->gso_ctx_mp) {
> +		/*
> +		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
> +		 * bytes size per mbuf use this pool for both direct and
> +		 * indirect mbufs
> +		 */
> +		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
> +				dev->device->name);
> +		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
> +			TAP_LOG(ERR,
> +				"%s: failed to create mbuf pool "
> +				"name for device %s\n",
> +				pmd->name, dev->device->name);

Overall looks good. Only above error doesn't say why it failed, informing the
user that device name is too long may help her to overcome the error.


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
  2020-08-05 16:35 ` Ferruh Yigit
@ 2020-08-06 12:45   ` wangyunjian
  2020-08-06 13:04     ` Ferruh Yigit
  0 siblings, 1 reply; 9+ messages in thread
From: wangyunjian @ 2020-08-06 12:45 UTC (permalink / raw)
  To: Ferruh Yigit, dev; +Cc: keith.wiles, ophirmu, Lilijun (Jerry), xudingke, stable



> -----Original Message-----
> From: Ferruh Yigit [mailto:ferruh.yigit@intel.com]
> Sent: Thursday, August 6, 2020 12:36 AM
> To: wangyunjian <wangyunjian@huawei.com>; dev@dpdk.org
> Cc: keith.wiles@intel.com; ophirmu@mellanox.com; Lilijun (Jerry)
> <jerry.lilijun@huawei.com>; xudingke <xudingke@huawei.com>;
> stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] net/tap: free mempool when closing
> 
> On 7/29/2020 12:35 PM, wangyunjian wrote:
> > From: Yunjian Wang <wangyunjian@huawei.com>
> >
> > When setup tx queues, we will create a mempool for the 'gso_ctx'.
> > The mempool is not freed when closing tap device. If free the tap
> > device and create it with different name, it will create a new
> > mempool. This maybe cause an OOM.
> >
> > Fixes: 050316a88313 ("net/tap: support TSO (TCP Segment Offload)")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
> 
> <...>
> 
> > @@ -1317,26 +1320,31 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx,
> struct rte_eth_dev *dev)
> >  {
> >  	uint32_t gso_types;
> >  	char pool_name[64];
> > -
> > -	/*
> > -	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
> > -	 * size per mbuf use this pool for both direct and indirect mbufs
> > -	 */
> > -
> > -	struct rte_mempool *mp;      /* Mempool for GSO packets */
> > +	struct pmd_internals *pmd = dev->data->dev_private;
> > +	int ret;
> >
> >  	/* initialize GSO context */
> >  	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
> > -	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
> > -	mp = rte_mempool_lookup((const char *)pool_name);
> > -	if (!mp) {
> > -		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
> > -			TAP_GSO_MBUF_CACHE_SIZE, 0,
> > +	if (!pmd->gso_ctx_mp) {
> > +		/*
> > +		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
> > +		 * bytes size per mbuf use this pool for both direct and
> > +		 * indirect mbufs
> > +		 */
> > +		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
> > +				dev->device->name);
> > +		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
> > +			TAP_LOG(ERR,
> > +				"%s: failed to create mbuf pool "
> > +				"name for device %s\n",
> > +				pmd->name, dev->device->name);
> 
> Overall looks good. Only above error doesn't say why it failed, informing the
> user that device name is too long may help her to overcome the error.

I found that the return value of the snprintf function was not checked
when modifying the code, so I fixed it.
I think it may fail, because the maximum device name length is
RTE_DEV_NAME_MAX_LEN (64).

Do I need to split into two patches?

Thanks,
Yunjian

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
  2020-08-05 13:47 ` Thomas Monjalon
@ 2020-08-06 12:47   ` wangyunjian
  2020-08-06 13:19     ` Thomas Monjalon
  0 siblings, 1 reply; 9+ messages in thread
From: wangyunjian @ 2020-08-06 12:47 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, stable, keith.wiles, ophirmu, Lilijun (Jerry), xudingke, stable

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Wednesday, August 5, 2020 9:48 PM
> To: wangyunjian <wangyunjian@huawei.com>
> Cc: dev@dpdk.org; stable@dpdk.org; keith.wiles@intel.com;
> ophirmu@mellanox.com; Lilijun (Jerry) <jerry.lilijun@huawei.com>; xudingke
> <xudingke@huawei.com>; wangyunjian <wangyunjian@huawei.com>;
> stable@dpdk.org
> Subject: Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when
> closing
> 
> 29/07/2020 13:35, wangyunjian:
> > From: Yunjian Wang <wangyunjian@huawei.com>
> >
> > When setup tx queues, we will create a mempool for the 'gso_ctx'.
> > The mempool is not freed when closing tap device. If free the tap
> > device and create it with different name, it will create a new
> > mempool. This maybe cause an OOM.
> 
> While at it, please look at implementing RTE_ETH_DEV_CLOSE_REMOVE
> behaviour in tap. Thanks
> 

I read the code for the tap device. Currently, the tap PMD doesn't
use the RTE_ETH_DEV_CLOSE_REMOVE flag.

Thanks,
Yunjian

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
  2020-08-06 12:45   ` wangyunjian
@ 2020-08-06 13:04     ` Ferruh Yigit
  2020-08-06 13:35       ` wangyunjian
  0 siblings, 1 reply; 9+ messages in thread
From: Ferruh Yigit @ 2020-08-06 13:04 UTC (permalink / raw)
  To: wangyunjian, dev; +Cc: keith.wiles, ophirmu, Lilijun (Jerry), xudingke, stable

On 8/6/2020 1:45 PM, wangyunjian wrote:
> 
> 
>> -----Original Message-----
>> From: Ferruh Yigit [mailto:ferruh.yigit@intel.com]
>> Sent: Thursday, August 6, 2020 12:36 AM
>> To: wangyunjian <wangyunjian@huawei.com>; dev@dpdk.org
>> Cc: keith.wiles@intel.com; ophirmu@mellanox.com; Lilijun (Jerry)
>> <jerry.lilijun@huawei.com>; xudingke <xudingke@huawei.com>;
>> stable@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH] net/tap: free mempool when closing
>>
>> On 7/29/2020 12:35 PM, wangyunjian wrote:
>>> From: Yunjian Wang <wangyunjian@huawei.com>
>>>
>>> When setup tx queues, we will create a mempool for the 'gso_ctx'.
>>> The mempool is not freed when closing tap device. If free the tap
>>> device and create it with different name, it will create a new
>>> mempool. This maybe cause an OOM.
>>>
>>> Fixes: 050316a88313 ("net/tap: support TSO (TCP Segment Offload)")
>>> Cc: stable@dpdk.org
>>>
>>> Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
>>
>> <...>
>>
>>> @@ -1317,26 +1320,31 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx,
>> struct rte_eth_dev *dev)
>>>  {
>>>  	uint32_t gso_types;
>>>  	char pool_name[64];
>>> -
>>> -	/*
>>> -	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
>>> -	 * size per mbuf use this pool for both direct and indirect mbufs
>>> -	 */
>>> -
>>> -	struct rte_mempool *mp;      /* Mempool for GSO packets */
>>> +	struct pmd_internals *pmd = dev->data->dev_private;
>>> +	int ret;
>>>
>>>  	/* initialize GSO context */
>>>  	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
>>> -	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
>>> -	mp = rte_mempool_lookup((const char *)pool_name);
>>> -	if (!mp) {
>>> -		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
>>> -			TAP_GSO_MBUF_CACHE_SIZE, 0,
>>> +	if (!pmd->gso_ctx_mp) {
>>> +		/*
>>> +		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
>>> +		 * bytes size per mbuf use this pool for both direct and
>>> +		 * indirect mbufs
>>> +		 */
>>> +		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
>>> +				dev->device->name);
>>> +		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
>>> +			TAP_LOG(ERR,
>>> +				"%s: failed to create mbuf pool "
>>> +				"name for device %s\n",
>>> +				pmd->name, dev->device->name);
>>
>> Overall looks good. Only above error doesn't say why it failed, informing the
>> user that device name is too long may help her to overcome the error.
> 
> I found that the return value of functions snprintf was not checked
> when modifying the code, so fixed it.
> I think it maybe fail, because the max device name length is
> RTE_DEV_NAME_MAX_LEN(64).

+1 to the check.
My comment was on the log message, which says "failed to create mbuf pool", but
it doesn't say it is failed because of long device name.
If user knows the reason of the failure, can prevent it by providing shorter
device name.
My suggestion is update the error log message to have the reason of failure.

> 
> Do I need to split into two patches?

I think OK to have the change in this patch.

> 
> Thanks,
> Yunjian
> 


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
  2020-08-06 12:47   ` wangyunjian
@ 2020-08-06 13:19     ` Thomas Monjalon
  0 siblings, 0 replies; 9+ messages in thread
From: Thomas Monjalon @ 2020-08-06 13:19 UTC (permalink / raw)
  To: wangyunjian
  Cc: dev, stable, keith.wiles, ophirmu, Lilijun (Jerry), xudingke, stable

06/08/2020 14:47, wangyunjian:
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> > 29/07/2020 13:35, wangyunjian:
> > > From: Yunjian Wang <wangyunjian@huawei.com>
> > >
> > > When setup tx queues, we will create a mempool for the 'gso_ctx'.
> > > The mempool is not freed when closing tap device. If free the tap
> > > device and create it with different name, it will create a new
> > > mempool. This maybe cause an OOM.
> > 
> > While at it, please look at implementing RTE_ETH_DEV_CLOSE_REMOVE
> > behaviour in tap. Thanks
> > 
> 
> I read the codes about tap device. Currently, the tap pmd doesn't
> use RTE_ETH_DEV_CLOSE_REMOVE flag.

I know. That's why I suggest to switch to RTE_ETH_DEV_CLOSE_REMOVE.
Please see this deprecation notice:
	http://git.dpdk.org/dpdk/commit/?id=7efbaa7b4e423



^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing
  2020-08-06 13:04     ` Ferruh Yigit
@ 2020-08-06 13:35       ` wangyunjian
  0 siblings, 0 replies; 9+ messages in thread
From: wangyunjian @ 2020-08-06 13:35 UTC (permalink / raw)
  To: Ferruh Yigit, dev; +Cc: keith.wiles, ophirmu, Lilijun (Jerry), xudingke, stable

> -----Original Message-----
> From: Ferruh Yigit [mailto:ferruh.yigit@intel.com]
> Sent: Thursday, August 6, 2020 9:04 PM
> To: wangyunjian <wangyunjian@huawei.com>; dev@dpdk.org
> Cc: keith.wiles@intel.com; ophirmu@mellanox.com; Lilijun (Jerry)
> <jerry.lilijun@huawei.com>; xudingke <xudingke@huawei.com>;
> stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] net/tap: free mempool when closing
> 
> On 8/6/2020 1:45 PM, wangyunjian wrote:
> >
> >
> >> -----Original Message-----
> >> From: Ferruh Yigit [mailto:ferruh.yigit@intel.com]
> >> Sent: Thursday, August 6, 2020 12:36 AM
> >> To: wangyunjian <wangyunjian@huawei.com>; dev@dpdk.org
> >> Cc: keith.wiles@intel.com; ophirmu@mellanox.com; Lilijun (Jerry)
> >> <jerry.lilijun@huawei.com>; xudingke <xudingke@huawei.com>;
> >> stable@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH] net/tap: free mempool when closing
> >>
> >> On 7/29/2020 12:35 PM, wangyunjian wrote:
> >>> From: Yunjian Wang <wangyunjian@huawei.com>
> >>>
> >>> When setup tx queues, we will create a mempool for the 'gso_ctx'.
> >>> The mempool is not freed when closing tap device. If free the tap
> >>> device and create it with different name, it will create a new
> >>> mempool. This maybe cause an OOM.
> >>>
> >>> Fixes: 050316a88313 ("net/tap: support TSO (TCP Segment Offload)")
> >>> Cc: stable@dpdk.org
> >>>
> >>> Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
> >>
> >> <...>
> >>
> >>> @@ -1317,26 +1320,31 @@ tap_gso_ctx_setup(struct rte_gso_ctx
> >>> *gso_ctx,
> >> struct rte_eth_dev *dev)
> >>>  {
> >>>  	uint32_t gso_types;
> >>>  	char pool_name[64];
> >>> -
> >>> -	/*
> >>> -	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
> >>> -	 * size per mbuf use this pool for both direct and indirect mbufs
> >>> -	 */
> >>> -
> >>> -	struct rte_mempool *mp;      /* Mempool for GSO packets */
> >>> +	struct pmd_internals *pmd = dev->data->dev_private;
> >>> +	int ret;
> >>>
> >>>  	/* initialize GSO context */
> >>>  	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
> >>> -	snprintf(pool_name, sizeof(pool_name), "mp_%s",
> dev->device->name);
> >>> -	mp = rte_mempool_lookup((const char *)pool_name);
> >>> -	if (!mp) {
> >>> -		mp = rte_pktmbuf_pool_create(pool_name,
> TAP_GSO_MBUFS_NUM,
> >>> -			TAP_GSO_MBUF_CACHE_SIZE, 0,
> >>> +	if (!pmd->gso_ctx_mp) {
> >>> +		/*
> >>> +		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
> >>> +		 * bytes size per mbuf use this pool for both direct and
> >>> +		 * indirect mbufs
> >>> +		 */
> >>> +		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
> >>> +				dev->device->name);
> >>> +		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
> >>> +			TAP_LOG(ERR,
> >>> +				"%s: failed to create mbuf pool "
> >>> +				"name for device %s\n",
> >>> +				pmd->name, dev->device->name);
> >>
> >> Overall looks good. Only above error doesn't say why it failed,
> >> informing the user that device name is too long may help her to overcome
> the error.
> >
> > I found that the return value of functions snprintf was not checked
> > when modifying the code, so fixed it.
> > I think it maybe fail, because the max device name length is
> > RTE_DEV_NAME_MAX_LEN(64).
> 
> +1 to the check.
> My comment was on the log message, which says "failed to create mbuf pool",
> but it doesn't say it is failed because of long device name.
> If user knows the reason of the failure, can prevent it by providing shorter
> device name.
> My suggestion is update the error log message to have the reason of failure.

Thanks for your suggestion, will include them in next version.

> 
> >
> > Do I need to split into two patches?
> 
> I think OK to have the change in this patch.

OK

> 
> >
> > Thanks,
> > Yunjian
> >


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-stable] [dpdk-dev] [PATCH v2] net/tap: free mempool when closing
  2020-07-29 11:35 [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing wangyunjian
  2020-08-05 13:47 ` Thomas Monjalon
  2020-08-05 16:35 ` Ferruh Yigit
@ 2020-08-08  9:58 ` " wangyunjian
  2 siblings, 0 replies; 9+ messages in thread
From: wangyunjian @ 2020-08-08  9:58 UTC (permalink / raw)
  To: dev, ferruh.yigit, nikhil.rao
  Cc: jerry.lilijun, xudingke, Yunjian Wang, stable

From: Yunjian Wang <wangyunjian@huawei.com>

When setting up tx queues, we will create a mempool for the 'gso_ctx'.
The mempool is not freed when closing the tap device. If we free the
tap device and create it with a different name, it will create a new
mempool. This may cause an OOM.

The snprintf function's return value is not checked, so the mempool
name may be silently truncated. This patch also fixes that.

Fixes: 050316a88313 ("net/tap: support TSO (TCP Segment Offload)")
Cc: stable@dpdk.org

Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
---
 v2: Updated the error log message as suggested by Ferruh Yigit
---
 drivers/net/tap/rte_eth_tap.c | 43 +++++++++++++++++++++--------------
 drivers/net/tap/rte_eth_tap.h |  1 +
 2 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 339f24bf8..185de8aee 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1070,6 +1070,9 @@ tap_dev_close(struct rte_eth_dev *dev)
 				&internals->remote_initial_flags);
 	}
 
+	rte_mempool_free(internals->gso_ctx_mp);
+	internals->gso_ctx_mp = NULL;
+
 	if (internals->ka_fd != -1) {
 		close(internals->ka_fd);
 		internals->ka_fd = -1;
@@ -1317,26 +1320,31 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 {
 	uint32_t gso_types;
 	char pool_name[64];
-
-	/*
-	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
-	 * size per mbuf use this pool for both direct and indirect mbufs
-	 */
-
-	struct rte_mempool *mp;      /* Mempool for GSO packets */
+	struct pmd_internals *pmd = dev->data->dev_private;
+	int ret;
 
 	/* initialize GSO context */
 	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
-	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
-	mp = rte_mempool_lookup((const char *)pool_name);
-	if (!mp) {
-		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
-			TAP_GSO_MBUF_CACHE_SIZE, 0,
+	if (!pmd->gso_ctx_mp) {
+		/*
+		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
+		 * bytes size per mbuf use this pool for both direct and
+		 * indirect mbufs
+		 */
+		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
+				dev->device->name);
+		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
+			TAP_LOG(ERR,
+				"%s: failed to create mbuf pool name for device %s,"
+				"device name too long or output error, ret: %d\n",
+				pmd->name, dev->device->name, ret);
+			return -ENAMETOOLONG;
+		}
+		pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
+			TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
 			RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
 			SOCKET_ID_ANY);
-		if (!mp) {
-			struct pmd_internals *pmd = dev->data->dev_private;
-
+		if (!pmd->gso_ctx_mp) {
 			TAP_LOG(ERR,
 				"%s: failed to create mbuf pool for device %s\n",
 				pmd->name, dev->device->name);
@@ -1344,8 +1352,8 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 		}
 	}
 
-	gso_ctx->direct_pool = mp;
-	gso_ctx->indirect_pool = mp;
+	gso_ctx->direct_pool = pmd->gso_ctx_mp;
+	gso_ctx->indirect_pool = pmd->gso_ctx_mp;
 	gso_ctx->gso_types = gso_types;
 	gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
 	gso_ctx->flag = 0;
@@ -1842,6 +1850,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
 	pmd->type = type;
 	pmd->ka_fd = -1;
 	pmd->nlsk_fd = -1;
+	pmd->gso_ctx_mp = NULL;
 
 	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
 	if (pmd->ioctl_sock == -1) {
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 8d6d53dc0..ba45de840 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -91,6 +91,7 @@ struct pmd_internals {
 	struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
 	struct rte_intr_handle intr_handle;          /* LSC interrupt handle. */
 	int ka_fd;                        /* keep-alive file descriptor */
+	struct rte_mempool *gso_ctx_mp;     /* Mempool for GSO packets */
 };
 
 struct pmd_process_private {
-- 
2.23.0



^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, back to index

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-29 11:35 [dpdk-stable] [dpdk-dev] [PATCH] net/tap: free mempool when closing wangyunjian
2020-08-05 13:47 ` Thomas Monjalon
2020-08-06 12:47   ` wangyunjian
2020-08-06 13:19     ` Thomas Monjalon
2020-08-05 16:35 ` Ferruh Yigit
2020-08-06 12:45   ` wangyunjian
2020-08-06 13:04     ` Ferruh Yigit
2020-08-06 13:35       ` wangyunjian
2020-08-08  9:58 ` [dpdk-stable] [dpdk-dev] [PATCH v2] " wangyunjian

patches for DPDK stable branches

Archives are clonable:
	git clone --mirror http://inbox.dpdk.org/stable/0 stable/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 stable stable/ http://inbox.dpdk.org/stable \
		stable@dpdk.org
	public-inbox-index stable


Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.stable


AGPL code for this site: git clone https://public-inbox.org/ public-inbox