From: wangyunjian <wangyunjian@huawei.com>
To: <dev@dpdk.org>, <ferruh.yigit@intel.com>, <nikhil.rao@intel.com>
Cc: <jerry.lilijun@huawei.com>, <xudingke@huawei.com>,
	Yunjian Wang <wangyunjian@huawei.com>, <stable@dpdk.org>
Subject: [dpdk-dev] [PATCH v2] net/tap: free mempool when closing
Date: Sat, 8 Aug 2020 17:58:43 +0800	[thread overview]
Message-ID: <1596880723-1552-1-git-send-email-wangyunjian@huawei.com> (raw)
In-Reply-To: <40a0e68ed41b05fba8cbe5f34e369a59a1c0c09c.1596022448.git.wangyunjian@huawei.com>

From: Yunjian Wang <wangyunjian@huawei.com>

When setting up Tx queues, a mempool is created for the 'gso_ctx'.
This mempool is never freed when the tap device is closed. If the
device is freed and re-created with a different name, a new mempool
is allocated while the old one stays resident, which can eventually
lead to an OOM condition.

In addition, the return value of snprintf is not checked, so the
mempool name may be silently truncated. This patch fixes that as
well.
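
For reference, the truncation check uses the standard C idiom:
snprintf returns the length the formatted string would have had, so
any return value of sizeof(buf) or more means the name was cut short.
A self-contained sketch of the same check, outside the driver
(build_pool_name is an illustrative helper, not part of the patch):

#include <errno.h>
#include <stdio.h>

/* Build "mp_<dev_name>" into buf, rejecting truncation instead of
 * silently using a clipped (and possibly colliding) pool name. */
static int
build_pool_name(char *buf, size_t len, const char *dev_name)
{
	int ret = snprintf(buf, len, "mp_%s", dev_name);

	if (ret < 0 || (size_t)ret >= len)
		return -ENAMETOOLONG; /* output error or name too long */
	return 0;
}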

Fixes: 050316a88313 ("net/tap: support TSO (TCP Segment Offload)")
Cc: stable@dpdk.org

Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
---
 v2: Updated the error log message as suggested by Ferruh Yigit
---
 drivers/net/tap/rte_eth_tap.c | 43 +++++++++++++++++++++--------------
 drivers/net/tap/rte_eth_tap.h |  1 +
 2 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 339f24bf8..185de8aee 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1070,6 +1070,9 @@ tap_dev_close(struct rte_eth_dev *dev)
 				&internals->remote_initial_flags);
 	}
 
+	rte_mempool_free(internals->gso_ctx_mp);
+	internals->gso_ctx_mp = NULL;
+
 	if (internals->ka_fd != -1) {
 		close(internals->ka_fd);
 		internals->ka_fd = -1;
@@ -1317,26 +1320,31 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 {
 	uint32_t gso_types;
 	char pool_name[64];
-
-	/*
-	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
-	 * size per mbuf use this pool for both direct and indirect mbufs
-	 */
-
-	struct rte_mempool *mp;      /* Mempool for GSO packets */
+	struct pmd_internals *pmd = dev->data->dev_private;
+	int ret;
 
 	/* initialize GSO context */
 	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
-	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
-	mp = rte_mempool_lookup((const char *)pool_name);
-	if (!mp) {
-		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
-			TAP_GSO_MBUF_CACHE_SIZE, 0,
+	if (!pmd->gso_ctx_mp) {
+		/*
+		 * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
+		 * bytes of data room per mbuf; use this pool for both
+		 * direct and indirect mbufs.
+		 */
+		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
+				dev->device->name);
+		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
+			TAP_LOG(ERR,
+				"%s: failed to create mbuf pool name for device %s,"
+				"device name too long or output error, ret: %d\n",
+				pmd->name, dev->device->name, ret);
+			return -ENAMETOOLONG;
+		}
+		pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
+			TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
 			RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
 			SOCKET_ID_ANY);
-		if (!mp) {
-			struct pmd_internals *pmd = dev->data->dev_private;
-
+		if (!pmd->gso_ctx_mp) {
 			TAP_LOG(ERR,
 				"%s: failed to create mbuf pool for device %s\n",
 				pmd->name, dev->device->name);
@@ -1344,8 +1352,8 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 		}
 	}
 
-	gso_ctx->direct_pool = mp;
-	gso_ctx->indirect_pool = mp;
+	gso_ctx->direct_pool = pmd->gso_ctx_mp;
+	gso_ctx->indirect_pool = pmd->gso_ctx_mp;
 	gso_ctx->gso_types = gso_types;
 	gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
 	gso_ctx->flag = 0;
@@ -1842,6 +1850,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
 	pmd->type = type;
 	pmd->ka_fd = -1;
 	pmd->nlsk_fd = -1;
+	pmd->gso_ctx_mp = NULL;
 
 	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
 	if (pmd->ioctl_sock == -1) {
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 8d6d53dc0..ba45de840 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -91,6 +91,7 @@ struct pmd_internals {
 	struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
 	struct rte_intr_handle intr_handle;          /* LSC interrupt handle. */
 	int ka_fd;                        /* keep-alive file descriptor */
+	struct rte_mempool *gso_ctx_mp;     /* Mempool for GSO packets */
 };
 
 struct pmd_process_private {
-- 
2.23.0



Thread overview: 12+ messages
2020-07-29 11:35 [dpdk-dev] [PATCH] " wangyunjian
2020-08-05 13:47 ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon
2020-08-06 12:47   ` wangyunjian
2020-08-06 13:19     ` Thomas Monjalon
2020-08-28 12:51       ` wangyunjian
2020-09-01 10:57         ` Thomas Monjalon
2020-08-05 16:35 ` [dpdk-dev] " Ferruh Yigit
2020-08-06 12:45   ` wangyunjian
2020-08-06 13:04     ` Ferruh Yigit
2020-08-06 13:35       ` wangyunjian
2020-08-08  9:58 ` wangyunjian [this message]
2020-09-14 14:43   ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
