DPDK patches and discussions
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: <dev@dpdk.org>
Cc: <rasland@nvidia.com>, <matan@nvidia.com>, <orika@nvidia.com>,
	<stable@dpdk.org>
Subject: [dpdk-dev] [PATCH] common/mlx5: fix Netlink receive message buffer size
Date: Thu, 1 Jul 2021 10:31:33 +0300	[thread overview]
Message-ID: <20210701073133.23488-1-viacheslavo@nvidia.com> (raw)

If there are many VFs, the Netlink reply the kernel sends for an
RTM_GETLINK request can exceed the fixed receive buffer size. Query
the length of the incoming message in advance (MSG_PEEK | MSG_TRUNC)
and allocate a buffer large enough to hold the entire reply.
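
For illustration only, a minimal standalone sketch of that
peek-then-allocate pattern against plain libc and netlink(7); this is
not mlx5 driver code, and unlike mlx5_nl_recv() it reads a single
datagram instead of looping until NLMSG_DONE:

/*
 * Sketch of the peek-then-allocate pattern: query the pending message
 * length with MSG_PEEK | MSG_TRUNC, allocate exactly enough space,
 * then fetch the message for real.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int
recv_whole_nl_reply(int fd)
{
	struct sockaddr_nl sa;
	struct iovec iov = { .iov_base = NULL, .iov_len = 0 };
	struct msghdr msg = {
		.msg_name = &sa,
		.msg_namelen = sizeof(sa),
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	struct nlmsghdr *nh;
	ssize_t len;
	int remaining;
	void *buf;

	/* Peek: the kernel reports the full length without consuming it. */
	len = recvmsg(fd, &msg, MSG_PEEK | MSG_TRUNC);
	if (len <= 0)
		return -1;
	buf = malloc(len);
	if (buf == NULL)
		return -1;
	iov.iov_base = buf;
	iov.iov_len = len;
	/* Fetch: the buffer is large enough, nothing can be truncated. */
	len = recvmsg(fd, &msg, 0);
	remaining = (int)len;
	for (nh = buf; len > 0 && NLMSG_OK(nh, remaining);
	     nh = NLMSG_NEXT(nh, remaining))
		printf("nlmsg type %d, len %u\n",
		       nh->nlmsg_type, nh->nlmsg_len);
	free(buf);
	return len > 0 ? 0 : -1;
}

int
main(void)
{
	/* RTM_GETLINK dump request - the reply grows with the VF count. */
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
	} req = {
		.nh = {
			.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
			.nlmsg_type = RTM_GETLINK,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.ifm = { .ifi_family = AF_UNSPEC },
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;
	if (send(fd, &req, req.nh.nlmsg_len, 0) < 0 ||
	    recv_whole_nl_reply(fd) < 0)
		perror("netlink");
	close(fd);
	return 0;
}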

Fixes: ccdcba53a3f4 ("net/mlx5: use Netlink to add/remove MAC addresses")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/linux/mlx5_nl.c | 69 +++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 14 deletions(-)

diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 3f1912d078..dc8dafd0a8 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -189,8 +189,8 @@ int
 mlx5_nl_init(int protocol)
 {
 	int fd;
-	int sndbuf_size = MLX5_SEND_BUF_SIZE;
-	int rcvbuf_size = MLX5_RECV_BUF_SIZE;
+	int buf_size;
+	socklen_t opt_size;
 	struct sockaddr_nl local = {
 		.nl_family = AF_NETLINK,
 	};
@@ -201,16 +201,36 @@ mlx5_nl_init(int protocol)
 		rte_errno = errno;
 		return -rte_errno;
 	}
-	ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int));
+	opt_size = sizeof(buf_size);
+	ret = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf_size, &opt_size);
 	if (ret == -1) {
 		rte_errno = errno;
 		goto error;
 	}
-	ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int));
+	DRV_LOG(DEBUG, "Netlink socket send buffer: %d", buf_size);
+	if (buf_size < MLX5_SEND_BUF_SIZE) {
+		ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF,
+				 &buf_size, sizeof(buf_size));
+		if (ret == -1) {
+			rte_errno = errno;
+			goto error;
+		}
+	}
+	opt_size = sizeof(buf_size);
+	ret = getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buf_size, &opt_size);
 	if (ret == -1) {
 		rte_errno = errno;
 		goto error;
 	}
+	DRV_LOG(DEBUG, "Netlink socket recv buffer: %d", buf_size);
+	if (buf_size < MLX5_RECV_BUF_SIZE) {
+		ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
+				 &buf_size, sizeof(buf_size));
+		if (ret == -1) {
+			rte_errno = errno;
+			goto error;
+		}
+	}
 	ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
 	if (ret == -1) {
 		rte_errno = errno;
@@ -332,11 +352,7 @@ mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
 	     void *arg)
 {
 	struct sockaddr_nl sa;
-	void *buf = mlx5_malloc(0, MLX5_RECV_BUF_SIZE, 0, SOCKET_ID_ANY);
-	struct iovec iov = {
-		.iov_base = buf,
-		.iov_len = MLX5_RECV_BUF_SIZE,
-	};
+	struct iovec iov;
 	struct msghdr msg = {
 		.msg_name = &sa,
 		.msg_namelen = sizeof(sa),
@@ -344,18 +360,43 @@ mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
 		/* One message at a time */
 		.msg_iovlen = 1,
 	};
+	void *buf = NULL;
 	int multipart = 0;
 	int ret = 0;
 
-	if (!buf) {
-		rte_errno = ENOMEM;
-		return -rte_errno;
-	}
 	do {
 		struct nlmsghdr *nh;
-		int recv_bytes = 0;
+		int recv_bytes;
 
 		do {
+			/* Query length of incoming message. */
+			iov.iov_base = NULL;
+			iov.iov_len = 0;
+			recv_bytes = recvmsg(nlsk_fd, &msg,
+					     MSG_PEEK | MSG_TRUNC);
+			if (recv_bytes < 0) {
+				rte_errno = errno;
+				ret = -rte_errno;
+				goto exit;
+			}
+			if (recv_bytes == 0) {
+				rte_errno = ENODATA;
+				ret = -rte_errno;
+				goto exit;
+			}
+			/* Allocate buffer to fetch the message. */
+			if (recv_bytes < MLX5_RECV_BUF_SIZE)
+				recv_bytes = MLX5_RECV_BUF_SIZE;
+			mlx5_free(buf);
+			buf = mlx5_malloc(0, recv_bytes, 0, SOCKET_ID_ANY);
+			if (!buf) {
+				rte_errno = ENOMEM;
+				ret = -rte_errno;
+				goto exit;
+			}
+			/* Fetch the message. */
+			iov.iov_base = buf;
+			iov.iov_len = recv_bytes;
 			recv_bytes = recvmsg(nlsk_fd, &msg, 0);
 			if (recv_bytes == -1) {
 				rte_errno = errno;
-- 
2.18.1


Thread overview: 2+ messages
2021-07-01  7:31 Viacheslav Ovsiienko [this message]
2021-07-06 11:42 ` Raslan Darawsheh
