DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 0/2] app/testpmd: external RxQ tests
@ 2022-03-01 20:26 Michael Baum
  2022-03-01 20:26 ` [PATCH 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
                   ` (2 more replies)
  0 siblings, 3 replies; 16+ messages in thread
From: Michael Baum @ 2022-03-01 20:26 UTC (permalink / raw)
  To: dev; +Cc: Xiaoyun Li, Aman Singh, Yuying Zhang

Recently [1] the mlx5 PMD added support for external queues; the
following patches add internal tests for it in the Testpmd application.

[1]
https://patchwork.dpdk.org/project/dpdk/cover/20220224232511.3238707-1-michaelba@nvidia.com/


Michael Baum (2):
  app/testpmd: add test for remote PD and CTX
  app/testpmd: add test for external RxQ

 app/test-pmd/cmdline.c                      | 172 +++++++++++++++++++-
 app/test-pmd/meson.build                    |   3 +
 app/test-pmd/testpmd.c                      | 153 +++++++++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  60 +++++++
 4 files changed, 386 insertions(+), 2 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 1/2] app/testpmd: add test for remote PD and CTX
  2022-03-01 20:26 [PATCH 0/2] app/testpmd: external RxQ tests Michael Baum
@ 2022-03-01 20:26 ` Michael Baum
  2022-03-03 12:57   ` Ferruh Yigit
  2022-03-01 20:26 ` [PATCH 2/2] app/testpmd: add test for external RxQ Michael Baum
  2022-06-16 17:10 ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Michael Baum
  2 siblings, 1 reply; 16+ messages in thread
From: Michael Baum @ 2022-03-01 20:26 UTC (permalink / raw)
  To: dev; +Cc: Xiaoyun Li, Aman Singh, Yuying Zhang, Matan Azrad

Add mlx5 internal option in testpmd run-time function "port attach" to
add another parameter named "mlx5_socket" for attaching port and add 2
devargs before.

The arguments are "cmd_fd" and "pd_handle", used to import a device
created outside the PMD. The Testpmd application imports them using IPC,
and updates the devargs list before attaching.

The syntax is:

  testpmd > port attach (identifier) mlx5_socket=(path)

Where "path" is the IPC socket path agreed upon with the remote process.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 app/test-pmd/cmdline.c                      |  14 +-
 app/test-pmd/meson.build                    |   3 +
 app/test-pmd/testpmd.c                      | 153 ++++++++++++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  40 +++++
 4 files changed, 208 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 7ab0575e64..479e0290c4 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -773,6 +773,12 @@ static void cmd_help_long_parsed(void *parsed_result,
 			"port attach (ident)\n"
 			"    Attach physical or virtual dev by pci address or virtual device name\n\n"
 
+#ifdef RTE_NET_MLX5
+			"port attach (ident) mlx5_socket=(path)\n"
+			"    Attach physical or virtual dev by pci address or virtual device name "
+			"and add \"cmd_fd\" and \"pd_handle\" devargs before attaching\n\n"
+#endif
+
 			"port detach (port_id)\n"
 			"    Detach physical or virtual dev by port_id\n\n"
 
@@ -1379,8 +1385,12 @@ cmdline_parse_token_string_t cmd_operate_attach_port_identifier =
 cmdline_parse_inst_t cmd_operate_attach_port = {
 	.f = cmd_operate_attach_port_parsed,
 	.data = NULL,
-	.help_str = "port attach <identifier>: "
-		"(identifier: pci address or virtual dev name)",
+	.help_str = "port attach <identifier> mlx5_socket=<path>: "
+		"(identifier: pci address or virtual dev name"
+#ifdef RTE_NET_MLX5
+		", path (optional): socket path to get cmd FD and PD handle"
+#endif
+		")",
 	.tokens = {
 		(void *)&cmd_operate_attach_port_port,
 		(void *)&cmd_operate_attach_port_keyword,
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index 43130c8856..c4fd379e67 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -73,3 +73,6 @@ endif
 if dpdk_conf.has('RTE_NET_DPAA')
     deps += ['bus_dpaa', 'mempool_dpaa', 'net_dpaa']
 endif
+if dpdk_conf.has('RTE_NET_MLX5')
+    deps += 'net_mlx5'
+endif
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index fe2ce19f99..7ec95e5ae4 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -11,6 +11,10 @@
 #include <fcntl.h>
 #ifndef RTE_EXEC_ENV_WINDOWS
 #include <sys/mman.h>
+#ifdef RTE_NET_MLX5
+#include <sys/socket.h>
+#include <sys/un.h>
+#endif
 #endif
 #include <sys/types.h>
 #include <errno.h>
@@ -3200,11 +3204,150 @@ reset_port(portid_t pid)
 	printf("Done\n");
 }
 
+#if defined(RTE_NET_MLX5) && !defined(RTE_EXEC_ENV_WINDOWS)
+static const char*
+get_socket_path(char *extend)
+{
+	if (strstr(extend, "mlx5_socket=") == extend) {
+		const char *socket_path = strchr(extend, '=') + 1;
+
+		TESTPMD_LOG(DEBUG, "MLX5 socket path is %s\n", socket_path);
+		return socket_path;
+	}
+
+	TESTPMD_LOG(ERR, "Failed to extract a valid socket path from %s\n",
+		    extend);
+	return NULL;
+}
+
+static int
+attach_port_extend_devargs(char *identifier, char *extend)
+{
+	struct sockaddr_un un = {
+		.sun_family = AF_UNIX,
+	};
+	struct sockaddr_un dst = {
+		.sun_family = AF_UNIX,
+	};
+	int cmd_fd;
+	int pd_handle;
+	struct iovec iov = {
+		.iov_base = &pd_handle,
+		.iov_len = sizeof(int),
+	};
+	union {
+		char buf[CMSG_SPACE(sizeof(int))];
+		struct cmsghdr align;
+	} control;
+	struct msghdr msgh = {
+		.msg_name = &dst,
+		.msg_namelen = sizeof(dst),
+		.msg_iov = NULL,
+		.msg_iovlen = 0,
+	};
+	struct cmsghdr *cmsg;
+	const char *path = get_socket_path(extend + 1);
+	size_t length = 1;
+	int socket_fd;
+	int ret;
+
+	if (path == NULL) {
+		TESTPMD_LOG(ERR, "Invalid devargs extension is specified\n");
+		return -1;
+	}
+
+	/* Initialize IPC channel. */
+	socket_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+	if (socket_fd < 0) {
+		TESTPMD_LOG(ERR, "Failed to create unix socket: %s\n",
+			    strerror(errno));
+		return -1;
+	}
+	snprintf(un.sun_path, sizeof(un.sun_path), "%s_%d", path, getpid());
+	unlink(un.sun_path); /* May still exist since last run */
+	if (bind(socket_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+		TESTPMD_LOG(ERR, "Failed to bind %s: %s\n", un.sun_path,
+			    strerror(errno));
+		close(socket_fd);
+		return -1;
+	}
+
+	strlcpy(dst.sun_path, path, sizeof(dst.sun_path));
+	/* Send the request message. */
+	do {
+		ret = sendmsg(socket_fd, &msgh, 0);
+	} while (ret < 0 && errno == EINTR);
+	if (ret < 0) {
+		TESTPMD_LOG(ERR, "Failed to send request to (%s): %s\n", path,
+			    strerror(errno));
+		close(socket_fd);
+		unlink(un.sun_path);
+		return -1;
+	}
+
+	msgh.msg_iov = &iov;
+	msgh.msg_iovlen = 1;
+	msgh.msg_control = control.buf;
+	msgh.msg_controllen = sizeof(control.buf);
+	do {
+		ret = recvmsg(socket_fd, &msgh, 0);
+	} while (ret < 0);
+	if (ret != sizeof(int) || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+		TESTPMD_LOG(ERR, "truncated msg");
+		close(socket_fd);
+		unlink(un.sun_path);
+		return -1;
+	}
+
+	/* Translate the FD. */
+	cmsg = CMSG_FIRSTHDR(&msgh);
+	if (cmsg == NULL || cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
+	    cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
+		TESTPMD_LOG(ERR, "Fail to get FD using SCM_RIGHTS mechanism\n");
+		close(socket_fd);
+		unlink(un.sun_path);
+		return -1;
+	}
+	memcpy(&cmd_fd, CMSG_DATA(cmsg), sizeof(int));
+
+	TESTPMD_LOG(DEBUG, "Command FD (%d) and PD handle (%d) "
+		    "are successfully imported from remote process\n",
+		    cmd_fd, pd_handle);
+
+	/* Cleanup IPC channel. */
+	close(socket_fd);
+	unlink(un.sun_path);
+
+	/* Calculate the new length of devargs string. */
+	length += snprintf(NULL, 0, ",cmd_fd=%d,pd_handle=%d",
+			   cmd_fd, pd_handle);
+	/* Extend the devargs string. */
+	snprintf(extend, length, ",cmd_fd=%d,pd_handle=%d", cmd_fd, pd_handle);
+
+	TESTPMD_LOG(DEBUG, "Attach port with extra devargs %s\n", identifier);
+	return 0;
+}
+
+static bool
+is_delimiter_path_spaces(char *extend)
+{
+	while (*extend != '\0') {
+		if (*extend != ' ')
+			return true;
+		extend++;
+	}
+	return false;
+}
+#endif
+
 void
 attach_port(char *identifier)
 {
 	portid_t pi;
 	struct rte_dev_iterator iterator;
+#if defined(RTE_NET_MLX5) && !defined(RTE_EXEC_ENV_WINDOWS)
+	char *extend;
+#endif
 
 	printf("Attaching a new port...\n");
 
@@ -3213,6 +3356,16 @@ attach_port(char *identifier)
 		return;
 	}
 
+#if defined(RTE_NET_MLX5) && !defined(RTE_EXEC_ENV_WINDOWS)
+	extend = strchr(identifier, ' ');
+	if (extend != NULL && is_delimiter_path_spaces(extend) &&
+	    attach_port_extend_devargs(identifier, extend) < 0) {
+		TESTPMD_LOG(ERR, "Failed to extend devargs for port %s\n",
+			    identifier);
+		return;
+	}
+#endif
+
 	if (rte_dev_probe(identifier) < 0) {
 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
 		return;
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 1083c6d538..d6490947c4 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2127,6 +2127,46 @@ the mode and slave parameters must be given.
    Done
 
 
+port attach with mlx5 socket path
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+MLX5 internal option to attach a port specified by pci address or virtual device
+args and add extra devargs to it, which is imported from external process::
+
+   testpmd> port attach (identifier) mlx5_socket=(path)
+
+where:
+
+* ``identifier``: pci address or virtual device args.
+* ``path``: socket path to import arguments agreed by the external process.
+
+The mlx5 PMD enables to import CTX and PD created outside the PMD.
+It gets as devargs the device's ``cmd_fd`` and ``pd_handle``,
+then using those arguments to import objects.
+See :ref:`mlx5 driver options <mlx5_common_driver_options>` for more information.
+
+When ``cmd_fd`` and ``pd_handle`` arguments are coming from another process,
+the FD must be dup'd before being passed.
+In this function, testpmd initializes IPC socket to get FD using SCM_RIGHTS.
+It gets the external process socket path, then import the ``cmd_fd`` and
+``pd_handle`` arguments and add them to devargs list.
+After updating this, it calls the regular ``port attach`` function
+with the extended identifier.
+
+For example, to attach a port whose pci address is ``0000:0a:00.0`` and its
+socket path is ``/var/run/import_ipc_socket``.
+
+.. code-block:: console
+
+   testpmd> port attach 0000:0a:00.0 mlx5_socket=/var/run/import_ipc_socket
+   Attaching a new port...
+   testpmd: MLX5 socket path is /var/run/import_ipc_socket
+   testpmd: Attach port with extra devargs 0000:0a:00.0,cmd_fd=40,pd_handle=1
+   EAL: Probe PCI driver: mlx5_pci (15b3:101d) device: 0000:03:00.0 (socket 0)
+   Port 0 is attached. Now total ports is 1
+   Done
+
+
 port detach
 ~~~~~~~~~~~
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 2/2] app/testpmd: add test for external RxQ
  2022-03-01 20:26 [PATCH 0/2] app/testpmd: external RxQ tests Michael Baum
  2022-03-01 20:26 ` [PATCH 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
@ 2022-03-01 20:26 ` Michael Baum
  2022-03-03 13:02   ` Ferruh Yigit
  2022-06-16 17:10 ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Michael Baum
  2 siblings, 1 reply; 16+ messages in thread
From: Michael Baum @ 2022-03-01 20:26 UTC (permalink / raw)
  To: dev; +Cc: Xiaoyun Li, Aman Singh, Yuying Zhang, Matan Azrad

Add mlx5 internal test for map and unmap external RxQs.
This patch adds to Testpmd app a runtime function to test the mapping
API.

For insert mapping use this command:

  testpmd> port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)

For removing a mapping use this command:

  testpmd> port (port_id) ext_rxq unmap (rte_queue_id)

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 app/test-pmd/cmdline.c                      | 158 ++++++++++++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  20 +++
 2 files changed, 178 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 479e0290c4..929680f5cb 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -63,6 +63,9 @@
 #ifdef RTE_NET_BNXT
 #include <rte_pmd_bnxt.h>
 #endif
+#ifdef RTE_NET_MLX5
+#include <rte_pmd_mlx5.h>
+#endif
 #include "testpmd.h"
 #include "cmdline_mtr.h"
 #include "cmdline_tm.h"
@@ -917,6 +920,15 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"port cleanup (port_id) txq (queue_id) (free_cnt)\n"
 			"    Cleanup txq mbufs for a specific Tx queue\n\n"
+
+#ifdef RTE_NET_MLX5
+			"port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)\n"
+			"    Map HW queue index (32 bit) to rte_flow queue"
+			" index (16 bit) for external RxQ\n\n"
+
+			"port (port_id) ext_rxq unmap (rte_queue_id)\n"
+			"    Unmap external Rx queue rte_flow index mapping\n\n"
+#endif
 		);
 	}
 
@@ -17817,6 +17829,148 @@ cmdline_parse_inst_t cmd_show_port_flow_transfer_proxy = {
 	}
 };
 
+#ifdef RTE_NET_MLX5
+
+/* Map HW queue index to rte queue index. */
+struct cmd_map_ext_rxq {
+	cmdline_fixed_string_t port;
+	portid_t port_id;
+	cmdline_fixed_string_t ext_rxq;
+	cmdline_fixed_string_t map;
+	uint16_t rte_queue_id;
+	uint32_t hw_queue_id;
+};
+
+cmdline_parse_token_string_t cmd_map_ext_rxq_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_map_ext_rxq, port, "port");
+cmdline_parse_token_num_t cmd_map_ext_rxq_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_map_ext_rxq, port_id, RTE_UINT16);
+cmdline_parse_token_string_t cmd_map_ext_rxq_ext_rxq =
+	TOKEN_STRING_INITIALIZER(struct cmd_map_ext_rxq, ext_rxq, "ext_rxq");
+cmdline_parse_token_string_t cmd_map_ext_rxq_map =
+	TOKEN_STRING_INITIALIZER(struct cmd_map_ext_rxq, map, "map");
+cmdline_parse_token_num_t cmd_map_ext_rxq_rte_queue_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_map_ext_rxq, rte_queue_id, RTE_UINT16);
+cmdline_parse_token_num_t cmd_map_ext_rxq_hw_queue_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_map_ext_rxq, hw_queue_id, RTE_UINT32);
+
+static void
+cmd_map_ext_rxq_parsed(void *parsed_result,
+		       __rte_unused struct cmdline *cl,
+		       __rte_unused void *data)
+{
+	struct cmd_map_ext_rxq *res = parsed_result;
+	int ret;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+	ret = rte_pmd_mlx5_external_rx_queue_id_map(res->port_id,
+						    res->rte_queue_id,
+						    res->hw_queue_id);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		fprintf(stderr, "invalid rte_flow index (%u), out of range\n",
+			res->rte_queue_id);
+		break;
+	case -ENODEV:
+		fprintf(stderr, "invalid port_id %u\n", res->port_id);
+		break;
+	case -ENOTSUP:
+		fprintf(stderr, "function not implemented or supported\n");
+		break;
+	case -EEXIST:
+		fprintf(stderr, "mapping with index %u already exists\n",
+			res->rte_queue_id);
+		break;
+	default:
+		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_map_ext_rxq = {
+	.f = cmd_map_ext_rxq_parsed,
+	.data = NULL,
+	.help_str = "port <port_id> ext_rxq map <rte_queue_id> <hw_queue_id>",
+	.tokens = {
+		(void *)&cmd_map_ext_rxq_port,
+		(void *)&cmd_map_ext_rxq_port_id,
+		(void *)&cmd_map_ext_rxq_ext_rxq,
+		(void *)&cmd_map_ext_rxq_map,
+		(void *)&cmd_map_ext_rxq_rte_queue_id,
+		(void *)&cmd_map_ext_rxq_hw_queue_id,
+		NULL,
+	}
+};
+
+/* Unmap HW queue index to rte queue index. */
+struct cmd_unmap_ext_rxq {
+	cmdline_fixed_string_t port;
+	portid_t port_id;
+	cmdline_fixed_string_t ext_rxq;
+	cmdline_fixed_string_t unmap;
+	uint16_t queue_id;
+};
+
+cmdline_parse_token_string_t cmd_unmap_ext_rxq_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_unmap_ext_rxq, port, "port");
+cmdline_parse_token_num_t cmd_unmap_ext_rxq_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_unmap_ext_rxq, port_id, RTE_UINT16);
+cmdline_parse_token_string_t cmd_unmap_ext_rxq_ext_rxq =
+	TOKEN_STRING_INITIALIZER(struct cmd_unmap_ext_rxq, ext_rxq, "ext_rxq");
+cmdline_parse_token_string_t cmd_unmap_ext_rxq_unmap =
+	TOKEN_STRING_INITIALIZER(struct cmd_unmap_ext_rxq, unmap, "unmap");
+cmdline_parse_token_num_t cmd_unmap_ext_rxq_queue_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_unmap_ext_rxq, queue_id, RTE_UINT16);
+
+static void
+cmd_unmap_ext_rxq_parsed(void *parsed_result,
+			 __rte_unused struct cmdline *cl,
+			 __rte_unused void *data)
+{
+	struct cmd_unmap_ext_rxq *res = parsed_result;
+	int ret;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+	ret = rte_pmd_mlx5_external_rx_queue_id_unmap(res->port_id,
+						      res->queue_id);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		fprintf(stderr, "invalid rte_flow index (%u), "
+			"out of range, doesn't exist or still referenced\n",
+			res->queue_id);
+		break;
+	case -ENODEV:
+		fprintf(stderr, "invalid port_id %u\n", res->port_id);
+		break;
+	case -ENOTSUP:
+		fprintf(stderr, "function not implemented or supported\n");
+		break;
+	default:
+		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_unmap_ext_rxq = {
+	.f = cmd_unmap_ext_rxq_parsed,
+	.data = NULL,
+	.help_str = "port <port_id> ext_rxq unmap <queue_id>",
+	.tokens = {
+		(void *)&cmd_unmap_ext_rxq_port,
+		(void *)&cmd_unmap_ext_rxq_port_id,
+		(void *)&cmd_unmap_ext_rxq_ext_rxq,
+		(void *)&cmd_unmap_ext_rxq_unmap,
+		(void *)&cmd_unmap_ext_rxq_queue_id,
+		NULL,
+	}
+};
+
+#endif /* RTE_NET_MLX5 */
+
 /* ******************************************************************************** */
 
 /* list of instructions */
@@ -18103,6 +18257,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_show_capability,
 	(cmdline_parse_inst_t *)&cmd_set_flex_is_pattern,
 	(cmdline_parse_inst_t *)&cmd_set_flex_spec_pattern,
+#ifdef RTE_NET_MLX5
+	(cmdline_parse_inst_t *)&cmd_map_ext_rxq,
+	(cmdline_parse_inst_t *)&cmd_unmap_ext_rxq,
+#endif
 	NULL,
 };
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index d6490947c4..de5665c886 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2502,6 +2502,26 @@ To cleanup txq mbufs currently cached by driver::
 
 If the value of ``free_cnt`` is 0, driver should free all cached mbufs.
 
+
+port map external RxQ
+~~~~~~~~~~~~~~~~~~~~~
+
+MLX5 internal API for external RxQ mapping management.
+
+Map HW queue index (32 bit) to rte_flow queue index (16 bit) for external RxQ::
+
+   testpmd> port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)
+
+Unmap external Rx queue rte_flow index mapping::
+
+   testpmd> port (port_id) ext_rxq unmap (rte_queue_id)
+
+where:
+
+* ``rte_queue_id``: queue index in reage [64536, 65535].
+* ``hw_queue_id``: queue index given by HW in queue creation.
+
+
 Device Functions
 ----------------
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] app/testpmd: add test for remote PD and CTX
  2022-03-01 20:26 ` [PATCH 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
@ 2022-03-03 12:57   ` Ferruh Yigit
  2022-03-07 16:07     ` Michael Baum
  0 siblings, 1 reply; 16+ messages in thread
From: Ferruh Yigit @ 2022-03-03 12:57 UTC (permalink / raw)
  To: Michael Baum, dev; +Cc: Xiaoyun Li, Aman Singh, Yuying Zhang, Matan Azrad

On 3/1/2022 8:26 PM, Michael Baum wrote:
> Add mlx5 internal option in testpmd run-time function "port attach" to
> add another parameter named "mlx5_socket" for attaching port and add 2
> devargs before.
> 
> The arguments are "cmd_fd" and "pd_handle" using to import device
> created out of PMD. Testpmd application import it using IPC, and updates
> the devargs list before attaching.
> 
> The syntax is:
> 
>    testpmd > port attach (identifier) mlx5_socket=(path)
> 
> Where "path" is the IPC socket path agreed on the remote process.
> 
> Signed-off-by: Michael Baum <michaelba@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>

<...>

> diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
> index 43130c8856..c4fd379e67 100644
> --- a/app/test-pmd/meson.build
> +++ b/app/test-pmd/meson.build
> @@ -73,3 +73,6 @@ endif
>   if dpdk_conf.has('RTE_NET_DPAA')
>       deps += ['bus_dpaa', 'mempool_dpaa', 'net_dpaa']
>   endif
> +if dpdk_conf.has('RTE_NET_MLX5')
> +    deps += 'net_mlx5'
> +endif

Is this patch introduce any build time dependency to mlx5
driver? If not this chunk should go to next patch, which
uses mlx5 PMD specific API.

<...>

> diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> index 1083c6d538..d6490947c4 100644
> --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> @@ -2127,6 +2127,46 @@ the mode and slave parameters must be given.
>      Done
>   
>   
> +port attach with mlx5 socket path
> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> +
> +MLX5 internal option to attach a port specified by pci address or virtual device
> +args and add extra devargs to it, which is imported from external process::
> +
> +   testpmd> port attach (identifier) mlx5_socket=(path)
> +
> +where:
> +
> +* ``identifier``: pci address or virtual device args.
> +* ``path``: socket path to import arguments agreed by the external process.
> +
> +The mlx5 PMD enables to import CTX and PD created outside the PMD.
> +It gets as devargs the device's ``cmd_fd`` and ``pd_handle``,
> +then using those arguments to import objects.
> +See :ref:`mlx5 driver options <mlx5_common_driver_options>` for more information.
> +
> +When ``cmd_fd`` and ``pd_handle`` arguments are coming from another process,
> +the FD must be dup'd before being passed.
> +In this function, testpmd initializes IPC socket to get FD using SCM_RIGHTS.
> +It gets the external process socket path, then import the ``cmd_fd`` and
> +``pd_handle`` arguments and add them to devargs list.
> +After updating this, it calls the regular ``port attach`` function
> +with extended idevtifier.
> +
> +For example, to attach a port whose pci address is ``0000:0a:00.0`` and its
> +socket path is ``/var/run/import_ipc_socket``.
> +
> +.. code-block:: console
> +
> +   testpmd> port attach 0000:0a:00.0 mlx5_socket=/var/run/import_ipc_socket
> +   Attaching a new port...
> +   testpmd: MLX5 socket path is /var/run/import_ipc_socket
> +   testpmd: Attach port with extra devargs 0000:0a:00.0,cmd_fd=40,pd_handle=1
> +   EAL: Probe PCI driver: mlx5_pci (15b3:101d) device: 0000:03:00.0 (socket 0)
> +   Port 0 is attached. Now total ports is 1
> +   Done
> +
> +


Hi Michael,

This is too much mlx5 specific addition, and I don't think it is
good to extend testpmd with PMD specific code.
If we enable it, sure there will be other vendors willing to do
the same, making testpmd even messier.

I don't know what those ``cmd_fd`` and ``pd_handle`` (that read
from provided socket), but can they be read from some other
script and feed to testpmd, like a python wrapper etc...

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/2] app/testpmd: add test for external RxQ
  2022-03-01 20:26 ` [PATCH 2/2] app/testpmd: add test for external RxQ Michael Baum
@ 2022-03-03 13:02   ` Ferruh Yigit
  2022-03-07 15:51     ` Michael Baum
  0 siblings, 1 reply; 16+ messages in thread
From: Ferruh Yigit @ 2022-03-03 13:02 UTC (permalink / raw)
  To: Michael Baum, dev; +Cc: Xiaoyun Li, Aman Singh, Yuying Zhang, Matan Azrad

On 3/1/2022 8:26 PM, Michael Baum wrote:
> diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> index d6490947c4..de5665c886 100644
> --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> @@ -2502,6 +2502,26 @@ To cleanup txq mbufs currently cached by driver::
>   
>   If the value of ``free_cnt`` is 0, driver should free all cached mbufs.
>   
> +
> +port map external RxQ
> +~~~~~~~~~~~~~~~~~~~~~
> +
> +MLX5 internal API for external RxQ mapping management.
> +
> +Map HW queue index (32 bit) to rte_flow queue index (16 bit) for external RxQ::
> +
> +   testpmd> port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)
> +
> +Unmap external Rx queue rte_flow index mapping::
> +
> +   testpmd> port (port_id) ext_rxq unmap (rte_queue_id)
> +
> +where:
> +
> +* ``rte_queue_id``: queue index in reage [64536, 65535].

s/reage/range/ ?

And range looks odd, is [64536, 65535] correct? If so does it
deserve additional clarification?

> +* ``hw_queue_id``: queue index given by HW in queue creation.
> +
> +
>   Device Functions
>   ----------------


^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 2/2] app/testpmd: add test for external RxQ
  2022-03-03 13:02   ` Ferruh Yigit
@ 2022-03-07 15:51     ` Michael Baum
  0 siblings, 0 replies; 16+ messages in thread
From: Michael Baum @ 2022-03-07 15:51 UTC (permalink / raw)
  To: Ferruh Yigit, dev; +Cc: Xiaoyun Li, Aman Singh, Yuying Zhang, Matan Azrad


On 3/3/2022 3:02 PM, Ferruh Yigit wrote:
> On 3/1/2022 8:26 PM, Michael Baum wrote:
> > diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > index d6490947c4..de5665c886 100644
> > --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > @@ -2502,6 +2502,26 @@ To cleanup txq mbufs currently cached by
> driver::
> >
> >   If the value of ``free_cnt`` is 0, driver should free all cached mbufs.
> >
> > +
> > +port map external RxQ
> > +~~~~~~~~~~~~~~~~~~~~~
> > +
> > +MLX5 internal API for external RxQ mapping management.
> > +
> > +Map HW queue index (32 bit) to rte_flow queue index (16 bit) for
> external RxQ::
> > +
> > +   testpmd> port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)
> > +
> > +Unmap external Rx queue rte_flow index mapping::
> > +
> > +   testpmd> port (port_id) ext_rxq unmap (rte_queue_id)
> > +
> > +where:
> > +
> > +* ``rte_queue_id``: queue index in reage [64536, 65535].
> 
> s/reage/range/ ?

Yes you right, It should have been "range".
Thank you for This correction.

> And range looks odd, is [64536, 65535] correct? If so does it deserve
> additional clarification?

It is correct, it is the highest 1000 numbers in 16 bits.
[64536, 65535] is [UINT16_MAX - 1000 + 1, UINT16_MAX].
I'll add more explanation.

> > +* ``hw_queue_id``: queue index given by HW in queue creation.
> > +
> > +
> >   Device Functions
> >   ----------------


^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 1/2] app/testpmd: add test for remote PD and CTX
  2022-03-03 12:57   ` Ferruh Yigit
@ 2022-03-07 16:07     ` Michael Baum
  2022-03-08  9:40       ` Thomas Monjalon
  0 siblings, 1 reply; 16+ messages in thread
From: Michael Baum @ 2022-03-07 16:07 UTC (permalink / raw)
  To: Ferruh Yigit, dev; +Cc: Xiaoyun Li, Aman Singh, Yuying Zhang, Matan Azrad


On 3/3/2022 2:57 PM, Ferruh Yigit wrote: 
> On 3/1/2022 8:26 PM, Michael Baum wrote:
> > Add mlx5 internal option in testpmd run-time function "port attach" to
> > add another parameter named "mlx5_socket" for attaching port and add 2
> > devargs before.
> >
> > The arguments are "cmd_fd" and "pd_handle" using to import device
> > created out of PMD. Testpmd application import it using IPC, and
> > updates the devargs list before attaching.
> >
> > The syntax is:
> >
> >    testpmd > port attach (identifier) mlx5_socket=(path)
> >
> > Where "path" is the IPC socket path agreed on the remote process.
> >
> > Signed-off-by: Michael Baum <michaelba@nvidia.com>
> > Acked-by: Matan Azrad <matan@nvidia.com>
> 
> <...>
> 
> > diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build index
> > 43130c8856..c4fd379e67 100644
> > --- a/app/test-pmd/meson.build
> > +++ b/app/test-pmd/meson.build
> > @@ -73,3 +73,6 @@ endif
> >   if dpdk_conf.has('RTE_NET_DPAA')
> >       deps += ['bus_dpaa', 'mempool_dpaa', 'net_dpaa']
> >   endif
> > +if dpdk_conf.has('RTE_NET_MLX5')
> > +    deps += 'net_mlx5'
> > +endif
> 
> Is this patch introduce any build time dependency to mlx5 driver? If not this
> chunk should go to next patch, which uses mlx5 PMD specific API.

OK

> 
> <...>
> 
> > diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > index 1083c6d538..d6490947c4 100644
> > --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> > @@ -2127,6 +2127,46 @@ the mode and slave parameters must be given.
> >      Done
> >
> >
> > +port attach with mlx5 socket path
> > +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> > +
> > +MLX5 internal option to attach a port specified by pci address or
> > +virtual device args and add extra devargs to it, which is imported from external process::
> > +
> > +   testpmd> port attach (identifier) mlx5_socket=(path)
> > +
> > +where:
> > +
> > +* ``identifier``: pci address or virtual device args.
> > +* ``path``: socket path to import arguments agreed by the external process.
> > +
> > +The mlx5 PMD enables to import CTX and PD created outside the PMD.
> > +It gets as devargs the device's ``cmd_fd`` and ``pd_handle``, then
> > +using those arguments to import objects.
> > +See :ref:`mlx5 driver options <mlx5_common_driver_options>` for more information.
> > +
> > +When ``cmd_fd`` and ``pd_handle`` arguments are coming from another
> > +process, the FD must be dup'd before being passed.
> > +In this function, testpmd initializes IPC socket to get FD using SCM_RIGHTS.
> > +It gets the external process socket path, then import the ``cmd_fd``
> > +and ``pd_handle`` arguments and add them to devargs list.
> > +After updating this, it calls the regular ``port attach`` function
> > +with extended idevtifier.
> > +
> > +For example, to attach a port whose pci address is ``0000:0a:00.0``
> > +and its socket path is ``/var/run/import_ipc_socket``.
> > +
> > +.. code-block:: console
> > +
> > +   testpmd> port attach 0000:0a:00.0 mlx5_socket=/var/run/import_ipc_socket
> > +   Attaching a new port...
> > +   testpmd: MLX5 socket path is /var/run/import_ipc_socket
> > +   testpmd: Attach port with extra devargs 0000:0a:00.0,cmd_fd=40,pd_handle=1
> > +   EAL: Probe PCI driver: mlx5_pci (15b3:101d) device: 0000:03:00.0 (socket 0)
> > +   Port 0 is attached. Now total ports is 1
> > +   Done
> > +
> 
> 
> Hi Michael,
> 
> This is too much mlx5 specific addition, and I don't think it is good to extend
> testpmd with PMD specific code.
> If we enable it, sure there will be other vendors willing to do the same,
> making testpmd even messier.

Hi Ferruh,

It is mlx5 PMD specific API, which enables to import device from remote process.
This extension is the way to test this API, you can see a lot of PMD specific APIs along testpmd files. 

If one day, other vendors want to import devargs from remote process, they will remove the mlx5 build time dependency and use it.

> 
> I don't know what those ``cmd_fd`` and ``pd_handle`` (that read from
> provided socket), but can they be read from some other script and feed to
> testpmd, like a python wrapper etc...

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] app/testpmd: add test for remote PD and CTX
  2022-03-07 16:07     ` Michael Baum
@ 2022-03-08  9:40       ` Thomas Monjalon
  0 siblings, 0 replies; 16+ messages in thread
From: Thomas Monjalon @ 2022-03-08  9:40 UTC (permalink / raw)
  To: Ferruh Yigit, Michael Baum
  Cc: dev, Xiaoyun Li, Aman Singh, Yuying Zhang, Matan Azrad, asafp

07/03/2022 17:07, Michael Baum:
> On 3/3/2022 2:57 PM, Ferruh Yigit wrote: 
> > Hi Michael,
> > 
> > This is too much mlx5 specific addition, and I don't think it is good to extend
> > testpmd with PMD specific code.
> > If we enable it, sure there will be other vendors willing to do the same,
> > making testpmd even messier.
> 
> Hi Ferruh,
> 
> It is mlx5 PMD specific API, which enables to import device from remote process.
> This extension is the way to test this API, you can see a lot of PMD specific APIs along testpmd files. 
> 
> If one day, other vendors want to import devargs from remote process, they will remove the mlx5 build time dependency and use it.
> 
> > I don't know what those ``cmd_fd`` and ``pd_handle`` (that read from
> > provided socket), but can they be read from some other script and feed to
> > testpmd, like a python wrapper etc...

I agree with Ferruh that it's a lot of code only for mlx5.
Yes we are already calling other PMD-specific API in testpmd
but we should try to keep it as small as possible.
I propose to try a rework to make it easier to digest.
As a consequence, we won't have this testpmd feature in 22.03,
and we can work together for the next release.



^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v2 0/2] mlx5/testpmd: external RxQ tests
  2022-03-01 20:26 [PATCH 0/2] app/testpmd: external RxQ tests Michael Baum
  2022-03-01 20:26 ` [PATCH 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
  2022-03-01 20:26 ` [PATCH 2/2] app/testpmd: add test for external RxQ Michael Baum
@ 2022-06-16 17:10 ` Michael Baum
  2022-06-16 17:10   ` [PATCH v2 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
                     ` (3 more replies)
  2 siblings, 4 replies; 16+ messages in thread
From: Michael Baum @ 2022-06-16 17:10 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko

Recently [1] mlx5 PMD added support for external queues, in the
following patches add internal tests for it in the Testpmd application.

[1]
https://patchwork.dpdk.org/project/dpdk/cover/20220224232511.3238707-1-michaelba@nvidia.com/

Depends-on: series=112853 ("introduce per-queue available descriptor threshold and host shaper")

v1: Initial commit.
v2: Fix typos in documentation.
    Move mlx5 specific tests to mlx5 library.
    Change socket style SOCK_DGRAM -> SOCK_SEQPACKET.


Michael Baum (2):
  app/testpmd: add test for remote PD and CTX
  app/testpmd: add test for external RxQ

 app/test-pmd/cmdline.c          |  14 +-
 app/test-pmd/testpmd.c          |   5 +
 doc/guides/nics/mlx5.rst        |  63 +++++++
 drivers/net/mlx5/mlx5_testpmd.c | 298 +++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_testpmd.h |  16 ++
 5 files changed, 392 insertions(+), 4 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v2 1/2] app/testpmd: add test for remote PD and CTX
  2022-06-16 17:10 ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Michael Baum
@ 2022-06-16 17:10   ` Michael Baum
  2022-06-16 17:10   ` [PATCH v2 2/2] app/testpmd: add test for external RxQ Michael Baum
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 16+ messages in thread
From: Michael Baum @ 2022-06-16 17:10 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko

Add mlx5 internal option in testpmd run-time function "port attach" to
add another parameter named "mlx5_socket" for attaching port and add 2
devargs before.

The arguments are "cmd_fd" and "pd_handle", used to import a device
created outside the PMD. The Testpmd application imports it using IPC,
and updates the devargs list before attaching.

The syntax is:

  testpmd > port attach (identifier) mlx5_socket=(path)

Where "path" is the IPC socket path agreed on the remote process.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 app/test-pmd/cmdline.c          |  14 ++-
 app/test-pmd/testpmd.c          |   5 ++
 doc/guides/nics/mlx5.rst        |  44 ++++++++++
 drivers/net/mlx5/mlx5_testpmd.c | 145 ++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_testpmd.h |  16 ++++
 5 files changed, 222 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index a59e6166d5..869ecd3d2a 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -780,6 +780,12 @@ static void cmd_help_long_parsed(void *parsed_result,
 			"port attach (ident)\n"
 			"    Attach physical or virtual dev by pci address or virtual device name\n\n"
 
+#ifdef RTE_NET_MLX5
+			"port attach (ident) mlx5_socket=(path)\n"
+			"    Attach physical or virtual dev by pci address or virtual device name "
+			"and add \"cmd_fd\" and \"pd_handle\" devargs before attaching\n\n"
+#endif
+
 			"port detach (port_id)\n"
 			"    Detach physical or virtual dev by port_id\n\n"
 
@@ -1401,8 +1407,12 @@ static cmdline_parse_token_string_t cmd_operate_attach_port_identifier =
 static cmdline_parse_inst_t cmd_operate_attach_port = {
 	.f = cmd_operate_attach_port_parsed,
 	.data = NULL,
-	.help_str = "port attach <identifier>: "
-		"(identifier: pci address or virtual dev name)",
+	.help_str = "port attach <identifier> mlx5_socket=<path>: "
+		"(identifier: pci address or virtual dev name"
+#ifdef RTE_NET_MLX5
+		", path (optional): socket path to get cmd FD and PD handle"
+#endif
+		")",
 	.tokens = {
 		(void *)&cmd_operate_attach_port_port,
 		(void *)&cmd_operate_attach_port_keyword,
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index e6321bdedb..d2df6732a0 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -3360,6 +3360,11 @@ attach_port(char *identifier)
 		return;
 	}
 
+#if defined(RTE_NET_MLX5) && !defined(RTE_EXEC_ENV_WINDOWS)
+	if (mlx5_test_attach_port_extend_devargs(identifier) < 0)
+		return;
+#endif
+
 	if (rte_dev_probe(identifier) < 0) {
 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
 		return;
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 1b66b2bc33..392292cc95 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -1777,3 +1777,47 @@ the command sets the current shaper to 5Gbps and disables avail_thresh_triggered
 .. code-block:: console
 
    testpmd> mlx5 set port 1 host_shaper avail_thresh_triggered 0 rate 50
+
+
+Testpmd
+-------
+
+port attach with socket path
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Attach a port specified by pci address or virtual device args and add extra
+devargs to it, which is imported from external process::
+
+   testpmd> port attach (identifier) mlx5_socket=(path)
+
+where:
+
+* ``identifier``: pci address or virtual device args.
+* ``path``: socket path to import arguments agreed by the external process.
+
+The mlx5 PMD enables to import CTX and PD created outside the PMD.
+It gets as devargs the device's ``cmd_fd`` and ``pd_handle``,
+then using those arguments to import objects.
+See :ref:`mlx5 driver options <mlx5_common_driver_options>` for more information.
+
+When ``cmd_fd`` and ``pd_handle`` arguments are coming from another process,
+the FD must be dup'd before being passed.
+In this function, testpmd initializes IPC socket to get FD using SCM_RIGHTS.
+It gets the external process socket path, then import the ``cmd_fd`` and
+``pd_handle`` arguments and add them to devargs list.
+After updating this, it calls the regular ``port attach`` function
+with extended identifier.
+
+For example, to attach a port whose pci address is ``0000:0a:00.0`` and its
+socket path is ``/var/run/import_ipc_socket``.
+
+.. code-block:: console
+
+   testpmd> port attach 0000:0a:00.0 mlx5_socket=/var/run/import_ipc_socket
+   Attaching a new port...
+   testpmd: MLX5 socket path is /var/run/import_ipc_socket
+   testpmd: Attach port with extra devargs 0000:0a:00.0,cmd_fd=40,pd_handle=1
+   EAL: Probe PCI driver: mlx5_pci (15b3:101d) device: 0000:0a:00.0 (socket 0)
+   Port 0 is attached. Now total ports is 1
+   Done
+
diff --git a/drivers/net/mlx5/mlx5_testpmd.c b/drivers/net/mlx5/mlx5_testpmd.c
index 98bd395ae0..46444f06e6 100644
--- a/drivers/net/mlx5/mlx5_testpmd.c
+++ b/drivers/net/mlx5/mlx5_testpmd.c
@@ -6,6 +6,11 @@
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
+#include <unistd.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <sys/socket.h>
+#include <sys/un.h>
+#endif
 
 #include <rte_prefetch.h>
 #include <rte_common.h>
@@ -14,6 +19,7 @@
 #include <rte_alarm.h>
 #include <rte_pmd_mlx5.h>
 #include <rte_ethdev.h>
+
 #include "mlx5_testpmd.h"
 #include "testpmd.h"
 
@@ -111,6 +117,145 @@ mlx5_test_set_port_host_shaper(uint16_t port_id, uint16_t avail_thresh_triggered
 	return 0;
 }
 
+#ifndef RTE_EXEC_ENV_WINDOWS
+static const char*
+mlx5_test_get_socket_path(char *extend)
+{
+	if (strstr(extend, "mlx5_socket=") == extend) {
+		const char *socket_path = strchr(extend, '=') + 1;
+
+		TESTPMD_LOG(DEBUG, "MLX5 socket path is %s\n", socket_path);
+		return socket_path;
+	}
+
+	TESTPMD_LOG(ERR, "Failed to extract a valid socket path from %s\n",
+		    extend);
+	return NULL;
+}
+
+static int
+mlx5_test_extend_devargs(char *identifier, char *extend)
+{
+	struct sockaddr_un un = {
+		.sun_family = AF_UNIX,
+	};
+	int cmd_fd;
+	int pd_handle;
+	struct iovec iov = {
+		.iov_base = &pd_handle,
+		.iov_len = sizeof(int),
+	};
+	union {
+		char buf[CMSG_SPACE(sizeof(int))];
+		struct cmsghdr align;
+	} control;
+	struct msghdr msgh = {
+		.msg_iov = NULL,
+		.msg_iovlen = 0,
+	};
+	struct cmsghdr *cmsg;
+	const char *path = mlx5_test_get_socket_path(extend + 1);
+	size_t len = 1;
+	int socket_fd;
+	int ret;
+
+	if (path == NULL) {
+		TESTPMD_LOG(ERR, "Invalid devargs extension is specified\n");
+		return -1;
+	}
+
+	/* Initialize IPC channel. */
+	socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
+	if (socket_fd < 0) {
+		TESTPMD_LOG(ERR, "Failed to create unix socket: %s\n",
+			    strerror(errno));
+		return -1;
+	}
+	rte_strlcpy(un.sun_path, path, sizeof(un.sun_path));
+	if (connect(socket_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+		TESTPMD_LOG(ERR, "Failed to connect %s: %s\n", un.sun_path,
+			    strerror(errno));
+		close(socket_fd);
+		return -1;
+	}
+
+	/* Send the request message. */
+	do {
+		ret = sendmsg(socket_fd, &msgh, 0);
+	} while (ret < 0 && errno == EINTR);
+	if (ret < 0) {
+		TESTPMD_LOG(ERR, "Failed to send request to (%s): %s\n", path,
+			    strerror(errno));
+		close(socket_fd);
+		return -1;
+	}
+
+	msgh.msg_iov = &iov;
+	msgh.msg_iovlen = 1;
+	msgh.msg_control = control.buf;
+	msgh.msg_controllen = sizeof(control.buf);
+	do {
+		ret = recvmsg(socket_fd, &msgh, 0);
+	} while (ret < 0);
+	if (ret != sizeof(int) || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+		TESTPMD_LOG(ERR, "truncated msg");
+		close(socket_fd);
+		return -1;
+	}
+
+	/* Translate the FD. */
+	cmsg = CMSG_FIRSTHDR(&msgh);
+	if (cmsg == NULL || cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
+	    cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
+		TESTPMD_LOG(ERR, "Fail to get FD using SCM_RIGHTS mechanism\n");
+		close(socket_fd);
+		unlink(un.sun_path);
+		return -1;
+	}
+	memcpy(&cmd_fd, CMSG_DATA(cmsg), sizeof(int));
+
+	TESTPMD_LOG(DEBUG, "Command FD (%d) and PD handle (%d) "
+		    "are successfully imported from remote process\n",
+		    cmd_fd, pd_handle);
+
+	/* Cleanup IPC channel. */
+	close(socket_fd);
+
+	/* Calculate the new length of devargs string. */
+	len += snprintf(NULL, 0, ",cmd_fd=%d,pd_handle=%d", cmd_fd, pd_handle);
+	/* Extend the devargs string. */
+	snprintf(extend, len, ",cmd_fd=%d,pd_handle=%d", cmd_fd, pd_handle);
+
+	TESTPMD_LOG(DEBUG, "Attach port with extra devargs %s\n", identifier);
+	return 0;
+}
+
+static bool
+is_delimiter_path_spaces(char *extend)
+{
+	while (*extend != '\0') {
+		if (*extend != ' ')
+			return true;
+		extend++;
+	}
+	return false;
+}
+
+int
+mlx5_test_attach_port_extend_devargs(char *identifier)
+{
+	char *extend = strchr(identifier, ' ');
+
+	if (extend != NULL && is_delimiter_path_spaces(extend) &&
+	    mlx5_test_extend_devargs(identifier, extend) < 0) {
+		TESTPMD_LOG(ERR, "Failed to extend devargs for port %s\n",
+			    identifier);
+		return -1;
+	}
+	return 0;
+}
+#endif
+
 /* *** SET HOST_SHAPER FOR A PORT *** */
 struct cmd_port_host_shaper_result {
 	cmdline_fixed_string_t mlx5;
diff --git a/drivers/net/mlx5/mlx5_testpmd.h b/drivers/net/mlx5/mlx5_testpmd.h
index 7a54658eb5..06976341a4 100644
--- a/drivers/net/mlx5/mlx5_testpmd.h
+++ b/drivers/net/mlx5/mlx5_testpmd.h
@@ -23,4 +23,20 @@
 void
 mlx5_test_avail_thresh_event_handler(uint16_t port_id, uint16_t rxq_id);
 
+/**
+ * Extend devargs list with "cmd_fd" and "pd_handle" coming from external
+ * process. It happens only in this format:
+ *  testpmd> port attach (identifier) mlx5_socket=<socket path>
+ * all "(identifier) mlx5_socket=<socket path>" is in the same string pointed
+ * by the input parameter 'identifier'.
+ *
+ * @param identifier
+ *   Identifier of port attach command line.
+ *
+ * @return
+ *   0 on success, -1 on failure.
+ */
+int
+mlx5_test_attach_port_extend_devargs(char *identifier);
+
 #endif
-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v2 2/2] app/testpmd: add test for external RxQ
  2022-06-16 17:10 ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Michael Baum
  2022-06-16 17:10   ` [PATCH v2 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
@ 2022-06-16 17:10   ` Michael Baum
  2022-06-21  9:27   ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Raslan Darawsheh
  2022-06-28 14:58   ` [PATCH v3 0/2] net/mlx5: " Michael Baum
  3 siblings, 0 replies; 16+ messages in thread
From: Michael Baum @ 2022-06-16 17:10 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko

Add mlx5 internal test for map and unmap external RxQs.
This patch adds to Testpmd app a runtime function to test the mapping
API.

For insert mapping use this command:

  testpmd> port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)

For removing a mapping use this command:

  testpmd> port (port_id) ext_rxq unmap (rte_queue_id)

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/nics/mlx5.rst        |  19 ++++
 drivers/net/mlx5/mlx5_testpmd.c | 153 +++++++++++++++++++++++++++++++-
 2 files changed, 170 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 392292cc95..9007bfcac9 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -1821,3 +1821,22 @@ socket path is ``/var/run/import_ipc_socket``.
    Port 0 is attached. Now total ports is 1
    Done
 
+
+port map external RxQ
+~~~~~~~~~~~~~~~~~~~~~
+
+API for external RxQ mapping management.
+
+Map HW queue index (32 bit) to rte_flow queue index (16 bit) for external RxQ::
+
+   testpmd> port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)
+
+Unmap external Rx queue rte_flow index mapping::
+
+   testpmd> port (port_id) ext_rxq unmap (rte_queue_id)
+
+where:
+
+* ``rte_queue_id``: queue index in range [64536, 65535].
+  This range is the highest 1000 numbers represented by 16 bits.
+* ``hw_queue_id``: queue index given by HW in queue creation.
diff --git a/drivers/net/mlx5/mlx5_testpmd.c b/drivers/net/mlx5/mlx5_testpmd.c
index 46444f06e6..7007ee8a2c 100644
--- a/drivers/net/mlx5/mlx5_testpmd.c
+++ b/drivers/net/mlx5/mlx5_testpmd.c
@@ -334,13 +334,162 @@ static cmdline_parse_inst_t mlx5_test_cmd_port_host_shaper = {
 	}
 };
 
+/* Map HW queue index to rte queue index. */
+struct cmd_map_ext_rxq {
+	cmdline_fixed_string_t port;
+	portid_t port_id;
+	cmdline_fixed_string_t ext_rxq;
+	cmdline_fixed_string_t map;
+	uint16_t rte_queue_id;
+	uint32_t hw_queue_id;
+};
+
+cmdline_parse_token_string_t cmd_map_ext_rxq_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_map_ext_rxq, port, "port");
+cmdline_parse_token_num_t cmd_map_ext_rxq_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_map_ext_rxq, port_id, RTE_UINT16);
+cmdline_parse_token_string_t cmd_map_ext_rxq_ext_rxq =
+	TOKEN_STRING_INITIALIZER(struct cmd_map_ext_rxq, ext_rxq, "ext_rxq");
+cmdline_parse_token_string_t cmd_map_ext_rxq_map =
+	TOKEN_STRING_INITIALIZER(struct cmd_map_ext_rxq, map, "map");
+cmdline_parse_token_num_t cmd_map_ext_rxq_rte_queue_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_map_ext_rxq, rte_queue_id, RTE_UINT16);
+cmdline_parse_token_num_t cmd_map_ext_rxq_hw_queue_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_map_ext_rxq, hw_queue_id, RTE_UINT32);
+
+static void
+cmd_map_ext_rxq_parsed(void *parsed_result,
+		       __rte_unused struct cmdline *cl,
+		       __rte_unused void *data)
+{
+	struct cmd_map_ext_rxq *res = parsed_result;
+	int ret;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+	ret = rte_pmd_mlx5_external_rx_queue_id_map(res->port_id,
+						    res->rte_queue_id,
+						    res->hw_queue_id);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		fprintf(stderr, "invalid rte_flow index (%u), out of range\n",
+			res->rte_queue_id);
+		break;
+	case -ENODEV:
+		fprintf(stderr, "invalid port_id %u\n", res->port_id);
+		break;
+	case -ENOTSUP:
+		fprintf(stderr, "function not implemented or supported\n");
+		break;
+	case -EEXIST:
+		fprintf(stderr, "mapping with index %u already exists\n",
+			res->rte_queue_id);
+		break;
+	default:
+		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_map_ext_rxq = {
+	.f = cmd_map_ext_rxq_parsed,
+	.data = NULL,
+	.help_str = "port <port_id> ext_rxq map <rte_queue_id> <hw_queue_id>",
+	.tokens = {
+		(void *)&cmd_map_ext_rxq_port,
+		(void *)&cmd_map_ext_rxq_port_id,
+		(void *)&cmd_map_ext_rxq_ext_rxq,
+		(void *)&cmd_map_ext_rxq_map,
+		(void *)&cmd_map_ext_rxq_rte_queue_id,
+		(void *)&cmd_map_ext_rxq_hw_queue_id,
+		NULL,
+	}
+};
+
+/* Unmap HW queue index to rte queue index. */
+struct cmd_unmap_ext_rxq {
+	cmdline_fixed_string_t port;
+	portid_t port_id;
+	cmdline_fixed_string_t ext_rxq;
+	cmdline_fixed_string_t unmap;
+	uint16_t queue_id;
+};
+
+cmdline_parse_token_string_t cmd_unmap_ext_rxq_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_unmap_ext_rxq, port, "port");
+cmdline_parse_token_num_t cmd_unmap_ext_rxq_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_unmap_ext_rxq, port_id, RTE_UINT16);
+cmdline_parse_token_string_t cmd_unmap_ext_rxq_ext_rxq =
+	TOKEN_STRING_INITIALIZER(struct cmd_unmap_ext_rxq, ext_rxq, "ext_rxq");
+cmdline_parse_token_string_t cmd_unmap_ext_rxq_unmap =
+	TOKEN_STRING_INITIALIZER(struct cmd_unmap_ext_rxq, unmap, "unmap");
+cmdline_parse_token_num_t cmd_unmap_ext_rxq_queue_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_unmap_ext_rxq, queue_id, RTE_UINT16);
+
+static void
+cmd_unmap_ext_rxq_parsed(void *parsed_result,
+			 __rte_unused struct cmdline *cl,
+			 __rte_unused void *data)
+{
+	struct cmd_unmap_ext_rxq *res = parsed_result;
+	int ret;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+	ret = rte_pmd_mlx5_external_rx_queue_id_unmap(res->port_id,
+						      res->queue_id);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		fprintf(stderr, "invalid rte_flow index (%u), "
+			"out of range, doesn't exist or still referenced\n",
+			res->queue_id);
+		break;
+	case -ENODEV:
+		fprintf(stderr, "invalid port_id %u\n", res->port_id);
+		break;
+	case -ENOTSUP:
+		fprintf(stderr, "function not implemented or supported\n");
+		break;
+	default:
+		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_unmap_ext_rxq = {
+	.f = cmd_unmap_ext_rxq_parsed,
+	.data = NULL,
+	.help_str = "port <port_id> ext_rxq unmap <queue_id>",
+	.tokens = {
+		(void *)&cmd_unmap_ext_rxq_port,
+		(void *)&cmd_unmap_ext_rxq_port_id,
+		(void *)&cmd_unmap_ext_rxq_ext_rxq,
+		(void *)&cmd_unmap_ext_rxq_unmap,
+		(void *)&cmd_unmap_ext_rxq_queue_id,
+		NULL,
+	}
+};
+
 static struct testpmd_driver_commands mlx5_driver_cmds = {
 	.commands = {
 		{
 			.ctx = &mlx5_test_cmd_port_host_shaper,
 			.help = "mlx5 set port (port_id) host_shaper avail_thresh_triggered (on|off)"
-			"rate (rate_num):\n"
-			"    Set HOST_SHAPER avail_thresh_triggered and rate with port_id\n\n",
+				"rate (rate_num):\n"
+				"    Set HOST_SHAPER avail_thresh_triggered and rate with port_id\n\n",
+		},
+		{
+			.ctx = &cmd_map_ext_rxq,
+			.help = "port (port_id) ext_rxq map (rte_queue_id) (hw_queue_id)\n"
+				"    Map HW queue index (32 bit) to rte_flow"
+				" queue index (16 bit) for external RxQ\n\n",
+		},
+		{
+			.ctx = &cmd_unmap_ext_rxq,
+			.help = "port (port_id) ext_rxq unmap (rte_queue_id)\n"
+				"    Unmap external Rx queue rte_flow index mapping\n\n",
 		},
 		{
 			.ctx = NULL,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH v2 0/2] mlx5/testpmd: external RxQ tests
  2022-06-16 17:10 ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Michael Baum
  2022-06-16 17:10   ` [PATCH v2 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
  2022-06-16 17:10   ` [PATCH v2 2/2] app/testpmd: add test for external RxQ Michael Baum
@ 2022-06-21  9:27   ` Raslan Darawsheh
  2022-06-28 14:58   ` [PATCH v3 0/2] net/mlx5: " Michael Baum
  3 siblings, 0 replies; 16+ messages in thread
From: Raslan Darawsheh @ 2022-06-21  9:27 UTC (permalink / raw)
  To: Michael Baum, dev; +Cc: Matan Azrad, Slava Ovsiienko

Hi,

> -----Original Message-----
> From: Michael Baum <michaelba@nvidia.com>
> Sent: Thursday, June 16, 2022 8:10 PM
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: [PATCH v2 0/2] mlx5/testpmd: external RxQ tests
> 
> Recently [1] mlx5 PMD added support for external queues, in the following
> patches add internal tests for in Testpmd application.
> 
> [1]
> https://patchwork.dpdk.org/project/dpdk/cover/20220224232511.3238707-
> 1-michaelba@nvidia.com/
> 
> Depends-on: series=112853 ("introduce per-queue available descriptor
> threshold and host shaper")
> 
> v1: Initial commit.
> v2: Fix typos in documentation.
>     Move mlx5 specific tests to mlx5 library.
>     Change socket style SOCK_DGRAM -> SOCK_SEQPACKET.
> 
> 
> Michael Baum (2):
>   app/testpmd: add test for remote PD and CTX
>   app/testpmd: add test for external RxQ
> 
>  app/test-pmd/cmdline.c          |  14 +-
>  app/test-pmd/testpmd.c          |   5 +
>  doc/guides/nics/mlx5.rst        |  63 +++++++
>  drivers/net/mlx5/mlx5_testpmd.c | 298
> +++++++++++++++++++++++++++++++-  drivers/net/mlx5/mlx5_testpmd.h
> |  16 ++
>  5 files changed, 392 insertions(+), 4 deletions(-)
> 
> --
> 2.25.1

Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3 0/2] net/mlx5: external RxQ tests
  2022-06-16 17:10 ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Michael Baum
                     ` (2 preceding siblings ...)
  2022-06-21  9:27   ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Raslan Darawsheh
@ 2022-06-28 14:58   ` Michael Baum
  2022-06-28 14:58     ` [PATCH v3 1/2] net/mlx5: add test for remote PD and CTX Michael Baum
                       ` (2 more replies)
  3 siblings, 3 replies; 16+ messages in thread
From: Michael Baum @ 2022-06-28 14:58 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko

Recently [1] mlx5 PMD added support for external queues, in the
following patches add internal tests for it in the Testpmd application.

[1]
https://patchwork.dpdk.org/project/dpdk/cover/20220224232511.3238707-1-michaelba@nvidia.com/

v1: Initial commit.
v2: Fix typos in documentation.
    Move mlx5 specific tests to mlx5 library.
    Change socket style SOCK_DGRAM -> SOCK_SEQPACKET.
v3: Move all these tests to mlx5 library.
    Add mlx5 prefix to mlx5 specific tests.
    Improve documentation.

Michael Baum (2):
  net/mlx5: add test for remote PD and CTX
  net/mlx5: add test for external Rx queue

 doc/guides/nics/mlx5.rst                    |  66 ++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   2 +
 drivers/net/mlx5/mlx5_testpmd.c             | 383 ++++++++++++++++++++
 3 files changed, 451 insertions(+)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3 1/2] net/mlx5: add test for remote PD and CTX
  2022-06-28 14:58   ` [PATCH v3 0/2] net/mlx5: " Michael Baum
@ 2022-06-28 14:58     ` Michael Baum
  2022-06-28 14:58     ` [PATCH v3 2/2] net/mlx5: add test for external Rx queue Michael Baum
  2022-06-29  9:06     ` [PATCH v3 0/2] net/mlx5: external RxQ tests Raslan Darawsheh
  2 siblings, 0 replies; 16+ messages in thread
From: Michael Baum @ 2022-06-28 14:58 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, Thomas Monjalon

Add mlx5 internal option in testpmd similar to run-time function
"port attach" which adds another parameter named "socket" for attaching
port and add 2 devargs before.

The arguments are "cmd_fd" and "pd_handle", used to import a device
created outside the PMD. The Testpmd application imports it using IPC,
and updates the devargs list before attaching.

These arguments were added in this commit [1].

The syntax is:

  testpmd > mlx5 port attach (identifier) socket=(path)

Where "path" is the IPC socket path agreed on the remote process.

[1]
http://patches.dpdk.org/project/dpdk/patch/20220224232511.3238707-4-michaelba@nvidia.com/

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Reviewed-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/nics/mlx5.rst                    |  47 +++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   2 +
 drivers/net/mlx5/mlx5_testpmd.c             | 220 ++++++++++++++++++++
 3 files changed, 269 insertions(+)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index bc2bd2c8a6..cd3a613640 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -1793,3 +1793,50 @@ and disables ``avail_thresh_triggered``.
 .. code-block:: console
 
    testpmd> mlx5 set port 1 host_shaper avail_thresh_triggered 0 rate 50
+
+
+Testpmd
+-------
+
+port attach with socket path
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to allocate a port with ``libibverbs`` from external application.
+For importing the external port with extra device arguments,
+there is a specific testpmd command
+similar to :ref:`port attach command <port_attach>`::
+
+   testpmd> mlx5 port attach (identifier) socket=(path)
+
+where:
+
+* ``identifier``: device identifier with optional parameters
+  as same as :ref:`port attach command <port_attach>`.
+* ``path``: path to IPC server socket created by the external application.
+
+This command performs:
+
+#. Open IPC client socket using the given path, and connect it.
+
+#. Import ibverbs context and ibverbs protection domain.
+
+#. Add two device arguments for context (``cmd_fd``)
+   and protection domain (``pd_handle``) to the device identifier.
+   See :ref:`mlx5 driver options <mlx5_common_driver_options>` for more
+   information about these device arguments.
+
+#. Call the regular ``port attach`` function with updated identifier.
+
+For example, to attach a port whose PCI address is ``0000:0a:00.0``
+and its socket path is ``/var/run/import_ipc_socket``:
+
+.. code-block:: console
+
+   testpmd> mlx5 port attach 0000:0a:00.0 socket=/var/run/import_ipc_socket
+   testpmd: MLX5 socket path is /var/run/import_ipc_socket
+   testpmd: Attach port with extra devargs 0000:0a:00.0,cmd_fd=40,pd_handle=1
+   Attaching a new port...
+   EAL: Probe PCI driver: mlx5_pci (15b3:101d) device: 0000:0a:00.0 (socket 0)
+   Port 0 is attached. Now total ports is 1
+   Done
+
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index f716ea2797..c0965cd3b9 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -1882,6 +1882,8 @@ The following sections show functions for configuring ports.
 
    Port configuration changes only become active when forwarding is started/restarted.
 
+.. _port_attach:
+
 port attach
 ~~~~~~~~~~~
 
diff --git a/drivers/net/mlx5/mlx5_testpmd.c b/drivers/net/mlx5/mlx5_testpmd.c
index 4f9826496d..463ee8e764 100644
--- a/drivers/net/mlx5/mlx5_testpmd.c
+++ b/drivers/net/mlx5/mlx5_testpmd.c
@@ -6,6 +6,11 @@
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
+#include <unistd.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
+#include <sys/socket.h>
+#include <sys/un.h>
+#endif
 
 #include <rte_prefetch.h>
 #include <rte_common.h>
@@ -14,6 +19,7 @@
 #include <rte_alarm.h>
 #include <rte_pmd_mlx5.h>
 #include <rte_ethdev.h>
+
 #include "mlx5_testpmd.h"
 #include "testpmd.h"
 
@@ -111,6 +117,162 @@ mlx5_test_set_port_host_shaper(uint16_t port_id, uint16_t avail_thresh_triggered
 	return 0;
 }
 
+#ifndef RTE_EXEC_ENV_WINDOWS
+static const char*
+mlx5_test_get_socket_path(char *extend)
+{
+	if (strstr(extend, "socket=") == extend) {
+		const char *socket_path = strchr(extend, '=') + 1;
+
+		TESTPMD_LOG(DEBUG, "MLX5 socket path is %s\n", socket_path);
+		return socket_path;
+	}
+
+	TESTPMD_LOG(ERR, "Failed to extract a valid socket path from %s\n",
+		    extend);
+	return NULL;
+}
+
+static int
+mlx5_test_extend_devargs(char *identifier, char *extend)
+{
+	struct sockaddr_un un = {
+		.sun_family = AF_UNIX,
+	};
+	int cmd_fd;
+	int pd_handle;
+	struct iovec iov = {
+		.iov_base = &pd_handle,
+		.iov_len = sizeof(int),
+	};
+	union {
+		char buf[CMSG_SPACE(sizeof(int))];
+		struct cmsghdr align;
+	} control;
+	struct msghdr msgh = {
+		.msg_iov = NULL,
+		.msg_iovlen = 0,
+	};
+	struct cmsghdr *cmsg;
+	const char *path = mlx5_test_get_socket_path(extend + 1);
+	size_t len = 1;
+	int socket_fd;
+	int ret;
+
+	if (path == NULL) {
+		TESTPMD_LOG(ERR, "Invalid devargs extension is specified\n");
+		return -1;
+	}
+
+	/* Initialize IPC channel. */
+	socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
+	if (socket_fd < 0) {
+		TESTPMD_LOG(ERR, "Failed to create unix socket: %s\n",
+			    strerror(errno));
+		return -1;
+	}
+	rte_strlcpy(un.sun_path, path, sizeof(un.sun_path));
+	if (connect(socket_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+		TESTPMD_LOG(ERR, "Failed to connect %s: %s\n", un.sun_path,
+			    strerror(errno));
+		close(socket_fd);
+		return -1;
+	}
+
+	/* Send the request message. */
+	do {
+		ret = sendmsg(socket_fd, &msgh, 0);
+	} while (ret < 0 && errno == EINTR);
+	if (ret < 0) {
+		TESTPMD_LOG(ERR, "Failed to send request to (%s): %s\n", path,
+			    strerror(errno));
+		close(socket_fd);
+		return -1;
+	}
+
+	msgh.msg_iov = &iov;
+	msgh.msg_iovlen = 1;
+	msgh.msg_control = control.buf;
+	msgh.msg_controllen = sizeof(control.buf);
+	do {
+		ret = recvmsg(socket_fd, &msgh, 0);
+	} while (ret < 0);
+	if (ret != sizeof(int) || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+		TESTPMD_LOG(ERR, "truncated msg");
+		close(socket_fd);
+		return -1;
+	}
+
+	/* Translate the FD. */
+	cmsg = CMSG_FIRSTHDR(&msgh);
+	if (cmsg == NULL || cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
+	    cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
+		TESTPMD_LOG(ERR, "Fail to get FD using SCM_RIGHTS mechanism\n");
+		close(socket_fd);
+		unlink(un.sun_path);
+		return -1;
+	}
+	memcpy(&cmd_fd, CMSG_DATA(cmsg), sizeof(int));
+
+	TESTPMD_LOG(DEBUG, "Command FD (%d) and PD handle (%d) "
+		    "are successfully imported from remote process\n",
+		    cmd_fd, pd_handle);
+
+	/* Cleanup IPC channel. */
+	close(socket_fd);
+
+	/* Calculate the new length of devargs string. */
+	len += snprintf(NULL, 0, ",cmd_fd=%d,pd_handle=%d", cmd_fd, pd_handle);
+	/* Extend the devargs string. */
+	snprintf(extend, len, ",cmd_fd=%d,pd_handle=%d", cmd_fd, pd_handle);
+
+	TESTPMD_LOG(DEBUG, "Attach port with extra devargs %s\n", identifier);
+	return 0;
+}
+
+static bool
+is_delimiter_path_spaces(char *extend)
+{
+	while (*extend != '\0') {
+		if (*extend != ' ')
+			return true;
+		extend++;
+	}
+	return false;
+}
+
+/*
+ * Extend devargs list with "cmd_fd" and "pd_handle" coming from external
+ * process. It happens only in this format:
+ *  testpmd> mlx5 port attach (identifier) socket=<socket path>
+ * all "(identifier) socket=<socket path>" is in the same string pointed
+ * by the input parameter 'identifier'.
+ *
+ * @param identifier
+ *   Identifier of port attach command line.
+ */
+static void
+mlx5_test_attach_port_extend_devargs(char *identifier)
+{
+	char *extend;
+
+	if (identifier == NULL) {
+		fprintf(stderr, "Invalid parameters are specified\n");
+		return;
+	}
+
+	extend = strchr(identifier, ' ');
+	if (extend != NULL && is_delimiter_path_spaces(extend) &&
+	    mlx5_test_extend_devargs(identifier, extend) < 0) {
+		TESTPMD_LOG(ERR, "Failed to extend devargs for port %s\n",
+			    identifier);
+		return;
+	}
+
+	attach_port(identifier);
+}
+#endif
+
 /* *** SET HOST_SHAPER FOR A PORT *** */
 struct cmd_port_host_shaper_result {
 	cmdline_fixed_string_t mlx5;
@@ -189,6 +351,56 @@ static cmdline_parse_inst_t mlx5_test_cmd_port_host_shaper = {
 	}
 };
 
+#ifndef RTE_EXEC_ENV_WINDOWS
+/* *** attach a specified port *** */
+struct mlx5_cmd_operate_attach_port_result {
+	cmdline_fixed_string_t mlx5;
+	cmdline_fixed_string_t port;
+	cmdline_fixed_string_t keyword;
+	cmdline_multi_string_t identifier;
+};
+
+static void mlx5_cmd_operate_attach_port_parsed(void *parsed_result,
+						__rte_unused struct cmdline *cl,
+						__rte_unused void *data)
+{
+	struct mlx5_cmd_operate_attach_port_result *res = parsed_result;
+
+	if (!strcmp(res->keyword, "attach"))
+		mlx5_test_attach_port_extend_devargs(res->identifier);
+	else
+		fprintf(stderr, "Unknown parameter\n");
+}
+
+static cmdline_parse_token_string_t mlx5_cmd_operate_attach_port_mlx5 =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_operate_attach_port_result,
+				 mlx5, "mlx5");
+static cmdline_parse_token_string_t mlx5_cmd_operate_attach_port_port =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_operate_attach_port_result,
+				 port, "port");
+static cmdline_parse_token_string_t mlx5_cmd_operate_attach_port_keyword =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_operate_attach_port_result,
+				 keyword, "attach");
+static cmdline_parse_token_string_t mlx5_cmd_operate_attach_port_identifier =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_operate_attach_port_result,
+				 identifier, TOKEN_STRING_MULTI);
+
+static cmdline_parse_inst_t mlx5_cmd_operate_attach_port = {
+	.f = mlx5_cmd_operate_attach_port_parsed,
+	.data = NULL,
+	.help_str = "mlx5 port attach <identifier> socket=<path>: "
+		"(identifier: pci address or virtual dev name"
+		", path (optional): socket path to get cmd FD and PD handle)",
+	.tokens = {
+		(void *)&mlx5_cmd_operate_attach_port_mlx5,
+		(void *)&mlx5_cmd_operate_attach_port_port,
+		(void *)&mlx5_cmd_operate_attach_port_keyword,
+		(void *)&mlx5_cmd_operate_attach_port_identifier,
+		NULL,
+	},
+};
+#endif
+
 static struct testpmd_driver_commands mlx5_driver_cmds = {
 	.commands = {
 		{
@@ -197,6 +409,14 @@ static struct testpmd_driver_commands mlx5_driver_cmds = {
 				"rate (rate_num):\n"
 				"    Set HOST_SHAPER avail_thresh_triggered and rate with port_id\n\n",
 		},
+#ifndef RTE_EXEC_ENV_WINDOWS
+		{
+			.ctx = &mlx5_cmd_operate_attach_port,
+			.help = "mlx5 port attach (ident) socket=(path)\n"
+				"    Attach physical or virtual dev by pci address or virtual device name "
+				"and add \"cmd_fd\" and \"pd_handle\" devargs before attaching\n\n",
+		},
+#endif
 		{
 			.ctx = NULL,
 		},
-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3 2/2] net/mlx5: add test for external Rx queue
  2022-06-28 14:58   ` [PATCH v3 0/2] net/mlx5: " Michael Baum
  2022-06-28 14:58     ` [PATCH v3 1/2] net/mlx5: add test for remote PD and CTX Michael Baum
@ 2022-06-28 14:58     ` Michael Baum
  2022-06-29  9:06     ` [PATCH v3 0/2] net/mlx5: external RxQ tests Raslan Darawsheh
  2 siblings, 0 replies; 16+ messages in thread
From: Michael Baum @ 2022-06-28 14:58 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, Thomas Monjalon

Add mlx5 internal test for map and unmap external RxQs.
This patch adds to Testpmd app a runtime function to test the mapping
API.

For insert mapping use this command:

  testpmd> mlx5 port (port_id) ext_rxq map (sw_queue_id) (hw_queue_id)

For removing the mapping use this command:

  testpmd> mlx5 port (port_id) ext_rxq unmap (sw_queue_id)

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Reviewed-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/nics/mlx5.rst        |  19 ++++
 drivers/net/mlx5/mlx5_testpmd.c | 163 ++++++++++++++++++++++++++++++++
 2 files changed, 182 insertions(+)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index cd3a613640..9f2832e284 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -1840,3 +1840,22 @@ and its socket path is ``/var/run/import_ipc_socket``:
    Port 0 is attached. Now total ports is 1
    Done
 
+
+port map external Rx queue
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+External Rx queue indexes mapping management.
+
+Map HW queue index (32-bit) to ethdev queue index (16-bit) for external Rx queue::
+
+   testpmd> mlx5 port (port_id) ext_rxq map (sw_queue_id) (hw_queue_id)
+
+Unmap external Rx queue::
+
+   testpmd> mlx5 port (port_id) ext_rxq unmap (sw_queue_id)
+
+where:
+
+* ``sw_queue_id``: queue index in range [64536, 65535].
+  This range is the highest 1000 numbers.
+* ``hw_queue_id``: queue index given by HW in queue creation.
diff --git a/drivers/net/mlx5/mlx5_testpmd.c b/drivers/net/mlx5/mlx5_testpmd.c
index 463ee8e764..ed845834aa 100644
--- a/drivers/net/mlx5/mlx5_testpmd.c
+++ b/drivers/net/mlx5/mlx5_testpmd.c
@@ -401,6 +401,158 @@ static cmdline_parse_inst_t mlx5_cmd_operate_attach_port = {
 };
 #endif
 
+/* Map HW queue index to rte queue index. */
+struct mlx5_cmd_map_ext_rxq {
+	cmdline_fixed_string_t mlx5;
+	cmdline_fixed_string_t port;
+	portid_t port_id;
+	cmdline_fixed_string_t ext_rxq;
+	cmdline_fixed_string_t map;
+	uint16_t sw_queue_id;
+	uint32_t hw_queue_id;
+};
+
+cmdline_parse_token_string_t mlx5_cmd_map_ext_rxq_mlx5 =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_map_ext_rxq, mlx5, "mlx5");
+cmdline_parse_token_string_t mlx5_cmd_map_ext_rxq_port =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_map_ext_rxq, port, "port");
+cmdline_parse_token_num_t mlx5_cmd_map_ext_rxq_port_id =
+	TOKEN_NUM_INITIALIZER(struct mlx5_cmd_map_ext_rxq, port_id, RTE_UINT16);
+cmdline_parse_token_string_t mlx5_cmd_map_ext_rxq_ext_rxq =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_map_ext_rxq, ext_rxq,
+				 "ext_rxq");
+cmdline_parse_token_string_t mlx5_cmd_map_ext_rxq_map =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_map_ext_rxq, map, "map");
+cmdline_parse_token_num_t mlx5_cmd_map_ext_rxq_sw_queue_id =
+	TOKEN_NUM_INITIALIZER(struct mlx5_cmd_map_ext_rxq, sw_queue_id,
+			      RTE_UINT16);
+cmdline_parse_token_num_t mlx5_cmd_map_ext_rxq_hw_queue_id =
+	TOKEN_NUM_INITIALIZER(struct mlx5_cmd_map_ext_rxq, hw_queue_id,
+			      RTE_UINT32);
+
+static void
+mlx5_cmd_map_ext_rxq_parsed(void *parsed_result,
+			    __rte_unused struct cmdline *cl,
+			    __rte_unused void *data)
+{
+	struct mlx5_cmd_map_ext_rxq *res = parsed_result;
+	int ret;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+	ret = rte_pmd_mlx5_external_rx_queue_id_map(res->port_id,
+						    res->sw_queue_id,
+						    res->hw_queue_id);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		fprintf(stderr, "invalid ethdev index (%u), out of range\n",
+			res->sw_queue_id);
+		break;
+	case -ENODEV:
+		fprintf(stderr, "invalid port_id %u\n", res->port_id);
+		break;
+	case -ENOTSUP:
+		fprintf(stderr, "function not implemented or supported\n");
+		break;
+	case -EEXIST:
+		fprintf(stderr, "mapping with index %u already exists\n",
+			res->sw_queue_id);
+		break;
+	default:
+		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t mlx5_cmd_map_ext_rxq = {
+	.f = mlx5_cmd_map_ext_rxq_parsed,
+	.data = NULL,
+	.help_str = "mlx5 port <port_id> ext_rxq map <sw_queue_id> <hw_queue_id>",
+	.tokens = {
+		(void *)&mlx5_cmd_map_ext_rxq_mlx5,
+		(void *)&mlx5_cmd_map_ext_rxq_port,
+		(void *)&mlx5_cmd_map_ext_rxq_port_id,
+		(void *)&mlx5_cmd_map_ext_rxq_ext_rxq,
+		(void *)&mlx5_cmd_map_ext_rxq_map,
+		(void *)&mlx5_cmd_map_ext_rxq_sw_queue_id,
+		(void *)&mlx5_cmd_map_ext_rxq_hw_queue_id,
+		NULL,
+	}
+};
+
+/* Unmap HW queue index to rte queue index. */
+struct mlx5_cmd_unmap_ext_rxq {
+	cmdline_fixed_string_t mlx5;
+	cmdline_fixed_string_t port;
+	portid_t port_id;
+	cmdline_fixed_string_t ext_rxq;
+	cmdline_fixed_string_t unmap;
+	uint16_t queue_id;
+};
+
+cmdline_parse_token_string_t mlx5_cmd_unmap_ext_rxq_mlx5 =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_unmap_ext_rxq, mlx5, "mlx5");
+cmdline_parse_token_string_t mlx5_cmd_unmap_ext_rxq_port =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_unmap_ext_rxq, port, "port");
+cmdline_parse_token_num_t mlx5_cmd_unmap_ext_rxq_port_id =
+	TOKEN_NUM_INITIALIZER(struct mlx5_cmd_unmap_ext_rxq, port_id,
+			      RTE_UINT16);
+cmdline_parse_token_string_t mlx5_cmd_unmap_ext_rxq_ext_rxq =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_unmap_ext_rxq, ext_rxq,
+				 "ext_rxq");
+cmdline_parse_token_string_t mlx5_cmd_unmap_ext_rxq_unmap =
+	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_unmap_ext_rxq, unmap, "unmap");
+cmdline_parse_token_num_t mlx5_cmd_unmap_ext_rxq_queue_id =
+	TOKEN_NUM_INITIALIZER(struct mlx5_cmd_unmap_ext_rxq, queue_id,
+			      RTE_UINT16);
+
+static void
+mlx5_cmd_unmap_ext_rxq_parsed(void *parsed_result,
+			      __rte_unused struct cmdline *cl,
+			      __rte_unused void *data)
+{
+	struct mlx5_cmd_unmap_ext_rxq *res = parsed_result;
+	int ret;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+	ret = rte_pmd_mlx5_external_rx_queue_id_unmap(res->port_id,
+						      res->queue_id);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		fprintf(stderr, "invalid rte_flow index (%u), "
+			"out of range, doesn't exist or still referenced\n",
+			res->queue_id);
+		break;
+	case -ENODEV:
+		fprintf(stderr, "invalid port_id %u\n", res->port_id);
+		break;
+	case -ENOTSUP:
+		fprintf(stderr, "function not implemented or supported\n");
+		break;
+	default:
+		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t mlx5_cmd_unmap_ext_rxq = {
+	.f = mlx5_cmd_unmap_ext_rxq_parsed,
+	.data = NULL,
+	.help_str = "mlx5 port <port_id> ext_rxq unmap <queue_id>",
+	.tokens = {
+		(void *)&mlx5_cmd_unmap_ext_rxq_mlx5,
+		(void *)&mlx5_cmd_unmap_ext_rxq_port,
+		(void *)&mlx5_cmd_unmap_ext_rxq_port_id,
+		(void *)&mlx5_cmd_unmap_ext_rxq_ext_rxq,
+		(void *)&mlx5_cmd_unmap_ext_rxq_unmap,
+		(void *)&mlx5_cmd_unmap_ext_rxq_queue_id,
+		NULL,
+	}
+};
+
 static struct testpmd_driver_commands mlx5_driver_cmds = {
 	.commands = {
 		{
@@ -417,6 +569,17 @@ static struct testpmd_driver_commands mlx5_driver_cmds = {
 				"and add \"cmd_fd\" and \"pd_handle\" devargs before attaching\n\n",
 		},
 #endif
+		{
+			.ctx = &mlx5_cmd_map_ext_rxq,
+			.help = "mlx5 port (port_id) ext_rxq map (sw_queue_id) (hw_queue_id)\n"
+				"    Map HW queue index (32-bit) to ethdev"
+				" queue index (16-bit) for external RxQ\n\n",
+		},
+		{
+			.ctx = &mlx5_cmd_unmap_ext_rxq,
+			.help = "mlx5 port (port_id) ext_rxq unmap (sw_queue_id)\n"
+				"    Unmap external Rx queue ethdev index mapping\n\n",
+		},
 		{
 			.ctx = NULL,
 		},
-- 
2.25.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH v3 0/2] net/mlx5: external RxQ tests
  2022-06-28 14:58   ` [PATCH v3 0/2] net/mlx5: " Michael Baum
  2022-06-28 14:58     ` [PATCH v3 1/2] net/mlx5: add test for remote PD and CTX Michael Baum
  2022-06-28 14:58     ` [PATCH v3 2/2] net/mlx5: add test for external Rx queue Michael Baum
@ 2022-06-29  9:06     ` Raslan Darawsheh
  2 siblings, 0 replies; 16+ messages in thread
From: Raslan Darawsheh @ 2022-06-29  9:06 UTC (permalink / raw)
  To: Michael Baum, dev; +Cc: Matan Azrad, Slava Ovsiienko

Hi,

> -----Original Message-----
> From: Michael Baum <michaelba@nvidia.com>
> Sent: Tuesday, June 28, 2022 5:59 PM
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: [PATCH v3 0/2] net/mlx5: external RxQ tests
> 
> Recently [1] mlx5 PMD added support for external queues, in the following
> patches add internal tests for it in the Testpmd application.
> 
> [1]
> https://patchwork.dpdk.org/project/dpdk/cover/20220224232511.3238707-
> 1-michaelba@nvidia.com/
> 
> v1: Initial commit.
> v2: Fix typos in documentation.
>     Move mlx5 specific tests to mlx5 library.
>     Change socket style SOCK_DGRAM -> SOCK_SEQPACKET.
> v3: Move all these tests to mlx5 library.
>     Add mlx5 prefix to mlx5 specific tests.
>     Improve documentation.
> 
> Michael Baum (2):
>   net/mlx5: add test for remote PD and CTX
>   net/mlx5: add test for external Rx queue
> 
>  doc/guides/nics/mlx5.rst                    |  66 ++++
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |   2 +
>  drivers/net/mlx5/mlx5_testpmd.c             | 383 ++++++++++++++++++++
>  3 files changed, 451 insertions(+)
> 
> --
> 2.25.1

Replaced old series with this version:
Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2022-06-29  9:06 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-01 20:26 [PATCH 0/2] app/testpmd: external RxQ tests Michael Baum
2022-03-01 20:26 ` [PATCH 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
2022-03-03 12:57   ` Ferruh Yigit
2022-03-07 16:07     ` Michael Baum
2022-03-08  9:40       ` Thomas Monjalon
2022-03-01 20:26 ` [PATCH 2/2] app/testpmd: add test for external RxQ Michael Baum
2022-03-03 13:02   ` Ferruh Yigit
2022-03-07 15:51     ` Michael Baum
2022-06-16 17:10 ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Michael Baum
2022-06-16 17:10   ` [PATCH v2 1/2] app/testpmd: add test for remote PD and CTX Michael Baum
2022-06-16 17:10   ` [PATCH v2 2/2] app/testpmd: add test for external RxQ Michael Baum
2022-06-21  9:27   ` [PATCH v2 0/2] mlx5/testpmd: external RxQ tests Raslan Darawsheh
2022-06-28 14:58   ` [PATCH v3 0/2] net/mlx5: " Michael Baum
2022-06-28 14:58     ` [PATCH v3 1/2] net/mlx5: add test for remote PD and CTX Michael Baum
2022-06-28 14:58     ` [PATCH v3 2/2] net/mlx5: add test for external Rx queue Michael Baum
2022-06-29  9:06     ` [PATCH v3 0/2] net/mlx5: external RxQ tests Raslan Darawsheh

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).