From: <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
	Thomas Monjalon <thomas@monjalon.net>,
	Michael Baum <michaelba@oss.nvidia.com>
Subject: [dpdk-dev] [PATCH 05/18] net/mlx5/windows: rearrange probing code
Date: Thu, 30 Sep 2021 20:28:09 +0300	[thread overview]
Message-ID: <20210930172822.1949969-6-michaelba@nvidia.com> (raw)
In-Reply-To: <20210930172822.1949969-1-michaelba@nvidia.com>

From: Michael Baum <michaelba@oss.nvidia.com>

Rearrange the device detection code.
Rearrange the filling of the configuration structures.
Remove unneeded variables.
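
The core of the structure-filling change is switching from a zero-memset
followed by per-field assignment to C99 designated initializers, so that
unnamed fields default to zero and intermediate variables can be dropped.
A minimal standalone sketch of that pattern (illustrative only; the struct
and field names below are hypothetical, not the real mlx5 ones):

    #include <string.h>

    /* Hypothetical config type, standing in for struct mlx5_dev_config. */
    struct example_config {
    	int vf;
    	int rx_vec_en;
    	int txq_inline_max;
    };

    static void fill_old_style(struct example_config *cfg, int vf)
    {
    	/* Old style: zero everything, then assign field by field. */
    	memset(cfg, 0, sizeof(*cfg));
    	cfg->vf = vf;
    	cfg->rx_vec_en = 1;
    	cfg->txq_inline_max = -1;
    }

    static void fill_new_style(int vf)
    {
    	/*
    	 * New style: designated initializer; fields not named are
    	 * implicitly zeroed, so no memset and no temporary variable
    	 * for the VF flag are needed.
    	 */
    	struct example_config cfg = {
    		.rx_vec_en = 1,
    		.txq_inline_max = -1,
    	};

    	cfg.vf = vf;
    	(void)cfg;
    }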

Signed-off-by: Michael Baum <michaelba@oss.nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/windows/mlx5_os.c | 224 ++++++++++++-----------------
 1 file changed, 94 insertions(+), 130 deletions(-)

diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index ea2d45bbce..90e21479e3 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -956,9 +956,9 @@ mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
 
 	if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr))
 		return 1;
-	/**
-	 * Didn't match on Native/PF BDF, could still
-	 * Match a VF BDF, check it next
+	/*
+	 * Didn't match on Native/PF BDF, could still match a VF BDF,
+	 * check it next.
 	 */
 	err = mlx5_glue->query_device(devx_bdf, &mlx5_dev);
 	if (err) {
@@ -971,6 +971,52 @@ mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
 	return 0;
 }
 
+/**
+ * Look for a DevX device that matches the given rte_device.
+ *
+ * @param dev
+ *   Pointer to the generic device.
+ * @param orig_devx_list
+ *   Pointer to head of DevX devices list.
+ * @param n
+ *   Number of devices in given DevX devices list.
+ *
+ * @return
+ *   A pointer to the matching device on success, NULL otherwise and rte_errno is set.
+ */
+static struct devx_device_bdf *
+mlx5_os_get_devx_device(struct rte_device *dev,
+			struct devx_device_bdf *orig_devx_list, int n)
+{
+	struct devx_device_bdf *devx_list = orig_devx_list;
+	struct devx_device_bdf *devx_match = NULL;
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
+	struct rte_pci_addr *addr = &pci_dev->addr;
+
+	while (n-- > 0) {
+		int ret = mlx5_match_devx_devices_to_addr(devx_list, addr);
+		if (!ret) {
+			devx_list++;
+			continue;
+		}
+		if (ret != 1) {
+			rte_errno = ret;
+			return NULL;
+		}
+		devx_match = devx_list;
+		break;
+	}
+	if (devx_match == NULL) {
+		/* No device matches, just complain and bail out. */
+		DRV_LOG(WARNING,
+			"No DevX device matches PCI device " PCI_PRI_FMT ","
+			" is DevX Configured?",
+			addr->domain, addr->bus, addr->devid, addr->function);
+		rte_errno = ENOENT;
+	}
+	return devx_match;
+}
+
 /**
  * DPDK callback to register a PCI device.
  *
@@ -986,37 +1032,34 @@ int
 mlx5_os_net_probe(struct mlx5_common_device *cdev)
 {
 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
-	struct devx_device_bdf *devx_bdf_devs, *orig_devx_bdf_devs;
-	/*
-	 * Number of found IB Devices matching with requested PCI BDF.
-	 * nd != 1 means there are multiple IB devices over the same
-	 * PCI device and we have representors and master.
-	 */
-	unsigned int nd = 0;
-	/*
-	 * Number of found IB device Ports. nd = 1 and np = 1..n means
-	 * we have the single multiport IB device, and there may be
-	 * representors attached to some of found ports.
-	 * Currently not supported.
-	 * unsigned int np = 0;
-	 */
-
-	/*
-	 * Number of DPDK ethernet devices to Spawn - either over
-	 * multiple IB devices or multiple ports of single IB device.
-	 * Actually this is the number of iterations to spawn.
-	 */
-	unsigned int ns = 0;
-	/*
-	 * Bonding device
-	 *   < 0 - no bonding device (single one)
-	 *  >= 0 - bonding device (value is slave PF index)
-	 */
-	int bd = -1;
-	struct mlx5_dev_spawn_data *list = NULL;
-	struct mlx5_dev_config dev_config;
-	unsigned int dev_config_vf;
-	int ret, err;
+	struct devx_device_bdf *devx_list;
+	struct devx_device_bdf *devx_bdf_match;
+	struct mlx5_dev_spawn_data spawn = {
+		.pf_bond = -1,
+		.max_port = 1,
+		.phys_port = 1,
+		.pci_dev = pci_dev,
+		.cdev = cdev,
+		.ifindex = -1, /* Spawn will assign */
+		.info = (struct mlx5_switch_info){
+			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
+		},
+	};
+	struct mlx5_dev_config dev_config = {
+		.rx_vec_en = 1,
+		.txq_inline_max = MLX5_ARG_UNSET,
+		.txq_inline_min = MLX5_ARG_UNSET,
+		.txq_inline_mpw = MLX5_ARG_UNSET,
+		.txqs_inline = MLX5_ARG_UNSET,
+		.mprq = {
+			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+		},
+		.dv_flow_en = 1,
+		.log_hp_size = MLX5_ARG_UNSET,
+	};
+	int ret;
+	int n;
 	uint32_t restore;
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
@@ -1030,74 +1073,18 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
 		return -rte_errno;
 	}
 	errno = 0;
-	devx_bdf_devs = mlx5_glue->get_device_list(&ret);
-	orig_devx_bdf_devs = devx_bdf_devs;
-	if (!devx_bdf_devs) {
+	devx_list = mlx5_glue->get_device_list(&n);
+	if (devx_list == NULL) {
 		rte_errno = errno ? errno : ENOSYS;
-		DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
+		DRV_LOG(ERR, "Cannot list devices, is DevX enabled?");
 		return -rte_errno;
 	}
-	/*
-	 * First scan the list of all Infiniband devices to find
-	 * matching ones, gathering into the list.
-	 */
-	struct devx_device_bdf *devx_bdf_match[ret + 1];
-
-	while (ret-- > 0) {
-		err = mlx5_match_devx_devices_to_addr(devx_bdf_devs,
-		    &pci_dev->addr);
-		if (!err) {
-			devx_bdf_devs++;
-			continue;
-		}
-		if (err != 1) {
-			ret = -err;
-			goto exit;
-		}
-		devx_bdf_match[nd++] = devx_bdf_devs;
-	}
-	devx_bdf_match[nd] = NULL;
-	if (!nd) {
-		/* No device matches, just complain and bail out. */
-		DRV_LOG(WARNING,
-			"no DevX device matches PCI device " PCI_PRI_FMT ","
-			" is DevX Configured?",
-			pci_dev->addr.domain, pci_dev->addr.bus,
-			pci_dev->addr.devid, pci_dev->addr.function);
-		rte_errno = ENOENT;
+	devx_bdf_match = mlx5_os_get_devx_device(cdev->dev, devx_list, n);
+	if (devx_bdf_match == NULL) {
 		ret = -rte_errno;
 		goto exit;
 	}
-	/*
-	 * Now we can determine the maximal
-	 * amount of devices to be spawned.
-	 */
-	list = mlx5_malloc(MLX5_MEM_ZERO,
-			   sizeof(struct mlx5_dev_spawn_data),
-			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-	if (!list) {
-		DRV_LOG(ERR, "spawn data array allocation failure");
-		rte_errno = ENOMEM;
-		ret = -rte_errno;
-		goto exit;
-	}
-	memset(&list[ns].info, 0, sizeof(list[ns].info));
-	list[ns].max_port = 1;
-	list[ns].phys_port = 1;
-	list[ns].phys_dev = devx_bdf_match[ns];
-	list[ns].eth_dev = NULL;
-	list[ns].pci_dev = pci_dev;
-	list[ns].cdev = cdev;
-	list[ns].pf_bond = bd;
-	list[ns].ifindex = -1; /* Spawn will assign */
-	list[ns].info =
-		(struct mlx5_switch_info){
-			.master = 0,
-			.representor = 0,
-			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
-			.port_name = 0,
-			.switch_id = 0,
-		};
+	spawn.phys_dev = devx_bdf_match;
 	/* Device specific configuration. */
 	switch (pci_dev->id.device_id) {
 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
@@ -1107,47 +1094,24 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
 	case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
 	case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
-		dev_config_vf = 1;
+		dev_config.vf = 1;
 		break;
 	default:
-		dev_config_vf = 0;
+		dev_config.vf = 0;
 		break;
 	}
-	/* Default configuration. */
-	memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
-	dev_config.vf = dev_config_vf;
-	dev_config.mps = 0;
-	dev_config.rx_vec_en = 1;
-	dev_config.txq_inline_max = MLX5_ARG_UNSET;
-	dev_config.txq_inline_min = MLX5_ARG_UNSET;
-	dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
-	dev_config.txqs_inline = MLX5_ARG_UNSET;
-	dev_config.vf_nl_en = 0;
-	dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
-	dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
-	dev_config.dv_esw_en = 0;
-	dev_config.dv_flow_en = 1;
-	dev_config.decap_en = 0;
-	dev_config.log_hp_size = MLX5_ARG_UNSET;
-	list[ns].eth_dev = mlx5_dev_spawn(cdev->dev, &list[ns], &dev_config);
-	if (!list[ns].eth_dev)
+	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
+	if (!spawn.eth_dev) {
+		ret = -rte_errno;
 		goto exit;
-	restore = list[ns].eth_dev->data->dev_flags;
-	rte_eth_copy_pci_info(list[ns].eth_dev, pci_dev);
+	}
+	restore = spawn.eth_dev->data->dev_flags;
+	rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
 	/* Restore non-PCI flags cleared by the above call. */
-	list[ns].eth_dev->data->dev_flags |= restore;
-	rte_eth_dev_probing_finish(list[ns].eth_dev);
-	ret = 0;
+	spawn.eth_dev->data->dev_flags |= restore;
+	rte_eth_dev_probing_finish(spawn.eth_dev);
 exit:
-	/*
-	 * Do the routine cleanup:
-	 * - free allocated spawn data array
-	 * - free the device list
-	 */
-	if (list)
-		mlx5_free(list);
-	MLX5_ASSERT(orig_devx_bdf_devs);
-	mlx5_glue->free_device_list(orig_devx_bdf_devs);
+	mlx5_glue->free_device_list(devx_list);
 	return ret;
 }
 
-- 
2.25.1

