DPDK patches and discussions
From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
	Raslan Darawsheh <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH 14/20] net/mlx5: rearrange device attribute structure
Date: Thu, 27 Jan 2022 17:39:44 +0200	[thread overview]
Message-ID: <20220127153950.812953-15-michaelba@nvidia.com> (raw)
In-Reply-To: <20220127153950.812953-1-michaelba@nvidia.com>

Rearrange the mlx5_os_get_dev_attr() function so that it first executes
all device queries and only then updates the capability fields.
In addition, rename it to mlx5_os_capabilities_prepare() in preparation
for extending it to configure the capabilities it collects.
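
To make the reordering concrete, the Linux flavor now has roughly the
following shape (an illustrative sketch condensed from the diff below,
with most field assignments and #ifdef blocks elided; not the verbatim
driver code):

	int
	mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
	{
		struct ibv_context *ctx = sh->cdev->ctx;
		struct ibv_device_attr_ex attr_ex;
		struct mlx5dv_context dv_attr = { .comp_mask = 0 };
		int err;

		/* Step 1: execute every device query first. */
		err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
		if (err) {
			rte_errno = errno;
			return -rte_errno;
		}
		err = mlx5_glue->dv_query_device(ctx, &dv_attr);
		if (err) {
			rte_errno = errno;
			return -rte_errno;
		}
		/* Step 2: only after both queries succeed, fill the fields. */
		memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
		sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
		sh->dev_cap.flags = dv_attr.flags;
		/* ... remaining capability fields are copied the same way. */
		return 0;
	}

With this ordering, no capability field is written when either query
fails, so sh->dev_cap cannot be left half-filled.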

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c    | 122 +++++++++++++---------------
 drivers/net/mlx5/linux/mlx5_verbs.c |   5 +-
 drivers/net/mlx5/mlx5.c             |   4 +-
 drivers/net/mlx5/mlx5.h             |  56 ++++++-------
 drivers/net/mlx5/mlx5_devx.c        |   2 +-
 drivers/net/mlx5/mlx5_ethdev.c      |   5 +-
 drivers/net/mlx5/mlx5_trigger.c     |   8 +-
 drivers/net/mlx5/mlx5_txq.c         |  18 ++--
 drivers/net/mlx5/windows/mlx5_os.c  |  67 ++++++---------
 9 files changed, 127 insertions(+), 160 deletions(-)
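
For review context, the caller-side change is small; a condensed
before/after view of the mlx5.c and mlx5_verbs.c hunks below
(illustrative fragments only):

	/* Before: attributes were queried into a standalone struct. */
	err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);

	/*
	 * After: the renamed helper fills the capabilities owned by the
	 * shared context, and consumers read the renamed field.
	 */
	err = mlx5_os_capabilities_prepare(sh);
	qp_attr.cap.max_send_wr = RTE_MIN(priv->sh->dev_cap.max_qp_wr, desc);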

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 47b088db83..b6848fc34c 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -131,46 +131,25 @@ mlx5_os_set_nonblock_channel_fd(int fd)
  * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
  * device attributes from the glue out parameter.
  *
- * @param cdev
- *   Pointer to mlx5 device.
- *
- * @param device_attr
- *   Pointer to mlx5 device attributes.
+ * @param sh
+ *   Pointer to shared device context.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
-		     struct mlx5_dev_attr *device_attr)
+mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
 {
 	int err;
-	struct ibv_context *ctx = cdev->ctx;
+	struct ibv_context *ctx = sh->cdev->ctx;
 	struct ibv_device_attr_ex attr_ex;
+	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
 
-	memset(device_attr, 0, sizeof(*device_attr));
 	err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
 	if (err) {
 		rte_errno = errno;
 		return -rte_errno;
 	}
-	device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
-	device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
-	device_attr->max_sge = attr_ex.orig_attr.max_sge;
-	device_attr->max_cq = attr_ex.orig_attr.max_cq;
-	device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
-	device_attr->max_mr = attr_ex.orig_attr.max_mr;
-	device_attr->max_pd = attr_ex.orig_attr.max_pd;
-	device_attr->max_qp = attr_ex.orig_attr.max_qp;
-	device_attr->max_srq = attr_ex.orig_attr.max_srq;
-	device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
-	device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
-	device_attr->max_rwq_indirection_table_size =
-		attr_ex.rss_caps.max_rwq_indirection_table_size;
-	device_attr->max_tso = attr_ex.tso_caps.max_tso;
-	device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
-
-	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
 #ifdef HAVE_IBV_MLX5_MOD_SWP
 	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
 #endif
@@ -185,31 +164,40 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
 		rte_errno = errno;
 		return -rte_errno;
 	}
-
-	device_attr->flags = dv_attr.flags;
-	device_attr->comp_mask = dv_attr.comp_mask;
+	memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+	sh->dev_cap.device_cap_flags_ex = attr_ex.device_cap_flags_ex;
+	sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
+	sh->dev_cap.max_sge = attr_ex.orig_attr.max_sge;
+	sh->dev_cap.max_cq = attr_ex.orig_attr.max_cq;
+	sh->dev_cap.max_qp = attr_ex.orig_attr.max_qp;
+	sh->dev_cap.raw_packet_caps = attr_ex.raw_packet_caps;
+	sh->dev_cap.max_rwq_indirection_table_size =
+		attr_ex.rss_caps.max_rwq_indirection_table_size;
+	sh->dev_cap.max_tso = attr_ex.tso_caps.max_tso;
+	sh->dev_cap.tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
+	strlcpy(sh->dev_cap.fw_ver, attr_ex.orig_attr.fw_ver,
+		sizeof(sh->dev_cap.fw_ver));
+	sh->dev_cap.flags = dv_attr.flags;
+	sh->dev_cap.comp_mask = dv_attr.comp_mask;
 #ifdef HAVE_IBV_MLX5_MOD_SWP
-	device_attr->sw_parsing_offloads =
+	sh->dev_cap.sw_parsing_offloads =
 		dv_attr.sw_parsing_caps.sw_parsing_offloads;
 #endif
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-	device_attr->min_single_stride_log_num_of_bytes =
+	sh->dev_cap.min_single_stride_log_num_of_bytes =
 		dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
-	device_attr->max_single_stride_log_num_of_bytes =
+	sh->dev_cap.max_single_stride_log_num_of_bytes =
 		dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
-	device_attr->min_single_wqe_log_num_of_strides =
+	sh->dev_cap.min_single_wqe_log_num_of_strides =
 		dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
-	device_attr->max_single_wqe_log_num_of_strides =
+	sh->dev_cap.max_single_wqe_log_num_of_strides =
 		dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
-	device_attr->stride_supported_qpts =
+	sh->dev_cap.stride_supported_qpts =
 		dv_attr.striding_rq_caps.supported_qpts;
 #endif
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
+	sh->dev_cap.tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
 #endif
-	strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
-		sizeof(device_attr->fw_ver));
-
 	return 0;
 }
 
@@ -983,8 +971,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
 	 * as all ConnectX-5 devices.
 	 */
-	if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
-		if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+	if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+		if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
 			DRV_LOG(DEBUG, "enhanced MPW is supported");
 			mps = MLX5_MPW_ENHANCED;
 		} else {
@@ -996,41 +984,41 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		mps = MLX5_MPW_DISABLED;
 	}
 #ifdef HAVE_IBV_MLX5_MOD_SWP
-	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
-		swp = sh->device_attr.sw_parsing_offloads;
+	if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+		swp = sh->dev_cap.sw_parsing_offloads;
 	DRV_LOG(DEBUG, "SWP support: %u", swp);
 #endif
 	config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
 		MLX5_SW_PARSING_TSO_CAP);
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+	if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
 		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
-			sh->device_attr.min_single_stride_log_num_of_bytes);
+			sh->dev_cap.min_single_stride_log_num_of_bytes);
 		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
-			sh->device_attr.max_single_stride_log_num_of_bytes);
+			sh->dev_cap.max_single_stride_log_num_of_bytes);
 		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
-			sh->device_attr.min_single_wqe_log_num_of_strides);
+			sh->dev_cap.min_single_wqe_log_num_of_strides);
 		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
-			sh->device_attr.max_single_wqe_log_num_of_strides);
+			sh->dev_cap.max_single_wqe_log_num_of_strides);
 		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
-			sh->device_attr.stride_supported_qpts);
+			sh->dev_cap.stride_supported_qpts);
 		DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
 			config->mprq.log_min_stride_wqe_size);
 		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
 		mprq = 1;
 		config->mprq.log_min_stride_size =
-			sh->device_attr.min_single_stride_log_num_of_bytes;
+			sh->dev_cap.min_single_stride_log_num_of_bytes;
 		config->mprq.log_max_stride_size =
-			sh->device_attr.max_single_stride_log_num_of_bytes;
+			sh->dev_cap.max_single_stride_log_num_of_bytes;
 		config->mprq.log_min_stride_num =
-			sh->device_attr.min_single_wqe_log_num_of_strides;
+			sh->dev_cap.min_single_wqe_log_num_of_strides;
 		config->mprq.log_max_stride_num =
-			sh->device_attr.max_single_wqe_log_num_of_strides;
+			sh->dev_cap.max_single_wqe_log_num_of_strides;
 	}
 #endif
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
-		config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+	if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+		config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
 			     (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
 			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
 			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
@@ -1052,9 +1040,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		"tunnel offloading disabled due to old OFED/rdma-core version");
 #endif
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
-	mpls_en = ((sh->device_attr.tunnel_offloads_caps &
+	mpls_en = ((sh->dev_cap.tunnel_offloads_caps &
 		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
-		   (sh->device_attr.tunnel_offloads_caps &
+		   (sh->dev_cap.tunnel_offloads_caps &
 		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
 	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
 		mpls_en ? "" : "not ");
@@ -1215,7 +1203,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
 			priv->dev_port, priv->domain_id);
 	}
-	config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
+	config->hw_csum = !!(sh->dev_cap.device_cap_flags_ex &
 			    IBV_DEVICE_RAW_IP_CSUM);
 	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
 		(config->hw_csum ? "" : "not "));
@@ -1224,7 +1212,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	DRV_LOG(DEBUG, "counters are not supported");
 #endif
 	config->ind_table_max_size =
-		sh->device_attr.max_rwq_indirection_table_size;
+		sh->dev_cap.max_rwq_indirection_table_size;
 	/*
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
@@ -1233,16 +1221,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
-	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
+	config->hw_vlan_strip = !!(sh->dev_cap.raw_packet_caps &
 				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
 	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
 		(config->hw_vlan_strip ? "" : "not "));
-	config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
+	config->hw_fcs_strip = !!(sh->dev_cap.raw_packet_caps &
 				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
-	hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
+	hw_padding = !!sh->dev_cap.rx_pad_end_addr_align;
 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
-	hw_padding = !!(sh->device_attr.device_cap_flags_ex &
+	hw_padding = !!(sh->dev_cap.device_cap_flags_ex &
 			IBV_DEVICE_PCI_WRITE_END_PADDING);
 #endif
 	if (config->hw_padding && !hw_padding) {
@@ -1251,11 +1239,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	} else if (config->hw_padding) {
 		DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
 	}
-	config->tso = (sh->device_attr.max_tso > 0 &&
-		      (sh->device_attr.tso_supported_qpts &
+	config->tso = (sh->dev_cap.max_tso > 0 &&
+		      (sh->dev_cap.tso_supported_qpts &
 		       (1 << IBV_QPT_RAW_PACKET)));
 	if (config->tso)
-		config->tso_max_payload_sz = sh->device_attr.max_tso;
+		config->tso_max_payload_sz = sh->dev_cap.max_tso;
 	/*
 	 * MPW is disabled by default, while the Enhanced MPW is enabled
 	 * by default.
@@ -1382,7 +1370,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #endif
 	}
 	if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
-	    !(sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
+	    !(sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
 		DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
 		config->cqe_comp = 0;
 	}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 722017efa4..73c44138de 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -872,13 +872,12 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
 	/* CQ to be associated with the receive queue. */
 	qp_attr.recv_cq = txq_ctrl->obj->cq;
 	/* Max number of outstanding WRs. */
-	qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
-				   priv->sh->device_attr.max_qp_wr : desc);
+	qp_attr.cap.max_send_wr = RTE_MIN(priv->sh->dev_cap.max_qp_wr, desc);
 	/*
 	 * Max number of scatter/gather elements in a WR, must be 1 to prevent
 	 * libmlx5 from trying to affect too much memory. TX gather is not
 	 * impacted by the
-	 * device_attr.max_sge limit and will still work properly.
+	 * dev_cap.max_sge limit and will still work properly.
 	 */
 	qp_attr.cap.max_send_sge = 1;
 	qp_attr.qp_type = IBV_QPT_RAW_PACKET,
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index e1fe8f9375..b33dc0e7b4 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1262,9 +1262,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
 	if (spawn->bond_info)
 		sh->bond = *spawn->bond_info;
-	err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
+	err = mlx5_os_capabilities_prepare(sh);
 	if (err) {
-		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
+		DRV_LOG(ERR, "Failed to configure device capabilities.");
 		goto error;
 	}
 	sh->refcnt = 1;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a713e61572..fd6350eee7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -114,32 +114,31 @@ struct mlx5_flow_cb_ctx {
 	void *data2;
 };
 
-/* Device attributes used in mlx5 PMD */
-struct mlx5_dev_attr {
-	uint64_t	device_cap_flags_ex;
-	int		max_qp_wr;
-	int		max_sge;
-	int		max_cq;
-	int		max_qp;
-	int		max_cqe;
-	uint32_t	max_pd;
-	uint32_t	max_mr;
-	uint32_t	max_srq;
-	uint32_t	max_srq_wr;
-	uint32_t	raw_packet_caps;
-	uint32_t	max_rwq_indirection_table_size;
-	uint32_t	max_tso;
-	uint32_t	tso_supported_qpts;
-	uint64_t	flags;
-	uint64_t	comp_mask;
-	uint32_t	sw_parsing_offloads;
-	uint32_t	min_single_stride_log_num_of_bytes;
-	uint32_t	max_single_stride_log_num_of_bytes;
-	uint32_t	min_single_wqe_log_num_of_strides;
-	uint32_t	max_single_wqe_log_num_of_strides;
-	uint32_t	stride_supported_qpts;
-	uint32_t	tunnel_offloads_caps;
-	char		fw_ver[64];
+/* Device capabilities structure which isn't changed in any stage. */
+struct mlx5_dev_cap {
+	uint64_t device_cap_flags_ex;
+	int max_cq; /* Maximum number of supported CQs */
+	int max_qp; /* Maximum number of supported QPs. */
+	int max_qp_wr; /* Maximum number of outstanding WR on any WQ. */
+	int max_sge;
+	/* Maximum number of s/g per WR for SQ & RQ of QP for non RDMA Read
+	 * operations.
+	 */
+	uint32_t raw_packet_caps;
+	uint32_t max_rwq_indirection_table_size;
+	/* Maximum receive WQ indirection table size. */
+	uint32_t max_tso; /* Maximum TCP payload for TSO. */
+	uint32_t tso_supported_qpts;
+	uint64_t flags;
+	uint64_t comp_mask;
+	uint32_t sw_parsing_offloads;
+	uint32_t min_single_stride_log_num_of_bytes;
+	uint32_t max_single_stride_log_num_of_bytes;
+	uint32_t min_single_wqe_log_num_of_strides;
+	uint32_t max_single_wqe_log_num_of_strides;
+	uint32_t stride_supported_qpts;
+	uint32_t tunnel_offloads_caps;
+	char fw_ver[64]; /* Firmware version of this device. */
 };
 
 /** Data associated with devices to spawn. */
@@ -1165,7 +1164,7 @@ struct mlx5_dev_ctx_shared {
 	uint32_t tdn; /* Transport Domain number. */
 	char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
 	char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
-	struct mlx5_dev_attr device_attr; /* Device properties. */
+	struct mlx5_dev_cap dev_cap; /* Device capabilities. */
 	int numa_node; /* Numa node of backing physical device. */
 	/* Packet pacing related structure. */
 	struct mlx5_dev_txpp txpp;
@@ -1792,8 +1791,7 @@ void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
 /* mlx5_os.c */
 
 struct rte_pci_driver;
-int mlx5_os_get_dev_attr(struct mlx5_common_device *dev,
-			 struct mlx5_dev_attr *dev_attr);
+int mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh);
 void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
 int mlx5_os_net_probe(struct mlx5_common_device *cdev);
 void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 97c8925044..553df6424d 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -1305,7 +1305,7 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
 	/* Create Send Queue object with DevX. */
 	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
-			(uint32_t)priv->sh->device_attr.max_qp_wr);
+			(uint32_t)priv->sh->dev_cap.max_qp_wr);
 	log_desc_n = log2above(wqe_n);
 	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
 	if (ret) {
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 06d5acb75f..d970eb6904 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -313,8 +313,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	 * Since we need one CQ per QP, the limit is the minimum number
 	 * between the two values.
 	 */
-	max = RTE_MIN(priv->sh->device_attr.max_cq,
-		      priv->sh->device_attr.max_qp);
+	max = RTE_MIN(priv->sh->dev_cap.max_cq, priv->sh->dev_cap.max_qp);
 	/* max_rx_queues is uint16_t. */
 	max = RTE_MIN(max, (unsigned int)UINT16_MAX);
 	info->max_rx_queues = max;
@@ -516,7 +515,7 @@ int
 mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_attr *attr = &priv->sh->device_attr;
+	struct mlx5_dev_cap *attr = &priv->sh->dev_cap;
 	size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
 
 	if (fw_size < size)
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d128b3e978..cd8c451286 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -215,10 +215,10 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 		/* Should not release Rx queues but return immediately. */
 		return -rte_errno;
 	}
-	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
-		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
-	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
-		dev->data->port_id, priv->sh->device_attr.max_sge);
+	DRV_LOG(DEBUG, "Port %u dev_cap.max_qp_wr is %d.",
+		dev->data->port_id, priv->sh->dev_cap.max_qp_wr);
+	DRV_LOG(DEBUG, "Port %u dev_cap.max_sge is %d.",
+		dev->data->port_id, priv->sh->dev_cap.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
 		struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
 		struct mlx5_rxq_ctrl *rxq_ctrl;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4e0bf7af9c..56e0937ca3 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -714,7 +714,7 @@ txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
 	struct mlx5_priv *priv = txq_ctrl->priv;
 	unsigned int wqe_size;
 
-	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
+	wqe_size = priv->sh->dev_cap.max_qp_wr / desc;
 	if (!wqe_size)
 		return 0;
 	/*
@@ -982,8 +982,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
 			" satisfied (%u) on port %u, try the smaller"
 			" Tx queue size (%d)",
 			txq_ctrl->txq.inlen_mode, max_inline,
-			priv->dev_data->port_id,
-			priv->sh->device_attr.max_qp_wr);
+			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
 		goto error;
 	}
 	if (txq_ctrl->txq.inlen_send > max_inline &&
@@ -994,8 +993,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
 			" satisfied (%u) on port %u, try the smaller"
 			" Tx queue size (%d)",
 			txq_ctrl->txq.inlen_send, max_inline,
-			priv->dev_data->port_id,
-			priv->sh->device_attr.max_qp_wr);
+			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
 		goto error;
 	}
 	if (txq_ctrl->txq.inlen_empw > max_inline &&
@@ -1006,8 +1004,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
 			" satisfied (%u) on port %u, try the smaller"
 			" Tx queue size (%d)",
 			txq_ctrl->txq.inlen_empw, max_inline,
-			priv->dev_data->port_id,
-			priv->sh->device_attr.max_qp_wr);
+			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
 		goto error;
 	}
 	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
@@ -1016,8 +1013,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
 			" satisfied (%u) on port %u, try the smaller"
 			" Tx queue size (%d)",
 			MLX5_MAX_TSO_HEADER, max_inline,
-			priv->dev_data->port_id,
-			priv->sh->device_attr.max_qp_wr);
+			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
 		goto error;
 	}
 	if (txq_ctrl->txq.inlen_send > max_inline) {
@@ -1098,12 +1094,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (txq_adjust_params(tmpl))
 		goto error;
 	if (txq_calc_wqebb_cnt(tmpl) >
-	    priv->sh->device_attr.max_qp_wr) {
+	    priv->sh->dev_cap.max_qp_wr) {
 		DRV_LOG(ERR,
 			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
 			" try smaller queue size",
 			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
-			priv->sh->device_attr.max_qp_wr);
+			priv->sh->dev_cap.max_qp_wr);
 		rte_errno = ENOMEM;
 		goto error;
 	}
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index eaa63ad50f..16fd54091e 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -143,55 +143,42 @@ mlx5_init_once(void)
 }
 
 /**
- * Get mlx5 device attributes.
+ * Get mlx5 device capabilities.
  *
- * @param cdev
- *   Pointer to mlx5 device.
- *
- * @param device_attr
- *   Pointer to mlx5 device attributes.
+ * @param sh
+ *   Pointer to shared device context.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
-		     struct mlx5_dev_attr *device_attr)
+mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
 {
-	struct mlx5_context *mlx5_ctx;
+	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+	struct mlx5_context *mlx5_ctx = sh->cdev->ctx;
 	void *pv_iseg = NULL;
 	u32 cb_iseg = 0;
 
-	if (!cdev || !cdev->ctx) {
-		rte_errno = EINVAL;
-		return -rte_errno;
-	}
-	mlx5_ctx = (struct mlx5_context *)cdev->ctx;
-	memset(device_attr, 0, sizeof(*device_attr));
-	device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
-	device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp;
-	device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz;
-	device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz;
-	device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz;
-	device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd;
-	device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq;
-	device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz;
-	device_attr->max_tso = 1 << cdev->config.hca_attr.max_lso_cap;
-	if (cdev->config.hca_attr.rss_ind_tbl_cap) {
-		device_attr->max_rwq_indirection_table_size =
-			1 << cdev->config.hca_attr.rss_ind_tbl_cap;
-	}
-	device_attr->sw_parsing_offloads =
-		mlx5_get_supported_sw_parsing_offloads(&cdev->config.hca_attr);
-	device_attr->tunnel_offloads_caps =
-		mlx5_get_supported_tunneling_offloads(&cdev->config.hca_attr);
 	pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
 	if (pv_iseg == NULL) {
-		DRV_LOG(ERR, "Failed to get device hca_iseg");
+		DRV_LOG(ERR, "Failed to get device hca_iseg.");
 		rte_errno = errno;
 		return -rte_errno;
 	}
-	snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
+	memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+	sh->dev_cap.max_cq = 1 << hca_attr->log_max_cq;
+	sh->dev_cap.max_qp = 1 << hca_attr->log_max_qp;
+	sh->dev_cap.max_qp_wr = 1 << hca_attr->log_max_qp_sz;
+	sh->dev_cap.max_tso = 1 << hca_attr->max_lso_cap;
+	if (hca_attr->rss_ind_tbl_cap) {
+		sh->dev_cap.max_rwq_indirection_table_size =
+			1 << hca_attr->rss_ind_tbl_cap;
+	}
+	sh->dev_cap.sw_parsing_offloads =
+		mlx5_get_supported_sw_parsing_offloads(hca_attr);
+	sh->dev_cap.tunnel_offloads_caps =
+		mlx5_get_supported_tunneling_offloads(hca_attr);
+	snprintf(sh->dev_cap.fw_ver, 64, "%x.%x.%04x",
 		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
 		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
 		 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
@@ -335,12 +322,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		goto error;
 	}
 	DRV_LOG(DEBUG, "MPW isn't supported");
-	config->swp = sh->device_attr.sw_parsing_offloads &
+	config->swp = sh->dev_cap.sw_parsing_offloads &
 		(MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
 		 MLX5_SW_PARSING_TSO_CAP);
 	config->ind_table_max_size =
-		sh->device_attr.max_rwq_indirection_table_size;
-	config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+		sh->dev_cap.max_rwq_indirection_table_size;
+	config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
 		(MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
 		 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
 		 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
@@ -410,7 +397,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	DRV_LOG(DEBUG, "counters are not supported");
 	config->ind_table_max_size =
-		sh->device_attr.max_rwq_indirection_table_size;
+		sh->dev_cap.max_rwq_indirection_table_size;
 	/*
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
@@ -423,9 +410,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
 		config->hw_padding = 0;
 	}
-	config->tso = (sh->device_attr.max_tso > 0);
+	config->tso = (sh->dev_cap.max_tso > 0);
 	if (config->tso)
-		config->tso_max_payload_sz = sh->device_attr.max_tso;
+		config->tso_max_payload_sz = sh->dev_cap.max_tso;
 	DRV_LOG(DEBUG, "%sMPS is %s.",
 		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
 		config->mps == MLX5_MPW ? "legacy " : "",
-- 
2.25.1


