From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH v2 07/20] net/mlx5: remove HCA attr structure duplication
Date: Mon, 14 Feb 2022 11:34:58 +0200
Message-ID: <20220214093511.1592698-8-michaelba@nvidia.com>
In-Reply-To: <20220214093511.1592698-1-michaelba@nvidia.com>
The HCA attribute structure is a field of the net configuration structure.
It is also a field of the common configuration structure, so the same data
is held twice. This duplication is unnecessary because the net structures
already hold a reference to the common structure.
This patch removes the HCA attribute field from the net configuration
structure and accesses the attributes through the common configuration
structure instead.
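
For illustration only, a minimal standalone sketch of the pattern follows.
The structure and field names below (common_config, net_config, hca_attr)
are simplified stand-ins, not the actual mlx5 definitions: the per-port
structure drops its own copy and reaches the attributes through the shared
common structure it already references.

  #include <stdio.h>

  /* HCA attributes queried once per device (simplified stand-in). */
  struct hca_attr {
          int flow_counters_dump;
          int lro_cap;
  };

  /* Common (per-device) configuration owning the single copy. */
  struct common_config {
          struct hca_attr hca_attr;
  };

  /* Per-port (net) configuration: keeps only a reference to common. */
  struct net_config {
          struct common_config *common;
          /* struct hca_attr hca_attr;  <- duplicated copy removed */
  };

  int main(void)
  {
          struct common_config common = { .hca_attr = { 1, 1 } };
          struct net_config net = { .common = &common };
          /* Access goes through the shared structure, not a local copy. */
          struct hca_attr *hca_attr = &net.common->hca_attr;

          printf("flow_counters_dump=%d lro_cap=%d\n",
                 hca_attr->flow_counters_dump, hca_attr->lro_cap);
          return 0;
  }
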
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 95 ++++++++++++++----------------
drivers/net/mlx5/mlx5.c | 14 +++--
drivers/net/mlx5/mlx5.h | 1 -
drivers/net/mlx5/mlx5_devx.c | 8 ++-
drivers/net/mlx5/mlx5_ethdev.c | 2 +-
drivers/net/mlx5/mlx5_flow.c | 16 ++---
drivers/net/mlx5/mlx5_flow_dv.c | 13 ++--
drivers/net/mlx5/mlx5_flow_flex.c | 4 +-
drivers/net/mlx5/mlx5_flow_meter.c | 4 +-
drivers/net/mlx5/mlx5_rxq.c | 4 +-
drivers/net/mlx5/mlx5_trigger.c | 12 ++--
drivers/net/mlx5/mlx5_txpp.c | 2 +-
drivers/net/mlx5/windows/mlx5_os.c | 25 ++++----
13 files changed, 100 insertions(+), 100 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 191da1bee9..b3ee1f7dc4 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -675,6 +675,7 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
bool fallback;
#ifndef HAVE_IBV_DEVX_ASYNC
@@ -682,16 +683,16 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
#else
fallback = false;
if (!sh->devx || !priv->config.dv_flow_en ||
- !priv->config.hca_attr.flow_counters_dump ||
- !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
+ !hca_attr->flow_counters_dump ||
+ !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
(mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
fallback = true;
#endif
if (fallback)
DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
- priv->config.hca_attr.flow_counters_dump,
- priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
+ hca_attr->flow_counters_dump,
+ hca_attr->flow_counter_bulk_alloc_bitmap);
/* Initialize fallback mode only on the port initializes sh. */
if (sh->refcnt == 1)
sh->cmng.counter_fallback = fallback;
@@ -875,6 +876,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
{
const struct mlx5_switch_info *switch_info = &spawn->info;
struct mlx5_dev_ctx_shared *sh = NULL;
+ struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
struct rte_eth_dev *eth_dev = NULL;
@@ -990,7 +992,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
#endif
#ifdef HAVE_MLX5DV_DR_ESWITCH
- if (!(sh->cdev->config.hca_attr.eswitch_manager && config->dv_flow_en &&
+ if (!(hca_attr->eswitch_manager && config->dv_flow_en &&
(switch_info->representor || switch_info->master)))
config->dv_esw_en = 0;
#else
@@ -1315,14 +1317,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->mps == MLX5_MPW ? "legacy " : "",
config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
if (sh->devx) {
- config->hca_attr = sh->cdev->config.hca_attr;
- sh->steering_format_version =
- config->hca_attr.steering_format_version;
+ sh->steering_format_version = hca_attr->steering_format_version;
/* Check for LRO support. */
- if (config->dest_tir && config->hca_attr.lro_cap &&
+ if (config->dest_tir && hca_attr->lro_cap &&
config->dv_flow_en) {
/* TBD check tunnel lro caps. */
- config->lro.supported = config->hca_attr.lro_cap;
+ config->lro.supported = hca_attr->lro_cap;
DRV_LOG(DEBUG, "Device supports LRO");
/*
* If LRO timeout is not configured by application,
@@ -1330,21 +1330,19 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
*/
if (!config->lro.timeout)
config->lro.timeout =
- config->hca_attr.lro_timer_supported_periods[0];
+ hca_attr->lro_timer_supported_periods[0];
DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
config->lro.timeout);
DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
"required for coalescing is %d bytes",
- config->hca_attr.lro_min_mss_size);
+ hca_attr->lro_min_mss_size);
}
#if defined(HAVE_MLX5DV_DR) && \
(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
- if (config->hca_attr.qos.sup &&
- config->hca_attr.qos.flow_meter_old &&
+ if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
config->dv_flow_en) {
- uint8_t reg_c_mask =
- config->hca_attr.qos.flow_meter_reg_c_ids;
+ uint8_t reg_c_mask = hca_attr->qos.flow_meter_reg_c_ids;
/*
* Meter needs two REG_C's for color match and pre-sfx
* flow match. Here get the REG_C for color match.
@@ -1368,20 +1366,18 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->mtr_color_reg = ffs(reg_c_mask)
- 1 + REG_C_0;
priv->mtr_en = 1;
- priv->mtr_reg_share =
- config->hca_attr.qos.flow_meter;
+ priv->mtr_reg_share = hca_attr->qos.flow_meter;
DRV_LOG(DEBUG, "The REG_C meter uses is %d",
priv->mtr_color_reg);
}
}
- if (config->hca_attr.qos.sup &&
- config->hca_attr.qos.flow_meter_aso_sup) {
+ if (hca_attr->qos.sup && hca_attr->qos.flow_meter_aso_sup) {
uint32_t log_obj_size =
rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
if (log_obj_size >=
- config->hca_attr.qos.log_meter_aso_granularity &&
- log_obj_size <=
- config->hca_attr.qos.log_meter_aso_max_alloc)
+ hca_attr->qos.log_meter_aso_granularity &&
+ log_obj_size <=
+ hca_attr->qos.log_meter_aso_max_alloc)
sh->meter_aso_en = 1;
}
if (priv->mtr_en) {
@@ -1391,12 +1387,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
goto error;
}
}
- if (config->hca_attr.flow.tunnel_header_0_1)
+ if (hca_attr->flow.tunnel_header_0_1)
sh->tunnel_header_0_1 = 1;
#endif
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
- if (config->hca_attr.flow_hit_aso &&
- priv->mtr_color_reg == REG_C_3) {
+ if (hca_attr->flow_hit_aso && priv->mtr_color_reg == REG_C_3) {
sh->flow_hit_aso_en = 1;
err = mlx5_flow_aso_age_mng_init(sh);
if (err) {
@@ -1408,8 +1403,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
#if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
defined(HAVE_MLX5_DR_ACTION_ASO_CT)
- if (config->hca_attr.ct_offload &&
- priv->mtr_color_reg == REG_C_3) {
+ if (hca_attr->ct_offload && priv->mtr_color_reg == REG_C_3) {
err = mlx5_flow_aso_ct_mng_init(sh);
if (err) {
err = -err;
@@ -1420,13 +1414,13 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
- if (config->hca_attr.log_max_ft_sampler_num > 0 &&
+ if (hca_attr->log_max_ft_sampler_num > 0 &&
config->dv_flow_en) {
priv->sampler_en = 1;
DRV_LOG(DEBUG, "Sampler enabled!");
} else {
priv->sampler_en = 0;
- if (!config->hca_attr.log_max_ft_sampler_num)
+ if (!hca_attr->log_max_ft_sampler_num)
DRV_LOG(WARNING,
"No available register for sampler.");
else
@@ -1440,13 +1434,13 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->cqe_comp = 0;
}
if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
- (!sh->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
+ (!sh->devx || !hca_attr->mini_cqe_resp_flow_tag)) {
DRV_LOG(WARNING, "Flow Tag CQE compression"
" format isn't supported.");
config->cqe_comp = 0;
}
if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
- (!sh->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
+ (!sh->devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
DRV_LOG(WARNING, "L3/L4 Header CQE compression"
" format isn't supported.");
config->cqe_comp = 0;
@@ -1455,55 +1449,55 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->cqe_comp ? "" : "not ");
if (config->tx_pp) {
DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
- config->hca_attr.dev_freq_khz);
+ hca_attr->dev_freq_khz);
DRV_LOG(DEBUG, "Packet pacing is %ssupported",
- config->hca_attr.qos.packet_pacing ? "" : "not ");
+ hca_attr->qos.packet_pacing ? "" : "not ");
DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
- config->hca_attr.cross_channel ? "" : "not ");
+ hca_attr->cross_channel ? "" : "not ");
DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
- config->hca_attr.wqe_index_ignore ? "" : "not ");
+ hca_attr->wqe_index_ignore ? "" : "not ");
DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
- config->hca_attr.non_wire_sq ? "" : "not ");
+ hca_attr->non_wire_sq ? "" : "not ");
DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
- config->hca_attr.log_max_static_sq_wq ? "" : "not ",
- config->hca_attr.log_max_static_sq_wq);
+ hca_attr->log_max_static_sq_wq ? "" : "not ",
+ hca_attr->log_max_static_sq_wq);
DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
- config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
+ hca_attr->qos.wqe_rate_pp ? "" : "not ");
if (!sh->devx) {
DRV_LOG(ERR, "DevX is required for packet pacing");
err = ENODEV;
goto error;
}
- if (!config->hca_attr.qos.packet_pacing) {
+ if (!hca_attr->qos.packet_pacing) {
DRV_LOG(ERR, "Packet pacing is not supported");
err = ENODEV;
goto error;
}
- if (!config->hca_attr.cross_channel) {
+ if (!hca_attr->cross_channel) {
DRV_LOG(ERR, "Cross channel operations are"
" required for packet pacing");
err = ENODEV;
goto error;
}
- if (!config->hca_attr.wqe_index_ignore) {
+ if (!hca_attr->wqe_index_ignore) {
DRV_LOG(ERR, "WQE index ignore feature is"
" required for packet pacing");
err = ENODEV;
goto error;
}
- if (!config->hca_attr.non_wire_sq) {
+ if (!hca_attr->non_wire_sq) {
DRV_LOG(ERR, "Non-wire SQ feature is"
" required for packet pacing");
err = ENODEV;
goto error;
}
- if (!config->hca_attr.log_max_static_sq_wq) {
+ if (!hca_attr->log_max_static_sq_wq) {
DRV_LOG(ERR, "Static WQE SQ feature is"
" required for packet pacing");
err = ENODEV;
goto error;
}
- if (!config->hca_attr.qos.wqe_rate_pp) {
+ if (!hca_attr->qos.wqe_rate_pp) {
DRV_LOG(ERR, "WQE rate mode is required"
" for packet pacing");
err = ENODEV;
@@ -1517,7 +1511,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
#endif
}
if (config->std_delay_drop || config->hp_delay_drop) {
- if (!config->hca_attr.rq_delay_drop) {
+ if (!hca_attr->rq_delay_drop) {
config->std_delay_drop = 0;
config->hp_delay_drop = 0;
DRV_LOG(WARNING,
@@ -1528,7 +1522,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
if (sh->devx) {
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
- err = config->hca_attr.access_register_user ?
+ err = hca_attr->access_register_user ?
mlx5_devx_cmd_register_read
(sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
@@ -1542,8 +1536,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->rt_timestamp = 1;
} else {
/* Kernel does not support register reading. */
- if (config->hca_attr.dev_freq_khz ==
- (NS_PER_S / MS_PER_S))
+ if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
config->rt_timestamp = 1;
}
}
@@ -1552,7 +1545,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
* scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
* bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
*/
- if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
+ if (hca_attr->scatter_fcs_w_decap_disable && config->decap_en)
config->hw_fcs_strip = 0;
DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
(config->hw_fcs_strip ? "" : "not "));
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1215e5627..a713391268 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -889,7 +889,7 @@ mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
uint32_t ids[8];
int ret;
- if (!priv->config.hca_attr.parse_graph_flex_node) {
+ if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
DRV_LOG(ERR, "Dynamic flex parser is not supported "
"for device %s.", priv->dev_data->name);
return -ENOTSUP;
@@ -2035,6 +2035,8 @@ void
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
struct mlx5_dev_config *config)
{
+ struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+
if (config->txq_inline_min != MLX5_ARG_UNSET) {
/* Application defines size of inlined data explicitly. */
if (spawn->pci_dev != NULL) {
@@ -2054,9 +2056,9 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
}
goto exit;
}
- if (config->hca_attr.eth_net_offloads) {
+ if (hca_attr->eth_net_offloads) {
/* We have DevX enabled, inline mode queried successfully. */
- switch (config->hca_attr.wqe_inline_mode) {
+ switch (hca_attr->wqe_inline_mode) {
case MLX5_CAP_INLINE_MODE_L2:
/* outer L2 header must be inlined. */
config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
@@ -2065,14 +2067,14 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
/* No inline data are required by NIC. */
config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
config->hw_vlan_insert =
- config->hca_attr.wqe_vlan_insert;
+ hca_attr->wqe_vlan_insert;
DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
goto exit;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
/* inline mode is defined by NIC vport context. */
- if (!config->hca_attr.eth_virt)
+ if (!hca_attr->eth_virt)
break;
- switch (config->hca_attr.vport_inline_mode) {
+ switch (hca_attr->vport_inline_mode) {
case MLX5_INLINE_MODE_NONE:
config->txq_inline_min =
MLX5_INLINE_HSIZE_NONE;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e4b2523eb0..ee485343ff 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -299,7 +299,6 @@ struct mlx5_dev_config {
int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
- struct mlx5_hca_attr hca_attr; /* HCA attributes. */
struct mlx5_lro_config lro; /* LRO configuration. */
};
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 91243f684f..97c8925044 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -419,7 +419,8 @@ mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
tmpl->rxq_ctrl = rxq_ctrl;
attr.hairpin = 1;
- max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+ max_wq_data =
+ priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
/* Jumbo frames > 9KB should be supported, and more packets. */
if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
if (priv->config.log_hp_size > max_wq_data) {
@@ -1117,7 +1118,8 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl->txq_ctrl = txq_ctrl;
attr.hairpin = 1;
attr.tis_lst_sz = 1;
- max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+ max_wq_data =
+ priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
/* Jumbo frames > 9KB should be supported, and more packets. */
if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
if (priv->config.log_hp_size > max_wq_data) {
@@ -1193,7 +1195,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
struct mlx5_devx_create_sq_attr sq_attr = {
.flush_in_error_en = 1,
.allow_multi_pkt_send_wqe = !!priv->config.mps,
- .min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
+ .min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode,
.allow_swp = !!priv->config.swp,
.cqn = txq_obj->cq_obj.cq->id,
.tis_lst_sz = 1,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index dc647d5580..5b0eee3321 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -337,7 +337,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
mlx5_set_default_params(dev, info);
mlx5_set_txlimit_params(dev, info);
- if (priv->config.hca_attr.mem_rq_rmp &&
+ if (priv->sh->cdev->config.hca_attr.mem_rq_rmp &&
priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new)
info->dev_capa |= RTE_ETH_DEV_CAPA_RXQ_SHARE;
info->switch_info.name = dev->data->name;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 179cc3b303..29b4516709 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2906,7 +2906,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
const struct rte_flow_item_geneve *mask = item->mask;
int ret;
uint16_t gbhdr;
- uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
const struct rte_flow_item_geneve nic_mask = {
.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
@@ -2914,7 +2914,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
.protocol = RTE_BE16(UINT16_MAX),
};
- if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 Geneve is not enabled by device"
@@ -2994,10 +2994,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
- struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
uint8_t data_max_supported =
hca_attr->max_geneve_tlv_option_data_len * 4;
- struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_item_geneve *geneve_spec;
const struct rte_flow_item_geneve *geneve_mask;
const struct rte_flow_item_geneve_opt *spec = item->spec;
@@ -3031,11 +3030,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
"Geneve TLV opt class/type/length masks must be full");
/* Check if length is supported */
if ((uint32_t)spec->option_len >
- config->hca_attr.max_geneve_tlv_option_data_len)
+ hca_attr->max_geneve_tlv_option_data_len)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
"Geneve TLV opt length not supported");
- if (config->hca_attr.max_geneve_tlv_options > 1)
+ if (hca_attr->max_geneve_tlv_options > 1)
DRV_LOG(DEBUG,
"max_geneve_tlv_options supports more than 1 option");
/* Check GENEVE item preceding. */
@@ -3090,7 +3089,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
"Data mask is of unsupported size");
}
/* Check GENEVE option is supported in NIC. */
- if (!config->hca_attr.geneve_tlv_opt)
+ if (!hca_attr->geneve_tlv_opt)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
"Geneve TLV opt not supported");
@@ -6249,7 +6248,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
* When reg_c_preserve is set, metadata registers Cx preserve
* their value even through packet duplication.
*/
- add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+ add_tag = (!fdb_tx ||
+ priv->sh->cdev->config.hca_attr.reg_c_preserve);
if (add_tag)
sfx_items = (struct rte_flow_item *)((char *)sfx_actions
+ act_size);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index ef9c66eddf..b0ed9f93a0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2317,7 +2317,7 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
.teid = RTE_BE32(0xffffffff),
};
- if (!priv->config.hca_attr.tunnel_stateless_gtp)
+ if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"GTP support is not enabled");
@@ -2426,6 +2426,7 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
{
int ret;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *last = item->last;
const struct rte_flow_item_ipv4 *mask = item->mask;
@@ -2444,8 +2445,8 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
- priv->config.hca_attr.inner_ipv4_ihl;
+ bool ihl_cap = !tunnel ?
+ attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
if (!ihl_cap)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3384,7 +3385,7 @@ flow_dv_validate_action_decap(struct rte_eth_dev *dev,
{
const struct mlx5_priv *priv = dev->data->dev_private;
- if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
+ if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
!priv->config.decap_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -5753,7 +5754,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
NULL,
"E-Switch must has a dest "
"port for mirroring");
- if (!priv->config.hca_attr.reg_c_preserve &&
+ if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
priv->representor_id != UINT16_MAX)
*fdb_mirror_limit = 1;
}
@@ -6686,7 +6687,7 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
const struct rte_flow_item_integrity *spec = (typeof(spec))
integrity_item->spec;
- if (!priv->config.hca_attr.pkt_integrity_match)
+ if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
integrity_item,
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 9413d4d817..26f0dfa36f 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -910,7 +910,7 @@ mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
* offsets in any order.
*
* Gather all similar fields together, build array of bit intervals
- * in asсending order and try to cover with the smallest set of sample
+ * in ascending order and try to cover with the smallest set of sample
* registers.
*/
memset(&cover, 0, sizeof(cover));
@@ -1153,7 +1153,7 @@ mlx5_flex_translate_conf(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
+ struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
int ret;
ret = mlx5_flex_translate_length(attr, conf, devx, error);
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 4f5de5e422..2310ea6a86 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -155,7 +155,7 @@ mlx5_flow_meter_profile_validate(struct rte_eth_dev *dev,
"Meter profile already exists.");
if (!priv->sh->meter_aso_en) {
/* Old version is even not supported. */
- if (!priv->config.hca_attr.qos.flow_meter_old)
+ if (!priv->sh->cdev->config.hca_attr.qos.flow_meter_old)
return -rte_mtr_error_set(error, ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_PROFILE,
NULL, "Metering is not supported.");
@@ -428,7 +428,7 @@ mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev,
struct rte_mtr_error *error __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hca_qos_attr *qattr = &priv->config.hca_attr.qos;
+ struct mlx5_hca_qos_attr *qattr = &priv->sh->cdev->config.hca_attr.qos;
if (!priv->mtr_en)
return -rte_mtr_error_set(error, ENOTSUP,
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 580d7ae868..0ede46aa43 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -863,7 +863,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
}
if (conf->share_group > 0) {
- if (!priv->config.hca_attr.mem_rq_rmp) {
+ if (!priv->sh->cdev->config.hca_attr.mem_rq_rmp) {
DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
dev->data->port_id, idx);
rte_errno = EINVAL;
@@ -1517,7 +1517,7 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (priv->config.hca_attr.lro_max_msg_sz_mode ==
+ if (priv->sh->cdev->config.hca_attr.lro_max_msg_sz_mode ==
MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
MLX5_MAX_TCP_HDR_OFFSET)
max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 3a59237b1a..0418ce2faf 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -341,14 +341,16 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
sq_attr.state = MLX5_SQC_STATE_RDY;
sq_attr.sq_state = MLX5_SQC_STATE_RST;
sq_attr.hairpin_peer_rq = rq->id;
- sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+ sq_attr.hairpin_peer_vhca =
+ priv->sh->cdev->config.hca_attr.vhca_id;
ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
if (ret)
goto error;
rq_attr.state = MLX5_SQC_STATE_RDY;
rq_attr.rq_state = MLX5_SQC_STATE_RST;
rq_attr.hairpin_peer_sq = sq->id;
- rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+ rq_attr.hairpin_peer_vhca =
+ priv->sh->cdev->config.hca_attr.vhca_id;
ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
if (ret)
goto error;
@@ -425,7 +427,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
return -rte_errno;
}
peer_info->qp_id = txq_ctrl->obj->sq->id;
- peer_info->vhca_id = priv->config.hca_attr.vhca_id;
+ peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
/* 1-to-1 mapping, only the first one is used. */
peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
@@ -455,7 +457,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
return -rte_errno;
}
peer_info->qp_id = rxq_ctrl->obj->rq->id;
- peer_info->vhca_id = priv->config.hca_attr.vhca_id;
+ peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
@@ -817,7 +819,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
/* Pass TxQ's information to peer RxQ and try binding. */
cur.peer_q = rx_queue;
cur.qp_id = txq_ctrl->obj->sq->id;
- cur.vhca_id = priv->config.hca_attr.vhca_id;
+ cur.vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
/*
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index af77e91e4c..1d16ebcb41 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -825,7 +825,7 @@ mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
sh->txpp.test = !!(tx_pp < 0);
sh->txpp.skew = priv->config.tx_skew;
- sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
+ sh->txpp.freq = sh->cdev->config.hca_attr.dev_freq_khz;
ret = mlx5_txpp_create_event_channel(sh);
if (ret)
goto exit;
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 0966da10f4..07a9583cab 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -268,6 +268,7 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
bool fallback;
#ifndef HAVE_IBV_DEVX_ASYNC
@@ -275,16 +276,16 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
#else
fallback = false;
if (!sh->devx || !priv->config.dv_flow_en ||
- !priv->config.hca_attr.flow_counters_dump ||
- !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
+ !hca_attr->flow_counters_dump ||
+ !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
(mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
fallback = true;
#endif
if (fallback)
DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
- priv->config.hca_attr.flow_counters_dump,
- priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
+ hca_attr->flow_counters_dump,
+ hca_attr->flow_counter_bulk_alloc_bitmap);
/* Initialize fallback mode only on the port initializes sh. */
if (sh->refcnt == 1)
sh->cmng.counter_fallback = fallback;
@@ -318,6 +319,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
const struct mlx5_switch_info *switch_info = &spawn->info;
struct mlx5_dev_ctx_shared *sh = NULL;
struct mlx5_dev_attr device_attr;
+ struct mlx5_hca_attr *hca_attr;
struct rte_eth_dev *eth_dev = NULL;
struct mlx5_priv *priv = NULL;
int err = 0;
@@ -475,19 +477,19 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->cqe_comp = 0;
}
if (sh->devx) {
- config->hca_attr = sh->cdev->config.hca_attr;
- config->hw_csum = config->hca_attr.csum_cap;
+ hca_attr = &sh->cdev->config.hca_attr;
+ config->hw_csum = hca_attr->csum_cap;
DRV_LOG(DEBUG, "checksum offloading is %ssupported",
- (config->hw_csum ? "" : "not "));
- config->hw_vlan_strip = config->hca_attr.vlan_cap;
+ (config->hw_csum ? "" : "not "));
+ config->hw_vlan_strip = hca_attr->vlan_cap;
DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
(config->hw_vlan_strip ? "" : "not "));
- config->hw_fcs_strip = config->hca_attr.scatter_fcs;
+ config->hw_fcs_strip = hca_attr->scatter_fcs;
}
if (sh->devx) {
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
- err = config->hca_attr.access_register_user ?
+ err = hca_attr->access_register_user ?
mlx5_devx_cmd_register_read
(sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
@@ -501,8 +503,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->rt_timestamp = 1;
} else {
/* Kernel does not support register reading. */
- if (config->hca_attr.dev_freq_khz ==
- (NS_PER_S / MS_PER_S))
+ if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
config->rt_timestamp = 1;
}
}
--
2.25.1