From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH 18/20] net/mlx5: separate per port configuration
Date: Thu, 27 Jan 2022 17:39:48 +0200
Message-ID: <20220127153950.812953-19-michaelba@nvidia.com>
In-Reply-To: <20220127153950.812953-1-michaelba@nvidia.com>
Add a configuration structure for the port (ethdev). This structure
contains all the configurations coming from devargs that are oriented to
the port. It is a field of the mlx5_priv structure and is updated in the
spawn function for each port.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c   | 121 ++------------------
 drivers/net/mlx5/mlx5.c            | 178 ++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5.h            |  21 ++--
 drivers/net/mlx5/mlx5_devx.c       |   3 +-
 drivers/net/mlx5/mlx5_ethdev.c     |   7 +-
 drivers/net/mlx5/mlx5_rxq.c        |   4 +-
 drivers/net/mlx5/mlx5_tx.c         |   2 +-
 drivers/net/mlx5/mlx5_txq.c        |   6 +-
 drivers/net/mlx5/windows/mlx5_os.c |  55 ++-------
 9 files changed, 188 insertions(+), 209 deletions(-)
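Note (illustration only, not part of the commit): the new
mlx5_port_args_config() in the mlx5.c hunk below follows the usual
rte_kvargs pattern -- defaults first, then the user devargs for this port,
then adjustment against device capabilities before the result is stored in
priv->config. The minimal, self-contained sketch below shows only that
parsing pattern. The struct and function names (port_cfg, cfg_handler,
parse_port_devargs) are hypothetical; only the rte_kvargs/rte_devargs calls
are the real DPDK API, and only two of the port devargs seen in the patch
(rxq_cqe_comp_en, txq_inline_max) are handled, for brevity.

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_kvargs.h>

/* Hypothetical stand-in for mlx5_port_config (illustration only). */
struct port_cfg {
	int cqe_comp;        /* rxq_cqe_comp_en */
	int txq_inline_max;  /* txq_inline_max, -1 means "unset" */
};

/* kvargs handler: called once per key=value pair found in the devargs. */
static int
cfg_handler(const char *key, const char *val, void *opaque)
{
	struct port_cfg *cfg = opaque;

	if (strcmp(key, "rxq_cqe_comp_en") == 0)
		cfg->cqe_comp = atoi(val);
	else if (strcmp(key, "txq_inline_max") == 0)
		cfg->txq_inline_max = atoi(val);
	/* Unknown keys are silently ignored in this sketch. */
	return 0;
}

/* Defaults first, then overlay whatever the user passed for this port. */
static int
parse_port_devargs(struct rte_devargs *devargs, struct port_cfg *cfg)
{
	struct rte_kvargs *kvlist;
	int ret;

	cfg->cqe_comp = 1;
	cfg->txq_inline_max = -1;
	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = rte_kvargs_process(kvlist, NULL, cfg_handler, cfg);
	rte_kvargs_free(kvlist);
	return ret;
}

In the patch itself, mlx5_port_args_config() additionally clamps the parsed
values against priv->sh->dev_cap and the HCA attributes and then calls
mlx5_set_min_inline(), so priv->config is fully resolved per port.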
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index c432cf0858..6979385782 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -999,8 +999,6 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
* Backing DPDK device.
* @param spawn
* Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- * Device configuration parameters.
* @param eth_da
* Device arguments.
*
@@ -1014,12 +1012,10 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config,
struct rte_eth_devargs *eth_da)
{
const struct mlx5_switch_info *switch_info = &spawn->info;
struct mlx5_dev_ctx_shared *sh = NULL;
- struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
struct rte_eth_dev *eth_dev = NULL;
struct mlx5_priv *priv = NULL;
@@ -1029,7 +1025,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
int own_domain_id = 0;
uint16_t port_id;
struct mlx5_port_info vport_info = { .query_flags = 0 };
- int nl_rdma = -1;
+ int nl_rdma;
int i;
/* Determine if this port representor is supposed to be spawned. */
@@ -1107,13 +1103,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_dev_close(eth_dev);
return NULL;
}
- /* Process parameters. */
- err = mlx5_args(config, dpdk_dev->devargs);
- if (err) {
- DRV_LOG(ERR, "failed to process device arguments: %s",
- strerror(rte_errno));
- return NULL;
- }
sh = mlx5_alloc_shared_dev_ctx(spawn);
if (!sh)
return NULL;
@@ -1269,41 +1258,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
priv->dev_port, priv->domain_id);
}
- if (config->hw_padding && !sh->dev_cap.hw_padding) {
- DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
- config->hw_padding = 0;
- } else if (config->hw_padding) {
- DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
- }
- /*
- * MPW is disabled by default, while the Enhanced MPW is enabled
- * by default.
- */
- if (config->mps == MLX5_ARG_UNSET)
- config->mps = (sh->dev_cap.mps == MLX5_MPW_ENHANCED) ?
- MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
- else
- config->mps = config->mps ? sh->dev_cap.mps : MLX5_MPW_DISABLED;
- DRV_LOG(INFO, "%sMPS is %s",
- config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
- config->mps == MLX5_MPW ? "legacy " : "",
- config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
if (sh->cdev->config.devx) {
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+
sh->steering_format_version = hca_attr->steering_format_version;
- /* LRO is supported only when DV flow enabled. */
- if (sh->dev_cap.lro_supported && sh->config.dv_flow_en)
- sh->dev_cap.lro_supported = 0;
- if (sh->dev_cap.lro_supported) {
- /*
- * If LRO timeout is not configured by application,
- * use the minimal supported value.
- */
- if (!config->lro_timeout)
- config->lro_timeout =
- hca_attr->lro_timer_supported_periods[0];
- DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
- config->lro_timeout);
- }
#if defined(HAVE_MLX5DV_DR) && \
(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
@@ -1395,39 +1353,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
#endif
}
- if (config->cqe_comp && !sh->dev_cap.cqe_comp) {
- DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
- config->cqe_comp = 0;
- }
- if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
- (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_flow_tag)) {
- DRV_LOG(WARNING, "Flow Tag CQE compression"
- " format isn't supported.");
- config->cqe_comp = 0;
- }
- if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
- (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
- DRV_LOG(WARNING, "L3/L4 Header CQE compression"
- " format isn't supported.");
- config->cqe_comp = 0;
- }
- DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
- config->cqe_comp ? "" : "not ");
- if (config->std_delay_drop || config->hp_delay_drop) {
- if (!hca_attr->rq_delay_drop) {
- config->std_delay_drop = 0;
- config->hp_delay_drop = 0;
- DRV_LOG(WARNING,
- "dev_port-%u: Rxq delay drop is not supported",
- priv->dev_port);
- }
- }
- if (config->mprq.enabled && !sh->dev_cap.mprq.enabled) {
- DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
- config->mprq.enabled = 0;
+ /* Process parameters and store port configuration on priv structure. */
+ err = mlx5_port_args_config(priv, dpdk_dev->devargs, &priv->config);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "Failed to process port configure: %s",
+ strerror(rte_errno));
+ goto error;
}
- if (config->max_dump_files_num == 0)
- config->max_dump_files_num = 128;
eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
DRV_LOG(ERR, "can not allocate rte ethdev");
@@ -1528,10 +1461,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
* Verbs context returned by ibv_open_device().
*/
mlx5_link_update(eth_dev, 0);
- /* Detect minimal data bytes to inline. */
- mlx5_set_min_inline(spawn, config);
- /* Store device configuration on private structure. */
- priv->config = *config;
for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
icfg[i].release_mem_en = !!sh->config.reclaim_mode;
if (sh->config.reclaim_mode)
@@ -1899,25 +1828,6 @@ mlx5_device_bond_pci_match(const char *ibdev_name,
return pf;
}
-static void
-mlx5_os_config_default(struct mlx5_dev_config *config)
-{
- memset(config, 0, sizeof(*config));
- config->mps = MLX5_ARG_UNSET;
- config->cqe_comp = 1;
- config->rx_vec_en = 1;
- config->txq_inline_max = MLX5_ARG_UNSET;
- config->txq_inline_min = MLX5_ARG_UNSET;
- config->txq_inline_mpw = MLX5_ARG_UNSET;
- config->txqs_inline = MLX5_ARG_UNSET;
- config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
- config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
- config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
- config->log_hp_size = MLX5_ARG_UNSET;
- config->std_delay_drop = 0;
- config->hp_delay_drop = 0;
-}
-
/**
* Register a PCI device within bonding.
*
@@ -1966,7 +1876,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
int bd = -1;
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
struct mlx5_dev_spawn_data *list = NULL;
- struct mlx5_dev_config dev_config;
struct rte_eth_devargs eth_da = *req_eth_da;
struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
struct mlx5_bond_info bond_info;
@@ -2308,10 +2217,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
for (i = 0; i != ns; ++i) {
uint32_t restore;
- /* Default configuration. */
- mlx5_os_config_default(&dev_config);
- list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
- &dev_config, &eth_da);
+ list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], &eth_da);
if (!list[i].eth_dev) {
if (rte_errno != EBUSY && rte_errno != EEXIST)
break;
@@ -2466,7 +2372,6 @@ static int
mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
{
struct rte_eth_devargs eth_da = { .nb_ports = 0 };
- struct mlx5_dev_config config;
struct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };
struct rte_device *dev = cdev->dev;
struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
@@ -2477,8 +2382,6 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
ret = mlx5_os_parse_eth_devargs(dev, &eth_da);
if (ret != 0)
return ret;
- /* Set default config data. */
- mlx5_os_config_default(&config);
/* Init spawn data. */
spawn.max_port = 1;
spawn.phys_port = 1;
@@ -2491,7 +2394,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
spawn.ifindex = ret;
spawn.cdev = cdev;
/* Spawn device. */
- eth_dev = mlx5_dev_spawn(dev, &spawn, &config, &eth_da);
+ eth_dev = mlx5_dev_spawn(dev, &spawn, &eth_da);
if (eth_dev == NULL)
return -rte_errno;
/* Post create. */
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 75ff11c357..b3601155d7 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2101,9 +2101,9 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_args_check(const char *key, const char *val, void *opaque)
+mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
{
- struct mlx5_dev_config *config = opaque;
+ struct mlx5_port_config *config = opaque;
signed long tmp;
/* No-op, port representors are processed in mlx5_dev_spawn(). */
@@ -2197,38 +2197,156 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
}
/**
- * Parse device parameters.
+ * Parse user port parameters and adjust them according to device capabilities.
*
- * @param config
- * Pointer to device configuration structure.
+ * @param priv
+ * Pointer to shared device context.
* @param devargs
* Device arguments structure.
+ * @param config
+ * Pointer to port configuration structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
+mlx5_port_args_config(struct mlx5_priv *priv, struct rte_devargs *devargs,
+ struct mlx5_port_config *config)
{
struct rte_kvargs *kvlist;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+ struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
+ bool devx = priv->sh->cdev->config.devx;
int ret = 0;
- if (devargs == NULL)
- return 0;
- /* Following UGLY cast is done to pass checkpatch. */
- kvlist = rte_kvargs_parse(devargs->args, NULL);
- if (kvlist == NULL) {
- rte_errno = EINVAL;
- return -rte_errno;
+ /* Default configuration. */
+ memset(config, 0, sizeof(*config));
+ config->mps = MLX5_ARG_UNSET;
+ config->cqe_comp = 1;
+ config->rx_vec_en = 1;
+ config->txq_inline_max = MLX5_ARG_UNSET;
+ config->txq_inline_min = MLX5_ARG_UNSET;
+ config->txq_inline_mpw = MLX5_ARG_UNSET;
+ config->txqs_inline = MLX5_ARG_UNSET;
+ config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
+ config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
+ config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+ config->log_hp_size = MLX5_ARG_UNSET;
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ /* Parse device parameters. */
+ if (devargs != NULL) {
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL) {
+ DRV_LOG(ERR,
+ "Failed to parse device arguments.");
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Process parameters. */
+ ret = rte_kvargs_process(kvlist, NULL,
+ mlx5_port_args_check_handler, config);
+ rte_kvargs_free(kvlist);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to process port arguments: %s",
+ strerror(rte_errno));
+ return -rte_errno;
+ }
}
- /* Process parameters. */
- ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
- if (ret) {
- rte_errno = EINVAL;
- ret = -rte_errno;
+ /* Adjust parameters according to device capabilities. */
+ if (config->hw_padding && !dev_cap->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
+ config->hw_padding = 0;
+ } else if (config->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
}
- rte_kvargs_free(kvlist);
- return ret;
+ /*
+ * MPW is disabled by default, while the Enhanced MPW is enabled
+ * by default.
+ */
+ if (config->mps == MLX5_ARG_UNSET)
+ config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
+ MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
+ else
+ config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
+ DRV_LOG(INFO, "%sMPS is %s",
+ config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
+ config->mps == MLX5_MPW ? "legacy " : "",
+ config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ /* LRO is supported only when DV flow enabled. */
+ if (dev_cap->lro_supported && !priv->sh->config.dv_flow_en)
+ dev_cap->lro_supported = 0;
+ if (dev_cap->lro_supported) {
+ /*
+ * If LRO timeout is not configured by application,
+ * use the minimal supported value.
+ */
+ if (!config->lro_timeout)
+ config->lro_timeout =
+ hca_attr->lro_timer_supported_periods[0];
+ DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
+ config->lro_timeout);
+ }
+ if (config->cqe_comp && !dev_cap->cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
+ DRV_LOG(WARNING,
+ "Flow Tag CQE compression format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
+ DRV_LOG(WARNING,
+ "L3/L4 Header CQE compression format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
+ config->cqe_comp ? "" : "not ");
+ if ((config->std_delay_drop || config->hp_delay_drop) &&
+ !dev_cap->rq_delay_drop_en) {
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
+ priv->dev_port);
+ }
+ if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
+ config->mprq.enabled = 0;
+ }
+ if (config->max_dump_files_num == 0)
+ config->max_dump_files_num = 128;
+ /* Detect minimal data bytes to inline. */
+ mlx5_set_min_inline(priv);
+ DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
+ config->hw_vlan_insert ? "" : "not ");
+ DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
+ DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
+ DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
+ DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
+ DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
+ config->std_delay_drop);
+ DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
+ DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
+ config->max_dump_files_num);
+ DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
+ DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
+ config->mprq.log_stride_num);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
+ config->mprq.log_stride_size);
+ DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
+ config->mprq.max_memcpy_len);
+ DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
+ DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
+ DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
+ DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
+ DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
+ DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
+ DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
+ return 0;
}
/**
@@ -2366,21 +2484,19 @@ mlx5_probe_again_args_validate(struct mlx5_common_device *cdev)
* - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
* and none (0 bytes) for other NICs
*
- * @param spawn
- * Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- * Device configuration parameters.
+ * @param priv
+ * Pointer to the private device data structure.
*/
void
-mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config)
+mlx5_set_min_inline(struct mlx5_priv *priv)
{
- struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+ struct mlx5_port_config *config = &priv->config;
if (config->txq_inline_min != MLX5_ARG_UNSET) {
/* Application defines size of inlined data explicitly. */
- if (spawn->pci_dev != NULL) {
- switch (spawn->pci_dev->id.device_id) {
+ if (priv->pci_dev != NULL) {
+ switch (priv->pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
if (config->txq_inline_min <
@@ -2446,7 +2562,7 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
}
}
}
- if (spawn->pci_dev == NULL) {
+ if (priv->pci_dev == NULL) {
config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
goto exit;
}
@@ -2455,7 +2571,7 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
* inline data size with DevX. Try PCI ID
* to determine old NICs.
*/
- switch (spawn->pci_dev->id.device_id) {
+ switch (priv->pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 46fa5131a7..95910aba1b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -243,14 +243,13 @@ struct mlx5_stats_ctrl {
#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
/*
- * Device configuration structure.
- *
- * Merged configuration from:
- *
- * - Device capabilities,
- * - User device parameters disabled features.
+ * Port configuration structure.
+ * User device parameters disabled features.
+ * This structure contains all configurations coming from devargs which
+ * oriented to port. When probing again, devargs doesn't have to be compatible
+ * with primary devargs. It is updated for each port in spawn function.
*/
-struct mlx5_dev_config {
+struct mlx5_port_config {
unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
@@ -1450,7 +1449,7 @@ struct mlx5_priv {
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
- struct mlx5_dev_config config; /* Device configuration. */
+ struct mlx5_port_config config; /* Port configuration. */
/* Context for Verbs allocator. */
int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
@@ -1539,7 +1538,6 @@ void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);
for (port_id = mlx5_eth_find_next(0, dev); \
port_id < RTE_MAX_ETHPORTS; \
port_id = mlx5_eth_find_next(port_id + 1, dev))
-int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
void mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
struct mlx5_hca_attr *hca_attr);
struct mlx5_dev_ctx_shared *
@@ -1548,10 +1546,11 @@ void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
int mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev);
void mlx5_free_table_hash_list(struct mlx5_priv *priv);
int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
-void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config);
+void mlx5_set_min_inline(struct mlx5_priv *priv);
void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
int mlx5_probe_again_args_validate(struct mlx5_common_device *cdev);
+int mlx5_port_args_config(struct mlx5_priv *priv, struct rte_devargs *devargs,
+ struct mlx5_port_config *config);
bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
void mlx5_flow_counter_mode_config(struct rte_eth_dev *dev);
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index de0f3672c1..e57787cfec 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -766,8 +766,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
tir_attr->indirect_table = ind_tbl->rqt->id;
if (dev->data->dev_conf.lpbk_mode)
- tir_attr->self_lb_block =
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
if (lro) {
tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout;
tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index d637dee98d..72bf8ac914 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -266,7 +266,7 @@ static void
mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
unsigned int inlen;
uint16_t nb_max;
@@ -302,7 +302,6 @@ int
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
unsigned int max;
/* FIXME: we should ask the device for these values. */
@@ -321,8 +320,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
- info->rx_seg_capa.multi_pools = !config->mprq.enabled;
- info->rx_seg_capa.offset_allowed = !config->mprq.enabled;
+ info->rx_seg_capa.multi_pools = !priv->config.mprq.enabled;
+ info->rx_seg_capa.offset_allowed = !priv->config.mprq.enabled;
info->rx_seg_capa.offset_align_log2 = 0;
info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
info->rx_queue_offload_capa);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1d1f2556de..2625fa3308 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1562,7 +1562,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
uint32_t *actual_log_stride_size)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
@@ -1681,7 +1681,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index fd2cf20967..fecec7dad7 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -517,7 +517,7 @@ eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
unsigned int diff = 0, olx = 0, i, m;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 3373ee66b4..edbaa50692 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -100,7 +100,7 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
if (dev_cap->hw_csum)
@@ -741,7 +741,7 @@ static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_priv *priv = txq_ctrl->priv;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
unsigned int inlen_send; /* Inline data for ordinary SEND.*/
unsigned int inlen_empw; /* Inline data for enhanced MPW. */
@@ -960,7 +960,7 @@ static int
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_priv *priv = txq_ctrl->priv;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
unsigned int max_inline;
max_inline = txq_calc_inline_max(txq_ctrl);
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 04f9590096..f511f97494 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -284,8 +284,6 @@ mlx5_os_set_nonblock_channel_fd(int fd)
* Backing DPDK device.
* @param spawn
* Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- * Device configuration parameters.
*
* @return
* A valid Ethernet device object on success, NULL otherwise and rte_errno
@@ -295,8 +293,7 @@ mlx5_os_set_nonblock_channel_fd(int fd)
*/
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
- struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config)
+ struct mlx5_dev_spawn_data *spawn)
{
const struct mlx5_switch_info *switch_info = &spawn->info;
struct mlx5_dev_ctx_shared *sh = NULL;
@@ -317,14 +314,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
return NULL;
}
DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
- /* Process parameters. */
- err = mlx5_args(config, dpdk_dev->devargs);
- if (err) {
- err = rte_errno;
- DRV_LOG(ERR, "failed to process device arguments: %s",
- strerror(rte_errno));
- goto error;
- }
sh = mlx5_alloc_shared_dev_ctx(spawn);
if (!sh)
return NULL;
@@ -396,24 +385,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
own_domain_id = 1;
}
- if (config->hw_padding) {
- DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
- config->hw_padding = 0;
- }
- DRV_LOG(DEBUG, "%sMPS is %s.",
- config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
- config->mps == MLX5_MPW ? "legacy " : "",
- config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- if (config->cqe_comp) {
- DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
- config->cqe_comp = 0;
- }
- if (config->mprq.enabled) {
- DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
- config->mprq.enabled = 0;
+ /* Process parameters and store port configuration on priv structure. */
+ err = mlx5_port_args_config(priv, dpdk_dev->devargs, &priv->config);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "Failed to process port configure: %s",
+ strerror(rte_errno));
+ goto error;
}
- if (config->max_dump_files_num == 0)
- config->max_dump_files_num = 128;
eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
DRV_LOG(ERR, "can not allocate rte ethdev");
@@ -508,10 +487,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
* Verbs context returned by ibv_open_device().
*/
mlx5_link_update(eth_dev, 0);
- /* Detect minimal data bytes to inline. */
- mlx5_set_min_inline(spawn, config);
- /* Store device configuration on private structure. */
- priv->config = *config;
for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
icfg[i].release_mem_en = !!sh->config.reclaim_mode;
if (sh->config.reclaim_mode)
@@ -817,18 +792,6 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
},
};
- struct mlx5_dev_config dev_config = {
- .rx_vec_en = 1,
- .txq_inline_max = MLX5_ARG_UNSET,
- .txq_inline_min = MLX5_ARG_UNSET,
- .txq_inline_mpw = MLX5_ARG_UNSET,
- .txqs_inline = MLX5_ARG_UNSET,
- .mprq = {
- .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
- .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
- },
- .log_hp_size = MLX5_ARG_UNSET,
- };
int ret;
uint32_t restore;
@@ -842,7 +805,7 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
strerror(rte_errno));
return -rte_errno;
}
- spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
+ spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn);
if (!spawn.eth_dev)
return -rte_errno;
restore = spawn.eth_dev->data->dev_flags;
--
2.25.1
Thread overview: 43+ messages
2022-01-27 15:39 [PATCH 00/20] mlx5: refactor devargs management Michael Baum
2022-01-27 15:39 ` [PATCH 01/20] net/mlx5: fix wrong check sibling device config mismatch Michael Baum
2022-01-27 15:39 ` [PATCH 02/20] net/mlx5: fix ineffective metadata argument adjustment Michael Baum
2022-01-27 15:39 ` [PATCH 03/20] net/mlx5: fix wrong place of ASO CT object release Michael Baum
2022-01-27 15:39 ` [PATCH 04/20] net/mlx5: fix inconsistency errno update in SH creation Michael Baum
2022-01-27 15:39 ` [PATCH 05/20] net/mlx5: remove declaration duplications Michael Baum
2022-01-27 15:39 ` [PATCH 06/20] net/mlx5: remove checking devargs duplication Michael Baum
2022-01-27 15:39 ` [PATCH 07/20] net/mlx5: remove HCA attr structure duplication Michael Baum
2022-01-27 15:39 ` [PATCH 08/20] net/mlx5: remove DevX flag duplication Michael Baum
2022-01-27 15:39 ` [PATCH 09/20] net/mlx5: remove Verbs query device duplication Michael Baum
2022-01-27 15:39 ` [PATCH 10/20] common/mlx5: share VF checking function Michael Baum
2022-01-27 15:39 ` [PATCH 11/20] net/mlx5: share realtime timestamp configure Michael Baum
2022-01-27 15:39 ` [PATCH 12/20] net/mlx5: share counter config function Michael Baum
2022-01-27 15:39 ` [PATCH 13/20] net/mlx5: add E-switch mode flag Michael Baum
2022-01-27 15:39 ` [PATCH 14/20] net/mlx5: rearrange device attribute structure Michael Baum
2022-01-27 15:39 ` [PATCH 15/20] net/mlx5: concentrate all device configurations Michael Baum
2022-01-27 15:39 ` [PATCH 16/20] net/mlx5: add share device context config structure Michael Baum
2022-01-27 15:39 ` [PATCH 17/20] net/mlx5: using function to detect operation by DevX Michael Baum
2022-01-27 15:39 ` Michael Baum [this message]
2022-01-27 15:39 ` [PATCH 19/20] common/mlx5: add check for common devargs in probing again Michael Baum
2022-01-27 15:39 ` [PATCH 20/20] common/mlx5: refactor devargs management Michael Baum
2022-02-14 9:34 ` [PATCH v2 00/20] mlx5: " Michael Baum
2022-02-14 9:34 ` [PATCH v2 01/20] net/mlx5: fix wrong check sibling device config mismatch Michael Baum
2022-02-14 9:34 ` [PATCH v2 02/20] net/mlx5: fix ineffective metadata argument adjustment Michael Baum
2022-02-14 9:34 ` [PATCH v2 03/20] net/mlx5: fix wrong place of ASO CT object release Michael Baum
2022-02-14 9:34 ` [PATCH v2 04/20] net/mlx5: fix inconsistency errno update in SH creation Michael Baum
2022-02-14 9:34 ` [PATCH v2 05/20] net/mlx5: remove declaration duplications Michael Baum
2022-02-14 9:34 ` [PATCH v2 06/20] net/mlx5: remove checking devargs duplication Michael Baum
2022-02-14 9:34 ` [PATCH v2 07/20] net/mlx5: remove HCA attr structure duplication Michael Baum
2022-02-14 9:34 ` [PATCH v2 08/20] net/mlx5: remove DevX flag duplication Michael Baum
2022-02-14 9:35 ` [PATCH v2 09/20] net/mlx5: remove Verbs query device duplication Michael Baum
2022-02-14 9:35 ` [PATCH v2 10/20] common/mlx5: share VF checking function Michael Baum
2022-02-14 9:35 ` [PATCH v2 11/20] net/mlx5: share realtime timestamp configure Michael Baum
2022-02-14 9:35 ` [PATCH v2 12/20] net/mlx5: share counter config function Michael Baum
2022-02-14 9:35 ` [PATCH v2 13/20] net/mlx5: add E-switch mode flag Michael Baum
2022-02-14 9:35 ` [PATCH v2 14/20] net/mlx5: rearrange device attribute structure Michael Baum
2022-02-14 9:35 ` [PATCH v2 15/20] net/mlx5: concentrate all device configurations Michael Baum
2022-02-14 9:35 ` [PATCH v2 16/20] net/mlx5: add share device context config structure Michael Baum
2022-02-14 9:35 ` [PATCH v2 17/20] net/mlx5: using function to detect operation by DevX Michael Baum
2022-02-14 9:35 ` [PATCH v2 18/20] net/mlx5: separate per port configuration Michael Baum
2022-02-14 9:35 ` [PATCH v2 19/20] common/mlx5: add check for common devargs in probing again Michael Baum
2022-02-14 9:35 ` [PATCH v2 20/20] common/mlx5: refactor devargs management Michael Baum
2022-02-21 8:54 ` [PATCH v2 00/20] mlx5: " Raslan Darawsheh